Compare commits: fix/exec-c ... v0.1.6 (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | d439dbcc29 |  |
.woodpecker/pipeline.yml (new file, 62 lines)
@@ -0,0 +1,62 @@

when:
  - event: push
    branch: main

steps:
  build-go:
    image: python:3.13
    environment:
      WRENN_API_KEY:
        from_secret: wrenn_api_key
    commands:
      - pip install wrenn
      - export GO_VERSION=$$(grep '^go ' go.mod | cut -d' ' -f2)
      - python .woodpecker/scripts/build_go.py
    depends_on: []

  build-rust:
    image: python:3.13
    environment:
      WRENN_API_KEY:
        from_secret: wrenn_api_key
    commands:
      - pip install wrenn
      - python .woodpecker/scripts/build_rust.py
    depends_on: []

  tag-release:
    image: python:3.13
    environment:
      GITEA_TOKEN:
        from_secret: gitea_token
    commands:
      - VERSION=$$(cat VERSION_CP)
      - git config user.name "R3dRum92"
      - git config user.email "tksadik@omukk.dev"
      - git tag "v$${VERSION}"
      - git push "https://tksadik92:$${GITEA_TOKEN}@git.omukk.dev/tksadik92/wrenn-releases.git" "v$${VERSION}"
    depends_on: [build-go, build-rust]

  release-notes:
    image: python:3.13
    environment:
      WRENN_API_KEY:
        from_secret: wrenn_api_key
      GITEA_TOKEN:
        from_secret: gitea_token
      ZHIPU_API_KEY:
        from_secret: zhipu_api_key
    commands:
      - pip install wrenn
      - python .woodpecker/scripts/release_notes.py
    depends_on: [tag-release]

  publish-github:
    image: python:3.13
    environment:
      GITHUB_TOKEN:
        from_secret: github_token
    commands:
      - pip install httpx
      - python .woodpecker/scripts/publish_github.py
    depends_on: [release-notes]
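The build-go step derives the Go toolchain version from go.mod with `grep '^go ' go.mod | cut -d' ' -f2` and exports it for the build script. A minimal Python sketch of the same lookup (hypothetical helper, not part of the repo; it assumes a standard `go N.N.N` directive in go.mod):

```python
# Hypothetical helper mirroring the pipeline's shell one-liner.
from pathlib import Path


def go_version_from_gomod(path: str = "go.mod") -> str | None:
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        if line.startswith("go "):
            # e.g. "go 1.25.8" -> "1.25.8"
            return line.split()[1]
    return None


if __name__ == "__main__":
    print(go_version_from_gomod() or "not found")
```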
.woodpecker/scripts/build_go.py (new file, 136 lines)
@@ -0,0 +1,136 @@

import os
import sys

from wrenn import Capsule, StreamExitEvent, StreamStderrEvent, StreamStdoutEvent
from wrenn._git import GitCommandError

GO_VERSION = os.getenv("GO_VERSION", "1.25.8")
REPO_URL = "https://git.omukk.dev/wrenn/wrenn.git"
REPO_DIR = "/opt/wrenn"
BUILDS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "builds")


def read_remote_version(capsule: Capsule, filename: str) -> str:
    content = capsule.files.read_bytes(f"{REPO_DIR}/{filename}")
    return content.decode("utf-8").strip()


def run(capsule: Capsule, cmd: str, timeout: int = 30) -> int:
    result = capsule.commands.run(cmd, timeout=timeout)
    if result.exit_code != 0:
        print(f"FAIL [{cmd.split()[0]}]: exit={result.exit_code}", file=sys.stderr)
        if result.stderr:
            print(result.stderr.strip(), file=sys.stderr)
        return result.exit_code
    print(f"OK [{cmd.split()[0]}]")
    return 0


def install_go(capsule: Capsule) -> bool:
    tarball = f"go{GO_VERSION}.linux-amd64.tar.gz"
    url = f"https://go.dev/dl/{tarball}"

    if run(capsule, "apt update", timeout=120) != 0:
        return False
    if run(capsule, "apt install -y make build-essential file", timeout=300) != 0:
        return False
    if run(capsule, f"curl -LO {url}", timeout=120) != 0:
        return False
    if run(capsule, f"tar -C /usr/local -xzf {tarball}", timeout=300) != 0:
        return False
    if run(capsule, 'echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.profile') != 0:
        return False
    if run(capsule, "rm -f " + tarball) != 0:
        return False

    result = capsule.commands.run("/usr/local/go/bin/go version")
    print(result.stdout.strip())
    return result.exit_code == 0


def clone_repo(capsule: Capsule) -> bool:
    try:
        capsule.git.clone(REPO_URL, REPO_DIR)
        print("OK [git clone]")
        return True
    except GitCommandError as e:
        print(f"FAIL [git clone]: {e}", file=sys.stderr)
        return False


def build_go(capsule: Capsule) -> bool:
    command = "CGO_ENABLED=1 make build-cp build-agent"
    handle = capsule.commands.run(
        command,
        background=True,
        cwd=REPO_DIR,
        envs={
            "PATH": "/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
        },
    )
    print(f"{command} started (pid={handle.pid}), streaming output...")

    exit_code = 0
    for event in capsule.commands.connect(handle.pid):
        if isinstance(event, StreamStdoutEvent):
            print(event.data, end="")
        elif isinstance(event, StreamStderrEvent):
            print(event.data, end="", file=sys.stderr)
        elif isinstance(event, StreamExitEvent):
            exit_code = event.exit_code

    if exit_code != 0:
        print(f"FAIL [go build]: exit={exit_code}", file=sys.stderr)
        return False
    print("OK [go build]")
    return True


def download_artifacts(capsule: Capsule) -> bool:
    remote_dir = f"{REPO_DIR}/builds"
    entries = capsule.files.list(remote_dir, depth=1)
    files = [e for e in entries if e.type != "directory"]

    if not files:
        print("FAIL [download]: no files found in builds/", file=sys.stderr)
        return False

    local_dir = os.path.normpath(BUILDS_DIR)
    os.makedirs(local_dir, exist_ok=True)
    versions = {
        "wrenn-cp": read_remote_version(capsule, "VERSION_CP"),
        "wrenn-agent": read_remote_version(capsule, "VERSION_AGENT"),
    }

    for entry in files:
        name = entry.name or "unknown"
        remote_path = f"{remote_dir}/{name}"
        local_name = f"{name}-{versions[name]}" if name in versions else name
        local_path = os.path.join(local_dir, local_name)
        print(f"Downloading {name} as {local_name} ({entry.size or '?'} bytes)...")

        with open(local_path, "wb") as f:
            for chunk in capsule.files.download_stream(remote_path):
                f.write(chunk)

        print(f"OK [download {local_name}]")

    return True


def main() -> None:
    with Capsule(wait=True, vcpus=4, memory_mb=4096) as capsule:
        print(f"Capsule: {capsule.capsule_id}")
        if not install_go(capsule):
            sys.exit(1)
        if not clone_repo(capsule):
            sys.exit(1)
        if not build_go(capsule):
            sys.exit(1)
        if not download_artifacts(capsule):
            sys.exit(1)
        print("Done.")


if __name__ == "__main__":
    main()
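Both build scripts repeat the same run-in-background-and-stream loop. A sketch of how it could be factored into a shared helper, using only the wrenn calls already shown above (`commands.run(background=True, ...)`, `commands.connect(pid)`, and the Stream*Event types); this helper is an assumption, not code from the repo:

```python
import sys

from wrenn import Capsule, StreamExitEvent, StreamStderrEvent, StreamStdoutEvent


def stream_command(capsule: Capsule, cmd: str, cwd: str, path: str) -> int:
    """Run `cmd` in the capsule, stream its output, and return the exit code."""
    handle = capsule.commands.run(cmd, background=True, cwd=cwd, envs={"PATH": path})
    print(f"{cmd} started (pid={handle.pid}), streaming output...")

    exit_code = 0
    for event in capsule.commands.connect(handle.pid):
        if isinstance(event, StreamStdoutEvent):
            print(event.data, end="")
        elif isinstance(event, StreamStderrEvent):
            print(event.data, end="", file=sys.stderr)
        elif isinstance(event, StreamExitEvent):
            exit_code = event.exit_code
    return exit_code
```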
.woodpecker/scripts/build_rust.py (new file, 173 lines)
@@ -0,0 +1,173 @@

import os
import sys

from wrenn import Capsule, StreamExitEvent, StreamStderrEvent, StreamStdoutEvent
from wrenn._git import GitCommandError

RUST_VERSION = os.getenv("RUST_VERSION", "1.95.0")
REPO_URL = "https://git.omukk.dev/wrenn/wrenn.git"
REPO_DIR = "/opt/wrenn"
BUILDS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "builds")
RUST_PATH = (
    "/root/.cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
)


def read_envd_version(capsule: Capsule) -> str:
    content = capsule.files.read_bytes(f"{REPO_DIR}/envd-rs/Cargo.toml")
    for line in content.decode("utf-8").splitlines():
        stripped = line.strip()
        if stripped.startswith("version ="):
            return stripped.split("=", 1)[1].strip().strip('"')
    print("FAIL [version]: envd-rs/Cargo.toml has no package version", file=sys.stderr)
    sys.exit(1)


def run(capsule: Capsule, cmd: str, timeout: int = 30, envs={}) -> int:
    result = capsule.commands.run(cmd, timeout=timeout, envs=envs)
    if result.exit_code != 0:
        print(f"FAIL [{cmd.split()[0]}]: exit={result.exit_code}", file=sys.stderr)
        if result.stderr:
            print(result.stderr.strip(), file=sys.stderr)
        return result.exit_code
    print(f"OK [{cmd.split()[0]}]")
    return 0


def install_rust(capsule: Capsule) -> bool:
    if run(capsule, "apt update", timeout=120) != 0:
        return False
    if (
        run(
            capsule,
            "apt install -y make build-essential file curl musl-tools protobuf-compiler",
            timeout=300,
        )
        != 0
    ):
        return False
    if (
        run(
            capsule,
            f"curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain {RUST_VERSION}",
            timeout=300,
        )
        != 0
    ):
        return False
    if (
        run(
            capsule,
            "/root/.cargo/bin/rustup target add x86_64-unknown-linux-musl",
            timeout=120,
        )
        != 0
    ):
        return False

    result = capsule.commands.run("/root/.cargo/bin/rustc --version")
    print(result.stdout.strip())
    return result.exit_code == 0


def clone_repo(capsule: Capsule) -> bool:
    try:
        capsule.git.clone(REPO_URL, REPO_DIR)
        capsule.commands.run(f"cd {REPO_DIR} && git checkout fix/large-operations")
        print("OK [git clone]")
        return True
    except GitCommandError as e:
        print(f"FAIL [git clone]: {e}", file=sys.stderr)
        return False


def build_rust(capsule: Capsule) -> bool:
    if run(capsule, f"mkdir -p {REPO_DIR}/builds") != 0:
        return False

    # result = capsule.commands.run("file --version")
    # print(result.stdout)
    # result = capsule.commands.run(
    #     'git rev-parse --short HEAD 2>/dev/null || echo "unknown"'
    # )
    # commit = result.stdout

    # run(capsule, f"mkdir -p {REPO_DIR}/builds")
    # result = capsule.commands.run("which musl-gcc")
    # print(result.stdout)

    handle = capsule.commands.run(
        "make build-envd",
        background=True,
        cwd=REPO_DIR,
        envs={"PATH": RUST_PATH},
    )
    print(f"rust build started (pid={handle.pid}), streaming output...")

    exit_code = 0
    for event in capsule.commands.connect(handle.pid):
        if isinstance(event, StreamStdoutEvent):
            print(event.data, end="")
        elif isinstance(event, StreamStderrEvent):
            print(event.data, end="", file=sys.stderr)
        elif isinstance(event, StreamExitEvent):
            exit_code = event.exit_code

    if exit_code != 0:
        print(f"FAIL [rust build]: exit={exit_code}", file=sys.stderr)
        return False

    print("OK [rust build]")

    # if (
    #     run(
    #         capsule,
    #         f"cp {REPO_DIR}/envd-rs/target/x86_64-unknown-linux-musl/release/envd {REPO_DIR}/builds/envd",
    #         envs={"BIN_DIR": REPO_DIR},
    #     )
    #     != 0
    # ):
    #     return False

    # result = capsule.commands.run(f"readelf -d {REPO_DIR}/builds/envd 2>&1")
    # print(result.stdout, end="")
    # if result.stderr:
    #     print(result.stderr, end="", file=sys.stderr)
    # result = capsule.commands.run(f"file {REPO_DIR}/builds/envd 2>&1")
    # print(result.stdout)
    return True


def download_artifacts(capsule: Capsule) -> bool:
    version = read_envd_version(capsule)
    remote_path = f"{REPO_DIR}/builds/envd"
    local_dir = os.path.normpath(BUILDS_DIR)
    local_name = f"envd-{version}"
    local_path = os.path.join(local_dir, local_name)
    os.makedirs(local_dir, exist_ok=True)

    print(f"Downloading envd as {local_name}...")
    with open(local_path, "wb") as f:
        for chunk in capsule.files.download_stream(remote_path):
            f.write(chunk)

    print(f"OK [download {local_name}]")
    return True


def main() -> None:
    with Capsule(wait=True, vcpus=4, memory_mb=4096) as capsule:
        print(f"Capsule: {capsule.capsule_id}")
        if not install_rust(capsule):
            sys.exit(1)
        if not clone_repo(capsule):
            sys.exit(1)
        if not build_rust(capsule):
            sys.exit(1)
        if not download_artifacts(capsule):
            sys.exit(1)
        print("Done.")


if __name__ == "__main__":
    main()
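read_envd_version scans Cargo.toml for the first `version =` line. Since this step runs on python:3.13, an alternative sketch (not in the script) could parse the manifest with the stdlib tomllib instead, which keys off the `[package]` table rather than line order:

```python
import tomllib


def envd_version_from_cargo_toml(raw: bytes) -> str:
    # Parse the TOML text and read the package version directly.
    data = tomllib.loads(raw.decode("utf-8"))
    return data["package"]["version"]


# Usage with the bytes already fetched from the capsule (illustration only):
# version = envd_version_from_cargo_toml(
#     capsule.files.read_bytes(f"{REPO_DIR}/envd-rs/Cargo.toml")
# )
```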
.woodpecker/scripts/publish_github.py (new file, 104 lines)
@@ -0,0 +1,104 @@

import os
import sys
from pathlib import Path

import httpx

GITHUB_REPO = "R3dRum92/wrenn-releases"
GITHUB_API = "https://api.github.com"
GITHUB_UPLOADS = "https://uploads.github.com"
BUILDS_DIR = "builds"
VERSION_FILE = "VERSION_CP"
NOTES_FILE = os.path.join(".woodpecker", "release_notes.md")


def main() -> None:
    token = os.environ["GITHUB_TOKEN"]

    with open(VERSION_FILE) as f:
        version = f.read().strip()
    tag = f"v{version}"

    release_notes = ""
    if os.path.exists(NOTES_FILE):
        with open(NOTES_FILE) as f:
            release_notes = f.read()

    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
    }

    client = httpx.Client(headers=headers, timeout=60)

    print(f"Creating GitHub release for {tag}...")
    resp = client.post(
        f"{GITHUB_API}/repos/{GITHUB_REPO}/releases",
        json={
            "tag_name": tag,
            "name": tag,
            "body": release_notes,
            "draft": False,
            "prerelease": False,
        },
    )
    if resp.status_code == 422:
        print(f"WARN [create release]: release for {tag} already exists, skipping")
        data = resp.json()
        errors = data.get("errors", [])
        if errors:
            existing_url = errors[0].get("documentation_url", "")
            print(f" See: {existing_url}")
        client.close()
        return
    if resp.status_code != 201:
        print(f"FAIL [create release]: {resp.status_code} {resp.text}", file=sys.stderr)
        client.close()
        sys.exit(1)

    release_data = resp.json()
    release_id = release_data["id"]
    release_url = release_data.get("html_url", "")
    print(f"OK [create release] id={release_id}")

    builds_path = Path(BUILDS_DIR)
    if not builds_path.exists():
        print(f"No {BUILDS_DIR}/ directory found, skipping asset upload")
        client.close()
        print(f"Release published: {release_url}")
        return

    upload_headers = {
        **headers,
        "Content-Type": "application/octet-stream",
    }

    for artifact in sorted(builds_path.iterdir()):
        if artifact.is_dir():
            continue
        print(f"Uploading {artifact.name}...")

        with open(artifact, "rb") as f:
            data = f.read()

        resp = client.post(
            f"{GITHUB_UPLOADS}/repos/{GITHUB_REPO}/releases/{release_id}/assets",
            params={"name": artifact.name},
            headers=upload_headers,
            content=data,
        )
        if resp.status_code != 201:
            print(
                f"WARN [upload {artifact.name}]: {resp.status_code} {resp.text}",
                file=sys.stderr,
            )
        else:
            print(f"OK [upload {artifact.name}]")

    client.close()
    print(f"Release published: {release_url}")


if __name__ == "__main__":
    main()
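A hedged follow-up sketch, not part of publish_github.py: listing the release's assets through the GitHub REST endpoint `GET /repos/{owner}/{repo}/releases/{release_id}/assets` to confirm each upload landed.

```python
import os

import httpx

GITHUB_REPO = "R3dRum92/wrenn-releases"


def list_release_assets(release_id: int) -> list[str]:
    # Same auth scheme as the publish script; returns the uploaded asset names.
    headers = {
        "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
        "Accept": "application/vnd.github+json",
    }
    resp = httpx.get(
        f"https://api.github.com/repos/{GITHUB_REPO}/releases/{release_id}/assets",
        headers=headers,
        timeout=30,
    )
    resp.raise_for_status()
    return [asset["name"] for asset in resp.json()]
```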
.woodpecker/scripts/release_notes.py (new file, 246 lines)
@@ -0,0 +1,246 @@

import base64
import os
import sys

from wrenn import Capsule

REPO_URL = "https://git.omukk.dev/tksadik92/wrenn-releases.git"
REPO_DIR = "/opt/wrenn-releases"
CAPSULE_OUTPUT = "/tmp/release_notes.md"
LOCAL_OUTPUT = os.path.join(os.path.dirname(__file__), "..", "release_notes.md")

# Default starting configuration
ZHIPU_API_KEY = os.environ.get("ZHIPU_API_KEY", "")
if ZHIPU_API_KEY:
    DEFAULT_MODEL = "zhipuai-coding-plan/glm-5.1"
else:
    DEFAULT_MODEL = "opencode/minimax-m2.5-free"

RELEASE_NOTES_EXAMPLE = """
## What's new
Sandbox HTTP proxying, terminal reliability, and auth robustness improvements.

### Proxy
- Fixed redirect loops for apps served inside sandboxes (Python HTTP server, Jupyter, etc.)
- Proxy traffic no longer interferes with terminal and exec connections
- Services that take a moment to start up inside a sandbox are now retried instead of immediately failing

### Terminal (PTY)
- Terminal input is no longer blocked by slow network conditions — fast typing no longer causes timeouts or disconnects
- Input bursts are coalesced into fewer round trips — lower latency under fast typing

### Authentication
- WebSocket connections now authenticate correctly for both SDK clients (header-based) and browser clients (message-based)

### Bug Fixes
- Fixed crash in envd when a process exits without a PTY
- Fixed goroutine leak on sandbox pause

### Others
- Version bump
""".strip()


def run(capsule: Capsule, cmd: str, cwd: str | None = None, timeout: int = 30) -> int:
    result = capsule.commands.run(cmd, cwd=cwd, timeout=timeout)
    if result.exit_code != 0:
        print(f"FAIL [{cmd.split()[0]}]: exit={result.exit_code}", file=sys.stderr)
        if result.stderr:
            print(result.stderr.strip(), file=sys.stderr)
        return result.exit_code
    print(f"OK [{cmd.split()[0]}]")
    return 0


def get_tags(capsule: Capsule) -> tuple[str, str | None]:
    result = capsule.commands.run(
        f"cd {REPO_DIR} && git tag --sort=-version:refname",
        cwd=REPO_DIR,
        timeout=30,
    )
    if result.exit_code != 0:
        print(f"FAIL [git tag]: {result.stderr}", file=sys.stderr)
        sys.exit(1)
    tags = [t for t in result.stdout.strip().split("\n") if t]
    if not tags:
        print("No tags found", file=sys.stderr)
        sys.exit(1)
    current_tag = tags[0]
    previous_tag = tags[1] if len(tags) > 1 else None
    print(f"Current tag: {current_tag}")
    print(f"Previous tag: {previous_tag}")
    return current_tag, previous_tag


def get_git_context(
    capsule: Capsule, current_tag: str, previous_tag: str | None
) -> tuple[str, str]:
    if previous_tag:
        # FIX: Removed '-n 2' to ensure we grab ALL commits between the two tags
        log_cmd = f"cd {REPO_DIR} && git log {previous_tag}..{current_tag} --pretty=format:'%s (%h)'"
    else:
        # Fall back to a bounded log if this is the very first tag in the repo
        log_cmd = (
            f"cd {REPO_DIR} && git log {current_tag} --pretty=format:'%s (%h)' -n 50"
        )

    log_result = capsule.commands.run(log_cmd, cwd=REPO_DIR, timeout=30)
    if log_result.exit_code != 0:
        print(f"FAIL [git log]: {log_result.stderr}", file=sys.stderr)
        sys.exit(1)

    # git diff natively compares the entire tree state between tags
    if previous_tag:
        diff_cmd = f"cd {REPO_DIR} && git diff {previous_tag}..{current_tag} --stat"
    else:
        diff_cmd = f"cd {REPO_DIR} && git show {current_tag} --stat"

    diff_result = capsule.commands.run(diff_cmd, cwd=REPO_DIR, timeout=30)
    if diff_result.exit_code != 0:
        print(f"FAIL [git diff]: {diff_result.stderr}", file=sys.stderr)
        sys.exit(1)

    return log_result.stdout.strip(), diff_result.stdout.strip()


def generate_release_notes(
    capsule: Capsule,
    current_tag: str,
    git_log: str,
    git_diff: str,
    output_path: str,
    model: str,
) -> None:
    prompt = (
        f"You are writing release notes for version {current_tag} of a software project.\n\n"
        f"Here is what changed between the previous version and this one:\n\n"
        f"Commit messages:\n{git_log}\n\n"
        f"Files and areas that changed:\n{git_diff}\n\n"
        f"Write the release notes in plain, friendly language that any developer can understand "
        f"without deep knowledge of the codebase. Avoid jargon like 'goroutine', 'PTY', 'envd', "
        f"or internal function names — describe what the change means for the user instead. "
        f"Group related changes under headings that reflect what actually changed. "
        f"Only include sections that are relevant to these specific changes. "
        f"Start with a short one-line summary of what this release is about. "
        f"Keep each bullet point to one clear sentence.\n\n"
        f"Here is an example of the style to aim for — not a template to copy:\n\n"
        f"{RELEASE_NOTES_EXAMPLE}\n\n"
        f"You MUST start the document with `## What's New`\n"
        f"The very next line MUST be a single short summary sentence.\n"
        f"Output only the markdown. No intro, no explanation. "
        f"CRITICAL: Do not output any conversational filler, acknowledgments, or thoughts "
        f"like 'Let me look at the changes'. Output absolutely nothing except the final markdown."
    )

    prompt_b64 = base64.b64encode(prompt.encode("utf-8")).decode("utf-8")

    write_prompt_cmd = f"echo '{prompt_b64}' | base64 -d > /tmp/oc_prompt.txt"

    result = capsule.commands.run(
        write_prompt_cmd,
        cwd=REPO_DIR,
        timeout=10,
    )
    if result.exit_code != 0:
        print(f"FAIL [write prompt]: {result.stderr}", file=sys.stderr)
        sys.exit(1)

    # FIX: Wrapper function to handle execution and authentication dynamically
    def run_opencode_with_model(target_model: str) -> int:
        env = ""
        if "zhipu" in target_model.lower():
            env = f"ZHIPU_API_KEY={os.environ.get('ZHIPU_API_KEY', '')}"

        cmd = (
            f"{env} "
            f"~/.opencode/bin/opencode run "
            f'"Read the attached file and generate the release notes. Output ONLY markdown." '
            f"--model {target_model} "
            f"--file /tmp/oc_prompt.txt "
            f"> {output_path}"
        )

        cmd_result = capsule.commands.run(cmd, cwd=REPO_DIR, timeout=120)

        if cmd_result.exit_code != 0:
            print(
                f"FAIL [opencode via {target_model}]: exit={cmd_result.exit_code}",
                file=sys.stderr,
            )
            print(f"STDOUT:\n{cmd_result.stdout}", file=sys.stderr)
            print(f"STDERR:\n{cmd_result.stderr}", file=sys.stderr)

        return cmd_result.exit_code

    # First attempt with the target model
    exit_status = run_opencode_with_model(model)

    # FIX: Catch failures (like Zhipu rate limits) and fall back to MiniMax
    if exit_status != 0:
        if "zhipu" in model.lower():
            print(
                "\n[!] Zhipu AI failed (likely rate-limited). Falling back to MiniMax...",
                file=sys.stderr,
            )
            fallback_model = "opencode/minimax-m2.5-free"
            exit_status = run_opencode_with_model(fallback_model)
            if exit_status != 0:
                print("FAIL: Fallback model also failed. Exiting.", file=sys.stderr)
                sys.exit(1)
        else:
            sys.exit(1)

    result = capsule.commands.run(f"cat {output_path}")
    print(result.stdout)
    if result.stderr:
        print(result.stderr)

    print(f"OK [opencode] release notes written to {output_path}")


def download_release_notes(capsule: Capsule) -> None:
    local_path = os.path.normpath(LOCAL_OUTPUT)
    os.makedirs(os.path.dirname(local_path), exist_ok=True)

    print(f"Downloading release notes from capsule...")
    content = capsule.files.read_bytes(CAPSULE_OUTPUT)
    with open(local_path, "wb") as f:
        f.write(content)

    print(f"OK [download] release notes → {local_path}")
    print(content.decode("utf-8", errors="replace"))


def main() -> None:
    model = os.environ.get("OPENCODE_MODEL", DEFAULT_MODEL)

    with Capsule(template="opencode", wait=True, vcpus=2, memory_mb=2048) as capsule:
        print(f"Capsule: {capsule.capsule_id}")

        capsule.git.clone(
            REPO_URL,
            REPO_DIR,
            username="tksadik92",
        )
        print("OK [git clone]")

        current_tag, previous_tag = get_tags(capsule)
        git_log, git_diff = get_git_context(capsule, current_tag, previous_tag)

        # Normalize the capsule-side output path
        output_path = os.path.normpath(CAPSULE_OUTPUT)

        generate_release_notes(
            capsule,
            current_tag,
            git_log,
            git_diff,
            output_path,
            model,
        )

        download_release_notes(capsule)


if __name__ == "__main__":
    main()
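The prompt is handed to the capsule as base64 so that quotes and newlines in the prompt never have to survive shell quoting; `base64 -d` inside the capsule restores it byte-for-byte. A minimal local sketch of that round trip (illustration only, no capsule involved):

```python
import base64

prompt = "Release notes for v0.1.6\nIt's got 'quotes' and newlines."
encoded = base64.b64encode(prompt.encode("utf-8")).decode("utf-8")
command = f"echo '{encoded}' | base64 -d > /tmp/oc_prompt.txt"

# The encoded payload is plain [A-Za-z0-9+/=], so it is safe inside single quotes.
assert base64.b64decode(encoded).decode("utf-8") == prompt
print(command)
```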
@@ -116,7 +116,7 @@ Runs as PID 1 inside the microVM via `wrenn-init.sh` (mounts procfs/sysfs/dev, s
 **Directory:** `frontend/` — standalone SvelteKit app (Svelte 5, runes mode)

 - **Stack**: SvelteKit + `adapter-static` + Tailwind CSS v4 + Bits UI (headless accessible components)
-- **Package manager**: Bun
+- **Package manager**: pnpm
 - **Routing**: SvelteKit file-based routing under `frontend/src/routes/`
 - **Routing layout**: `/login` and `/signup` at root, authenticated pages under `/dashboard/*` (e.g. `/dashboard/capsules`, `/dashboard/keys`)
 - **Build output**: `frontend/build/` — static files served by Caddy
Makefile (4 changed lines)
@@ -16,7 +16,7 @@ LDFLAGS := -s -w
 build: build-cp build-agent build-envd

 build-frontend:
-	cd frontend && bun install --frozen-lockfile && bun run build
+	cd frontend && pnpm install --frozen-lockfile && pnpm build

 build-cp:
 	go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_CP) -X main.commit=$(COMMIT)" -o $(BIN_DIR)/wrenn-cp ./cmd/control-plane
@@ -59,7 +59,7 @@ dev-agent:
 	sudo go run ./cmd/host-agent

 dev-frontend:
-	cd frontend && bun run dev --port 5173 --host 0.0.0.0
+	cd frontend && pnpm dev --port 5173 --host 0.0.0.0

 dev-envd:
 	cd envd-rs && cargo run -- --isnotfc --port 49983
@@ -9,7 +9,7 @@ Secure infrastructure for AI
 - PostgreSQL
 - Go 1.25+
 - Rust 1.88+ with `x86_64-unknown-linux-musl` target (`rustup target add x86_64-unknown-linux-musl`)
-- Bun (for frontend)
+- pnpm (for frontend)
 - Docker (for dev infra and rootfs builds)

 ## Build
@@ -1 +1 @@
-0.2.0
+0.1.3

@@ -1 +1 @@
-0.2.0
+0.1.6
@@ -147,11 +147,6 @@ func main() {

 	mgr := sandbox.New(cfg)

-	// Set up lifecycle event callback sender so autonomous events
-	// (auto-pause, auto-destroy) are pushed to the CP proactively.
-	cb := hostagent.NewCallbackSender(cpURL, credsFile, creds.HostID)
-	mgr.SetEventSender(hostagent.NewEventSender(cb))
-
 	mgr.StartTTLReaper(ctx)

 	// httpServer is declared here so the shutdown func can reference it.
@@ -231,9 +226,8 @@ func main() {
 		func() {
 			doShutdown("host deleted from CP")
 		},
-		// onCredsRefreshed: hot-swap the TLS certificate and update callback JWT.
+		// onCredsRefreshed: hot-swap the TLS certificate after a JWT refresh.
 		func(tf *hostagent.TokenFile) {
-			cb.UpdateJWT(tf.JWT)
 			if tf.CertPEM == "" || tf.KeyPEM == "" {
 				return
 			}
@@ -72,7 +72,7 @@ ORDER BY created_at DESC;
 UPDATE sandboxes
 SET status = 'missing',
     last_updated = NOW()
-WHERE host_id = $1 AND status IN ('running', 'starting', 'pending', 'pausing', 'resuming', 'stopping');
+WHERE host_id = $1 AND status IN ('running', 'starting', 'pending');

 -- name: UpdateSandboxMetadata :exec
 UPDATE sandboxes
@@ -80,30 +80,6 @@ SET metadata = $2,
     last_updated = NOW()
 WHERE id = $1;

--- name: UpdateSandboxRunningIf :one
--- Conditionally transition a sandbox to running only if the current status
--- matches the expected value. Prevents races where a user destroys a sandbox
--- while the create/resume goroutine is still in-flight.
-UPDATE sandboxes
-SET status = 'running',
-    host_ip = $3,
-    guest_ip = $4,
-    started_at = $5,
-    last_active_at = $5,
-    last_updated = NOW()
-WHERE id = $1 AND status = $2
-RETURNING *;
-
--- name: UpdateSandboxStatusIf :one
--- Atomically update status only when the current status matches the expected value.
--- Prevents background goroutines from overwriting a status that has since changed
--- (e.g. user destroyed a sandbox while Create was in-flight).
-UPDATE sandboxes
-SET status = $3,
-    last_updated = NOW()
-WHERE id = $1 AND status = $2
-RETURNING *;
-
 -- name: BulkRestoreRunning :exec
 -- Called by the reconciler when a host comes back online and its sandboxes are
 -- confirmed alive. Restores only sandboxes that are in 'missing' state.
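The removed queries above describe a compare-and-set status transition: the UPDATE only applies when the row still has the status the caller expects. An illustrative sketch of that pattern using stdlib sqlite3 (not the project's Go/sqlc code; the schema here is made up for the example):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sandboxes (id TEXT PRIMARY KEY, status TEXT)")
conn.execute("INSERT INTO sandboxes VALUES ('sbx-1', 'starting')")

# Simulate a user destroying the sandbox while a create routine is still in flight.
conn.execute("UPDATE sandboxes SET status = 'destroyed' WHERE id = 'sbx-1'")

# The late "mark running" write is a no-op because the expected status no longer matches.
cur = conn.execute(
    "UPDATE sandboxes SET status = 'running' WHERE id = ? AND status = ?",
    ("sbx-1", "starting"),
)
print(cur.rowcount)  # 0 -> transition rejected, 'destroyed' is preserved
```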
@@ -1,379 +0,0 @@
{
  "lockfileVersion": 1,
  "configVersion": 1,
  "workspaces": {
    "": {
      "name": "frontend",
      "dependencies": {
        "@xterm/addon-fit": "^0.11.0",
        "@xterm/addon-web-links": "^0.12.0",
        "@xterm/xterm": "^6.0.0",
        "chart.js": "^4.5.1",
        "shiki": "^4.0.2",
      },
      "devDependencies": {
        "@fontsource-variable/jetbrains-mono": "^5.2.8",
        "@fontsource-variable/manrope": "^5.2.8",
        "@fontsource/alice": "^5.2.8",
        "@fontsource/instrument-serif": "^5.2.8",
        "@sveltejs/adapter-static": "^3.0.10",
        "@sveltejs/kit": "^2.50.2",
        "@sveltejs/vite-plugin-svelte": "^7.0.0",
        "@tailwindcss/vite": "^4.2.1",
        "bits-ui": "^2.16.3",
        "svelte": "^5.51.0",
        "svelte-check": "^4.4.2",
        "tailwindcss": "^4.2.1",
        "typescript": "^6.0.2",
        "vite": "^8.0.8",
      },
    },
  },
  "packages": {
"@emnapi/core": ["@emnapi/core@1.9.2", "", { "dependencies": { "@emnapi/wasi-threads": "1.2.1", "tslib": "2.8.1" } }, "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA=="],
|
||||
|
||||
"@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],
|
||||
|
||||
"@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.2.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w=="],
|
||||
|
||||
"@floating-ui/core": ["@floating-ui/core@1.7.5", "", { "dependencies": { "@floating-ui/utils": "0.2.11" } }, "sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ=="],
|
||||
|
||||
"@floating-ui/dom": ["@floating-ui/dom@1.7.6", "", { "dependencies": { "@floating-ui/core": "1.7.5", "@floating-ui/utils": "0.2.11" } }, "sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ=="],
|
||||
|
||||
"@floating-ui/utils": ["@floating-ui/utils@0.2.11", "", {}, "sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg=="],
|
||||
|
||||
"@fontsource-variable/jetbrains-mono": ["@fontsource-variable/jetbrains-mono@5.2.8", "", {}, "sha512-WBA9elru6Jdp5df2mES55wuOO0WIrn3kpXnI4+W2ek5u3ZgLS9XS4gmIlcQhiZOWEKl95meYdvK7xI+ETLCq/Q=="],
|
||||
|
||||
"@fontsource-variable/manrope": ["@fontsource-variable/manrope@5.2.8", "", {}, "sha512-nc9lOuCRz73UHnovDE2bwXUdghE2SEOc7Aii0qGe3CLyE03W1a7VnY5Z6euRiapiKbCkGS+eXbY3s/kvWeGeSw=="],
|
||||
|
||||
"@fontsource/alice": ["@fontsource/alice@5.2.8", "", {}, "sha512-EDpK9aFXsaRKdyZpgFu8d5+zmE07yIaFxqVeKrYQJjdQpEhWDZA+naLflHwQQmMbLMJK3a4X/RAm5MCScT93NA=="],
|
||||
|
||||
"@fontsource/instrument-serif": ["@fontsource/instrument-serif@5.2.8", "", {}, "sha512-s+bkz+syj2rO00Rmq9g0P+PwuLig33DR1xDR8pTWmovH1pUjwnncrFk++q9mmOex8fUQ7oW80gPpPDaw7V1MMw=="],
|
||||
|
||||
"@internationalized/date": ["@internationalized/date@3.12.0", "", { "dependencies": { "@swc/helpers": "0.5.21" } }, "sha512-/PyIMzK29jtXaGU23qTvNZxvBXRtKbNnGDFD+PY6CZw/Y8Ex8pFUzkuCJCG9aOqmShjqhS9mPqP6Dk5onQY8rQ=="],
|
||||
|
||||
"@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "1.5.5", "@jridgewell/trace-mapping": "0.3.31" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="],
|
||||
|
||||
"@jridgewell/remapping": ["@jridgewell/remapping@2.3.5", "", { "dependencies": { "@jridgewell/gen-mapping": "0.3.13", "@jridgewell/trace-mapping": "0.3.31" } }, "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ=="],
|
||||
|
||||
"@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="],
|
||||
|
||||
"@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="],
|
||||
|
||||
"@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "3.1.2", "@jridgewell/sourcemap-codec": "1.5.5" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="],
|
||||
|
||||
"@kurkle/color": ["@kurkle/color@0.3.4", "", {}, "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w=="],
|
||||
|
||||
"@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.3", "", { "dependencies": { "@tybys/wasm-util": "0.10.1" }, "peerDependencies": { "@emnapi/core": "1.9.2", "@emnapi/runtime": "1.9.2" } }, "sha512-xK9sGVbJWYb08+mTJt3/YV24WxvxpXcXtP6B172paPZ+Ts69Re9dAr7lKwJoeIx8OoeuimEiRZ7umkiUVClmmQ=="],
|
||||
|
||||
"@oxc-project/types": ["@oxc-project/types@0.124.0", "", {}, "sha512-VBFWMTBvHxS11Z5Lvlr3IWgrwhMTXV+Md+EQF0Xf60+wAdsGFTBx7X7K/hP4pi8N7dcm1RvcHwDxZ16Qx8keUg=="],
|
||||
|
||||
"@polka/url": ["@polka/url@1.0.0-next.29", "", {}, "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww=="],
|
||||
|
||||
"@rolldown/binding-android-arm64": ["@rolldown/binding-android-arm64@1.0.0-rc.15", "", { "os": "android", "cpu": "arm64" }, "sha512-YYe6aWruPZDtHNpwu7+qAHEMbQ/yRl6atqb/AhznLTnD3UY99Q1jE7ihLSahNWkF4EqRPVC4SiR4O0UkLK02tA=="],
|
||||
|
||||
"@rolldown/binding-darwin-arm64": ["@rolldown/binding-darwin-arm64@1.0.0-rc.15", "", { "os": "darwin", "cpu": "arm64" }, "sha512-oArR/ig8wNTPYsXL+Mzhs0oxhxfuHRfG7Ikw7jXsw8mYOtk71W0OkF2VEVh699pdmzjPQsTjlD1JIOoHkLP1Fg=="],
|
||||
|
||||
"@rolldown/binding-darwin-x64": ["@rolldown/binding-darwin-x64@1.0.0-rc.15", "", { "os": "darwin", "cpu": "x64" }, "sha512-YzeVqOqjPYvUbJSWJ4EDL8ahbmsIXQpgL3JVipmN+MX0XnXMeWomLN3Fb+nwCmP/jfyqte5I3XRSm7OfQrbyxw=="],
|
||||
|
||||
"@rolldown/binding-freebsd-x64": ["@rolldown/binding-freebsd-x64@1.0.0-rc.15", "", { "os": "freebsd", "cpu": "x64" }, "sha512-9Erhx956jeQ0nNTyif1+QWAXDRD38ZNjr//bSHrt6wDwB+QkAfl2q6Mn1k6OBPerznjRmbM10lgRb1Pli4xZPw=="],
|
||||
|
||||
"@rolldown/binding-linux-arm-gnueabihf": ["@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.15", "", { "os": "linux", "cpu": "arm" }, "sha512-cVwk0w8QbZJGTnP/AHQBs5yNwmpgGYStL88t4UIaqcvYJWBfS0s3oqVLZPwsPU6M0zlW4GqjP0Zq5MnAGwFeGA=="],
|
||||
|
||||
"@rolldown/binding-linux-arm64-gnu": ["@rolldown/binding-linux-arm64-gnu@1.0.0-rc.15", "", { "os": "linux", "cpu": "arm64" }, "sha512-eBZ/u8iAK9SoHGanqe/jrPnY0JvBN6iXbVOsbO38mbz+ZJsaobExAm1Iu+rxa4S1l2FjG0qEZn4Rc6X8n+9M+w=="],
|
||||
|
||||
"@rolldown/binding-linux-arm64-musl": ["@rolldown/binding-linux-arm64-musl@1.0.0-rc.15", "", { "os": "linux", "cpu": "arm64" }, "sha512-ZvRYMGrAklV9PEkgt4LQM6MjQX2P58HPAuecwYObY2DhS2t35R0I810bKi0wmaYORt6m/2Sm+Z+nFgb0WhXNcQ=="],
|
||||
|
||||
"@rolldown/binding-linux-ppc64-gnu": ["@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.15", "", { "os": "linux", "cpu": "ppc64" }, "sha512-VDpgGBzgfg5hLg+uBpCLoFG5kVvEyafmfxGUV0UHLcL5irxAK7PKNeC2MwClgk6ZAiNhmo9FLhRYgvMmedLtnQ=="],
|
||||
|
||||
"@rolldown/binding-linux-s390x-gnu": ["@rolldown/binding-linux-s390x-gnu@1.0.0-rc.15", "", { "os": "linux", "cpu": "s390x" }, "sha512-y1uXY3qQWCzcPgRJATPSOUP4tCemh4uBdY7e3EZbVwCJTY3gLJWnQABgeUetvED+bt1FQ01OeZwvhLS2bpNrAQ=="],
|
||||
|
||||
"@rolldown/binding-linux-x64-gnu": ["@rolldown/binding-linux-x64-gnu@1.0.0-rc.15", "", { "os": "linux", "cpu": "x64" }, "sha512-023bTPBod7J3Y/4fzAN6QtpkSABR0rigtrwaP+qSEabUh5zf6ELr9Nc7GujaROuPY3uwdSIXWrvhn1KxOvurWA=="],
|
||||
|
||||
"@rolldown/binding-linux-x64-musl": ["@rolldown/binding-linux-x64-musl@1.0.0-rc.15", "", { "os": "linux", "cpu": "x64" }, "sha512-witB2O0/hU4CgfOOKUoeFgQ4GktPi1eEbAhaLAIpgD6+ZnhcPkUtPsoKKHRzmOoWPZue46IThdSgdo4XneOLYw=="],
|
||||
|
||||
"@rolldown/binding-openharmony-arm64": ["@rolldown/binding-openharmony-arm64@1.0.0-rc.15", "", { "os": "none", "cpu": "arm64" }, "sha512-UCL68NJ0Ud5zRipXZE9dF5PmirzJE4E4BCIOOssEnM7wLDsxjc6Qb0sGDxTNRTP53I6MZpygyCpY8Aa8sPfKPg=="],
|
||||
|
||||
"@rolldown/binding-wasm32-wasi": ["@rolldown/binding-wasm32-wasi@1.0.0-rc.15", "", { "dependencies": { "@emnapi/core": "1.9.2", "@emnapi/runtime": "1.9.2", "@napi-rs/wasm-runtime": "1.1.3" }, "cpu": "none" }, "sha512-ApLruZq/ig+nhaE7OJm4lDjayUnOHVUa77zGeqnqZ9pn0ovdVbbNPerVibLXDmWeUZXjIYIT8V3xkT58Rm9u5Q=="],
|
||||
|
||||
"@rolldown/binding-win32-arm64-msvc": ["@rolldown/binding-win32-arm64-msvc@1.0.0-rc.15", "", { "os": "win32", "cpu": "arm64" }, "sha512-KmoUoU7HnN+Si5YWJigfTws1jz1bKBYDQKdbLspz0UaqjjFkddHsqorgiW1mxcAj88lYUE6NC/zJNwT+SloqtA=="],
|
||||
|
||||
"@rolldown/binding-win32-x64-msvc": ["@rolldown/binding-win32-x64-msvc@1.0.0-rc.15", "", { "os": "win32", "cpu": "x64" }, "sha512-3P2A8L+x75qavWLe/Dll3EYBJLQmtkJN8rfh+U/eR3MqMgL/h98PhYI+JFfXuDPgPeCB7iZAKiqii5vqOvnA0g=="],
|
||||
|
||||
"@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-rc.15", "", {}, "sha512-UromN0peaE53IaBRe9W7CjrZgXl90fqGpK+mIZbA3qSTeYqg3pqpROBdIPvOG3F5ereDHNwoHBI2e50n1BDr1g=="],
|
||||
|
||||
"@shikijs/core": ["@shikijs/core@4.0.2", "", { "dependencies": { "@shikijs/primitive": "4.0.2", "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "10.0.2", "@types/hast": "3.0.4", "hast-util-to-html": "9.0.5" } }, "sha512-hxT0YF4ExEqB8G/qFdtJvpmHXBYJ2lWW7qTHDarVkIudPFE6iCIrqdgWxGn5s+ppkGXI0aEGlibI0PAyzP3zlw=="],
|
||||
|
||||
"@shikijs/engine-javascript": ["@shikijs/engine-javascript@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "10.0.2", "oniguruma-to-es": "4.3.5" } }, "sha512-7PW0Nm49DcoUIQEXlJhNNBHyoGMjalRETTCcjMqEaMoJRLljy1Bi/EGV3/qLBgLKQejdspiiYuHGQW6dX94Nag=="],
|
||||
|
||||
"@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "10.0.2" } }, "sha512-UpCB9Y2sUKlS9z8juFSKz7ZtysmeXCgnRF0dlhXBkmQnek7lAToPte8DkxmEYGNTMii72zU/lyXiCB6StuZeJg=="],
|
||||
|
||||
"@shikijs/langs": ["@shikijs/langs@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2" } }, "sha512-KaXby5dvoeuZzN0rYQiPMjFoUrz4hgwIE+D6Du9owcHcl6/g16/yT5BQxSW5cGt2MZBz6Hl0YuRqf12omRfUUg=="],
|
||||
|
||||
"@shikijs/primitive": ["@shikijs/primitive@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "10.0.2", "@types/hast": "3.0.4" } }, "sha512-M6UMPrSa3fN5ayeJwFVl9qWofl273wtK1VG8ySDZ1mQBfhCpdd8nEx7nPZ/tk7k+TYcpqBZzj/AnwxT9lO+HJw=="],
|
||||
|
||||
"@shikijs/themes": ["@shikijs/themes@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2" } }, "sha512-mjCafwt8lJJaVSsQvNVrJumbnnj1RI8jbUKrPKgE6E3OvQKxnuRoBaYC51H4IGHePsGN/QtALglWBU7DoKDFnA=="],
|
||||
|
||||
"@shikijs/types": ["@shikijs/types@4.0.2", "", { "dependencies": { "@shikijs/vscode-textmate": "10.0.2", "@types/hast": "3.0.4" } }, "sha512-qzbeRooUTPnLE+sHD/Z8DStmaDgnbbc/pMrU203950aRqjX/6AFHeDYT+j00y2lPdz0ywJKx7o/7qnqTivtlXg=="],
|
||||
|
||||
"@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="],
|
||||
|
||||
"@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
|
||||
|
||||
"@sveltejs/acorn-typescript": ["@sveltejs/acorn-typescript@1.0.9", "", { "peerDependencies": { "acorn": "8.16.0" } }, "sha512-lVJX6qEgs/4DOcRTpo56tmKzVPtoWAaVbL4hfO7t7NVwl9AAXzQR6cihesW1BmNMPl+bK6dreu2sOKBP2Q9CIA=="],
|
||||
|
||||
"@sveltejs/adapter-static": ["@sveltejs/adapter-static@3.0.10", "", { "peerDependencies": { "@sveltejs/kit": "2.57.1" } }, "sha512-7D9lYFWJmB7zxZyTE/qxjksvMqzMuYrrsyh1f4AlZqeZeACPRySjbC3aFiY55wb1tWUaKOQG9PVbm74JcN2Iew=="],
|
||||
|
||||
"@sveltejs/kit": ["@sveltejs/kit@2.57.1", "", { "dependencies": { "@standard-schema/spec": "1.1.0", "@sveltejs/acorn-typescript": "1.0.9", "@types/cookie": "0.6.0", "acorn": "8.16.0", "cookie": "0.6.0", "devalue": "5.7.1", "esm-env": "1.2.2", "kleur": "4.1.5", "magic-string": "0.30.21", "mrmime": "2.0.1", "set-cookie-parser": "3.1.0", "sirv": "3.0.2" }, "optionalDependencies": { "typescript": "6.0.2" }, "peerDependencies": { "@sveltejs/vite-plugin-svelte": "7.0.0", "svelte": "5.55.3", "vite": "8.0.8" }, "bin": { "svelte-kit": "svelte-kit.js" } }, "sha512-VRdSbB96cI1EnRh09CqmnQqP/YJvET5buj8S6k7CxaJqBJD4bw4fRKDjcarAj/eX9k2eHifQfDH8NtOh+ZxxPw=="],
|
||||
|
||||
"@sveltejs/vite-plugin-svelte": ["@sveltejs/vite-plugin-svelte@7.0.0", "", { "dependencies": { "deepmerge": "4.3.1", "magic-string": "0.30.21", "obug": "2.1.1", "vitefu": "1.1.3" }, "peerDependencies": { "svelte": "5.55.3", "vite": "8.0.8" } }, "sha512-ILXmxC7HAsnkK2eslgPetrqqW1BKSL7LktsFgqzNj83MaivMGZzluWq32m25j2mDOjmSKX7GGWahePhuEs7P/g=="],
|
||||
|
||||
"@swc/helpers": ["@swc/helpers@0.5.21", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-jI/VAmtdjB/RnI8GTnokyX7Ug8c+g+ffD6QRLa6XQewtnGyukKkKSk3wLTM3b5cjt1jNh9x0jfVlagdN2gDKQg=="],
|
||||
|
||||
"@tailwindcss/node": ["@tailwindcss/node@4.2.2", "", { "dependencies": { "@jridgewell/remapping": "2.3.5", "enhanced-resolve": "5.20.1", "jiti": "2.6.1", "lightningcss": "1.32.0", "magic-string": "0.30.21", "source-map-js": "1.2.1", "tailwindcss": "4.2.2" } }, "sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA=="],
|
||||
|
||||
"@tailwindcss/oxide": ["@tailwindcss/oxide@4.2.2", "", { "optionalDependencies": { "@tailwindcss/oxide-android-arm64": "4.2.2", "@tailwindcss/oxide-darwin-arm64": "4.2.2", "@tailwindcss/oxide-darwin-x64": "4.2.2", "@tailwindcss/oxide-freebsd-x64": "4.2.2", "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.2", "@tailwindcss/oxide-linux-arm64-gnu": "4.2.2", "@tailwindcss/oxide-linux-arm64-musl": "4.2.2", "@tailwindcss/oxide-linux-x64-gnu": "4.2.2", "@tailwindcss/oxide-linux-x64-musl": "4.2.2", "@tailwindcss/oxide-wasm32-wasi": "4.2.2", "@tailwindcss/oxide-win32-arm64-msvc": "4.2.2", "@tailwindcss/oxide-win32-x64-msvc": "4.2.2" } }, "sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg=="],
|
||||
|
||||
"@tailwindcss/oxide-android-arm64": ["@tailwindcss/oxide-android-arm64@4.2.2", "", { "os": "android", "cpu": "arm64" }, "sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg=="],
|
||||
|
||||
"@tailwindcss/oxide-darwin-arm64": ["@tailwindcss/oxide-darwin-arm64@4.2.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg=="],
|
||||
|
||||
"@tailwindcss/oxide-darwin-x64": ["@tailwindcss/oxide-darwin-x64@4.2.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw=="],
|
||||
|
||||
"@tailwindcss/oxide-freebsd-x64": ["@tailwindcss/oxide-freebsd-x64@4.2.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ=="],
|
||||
|
||||
"@tailwindcss/oxide-linux-arm-gnueabihf": ["@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2", "", { "os": "linux", "cpu": "arm" }, "sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ=="],
|
||||
|
||||
"@tailwindcss/oxide-linux-arm64-gnu": ["@tailwindcss/oxide-linux-arm64-gnu@4.2.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw=="],
|
||||
|
||||
"@tailwindcss/oxide-linux-arm64-musl": ["@tailwindcss/oxide-linux-arm64-musl@4.2.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag=="],
|
||||
|
||||
"@tailwindcss/oxide-linux-x64-gnu": ["@tailwindcss/oxide-linux-x64-gnu@4.2.2", "", { "os": "linux", "cpu": "x64" }, "sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg=="],
|
||||
|
||||
"@tailwindcss/oxide-linux-x64-musl": ["@tailwindcss/oxide-linux-x64-musl@4.2.2", "", { "os": "linux", "cpu": "x64" }, "sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ=="],
|
||||
|
||||
"@tailwindcss/oxide-wasm32-wasi": ["@tailwindcss/oxide-wasm32-wasi@4.2.2", "", { "cpu": "none" }, "sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q=="],
|
||||
|
||||
"@tailwindcss/oxide-win32-arm64-msvc": ["@tailwindcss/oxide-win32-arm64-msvc@4.2.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ=="],
|
||||
|
||||
"@tailwindcss/oxide-win32-x64-msvc": ["@tailwindcss/oxide-win32-x64-msvc@4.2.2", "", { "os": "win32", "cpu": "x64" }, "sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA=="],
|
||||
|
||||
"@tailwindcss/vite": ["@tailwindcss/vite@4.2.2", "", { "dependencies": { "@tailwindcss/node": "4.2.2", "@tailwindcss/oxide": "4.2.2", "tailwindcss": "4.2.2" }, "peerDependencies": { "vite": "8.0.8" } }, "sha512-mEiF5HO1QqCLXoNEfXVA1Tzo+cYsrqV7w9Juj2wdUFyW07JRenqMG225MvPwr3ZD9N1bFQj46X7r33iHxLUW0w=="],
|
||||
|
||||
"@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="],
|
||||
|
||||
"@types/cookie": ["@types/cookie@0.6.0", "", {}, "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA=="],
|
||||
|
||||
"@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
|
||||
|
||||
"@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "3.0.3" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="],
|
||||
|
||||
"@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "3.0.3" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="],
|
||||
|
||||
"@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="],
|
||||
|
||||
"@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="],
|
||||
|
||||
"@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="],
|
||||
|
||||
"@xterm/addon-fit": ["@xterm/addon-fit@0.11.0", "", {}, "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g=="],
|
||||
|
||||
"@xterm/addon-web-links": ["@xterm/addon-web-links@0.12.0", "", {}, "sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw=="],
|
||||
|
||||
"@xterm/xterm": ["@xterm/xterm@6.0.0", "", {}, "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg=="],
|
||||
|
||||
"acorn": ["acorn@8.16.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw=="],
|
||||
|
||||
"aria-query": ["aria-query@5.3.1", "", {}, "sha512-Z/ZeOgVl7bcSYZ/u/rh0fOpvEpq//LZmdbkXyc7syVzjPAhfOa9ebsdTSjEBDU4vs5nC98Kfduj1uFo0qyET3g=="],
|
||||
|
||||
"axobject-query": ["axobject-query@4.1.0", "", {}, "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ=="],
|
||||
|
||||
"bits-ui": ["bits-ui@2.17.3", "", { "dependencies": { "@floating-ui/core": "1.7.5", "@floating-ui/dom": "1.7.6", "esm-env": "1.2.2", "runed": "0.35.1", "svelte-toolbelt": "0.10.6", "tabbable": "6.4.0" }, "peerDependencies": { "@internationalized/date": "3.12.0", "svelte": "5.55.3" } }, "sha512-Bef41uY9U2jaBJHPhcPvmBNkGec5Wx2z6eioDsTmsaR2vH4QoaOcPi75gzCG3+/2TNr6v/qBwzgWNPYCxNtrEA=="],
|
||||
|
||||
"ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="],
|
||||
|
||||
"character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="],
|
||||
|
||||
"character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="],
|
||||
|
||||
"chart.js": ["chart.js@4.5.1", "", { "dependencies": { "@kurkle/color": "0.3.4" } }, "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw=="],
|
||||
|
||||
"chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "4.1.2" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="],
|
||||
|
||||
"clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="],
|
||||
|
||||
"comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="],
|
||||
|
||||
"cookie": ["cookie@0.6.0", "", {}, "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw=="],
|
||||
|
||||
"deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="],
|
||||
|
||||
"dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],
|
||||
|
||||
"detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="],
|
||||
|
||||
"devalue": ["devalue@5.7.1", "", {}, "sha512-MUbZ586EgQqdRnC4yDrlod3BEdyvE4TapGYHMW2CiaW+KkkFmWEFqBUaLltEZCGi0iFXCEjRF0OjF0DV2QHjOA=="],
|
||||
|
||||
"devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "2.0.3" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="],
|
||||
|
||||
"enhanced-resolve": ["enhanced-resolve@5.20.1", "", { "dependencies": { "graceful-fs": "4.2.11", "tapable": "2.3.2" } }, "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA=="],
|
||||
|
||||
"esm-env": ["esm-env@1.2.2", "", {}, "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA=="],
|
||||
|
||||
"esrap": ["esrap@2.2.5", "", { "dependencies": { "@jridgewell/sourcemap-codec": "1.5.5" } }, "sha512-/yLB1538mag+dn0wsePTe8C0rDIjUOaJpMs2McodSzmM2msWcZsBSdRtg6HOBt0A/r82BN+Md3pgwSc/uWt2Ig=="],
|
||||
|
||||
"fdir": ["fdir@6.5.0", "", { "optionalDependencies": { "picomatch": "4.0.4" } }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="],
|
||||
|
||||
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
|
||||
|
||||
"graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="],
|
||||
|
||||
"hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "3.0.4", "@types/unist": "3.0.3", "ccount": "2.0.1", "comma-separated-tokens": "2.0.3", "hast-util-whitespace": "3.0.0", "html-void-elements": "3.0.0", "mdast-util-to-hast": "13.2.1", "property-information": "7.1.0", "space-separated-tokens": "2.0.2", "stringify-entities": "4.0.4", "zwitch": "2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="],
|
||||
|
||||
"hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "3.0.4" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="],
|
||||
|
||||
"html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="],
|
||||
|
||||
"inline-style-parser": ["inline-style-parser@0.2.7", "", {}, "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA=="],
|
||||
|
||||
"is-reference": ["is-reference@3.0.3", "", { "dependencies": { "@types/estree": "1.0.8" } }, "sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw=="],
|
||||
|
||||
"jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="],
|
||||
|
||||
"kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="],
|
||||
|
||||
"lightningcss": ["lightningcss@1.32.0", "", { "dependencies": { "detect-libc": "2.1.2" }, "optionalDependencies": { "lightningcss-android-arm64": "1.32.0", "lightningcss-darwin-arm64": "1.32.0", "lightningcss-darwin-x64": "1.32.0", "lightningcss-freebsd-x64": "1.32.0", "lightningcss-linux-arm-gnueabihf": "1.32.0", "lightningcss-linux-arm64-gnu": "1.32.0", "lightningcss-linux-arm64-musl": "1.32.0", "lightningcss-linux-x64-gnu": "1.32.0", "lightningcss-linux-x64-musl": "1.32.0", "lightningcss-win32-arm64-msvc": "1.32.0", "lightningcss-win32-x64-msvc": "1.32.0" } }, "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ=="],
|
||||
|
||||
"lightningcss-android-arm64": ["lightningcss-android-arm64@1.32.0", "", { "os": "android", "cpu": "arm64" }, "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg=="],
|
||||
|
||||
"lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.32.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ=="],
|
||||
|
||||
"lightningcss-darwin-x64": ["lightningcss-darwin-x64@1.32.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w=="],
|
||||
|
||||
"lightningcss-freebsd-x64": ["lightningcss-freebsd-x64@1.32.0", "", { "os": "freebsd", "cpu": "x64" }, "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig=="],
|
||||
|
||||
"lightningcss-linux-arm-gnueabihf": ["lightningcss-linux-arm-gnueabihf@1.32.0", "", { "os": "linux", "cpu": "arm" }, "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw=="],
|
||||
|
||||
"lightningcss-linux-arm64-gnu": ["lightningcss-linux-arm64-gnu@1.32.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ=="],
|
||||
|
||||
"lightningcss-linux-arm64-musl": ["lightningcss-linux-arm64-musl@1.32.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg=="],
|
||||
|
||||
"lightningcss-linux-x64-gnu": ["lightningcss-linux-x64-gnu@1.32.0", "", { "os": "linux", "cpu": "x64" }, "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA=="],
|
||||
|
||||
"lightningcss-linux-x64-musl": ["lightningcss-linux-x64-musl@1.32.0", "", { "os": "linux", "cpu": "x64" }, "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg=="],
|
||||
|
||||
"lightningcss-win32-arm64-msvc": ["lightningcss-win32-arm64-msvc@1.32.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw=="],
|
||||
|
||||
"lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.32.0", "", { "os": "win32", "cpu": "x64" }, "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q=="],
|
||||
|
||||
"locate-character": ["locate-character@3.0.0", "", {}, "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA=="],
|
||||
|
||||
"lz-string": ["lz-string@1.5.0", "", { "bin": { "lz-string": "bin/bin.js" } }, "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ=="],
|
||||
|
||||
"magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="],
|
||||
|
||||
"mdast-util-to-hast": ["mdast-util-to-hast@13.2.1", "", { "dependencies": { "@types/hast": "3.0.4", "@types/mdast": "4.0.4", "@ungap/structured-clone": "1.3.0", "devlop": "1.1.0", "micromark-util-sanitize-uri": "2.0.1", "trim-lines": "3.0.1", "unist-util-position": "5.0.0", "unist-util-visit": "5.1.0", "vfile": "6.0.3" } }, "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA=="],
|
||||
|
||||
"micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "2.0.1", "micromark-util-types": "2.0.2" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="],
|
||||
|
||||
"micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="],
|
||||
|
||||
"micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "2.1.1", "micromark-util-encode": "2.0.1", "micromark-util-symbol": "2.0.1" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="],
|
||||
|
||||
"micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="],
|
||||
|
||||
"micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="],
|
||||
|
||||
"mri": ["mri@1.2.0", "", {}, "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA=="],
|
||||
|
||||
"mrmime": ["mrmime@2.0.1", "", {}, "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ=="],
|
||||
|
||||
"nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],
|
||||
|
||||
"obug": ["obug@2.1.1", "", {}, "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ=="],
|
||||
|
||||
"oniguruma-parser": ["oniguruma-parser@0.12.1", "", {}, "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w=="],
|
||||
|
||||
"oniguruma-to-es": ["oniguruma-to-es@4.3.5", "", { "dependencies": { "oniguruma-parser": "0.12.1", "regex": "6.1.0", "regex-recursion": "6.0.2" } }, "sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ=="],
|
||||
|
||||
"picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
|
||||
|
||||
"picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="],
|
||||
|
||||
"postcss": ["postcss@8.5.9", "", { "dependencies": { "nanoid": "3.3.11", "picocolors": "1.1.1", "source-map-js": "1.2.1" } }, "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw=="],
|
||||
|
||||
"property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="],
|
||||
|
||||
"readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="],
|
||||
|
||||
"regex": ["regex@6.1.0", "", { "dependencies": { "regex-utilities": "2.3.0" } }, "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg=="],
|
||||
|
||||
"regex-recursion": ["regex-recursion@6.0.2", "", { "dependencies": { "regex-utilities": "2.3.0" } }, "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg=="],
|
||||
|
||||
"regex-utilities": ["regex-utilities@2.3.0", "", {}, "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="],
|
||||
|
||||
"rolldown": ["rolldown@1.0.0-rc.15", "", { "dependencies": { "@oxc-project/types": "0.124.0", "@rolldown/pluginutils": "1.0.0-rc.15" }, "optionalDependencies": { "@rolldown/binding-android-arm64": "1.0.0-rc.15", "@rolldown/binding-darwin-arm64": "1.0.0-rc.15", "@rolldown/binding-darwin-x64": "1.0.0-rc.15", "@rolldown/binding-freebsd-x64": "1.0.0-rc.15", "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.15", "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.15", "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.15", "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.15", "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.15", "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.15", "@rolldown/binding-linux-x64-musl": "1.0.0-rc.15", "@rolldown/binding-openharmony-arm64": "1.0.0-rc.15", "@rolldown/binding-wasm32-wasi": "1.0.0-rc.15", "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.15", "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.15" }, "bin": { "rolldown": "bin/cli.mjs" } }, "sha512-Ff31guA5zT6WjnGp0SXw76X6hzGRk/OQq2hE+1lcDe+lJdHSgnSX6nK3erbONHyCbpSj9a9E+uX/OvytZoWp2g=="],
|
||||
|
||||
"runed": ["runed@0.35.1", "", { "dependencies": { "dequal": "2.0.3", "esm-env": "1.2.2", "lz-string": "1.5.0" }, "optionalDependencies": { "@sveltejs/kit": "2.57.1" }, "peerDependencies": { "svelte": "5.55.3" } }, "sha512-2F4Q/FZzbeJTFdIS/PuOoPRSm92sA2LhzTnv6FXhCoENb3huf5+fDuNOg1LNvGOouy3u/225qxmuJvcV3IZK5Q=="],
|
||||
|
||||
"sade": ["sade@1.8.1", "", { "dependencies": { "mri": "1.2.0" } }, "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A=="],
|
||||
|
||||
"set-cookie-parser": ["set-cookie-parser@3.1.0", "", {}, "sha512-kjnC1DXBHcxaOaOXBHBeRtltsDG2nUiUni+jP92M9gYdW12rsmx92UsfpH7o5tDRs7I1ZZPSQJQGv3UaRfCiuw=="],
|
||||
|
||||
"shiki": ["shiki@4.0.2", "", { "dependencies": { "@shikijs/core": "4.0.2", "@shikijs/engine-javascript": "4.0.2", "@shikijs/engine-oniguruma": "4.0.2", "@shikijs/langs": "4.0.2", "@shikijs/themes": "4.0.2", "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "10.0.2", "@types/hast": "3.0.4" } }, "sha512-eAVKTMedR5ckPo4xne/PjYQYrU3qx78gtJZ+sHlXEg5IHhhoQhMfZVzetTYuaJS0L2Ef3AcCRzCHV8T0WI6nIQ=="],
|
||||
|
||||
"sirv": ["sirv@3.0.2", "", { "dependencies": { "@polka/url": "1.0.0-next.29", "mrmime": "2.0.1", "totalist": "3.0.1" } }, "sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g=="],
|
||||
|
||||
"source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],
|
||||
|
||||
"space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="],
|
||||
|
||||
"stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "2.1.0", "character-entities-legacy": "3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="],
|
||||
|
||||
"style-to-object": ["style-to-object@1.0.14", "", { "dependencies": { "inline-style-parser": "0.2.7" } }, "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw=="],
|
||||
|
||||
"svelte": ["svelte@5.55.3", "", { "dependencies": { "@jridgewell/remapping": "2.3.5", "@jridgewell/sourcemap-codec": "1.5.5", "@sveltejs/acorn-typescript": "1.0.9", "@types/estree": "1.0.8", "@types/trusted-types": "2.0.7", "acorn": "8.16.0", "aria-query": "5.3.1", "axobject-query": "4.1.0", "clsx": "2.1.1", "devalue": "5.7.1", "esm-env": "1.2.2", "esrap": "2.2.5", "is-reference": "3.0.3", "locate-character": "3.0.0", "magic-string": "0.30.21", "zimmerframe": "1.1.4" } }, "sha512-dS1N+i3bA1v+c4UDb750MlN5vCO82G6vxh8HeTsPsTdJ1BLsN1zxSyDlIdBBqUjqZ/BxEwM8UrFf98aaoVnZFQ=="],
|
||||
|
||||
"svelte-check": ["svelte-check@4.4.6", "", { "dependencies": { "@jridgewell/trace-mapping": "0.3.31", "chokidar": "4.0.3", "fdir": "6.5.0", "picocolors": "1.1.1", "sade": "1.8.1" }, "peerDependencies": { "svelte": "5.55.3", "typescript": "6.0.2" }, "bin": { "svelte-check": "bin/svelte-check" } }, "sha512-kP1zG81EWaFe9ZyTv4ZXv44Csi6Pkdpb7S3oj6m+K2ec/IcDg/a8LsFsnVLqm2nxtkSwsd5xPj/qFkTBgXHXjg=="],
|
||||
|
||||
"svelte-toolbelt": ["svelte-toolbelt@0.10.6", "", { "dependencies": { "clsx": "2.1.1", "runed": "0.35.1", "style-to-object": "1.0.14" }, "peerDependencies": { "svelte": "5.55.3" } }, "sha512-YWuX+RE+CnWYx09yseAe4ZVMM7e7GRFZM6OYWpBKOb++s+SQ8RBIMMe+Bs/CznBMc0QPLjr+vDBxTAkozXsFXQ=="],
|
||||
|
||||
"tabbable": ["tabbable@6.4.0", "", {}, "sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg=="],
|
||||
|
||||
"tailwindcss": ["tailwindcss@4.2.2", "", {}, "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q=="],
|
||||
|
||||
"tapable": ["tapable@2.3.2", "", {}, "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA=="],
|
||||
|
||||
"tinyglobby": ["tinyglobby@0.2.16", "", { "dependencies": { "fdir": "6.5.0", "picomatch": "4.0.4" } }, "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg=="],
|
||||
|
||||
"totalist": ["totalist@3.0.1", "", {}, "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ=="],
|
||||
|
||||
"trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="],
|
||||
|
||||
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"typescript": ["typescript@6.0.2", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-bGdAIrZ0wiGDo5l8c++HWtbaNCWTS4UTv7RaTH/ThVIgjkveJt83m74bBHMJkuCbslY8ixgLBVZJIOiQlQTjfQ=="],
|
||||
|
||||
"unist-util-is": ["unist-util-is@6.0.1", "", { "dependencies": { "@types/unist": "3.0.3" } }, "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g=="],
|
||||
|
||||
"unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "3.0.3" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="],
|
||||
|
||||
"unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "3.0.3" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="],
|
||||
|
||||
"unist-util-visit": ["unist-util-visit@5.1.0", "", { "dependencies": { "@types/unist": "3.0.3", "unist-util-is": "6.0.1", "unist-util-visit-parents": "6.0.2" } }, "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg=="],
|
||||
|
||||
"unist-util-visit-parents": ["unist-util-visit-parents@6.0.2", "", { "dependencies": { "@types/unist": "3.0.3", "unist-util-is": "6.0.1" } }, "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ=="],
|
||||
|
||||
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "3.0.3", "vfile-message": "4.0.3" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
|
||||
|
||||
"vfile-message": ["vfile-message@4.0.3", "", { "dependencies": { "@types/unist": "3.0.3", "unist-util-stringify-position": "4.0.0" } }, "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw=="],
|
||||
|
||||
"vite": ["vite@8.0.8", "", { "dependencies": { "lightningcss": "1.32.0", "picomatch": "4.0.4", "postcss": "8.5.9", "rolldown": "1.0.0-rc.15", "tinyglobby": "0.2.16" }, "optionalDependencies": { "fsevents": "2.3.3", "jiti": "2.6.1" }, "bin": { "vite": "bin/vite.js" } }, "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw=="],
|
||||
|
||||
"vitefu": ["vitefu@1.1.3", "", { "optionalDependencies": { "vite": "8.0.8" } }, "sha512-ub4okH7Z5KLjb6hDyjqrGXqWtWvoYdU3IGm/NorpgHncKoLTCfRIbvlhBm7r0YstIaQRYlp4yEbFqDcKSzXSSg=="],
|
||||
|
||||
"zimmerframe": ["zimmerframe@1.1.4", "", {}, "sha512-B58NGBEoc8Y9MWWCQGl/gq9xBCe4IiKM0a2x7GZdQKOW5Exr8S1W24J6OgM1njK8xCRGvAJIL/MxXHf6SkmQKQ=="],
|
||||
|
||||
"zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],
|
||||
}
|
||||
}
1564
frontend/pnpm-lock.yaml
generated
Normal file
File diff suppressed because it is too large
@ -2,19 +2,6 @@ import { auth } from '$lib/auth.svelte';
|
||||
|
||||
export type ApiResult<T> = { ok: true; data: T } | { ok: false; error: string };
|
||||
|
||||
async function parseResponse<T>(res: Response): Promise<ApiResult<T>> {
|
||||
if (res.status === 204 || res.status === 202) {
|
||||
const text = await res.text();
|
||||
if (!text) return { ok: true, data: undefined as T };
|
||||
const data = JSON.parse(text);
|
||||
return { ok: true, data: data as T };
|
||||
}
|
||||
|
||||
const data = await res.json();
|
||||
if (!res.ok) return { ok: false, error: data?.error?.message ?? 'Something went wrong' };
|
||||
return { ok: true, data: data as T };
|
||||
}
|
||||
|
||||
export async function apiFetch<T>(method: string, path: string, body?: unknown): Promise<ApiResult<T>> {
|
||||
try {
|
||||
const headers: Record<string, string> = { 'Content-Type': 'application/json' };
|
||||
@ -26,7 +13,11 @@ export async function apiFetch<T>(method: string, path: string, body?: unknown):
|
||||
body: body ? JSON.stringify(body) : undefined
|
||||
});
|
||||
|
||||
return await parseResponse<T>(res);
|
||||
if (res.status === 204) return { ok: true, data: undefined as T };
|
||||
|
||||
const data = await res.json();
|
||||
if (!res.ok) return { ok: false, error: data?.error?.message ?? 'Something went wrong' };
|
||||
return { ok: true, data: data as T };
|
||||
} catch {
|
||||
return { ok: false, error: 'Unable to connect to the server' };
|
||||
}
|
||||
@ -43,7 +34,11 @@ export async function apiFetchMultipart<T>(method: string, path: string, formDat
|
||||
body: formData
|
||||
});
|
||||
|
||||
return await parseResponse<T>(res);
|
||||
if (res.status === 204) return { ok: true, data: undefined as T };
|
||||
|
||||
const data = await res.json();
|
||||
if (!res.ok) return { ok: false, error: data?.error?.message ?? 'Something went wrong' };
|
||||
return { ok: true, data: data as T };
|
||||
} catch {
|
||||
return { ok: false, error: 'Unable to connect to the server' };
|
||||
}
|
||||
|
||||
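For background on why the parseResponse helper above tolerates empty bodies: several endpoints in this change answer 202 or 204 with no JSON payload, and an unconditional res.json() on such a response throws. Below is a minimal Go sketch of that server-side pattern — writeJSON, the routes, and the payload fields are simplified placeholders for illustration, not the repository's exact handlers.

```go
package main

import (
	"encoding/json"
	"net/http"
)

// writeJSON stands in for the control plane's helper: set a status code and
// encode a JSON body. Destroy-style endpoints skip it and answer 204 with an
// empty body, which is exactly the case the frontend helper guards against.
func writeJSON(w http.ResponseWriter, status int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(v)
}

func main() {
	mux := http.NewServeMux()

	// Create-style endpoint: JSON body with the capsule (201 after this
	// change; 202 under the old asynchronous contract).
	mux.HandleFunc("/v1/capsules", func(w http.ResponseWriter, r *http.Request) {
		writeJSON(w, http.StatusCreated, map[string]string{"id": "cap_example", "status": "running"})
	})

	// Destroy-style endpoint: no body at all, so a client must not parse
	// JSON unconditionally.
	mux.HandleFunc("/v1/capsules/destroy", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})

	_ = http.ListenAndServe(":8080", mux)
}
```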
@ -149,8 +149,6 @@
|
||||
case 'running': return 'var(--color-accent)';
|
||||
case 'paused': return 'var(--color-amber)';
|
||||
case 'error': return 'var(--color-red)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'var(--color-blue)';
|
||||
default: return 'var(--color-text-muted)';
|
||||
}
|
||||
}
|
||||
@ -160,8 +158,6 @@
|
||||
case 'running': return 'rgba(94,140,88,0.12)';
|
||||
case 'paused': return 'rgba(212,167,60,0.12)';
|
||||
case 'error': return 'rgba(207,129,114,0.12)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'rgba(90,159,212,0.12)';
|
||||
default: return 'rgba(255,255,255,0.05)';
|
||||
}
|
||||
}
|
||||
@ -171,8 +167,6 @@
|
||||
case 'running': return 'rgba(94,140,88,0.3)';
|
||||
case 'paused': return 'rgba(212,167,60,0.3)';
|
||||
case 'error': return 'rgba(207,129,114,0.3)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'rgba(90,159,212,0.3)';
|
||||
default: return 'rgba(255,255,255,0.08)';
|
||||
}
|
||||
}
|
||||
@ -424,8 +418,7 @@
|
||||
</div>
|
||||
{:else}
|
||||
{#each filteredCapsules as capsule, i (capsule.id)}
|
||||
{@const isTransient = ['starting', 'resuming', 'pausing', 'stopping'].includes(capsule.status)}
|
||||
{@const stripeColor = capsule.status === 'running' ? 'bg-[var(--color-accent)]' : capsule.status === 'paused' ? 'bg-[var(--color-amber)]' : capsule.status === 'error' ? 'bg-[var(--color-red)]' : isTransient ? 'bg-[var(--color-blue)]' : 'bg-[var(--color-text-muted)]'}
|
||||
{@const stripeColor = capsule.status === 'running' ? 'bg-[var(--color-accent)]' : capsule.status === 'paused' ? 'bg-[var(--color-amber)]' : capsule.status === 'error' ? 'bg-[var(--color-red)]' : 'bg-[var(--color-text-muted)]'}
|
||||
<div
|
||||
class="capsule-row relative grid grid-cols-[1.6fr_0.9fr_0.5fr_0.5fr_1fr_0.7fr_0.8fr] items-center overflow-hidden border-b border-[var(--color-border)] transition-colors duration-150 hover:bg-[var(--color-bg-3)] last:border-b-0 {newCapsuleId === capsule.id ? 'capsule-born' : ''}"
|
||||
style={initialAnimationDone ? '' : `animation: fadeUp 0.35s ease both; animation-delay: ${i * 40}ms`}
|
||||
@ -444,11 +437,6 @@
|
||||
<span class="inline-flex h-[6px] w-[6px] shrink-0 rounded-full bg-[var(--color-amber)]"></span>
|
||||
{:else if capsule.status === 'error'}
|
||||
<span class="inline-flex h-[6px] w-[6px] shrink-0 rounded-full bg-[var(--color-red)]"></span>
|
||||
{:else if isTransient}
|
||||
<span class="relative flex h-[6px] w-[6px] shrink-0">
|
||||
<span class="animate-status-ping absolute inline-flex h-full w-full rounded-full bg-[var(--color-blue)]"></span>
|
||||
<span class="relative inline-flex h-[6px] w-[6px] rounded-full bg-[var(--color-blue)]"></span>
|
||||
</span>
|
||||
{:else}
|
||||
<span class="inline-flex h-[6px] w-[6px] shrink-0 rounded-full bg-[var(--color-text-muted)]"></span>
|
||||
{/if}
|
||||
|
||||
@ -470,8 +470,7 @@
|
||||
</div>
|
||||
{:else}
|
||||
{#each filteredCapsules as capsule, i (capsule.id)}
|
||||
{@const isTransient = ['starting', 'resuming', 'pausing', 'stopping'].includes(capsule.status)}
|
||||
{@const stripeColor = capsule.status === 'running' ? 'bg-[var(--color-accent)]' : capsule.status === 'paused' ? 'bg-[var(--color-amber)]' : isTransient ? 'bg-[var(--color-blue)]' : 'bg-[var(--color-text-muted)]'}
|
||||
{@const stripeColor = capsule.status === 'running' ? 'bg-[var(--color-accent)]' : capsule.status === 'paused' ? 'bg-[var(--color-amber)]' : 'bg-[var(--color-text-muted)]'}
|
||||
<div
|
||||
class="capsule-row relative grid grid-cols-[1.6fr_0.8fr_0.5fr_0.5fr_0.6fr_1fr_0.9fr] items-center overflow-hidden border-b border-[var(--color-border)] transition-colors duration-150 hover:bg-[var(--color-bg-3)] last:border-b-0 {newCapsuleId === capsule.id ? 'capsule-born' : ''}"
|
||||
style={initialAnimationDone ? '' : `animation: fadeUp 0.35s ease both; animation-delay: ${i * 40}ms`}
|
||||
@ -488,11 +487,6 @@
|
||||
</span>
|
||||
{:else if capsule.status === 'paused'}
|
||||
<span class="inline-flex h-[6px] w-[6px] shrink-0 rounded-full bg-[var(--color-amber)]"></span>
|
||||
{:else if isTransient}
|
||||
<span class="relative flex h-[6px] w-[6px] shrink-0">
|
||||
<span class="animate-status-ping absolute inline-flex h-full w-full rounded-full bg-[var(--color-blue)]"></span>
|
||||
<span class="relative inline-flex h-[6px] w-[6px] rounded-full bg-[var(--color-blue)]"></span>
|
||||
</span>
|
||||
{:else}
|
||||
<span class="inline-flex h-[6px] w-[6px] shrink-0 rounded-full bg-[var(--color-text-muted)]"></span>
|
||||
{/if}
|
||||
@ -562,7 +556,7 @@
|
||||
openMenuId = capsule.id;
|
||||
}
|
||||
}}
|
||||
class="inline-flex items-center gap-1.5 rounded-[var(--radius-button)] border px-2.5 py-1 text-label font-semibold uppercase tracking-[0.04em] transition-colors duration-150 {capsule.status === 'running' ? 'border-[var(--color-accent)]/40 bg-[var(--color-accent-glow)] text-[var(--color-accent-mid)] hover:border-[var(--color-accent)]/70 hover:text-[var(--color-accent-bright)]' : capsule.status === 'paused' ? 'border-[var(--color-amber)]/30 bg-[var(--color-amber)]/5 text-[var(--color-amber)] hover:border-[var(--color-amber)]/60' : isTransient ? 'border-[var(--color-blue)]/30 bg-[var(--color-blue)]/5 text-[var(--color-blue)]' : 'border-[var(--color-border)] bg-[var(--color-bg-2)] text-[var(--color-text-secondary)] hover:border-[var(--color-border-mid)] hover:text-[var(--color-text-primary)]'}"
|
||||
class="inline-flex items-center gap-1.5 rounded-[var(--radius-button)] border px-2.5 py-1 text-label font-semibold uppercase tracking-[0.04em] transition-colors duration-150 {capsule.status === 'running' ? 'border-[var(--color-accent)]/40 bg-[var(--color-accent-glow)] text-[var(--color-accent-mid)] hover:border-[var(--color-accent)]/70 hover:text-[var(--color-accent-bright)]' : capsule.status === 'paused' ? 'border-[var(--color-amber)]/30 bg-[var(--color-amber)]/5 text-[var(--color-amber)] hover:border-[var(--color-amber)]/60' : 'border-[var(--color-border)] bg-[var(--color-bg-2)] text-[var(--color-text-secondary)] hover:border-[var(--color-border-mid)] hover:text-[var(--color-text-primary)]'}"
|
||||
>
|
||||
{capsule.status}
|
||||
<svg
|
||||
|
||||
@ -404,8 +404,6 @@
|
||||
case 'running': return 'var(--color-accent)';
|
||||
case 'paused': return 'var(--color-amber)';
|
||||
case 'error': return 'var(--color-red)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'var(--color-blue)';
|
||||
default: return 'var(--color-text-muted)';
|
||||
}
|
||||
}
|
||||
@ -415,8 +413,6 @@
|
||||
case 'running': return 'rgba(94,140,88,0.12)';
|
||||
case 'paused': return 'rgba(212,167,60,0.12)';
|
||||
case 'error': return 'rgba(207,129,114,0.12)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'rgba(90,159,212,0.12)';
|
||||
default: return 'rgba(255,255,255,0.05)';
|
||||
}
|
||||
}
|
||||
@ -426,8 +422,6 @@
|
||||
case 'running': return 'rgba(94,140,88,0.3)';
|
||||
case 'paused': return 'rgba(212,167,60,0.3)';
|
||||
case 'error': return 'rgba(207,129,114,0.3)';
|
||||
case 'starting': case 'resuming': case 'pausing': case 'stopping':
|
||||
return 'rgba(90,159,212,0.3)';
|
||||
default: return 'rgba(255,255,255,0.08)';
|
||||
}
|
||||
}
|
||||
|
||||
@ -57,7 +57,7 @@ func (h *adminCapsuleHandler) Create(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
ac.TeamID = id.PlatformTeamID
|
||||
h.audit.LogSandboxCreate(r.Context(), ac, sb.ID, sb.Template)
|
||||
writeJSON(w, http.StatusAccepted, sandboxToResponse(sb))
|
||||
writeJSON(w, http.StatusCreated, sandboxToResponse(sb))
|
||||
}
|
||||
|
||||
// List handles GET /v1/admin/capsules.
|
||||
@ -113,7 +113,7 @@ func (h *adminCapsuleHandler) Destroy(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
h.audit.LogSandboxDestroy(r.Context(), ac, sandboxID)
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
type adminSnapshotRequest struct {
|
||||
|
||||
@ -142,8 +142,6 @@ func (h *execHandler) Exec(w http.ResponseWriter, r *http.Request) {
|
||||
Cmd: req.Cmd,
|
||||
Args: req.Args,
|
||||
TimeoutSec: req.TimeoutSec,
|
||||
Envs: req.Envs,
|
||||
Cwd: req.Cwd,
|
||||
}))
|
||||
if err != nil {
|
||||
status, code, msg := agentErrToHTTP(err)
|
||||
|
||||
@ -35,11 +35,9 @@ var upgrader = websocket.Upgrader{
|
||||
|
||||
// wsStartMsg is the first message the client sends to start a process.
|
||||
type wsStartMsg struct {
|
||||
Type string `json:"type"` // "start"
|
||||
Cmd string `json:"cmd"`
|
||||
Args []string `json:"args"`
|
||||
Envs map[string]string `json:"envs"`
|
||||
Cwd string `json:"cwd"`
|
||||
Type string `json:"type"` // "start"
|
||||
Cmd string `json:"cmd"`
|
||||
Args []string `json:"args"`
|
||||
}
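A sketch of the first frame a WebSocket client would send after this change, now that envs and cwd are gone from wsStartMsg. The field names follow the JSON tags above; the endpoint URL, the missing auth handling, and the use of gorilla/websocket's dialer are assumptions for illustration only.

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/gorilla/websocket"
)

// startMsg mirrors the trimmed wsStartMsg: only type, cmd and args remain.
type startMsg struct {
	Type string   `json:"type"`
	Cmd  string   `json:"cmd"`
	Args []string `json:"args"`
}

func main() {
	// Endpoint and auth are placeholders; the real exec-stream route and
	// token scheme are not shown in this hunk.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/v1/capsules/cap_example/exec-stream", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	payload, _ := json.Marshal(startMsg{Type: "start", Cmd: "python3", Args: []string{"-V"}})
	if err := conn.WriteMessage(websocket.TextMessage, payload); err != nil {
		log.Fatal(err)
	}
}
```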
|
||||
|
||||
// wsOutMsg is sent by the server for process events.
|
||||
@ -135,8 +133,6 @@ func (h *execStreamHandler) runExecStream(ctx context.Context, conn *websocket.C
|
||||
SandboxId: sandboxIDStr,
|
||||
Cmd: startMsg.Cmd,
|
||||
Args: startMsg.Args,
|
||||
Envs: startMsg.Envs,
|
||||
Cwd: startMsg.Cwd,
|
||||
}))
|
||||
if err != nil {
|
||||
sendWSError(conn, "failed to start exec stream: "+err.Error())
|
||||
|
||||
@ -108,7 +108,7 @@ func (h *sandboxHandler) Create(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
h.audit.LogSandboxCreate(r.Context(), ac, sb.ID, sb.Template)
|
||||
writeJSON(w, http.StatusAccepted, sandboxToResponse(sb))
|
||||
writeJSON(w, http.StatusCreated, sandboxToResponse(sb))
|
||||
}
|
||||
|
||||
// List handles GET /v1/capsules.
|
||||
@ -167,7 +167,7 @@ func (h *sandboxHandler) Pause(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
h.audit.LogSandboxPause(r.Context(), ac, sandboxID)
|
||||
writeJSON(w, http.StatusAccepted, sandboxToResponse(sb))
|
||||
writeJSON(w, http.StatusOK, sandboxToResponse(sb))
|
||||
}
|
||||
|
||||
// Resume handles POST /v1/capsules/{id}/resume.
|
||||
@ -189,7 +189,7 @@ func (h *sandboxHandler) Resume(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
h.audit.LogSandboxResume(r.Context(), ac, sandboxID)
|
||||
writeJSON(w, http.StatusAccepted, sandboxToResponse(sb))
|
||||
writeJSON(w, http.StatusOK, sandboxToResponse(sb))
|
||||
}
|
||||
|
||||
// Ping handles POST /v1/capsules/{id}/ping.
|
||||
@ -230,5 +230,5 @@ func (h *sandboxHandler) Destroy(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
h.audit.LogSandboxDestroy(r.Context(), ac, sandboxID)
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
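With Create now answering 201 and Destroy answering 204, both operations are synchronous from the caller's point of view and there is no transient status to poll. A minimal client sketch under those assumptions; the base URL, the X-API-Key header name, and the decoded response fields are illustrative, not taken verbatim from the spec.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{"template": "python-3.13"})
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/v1/capsules", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Key", "placeholder-api-key") // header name assumed

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 201 means the capsule already exists and is usable; the old 202
	// contract would instead require polling until status left "starting".
	if resp.StatusCode != http.StatusCreated {
		panic(fmt.Sprintf("unexpected status %d", resp.StatusCode))
	}

	var capsule struct {
		ID     string `json:"id"`
		Status string `json:"status"`
	}
	_ = json.NewDecoder(resp.Body).Decode(&capsule)
	fmt.Println("created", capsule.ID, capsule.Status)
}
```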
|
||||
|
||||
@ -1,65 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/auth"
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/db"
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/id"
|
||||
)
|
||||
|
||||
type sandboxEventHandler struct {
|
||||
db *db.Queries
|
||||
rdb *redis.Client
|
||||
}
|
||||
|
||||
func newSandboxEventHandler(queries *db.Queries, rdb *redis.Client) *sandboxEventHandler {
|
||||
return &sandboxEventHandler{db: queries, rdb: rdb}
|
||||
}
|
||||
|
||||
type sandboxEventRequest struct {
|
||||
Event string `json:"event"`
|
||||
SandboxID string `json:"sandbox_id"`
|
||||
HostID string `json:"host_id"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// Handle receives lifecycle event callbacks from host agents and publishes
|
||||
// them to the internal Redis stream for the SandboxEventConsumer to process.
|
||||
func (h *sandboxEventHandler) Handle(w http.ResponseWriter, r *http.Request) {
|
||||
var req sandboxEventRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body")
|
||||
return
|
||||
}
|
||||
|
||||
if req.Event == "" || req.SandboxID == "" || req.HostID == "" {
|
||||
writeError(w, http.StatusBadRequest, "invalid_request", "event, sandbox_id, and host_id are required")
|
||||
return
|
||||
}
|
||||
|
||||
// Validate that the calling host matches the host_id in the payload.
|
||||
hc := auth.MustHostFromContext(r.Context())
|
||||
callerHostID := id.FormatHostID(hc.HostID)
|
||||
if callerHostID != req.HostID {
|
||||
writeError(w, http.StatusForbidden, "forbidden", "host_id does not match authenticated host")
|
||||
return
|
||||
}
|
||||
|
||||
if req.Timestamp == 0 {
|
||||
req.Timestamp = time.Now().Unix()
|
||||
}
|
||||
|
||||
PublishSandboxEvent(r.Context(), h.rdb, SandboxEvent{
|
||||
Event: req.Event,
|
||||
SandboxID: req.SandboxID,
|
||||
HostID: req.HostID,
|
||||
Timestamp: req.Timestamp,
|
||||
})
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
@ -213,49 +213,4 @@ func (m *HostMonitor) checkHost(ctx context.Context, host db.Host) {
|
||||
slog.Warn("host monitor: failed to mark stopped", "host_id", id.FormatHostID(host.ID), "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Reconcile transient statuses (starting, resuming, pausing, stopping) ---
|
||||
// These represent in-flight operations. If the sandbox is no longer alive on
|
||||
// the host, infer the final state based on the transient status.
|
||||
|
||||
transientSandboxes, err := m.db.ListSandboxesByHostAndStatus(ctx, db.ListSandboxesByHostAndStatusParams{
|
||||
HostID: host.ID,
|
||||
Column2: []string{"starting", "resuming", "pausing", "stopping"},
|
||||
})
|
||||
if err != nil {
|
||||
slog.Warn("host monitor: failed to list transient sandboxes", "host_id", id.FormatHostID(host.ID), "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, sb := range transientSandboxes {
|
||||
sbIDStr := id.FormatSandboxID(sb.ID)
|
||||
if _, ok := alive[sbIDStr]; ok {
|
||||
// Sandbox is alive on host — the background goroutine should
|
||||
// finalize the transition. For starting/resuming, if the sandbox
|
||||
// is alive it means creation/resume succeeded.
|
||||
if sb.Status == "starting" || sb.Status == "resuming" {
|
||||
if _, err := m.db.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
|
||||
ID: sb.ID, Status: sb.Status, Status_2: "running",
|
||||
}); err == nil {
|
||||
slog.Info("host monitor: promoted transient sandbox to running", "sandbox_id", sbIDStr, "from", sb.Status)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Sandbox is not alive on host — infer final state.
|
||||
var finalStatus string
|
||||
switch sb.Status {
|
||||
case "starting", "resuming":
|
||||
finalStatus = "error"
|
||||
case "pausing":
|
||||
finalStatus = "paused"
|
||||
case "stopping":
|
||||
finalStatus = "stopped"
|
||||
}
|
||||
if _, err := m.db.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
|
||||
ID: sb.ID, Status: sb.Status, Status_2: finalStatus,
|
||||
}); err == nil {
|
||||
slog.Info("host monitor: resolved transient sandbox", "sandbox_id", sbIDStr, "from", sb.Status, "to", finalStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
openapi: "3.1.0"
|
||||
info:
|
||||
title: Wrenn API
|
||||
description: AI agent execution platform API.
|
||||
version: "0.2.0"
|
||||
description: MicroVM-based code execution platform API.
|
||||
version: "0.1.4"
|
||||
|
||||
servers:
|
||||
- url: http://localhost:8080
|
||||
@ -866,8 +866,8 @@ paths:
|
||||
schema:
|
||||
$ref: "#/components/schemas/CreateCapsuleRequest"
|
||||
responses:
|
||||
"202":
|
||||
description: Capsule creation initiated (status will be "starting")
|
||||
"201":
|
||||
description: Capsule created
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
@ -988,8 +988,8 @@ paths:
|
||||
security:
|
||||
- apiKeyAuth: []
|
||||
responses:
|
||||
"202":
|
||||
description: Capsule destruction initiated
|
||||
"204":
|
||||
description: Capsule destroyed
|
||||
|
||||
/v1/capsules/{id}/exec:
|
||||
parameters:
|
||||
@ -1260,8 +1260,8 @@ paths:
|
||||
destroys all running resources. The capsule exists only as files on
|
||||
disk and can be resumed later.
|
||||
responses:
|
||||
"202":
|
||||
description: Capsule pause initiated (status will be "pausing")
|
||||
"200":
|
||||
description: Capsule paused (snapshot taken, resources released)
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
@ -1292,8 +1292,8 @@ paths:
|
||||
memory loading. Boots a fresh Firecracker process, sets up a new
|
||||
network slot, and waits for envd to become ready.
|
||||
responses:
|
||||
"202":
|
||||
description: Capsule resume initiated (status will be "resuming")
|
||||
"200":
|
||||
description: Capsule resumed (new VM booted from snapshot)
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
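Pause and resume make the same shift from 202 to 200: the response now carries the already-paused or already-resumed capsule rather than a transient pausing/resuming state. Continuing the client sketch from the create example above, with the same assumed base URL and header; imports match that sketch.

```go
// pauseThenResume assumes capID came from the create sketch earlier.
func pauseThenResume(capID string) error {
	for _, action := range []string{"pause", "resume"} {
		req, err := http.NewRequest(http.MethodPost,
			"http://localhost:8080/v1/capsules/"+capID+"/"+action, nil)
		if err != nil {
			return err
		}
		req.Header.Set("X-API-Key", "placeholder-api-key") // header name assumed, as above

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()

		// 200 now means the snapshot (pause) or the fresh boot from
		// snapshot (resume) has already completed; under the old 202
		// contract the status would still be "pausing"/"resuming" here.
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("%s returned %d", action, resp.StatusCode)
		}
	}
	return nil
}
```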
@ -2035,51 +2035,6 @@ paths:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
|
||||
/v1/hosts/sandbox-events:
|
||||
post:
|
||||
summary: Sandbox lifecycle event callback
|
||||
operationId: sandboxEventCallback
|
||||
tags: [hosts]
|
||||
security:
|
||||
- hostTokenAuth: []
|
||||
description: |
|
||||
Receives autonomous lifecycle events from host agents (e.g. auto-pause
|
||||
from the TTL reaper). The event is published to an internal Redis stream
|
||||
for the control plane's event consumer to process.
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required: [event, sandbox_id, host_id]
|
||||
properties:
|
||||
event:
|
||||
type: string
|
||||
enum: [sandbox.auto_paused]
|
||||
sandbox_id:
|
||||
type: string
|
||||
host_id:
|
||||
type: string
|
||||
timestamp:
|
||||
type: integer
|
||||
format: int64
|
||||
responses:
|
||||
"204":
|
||||
description: Event accepted
|
||||
"400":
|
||||
description: Invalid request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
"403":
|
||||
description: Host ID mismatch
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
|
||||
/v1/hosts/auth/refresh:
|
||||
post:
|
||||
summary: Refresh host JWT
|
||||
@ -2637,7 +2592,7 @@ components:
|
||||
type: string
|
||||
status:
|
||||
type: string
|
||||
enum: [pending, starting, running, pausing, paused, resuming, stopping, hibernated, stopped, missing, error]
|
||||
enum: [pending, starting, running, paused, hibernated, stopped, missing, error]
|
||||
template:
|
||||
type: string
|
||||
vcpus:
|
||||
|
||||
@ -1,236 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/redis/go-redis/v9"
|
||||
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/audit"
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/db"
|
||||
"git.omukk.dev/wrenn/wrenn/pkg/id"
|
||||
)
|
||||
|
||||
const (
|
||||
sandboxEventStream = "wrenn:sandbox-events"
|
||||
sandboxEventGroup = "wrenn-sandbox-events-v1"
|
||||
sandboxEventConsumer = "cp-0"
|
||||
)
|
||||
|
||||
// SandboxEvent is the canonical event payload published to the Redis stream
|
||||
// by both the CP background goroutines (for explicit lifecycle ops) and
|
||||
// the agent callback endpoint (for autonomous events like auto-pause).
|
||||
type SandboxEvent struct {
|
||||
Event string `json:"event"`
|
||||
SandboxID string `json:"sandbox_id"`
|
||||
HostID string `json:"host_id"`
|
||||
HostIP string `json:"host_ip,omitempty"`
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// Sandbox event type constants.
|
||||
const (
|
||||
SandboxEventStarted = "sandbox.started"
|
||||
SandboxEventPaused = "sandbox.paused"
|
||||
SandboxEventResumed = "sandbox.resumed"
|
||||
SandboxEventStopped = "sandbox.stopped"
|
||||
SandboxEventFailed = "sandbox.failed"
|
||||
SandboxEventAutoPaused = "sandbox.auto_paused"
|
||||
)
|
||||
|
||||
// SandboxEventConsumer reads sandbox lifecycle events from the Redis stream
|
||||
// and updates database state accordingly. It follows the same XREADGROUP
|
||||
// pattern as pkg/channels/dispatcher.go.
|
||||
type SandboxEventConsumer struct {
|
||||
rdb *redis.Client
|
||||
db *db.Queries
|
||||
audit *audit.AuditLogger
|
||||
}
|
||||
|
||||
// NewSandboxEventConsumer creates a consumer.
|
||||
func NewSandboxEventConsumer(rdb *redis.Client, queries *db.Queries, al *audit.AuditLogger) *SandboxEventConsumer {
|
||||
return &SandboxEventConsumer{rdb: rdb, db: queries, audit: al}
|
||||
}
|
||||
|
||||
// Start launches the consumer goroutine.
|
||||
func (c *SandboxEventConsumer) Start(ctx context.Context) {
|
||||
go c.run(ctx)
|
||||
}
|
||||
|
||||
func (c *SandboxEventConsumer) run(ctx context.Context) {
|
||||
err := c.rdb.XGroupCreateMkStream(ctx, sandboxEventStream, sandboxEventGroup, "$").Err()
|
||||
if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
|
||||
slog.Error("sandbox event consumer: failed to create consumer group", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
streams, err := c.rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
|
||||
Group: sandboxEventGroup,
|
||||
Consumer: sandboxEventConsumer,
|
||||
Streams: []string{sandboxEventStream, ">"},
|
||||
Count: 10,
|
||||
Block: 5 * time.Second,
|
||||
}).Result()
|
||||
|
||||
if err != nil {
|
||||
if err == redis.Nil || ctx.Err() != nil {
|
||||
continue
|
||||
}
|
||||
slog.Warn("sandbox event consumer: xreadgroup error", "error", err)
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, stream := range streams {
|
||||
for _, msg := range stream.Messages {
|
||||
c.handleMessage(ctx, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *SandboxEventConsumer) handleMessage(ctx context.Context, msg redis.XMessage) {
|
||||
// Use a non-cancellable context for XAck so shutdown doesn't leave
|
||||
// messages permanently stuck in the pending entries list.
|
||||
defer func() {
|
||||
ackCtx, ackCancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer ackCancel()
|
||||
if err := c.rdb.XAck(ackCtx, sandboxEventStream, sandboxEventGroup, msg.ID).Err(); err != nil {
|
||||
slog.Warn("sandbox event consumer: xack failed", "id", msg.ID, "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
payload, ok := msg.Values["payload"].(string)
|
||||
if !ok {
|
||||
slog.Warn("sandbox event consumer: message missing payload", "id", msg.ID)
|
||||
return
|
||||
}
|
||||
|
||||
var event SandboxEvent
|
||||
if err := json.Unmarshal([]byte(payload), &event); err != nil {
|
||||
slog.Warn("sandbox event consumer: failed to unmarshal event", "id", msg.ID, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
sandboxID, err := id.ParseSandboxID(event.SandboxID)
|
||||
if err != nil {
|
||||
slog.Warn("sandbox event consumer: invalid sandbox ID", "sandbox_id", event.SandboxID, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch event.Event {
|
||||
case SandboxEventStarted:
|
||||
c.handleStarted(ctx, sandboxID, event, "starting")
|
||||
case SandboxEventResumed:
|
||||
c.handleStarted(ctx, sandboxID, event, "resuming")
|
||||
case SandboxEventPaused:
|
||||
c.handlePaused(ctx, sandboxID, event)
|
||||
case SandboxEventStopped:
|
||||
c.handleStopped(ctx, sandboxID, event)
|
||||
case SandboxEventFailed:
|
||||
c.handleFailed(ctx, sandboxID)
|
||||
case SandboxEventAutoPaused:
|
||||
c.handleAutoPaused(ctx, sandboxID, event)
|
||||
default:
|
||||
slog.Warn("sandbox event consumer: unknown event type", "event", event.Event)
|
||||
}
|
||||
}
|
||||
|
||||
// handleStarted is a fallback writer for sandbox.started and sandbox.resumed
|
||||
// events. The background goroutine in SandboxService is the primary writer;
|
||||
// this only succeeds if the goroutine's conditional update was missed.
|
||||
func (c *SandboxEventConsumer) handleStarted(ctx context.Context, sandboxID pgtype.UUID, event SandboxEvent, fromStatus string) {
|
||||
now := time.Now()
|
||||
if _, err := c.db.UpdateSandboxRunningIf(ctx, db.UpdateSandboxRunningIfParams{
|
||||
ID: sandboxID,
|
||||
Status: fromStatus,
|
||||
HostIp: event.HostIP,
|
||||
StartedAt: pgtype.Timestamptz{
|
||||
Time: now,
|
||||
Valid: true,
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(event.Metadata) > 0 {
|
||||
metaJSON, _ := json.Marshal(event.Metadata)
|
||||
_ = c.db.UpdateSandboxMetadata(ctx, db.UpdateSandboxMetadataParams{
|
||||
ID: sandboxID,
|
||||
Metadata: metaJSON,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *SandboxEventConsumer) handlePaused(ctx context.Context, sandboxID pgtype.UUID, event SandboxEvent) {
|
||||
if _, err := c.db.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
|
||||
ID: sandboxID,
|
||||
Status: "pausing",
|
||||
Status_2: "paused",
|
||||
}); err != nil && !errors.Is(err, pgx.ErrNoRows) {
|
||||
slog.Warn("sandbox event consumer: failed to update sandbox to paused", "sandbox_id", event.SandboxID, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *SandboxEventConsumer) handleStopped(ctx context.Context, sandboxID pgtype.UUID, event SandboxEvent) {
|
||||
if _, err := c.db.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
|
||||
ID: sandboxID,
|
||||
Status: "stopping",
|
||||
Status_2: "stopped",
|
||||
}); err != nil && !errors.Is(err, pgx.ErrNoRows) {
|
||||
slog.Warn("sandbox event consumer: failed to update sandbox to stopped", "sandbox_id", event.SandboxID, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// handleFailed is a no-op fallback — the background goroutine already
|
||||
// performed the conditional DB update before publishing this event.
|
||||
// We keep the case arm so unknown event types are flagged, but avoid
|
||||
// an unconditional status write that could clobber concurrent operations.
|
||||
func (c *SandboxEventConsumer) handleFailed(_ context.Context, _ pgtype.UUID) {}
|
||||
|
||||
func (c *SandboxEventConsumer) handleAutoPaused(ctx context.Context, sandboxID pgtype.UUID, _ SandboxEvent) {
|
||||
sb, err := c.db.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
|
||||
ID: sandboxID,
|
||||
Status: "running",
|
||||
Status_2: "paused",
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
c.audit.LogSandboxAutoPause(ctx, sb.TeamID, sandboxID)
|
||||
}
|
||||
|
||||
// PublishSandboxEvent writes a sandbox lifecycle event to the Redis stream.
|
||||
// Used by both the SandboxService background goroutines and the callback endpoint.
|
||||
func PublishSandboxEvent(ctx context.Context, rdb *redis.Client, event SandboxEvent) {
|
||||
payload, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
slog.Warn("sandbox event: failed to marshal", "event", event.Event, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := rdb.XAdd(ctx, &redis.XAddArgs{
|
||||
Stream: sandboxEventStream,
|
||||
MaxLen: 50000,
|
||||
Approx: true,
|
||||
Values: map[string]any{
|
||||
"payload": string(payload),
|
||||
},
|
||||
}).Err(); err != nil {
|
||||
slog.Warn("sandbox event: failed to publish", "event", event.Event, "error", err)
|
||||
}
|
||||
}
|
||||
@ -1,7 +1,6 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@ -64,17 +63,6 @@ func New(
|
||||
|
||||
// Shared service layer.
|
||||
sandboxSvc := &service.SandboxService{DB: queries, Pool: pool, Scheduler: sched}
|
||||
sandboxSvc.PublishEvent = func(ctx context.Context, event service.SandboxStateEvent) {
|
||||
PublishSandboxEvent(ctx, rdb, SandboxEvent{
|
||||
Event: event.Event,
|
||||
SandboxID: event.SandboxID,
|
||||
HostID: event.HostID,
|
||||
HostIP: event.HostIP,
|
||||
Metadata: event.Metadata,
|
||||
Error: event.Error,
|
||||
Timestamp: event.Timestamp,
|
||||
})
|
||||
}
|
||||
apiKeySvc := &service.APIKeyService{DB: queries}
|
||||
templateSvc := &service.TemplateService{DB: queries}
|
||||
hostSvc := &service.HostService{DB: queries, Redis: rdb, JWT: jwtSecret, Pool: pool, CA: ca}
|
||||
@ -107,7 +95,6 @@ func New(
|
||||
ptyH := newPtyHandler(queries, pool, jwtSecret)
|
||||
processH := newProcessHandler(queries, pool, jwtSecret)
|
||||
adminCapsules := newAdminCapsuleHandler(sandboxSvc, queries, pool, al)
|
||||
sandboxEvtH := newSandboxEventHandler(queries, rdb)
|
||||
meH := newMeHandler(queries, pgPool, rdb, jwtSecret, mailer, oauthRegistry, oauthRedirectURL, teamSvc)
|
||||
|
||||
// Health check.
|
||||
@ -234,9 +221,8 @@ func New(
|
||||
// Unauthenticated: refresh token exchange.
|
||||
r.Post("/auth/refresh", hostH.RefreshToken)
|
||||
|
||||
// Host-token-authenticated: heartbeat and lifecycle callbacks.
|
||||
// Host-token-authenticated: heartbeat.
|
||||
r.With(requireHostToken(jwtSecret)).Post("/{id}/heartbeat", hostH.Heartbeat)
|
||||
r.With(requireHostToken(jwtSecret)).Post("/sandbox-events", sandboxEvtH.Handle)
|
||||
|
||||
// JWT-authenticated: host CRUD and tags.
|
||||
r.Group(func(r chi.Router) {
|
||||
|
||||
@ -80,20 +80,14 @@ type ExecResult struct {
|
||||
|
||||
// Exec runs a command inside the sandbox and collects all stdout/stderr output.
|
||||
// It blocks until the command completes.
|
||||
func (c *Client) Exec(ctx context.Context, cmd string, args []string, envs map[string]string, cwd string) (*ExecResult, error) {
|
||||
func (c *Client) Exec(ctx context.Context, cmd string, args ...string) (*ExecResult, error) {
|
||||
stdin := false
|
||||
cfg := &envdpb.ProcessConfig{
|
||||
Cmd: cmd,
|
||||
Args: args,
|
||||
Envs: envs,
|
||||
}
|
||||
if cwd != "" {
|
||||
cfg.Cwd = &cwd
|
||||
}
|
||||
|
||||
req := connect.NewRequest(&envdpb.StartRequest{
|
||||
Process: cfg,
|
||||
Stdin: &stdin,
|
||||
Process: &envdpb.ProcessConfig{
|
||||
Cmd: cmd,
|
||||
Args: args,
|
||||
},
|
||||
Stdin: &stdin,
|
||||
})
|
||||
|
||||
stream, err := c.process.Start(ctx, req)
|
||||
@ -156,20 +150,14 @@ type ExecStreamEvent struct {
|
||||
|
||||
// ExecStream runs a command inside the sandbox and returns a channel of output events.
|
||||
// The channel is closed when the process ends or the context is cancelled.
|
||||
func (c *Client) ExecStream(ctx context.Context, cmd string, args []string, envs map[string]string, cwd string) (<-chan ExecStreamEvent, error) {
|
||||
func (c *Client) ExecStream(ctx context.Context, cmd string, args ...string) (<-chan ExecStreamEvent, error) {
|
||||
stdin := false
|
||||
cfg := &envdpb.ProcessConfig{
|
||||
Cmd: cmd,
|
||||
Args: args,
|
||||
Envs: envs,
|
||||
}
|
||||
if cwd != "" {
|
||||
cfg.Cwd = &cwd
|
||||
}
|
||||
|
||||
req := connect.NewRequest(&envdpb.StartRequest{
|
||||
Process: cfg,
|
||||
Stdin: &stdin,
|
||||
Process: &envdpb.ProcessConfig{
|
||||
Cmd: cmd,
|
||||
Args: args,
|
||||
},
|
||||
Stdin: &stdin,
|
||||
})
|
||||
|
||||
stream, err := c.process.Start(ctx, req)
|
||||
|
||||
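Call-site shape after the signature change: args are variadic and there is no envs/cwd plumbing anymore. A sketch written as if it lived alongside the client in the same package; the import path for the client package is not visible in this diff, so it is omitted rather than guessed, and the result/event fields are only printed generically.

```go
// runInSandbox shows the new call sites. ctx is any context.Context and c is
// the *Client shown above.
func runInSandbox(ctx context.Context, c *Client) error {
	// Blocking form: collects stdout/stderr and waits for the command to exit.
	res, err := c.Exec(ctx, "ls", "-la", "/root")
	if err != nil {
		return err
	}
	fmt.Printf("exec finished: %+v\n", res)

	// Streaming form: the channel closes when the process ends or ctx is
	// cancelled, so ranging over it is enough.
	events, err := c.ExecStream(ctx, "python3", "-u", "train.py")
	if err != nil {
		return err
	}
	for ev := range events {
		fmt.Printf("event: %+v\n", ev)
	}
	return nil
}
```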
@ -1,129 +0,0 @@
|
||||
package hostagent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CallbackEvent is the payload sent to the CP's sandbox event callback endpoint.
|
||||
type CallbackEvent struct {
|
||||
Event string `json:"event"`
|
||||
SandboxID string `json:"sandbox_id"`
|
||||
HostID string `json:"host_id"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
// CallbackSender sends sandbox lifecycle events to the CP via HTTP POST.
|
||||
// Used for autonomous agent-side events (auto-pause, auto-destroy) that
|
||||
// the CP cannot observe through its own RPC goroutines.
|
||||
type CallbackSender struct {
|
||||
cpURL string
|
||||
hostID string
|
||||
credFile string
|
||||
client *http.Client
|
||||
mu sync.RWMutex
|
||||
jwt string
|
||||
}
|
||||
|
||||
// NewCallbackSender creates a callback sender.
|
||||
func NewCallbackSender(cpURL, credFile, hostID string) *CallbackSender {
|
||||
jwt := ""
|
||||
if tf, err := LoadTokenFile(credFile); err == nil {
|
||||
jwt = tf.JWT
|
||||
}
|
||||
return &CallbackSender{
|
||||
cpURL: strings.TrimRight(cpURL, "/"),
|
||||
hostID: hostID,
|
||||
credFile: credFile,
|
||||
client: &http.Client{Timeout: 10 * time.Second},
|
||||
jwt: jwt,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateJWT refreshes the JWT used for callback authentication.
|
||||
// Called from the heartbeat's onCredsRefreshed hook.
|
||||
func (s *CallbackSender) UpdateJWT(jwt string) {
|
||||
s.mu.Lock()
|
||||
s.jwt = jwt
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *CallbackSender) getJWT() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.jwt
|
||||
}
|
||||
|
||||
// Send sends a callback event to the CP synchronously with retries.
|
||||
func (s *CallbackSender) Send(ctx context.Context, ev CallbackEvent) error {
|
||||
ev.HostID = s.hostID
|
||||
if ev.Timestamp == 0 {
|
||||
ev.Timestamp = time.Now().Unix()
|
||||
}
|
||||
|
||||
body, err := json.Marshal(ev)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal callback event: %w", err)
|
||||
}
|
||||
|
||||
url := s.cpURL + "/v1/hosts/sandbox-events"
|
||||
|
||||
var lastErr error
|
||||
for attempt := 0; attempt < 3; attempt++ {
|
||||
if attempt > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(time.Duration(attempt) * 500 * time.Millisecond):
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create callback request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-Host-Token", s.getJWT())
|
||||
|
||||
resp, err := s.client.Do(req)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden {
|
||||
if newCreds, refreshErr := RefreshCredentials(ctx, s.cpURL, s.credFile); refreshErr == nil {
|
||||
s.UpdateJWT(newCreds.JWT)
|
||||
}
|
||||
lastErr = fmt.Errorf("callback auth failed: %d", resp.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
return nil
|
||||
}
|
||||
|
||||
lastErr = fmt.Errorf("callback failed: status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return fmt.Errorf("callback failed after 3 attempts: %w", lastErr)
|
||||
}
|
||||
|
||||
// SendAsync sends a callback event in a background goroutine.
|
||||
func (s *CallbackSender) SendAsync(ev CallbackEvent) {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
if err := s.Send(ctx, ev); err != nil {
|
||||
slog.Warn("callback send failed (reconciler will catch it)", "event", ev.Event, "sandbox_id", ev.SandboxID, "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -1,22 +0,0 @@
|
||||
package hostagent
|
||||
|
||||
import (
|
||||
"git.omukk.dev/wrenn/wrenn/internal/sandbox"
|
||||
)
|
||||
|
||||
// callbackAdapter adapts CallbackSender to satisfy sandbox.EventSender.
|
||||
type callbackAdapter struct {
|
||||
sender *CallbackSender
|
||||
}
|
||||
|
||||
// NewEventSender wraps a CallbackSender as a sandbox.EventSender.
|
||||
func NewEventSender(sender *CallbackSender) sandbox.EventSender {
|
||||
return &callbackAdapter{sender: sender}
|
||||
}
|
||||
|
||||
func (a *callbackAdapter) SendAsync(event sandbox.LifecycleEvent) {
|
||||
a.sender.SendAsync(CallbackEvent{
|
||||
Event: event.Event,
|
||||
SandboxID: event.SandboxID,
|
||||
})
|
||||
}
|
||||
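How the pieces deleted above fit together before this removal, reconstructed from the code in this diff: the host agent builds a CallbackSender, wraps it in the adapter so it satisfies sandbox.EventSender, and registers it on the Manager; the TTL reaper then emits the auto-pause event through it. Constructor arguments below are placeholders, and hostID is assumed to be in scope.

```go
// Wiring sketch for the removed callback path (host agent side).
// NewCallbackSender, NewEventSender and Manager.SetEventSender are the
// functions shown above; the URL and credentials path are placeholders.
sender := hostagent.NewCallbackSender("https://cp.example.internal", "/var/lib/wrenn/host-creds.json", hostID)
mgr.SetEventSender(hostagent.NewEventSender(sender))

// With that in place, the TTL reaper (reapExpired, later in this diff) sent:
//
//	m.eventSender.SendAsync(sandbox.LifecycleEvent{
//	    Event:     "sandbox.auto_paused",
//	    SandboxID: id,
//	})
//
// and the control plane republished it onto the wrenn:sandbox-events Redis
// stream via the /v1/hosts/sandbox-events handler, also deleted in this change.
```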
@ -215,7 +215,7 @@ func (s *Server) Exec(
|
||||
execCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.mgr.Exec(execCtx, msg.SandboxId, msg.Cmd, msg.Args, msg.Envs, msg.Cwd)
|
||||
result, err := s.mgr.Exec(execCtx, msg.SandboxId, msg.Cmd, msg.Args...)
|
||||
if err != nil {
|
||||
return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("exec: %w", err))
|
||||
}
|
||||
@ -342,7 +342,7 @@ func (s *Server) ExecStream(
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
events, err := s.mgr.ExecStream(execCtx, msg.SandboxId, msg.Cmd, msg.Args, msg.Envs, msg.Cwd)
|
||||
events, err := s.mgr.ExecStream(execCtx, msg.SandboxId, msg.Cmd, msg.Args...)
|
||||
if err != nil {
|
||||
return connect.NewError(connect.CodeInternal, fmt.Errorf("exec stream: %w", err))
|
||||
}
|
||||
|
||||
@ -41,17 +41,6 @@ type Config struct {
|
||||
AgentVersion string // host agent version (injected via ldflags)
|
||||
}
|
||||
|
||||
// LifecycleEvent describes an autonomous state change initiated by the agent.
|
||||
type LifecycleEvent struct {
|
||||
Event string
|
||||
SandboxID string
|
||||
}
|
||||
|
||||
// EventSender sends autonomous lifecycle events to the control plane.
|
||||
type EventSender interface {
|
||||
SendAsync(event LifecycleEvent)
|
||||
}
|
||||
|
||||
// Manager orchestrates sandbox lifecycle: VM, network, filesystem, envd.
|
||||
type Manager struct {
|
||||
cfg Config
|
||||
@ -68,11 +57,6 @@ type Manager struct {
|
||||
// onDestroy is called with the sandbox ID after cleanup completes.
|
||||
// Used by ProxyHandler to evict cached reverse proxies.
|
||||
onDestroy func(sandboxID string)
|
||||
|
||||
// eventSender sends autonomous lifecycle events (auto-pause, auto-destroy)
|
||||
// to the CP via HTTP callback. Optional — nil means events are only
|
||||
// propagated through the HostMonitor reconciler.
|
||||
eventSender EventSender
|
||||
}
|
||||
|
||||
// SetOnDestroy registers a callback invoked after each sandbox is cleaned up.
|
||||
@ -80,11 +64,6 @@ func (m *Manager) SetOnDestroy(fn func(sandboxID string)) {
|
||||
m.onDestroy = fn
|
||||
}
|
||||
|
||||
// SetEventSender registers the callback sender for autonomous lifecycle events.
|
||||
func (m *Manager) SetEventSender(sender EventSender) {
|
||||
m.eventSender = sender
|
||||
}
|
||||
|
||||
// sandboxState holds the runtime state for a single sandbox.
|
||||
type sandboxState struct {
|
||||
models.Sandbox
|
||||
@ -1071,7 +1050,7 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t
|
||||
func() {
|
||||
syncCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
if _, err := sb.client.Exec(syncCtx, "/bin/sync", nil, nil, ""); err != nil {
|
||||
if _, err := sb.client.Exec(syncCtx, "/bin/sync"); err != nil {
|
||||
slog.Warn("flatten: guest sync failed (non-fatal)", "id", sb.ID, "error", err)
|
||||
}
|
||||
}()
|
||||
@ -1373,7 +1352,7 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team
|
||||
}
|
||||
|
||||
// Exec runs a command inside a sandbox.
|
||||
func (m *Manager) Exec(ctx context.Context, sandboxID string, cmd string, args []string, envs map[string]string, cwd string) (*envdclient.ExecResult, error) {
|
||||
func (m *Manager) Exec(ctx context.Context, sandboxID string, cmd string, args ...string) (*envdclient.ExecResult, error) {
|
||||
sb, err := m.get(sandboxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1387,11 +1366,11 @@ func (m *Manager) Exec(ctx context.Context, sandboxID string, cmd string, args [
|
||||
sb.LastActiveAt = time.Now()
|
||||
m.mu.Unlock()
|
||||
|
||||
return sb.client.Exec(ctx, cmd, args, envs, cwd)
|
||||
return sb.client.Exec(ctx, cmd, args...)
|
||||
}
|
||||
|
||||
// ExecStream runs a command inside a sandbox and returns a channel of streaming events.
|
||||
func (m *Manager) ExecStream(ctx context.Context, sandboxID string, cmd string, args []string, envs map[string]string, cwd string) (<-chan envdclient.ExecStreamEvent, error) {
|
||||
func (m *Manager) ExecStream(ctx context.Context, sandboxID string, cmd string, args ...string) (<-chan envdclient.ExecStreamEvent, error) {
|
||||
sb, err := m.get(sandboxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1405,7 +1384,7 @@ func (m *Manager) ExecStream(ctx context.Context, sandboxID string, cmd string,
|
||||
sb.LastActiveAt = time.Now()
|
||||
m.mu.Unlock()
|
||||
|
||||
return sb.client.ExecStream(ctx, cmd, args, envs, cwd)
|
||||
return sb.client.ExecStream(ctx, cmd, args...)
|
||||
}
|
||||
|
||||
// List returns all sandboxes.

@@ -1702,13 +1681,6 @@ func (m *Manager) reapExpired(_ context.Context) {
            m.autoPausedMu.Lock()
            m.autoPausedIDs = append(m.autoPausedIDs, id)
            m.autoPausedMu.Unlock()

            if m.eventSender != nil {
                m.eventSender.SendAsync(LifecycleEvent{
                    Event:     "sandbox.auto_paused",
                    SandboxID: id,
                })
            }
        }
    }

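For context on how the eventSender hook used in reapExpired above might be satisfied, here is a hedged sketch of an HTTP-callback implementation. Only the SendAsync method and the Event and SandboxID fields appear in the diff; the callback URL and the rest of the payload are assumptions for illustration:

// A minimal sketch, not code from the diff.
package main

import (
    "bytes"
    "encoding/json"
    "log/slog"
    "net/http"
    "time"
)

type LifecycleEvent struct {
    Event     string `json:"event"`
    SandboxID string `json:"sandbox_id"`
}

type httpEventSender struct {
    url    string // hypothetical CP callback endpoint
    client *http.Client
}

// SendAsync fires the callback in a goroutine so the reaper never blocks on it.
func (s *httpEventSender) SendAsync(event LifecycleEvent) {
    go func() {
        body, err := json.Marshal(event)
        if err != nil {
            slog.Warn("marshal lifecycle event", "error", err)
            return
        }
        resp, err := s.client.Post(s.url, "application/json", bytes.NewReader(body))
        if err != nil {
            slog.Warn("send lifecycle event", "event", event.Event, "error", err)
            return
        }
        resp.Body.Close()
    }()
}

func main() {
    sender := &httpEventSender{
        url:    "http://cp.internal/v1/sandbox-events", // assumed URL
        client: &http.Client{Timeout: 5 * time.Second},
    }
    sender.SendAsync(LifecycleEvent{Event: "sandbox.auto_paused", SandboxID: "sbx_123"})
    time.Sleep(100 * time.Millisecond) // let the goroutine run in this toy example
}
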
@@ -143,8 +143,8 @@ func (c *fcClient) setMMDS(ctx context.Context, sandboxID, templateID string) er
// Must be called before startVM.
func (c *fcClient) setBalloon(ctx context.Context, amountMiB int, deflateOnOom bool, statsIntervalS int) error {
    return c.do(ctx, http.MethodPut, "/balloon", map[string]any{
        "amount_mib":     amountMiB,
        "deflate_on_oom": deflateOnOom,
        "amount_mib":               amountMiB,
        "deflate_on_oom":           deflateOnOom,
        "stats_polling_interval_s": statsIntervalS,
    })
}

@@ -187,13 +187,8 @@ func Run(opts ...Option) {
    // Start channel event dispatcher.
    channelDispatcher.Start(ctx)

    // Start sandbox event consumer (processes lifecycle events from Redis stream).
    sandboxEventConsumer := api.NewSandboxEventConsumer(rdb, queries, al)
    sandboxEventConsumer.Start(ctx)

    // Start host monitor (passive + active reconciliation every 60s).
    // Reduced from 15s since async events handle the normal case.
    monitor := api.NewHostMonitor(queries, hostPool, al, 60*time.Second)
    // Start host monitor (passive + active reconciliation every 30s).
    monitor := api.NewHostMonitor(queries, hostPool, al, 15*time.Second)
    monitor.Start(ctx)

    // Hard-delete accounts that have been soft-deleted for more than 15 days (runs every 24h).

@@ -375,7 +375,7 @@ const markSandboxesMissingByHost = `-- name: MarkSandboxesMissingByHost :exec
UPDATE sandboxes
SET status = 'missing',
    last_updated = NOW()
WHERE host_id = $1 AND status IN ('running', 'starting', 'pending', 'pausing', 'resuming', 'stopping')
WHERE host_id = $1 AND status IN ('running', 'starting', 'pending')
`

// Called when the host monitor marks a host unreachable.

@@ -470,61 +470,6 @@ func (q *Queries) UpdateSandboxRunning(ctx context.Context, arg UpdateSandboxRun
    return i, err
}

const updateSandboxRunningIf = `-- name: UpdateSandboxRunningIf :one
UPDATE sandboxes
SET status = 'running',
    host_ip = $3,
    guest_ip = $4,
    started_at = $5,
    last_active_at = $5,
    last_updated = NOW()
WHERE id = $1 AND status = $2
RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata
`

type UpdateSandboxRunningIfParams struct {
    ID        pgtype.UUID        `json:"id"`
    Status    string             `json:"status"`
    HostIp    string             `json:"host_ip"`
    GuestIp   string             `json:"guest_ip"`
    StartedAt pgtype.Timestamptz `json:"started_at"`
}

// Conditionally transition a sandbox to running only if the current status
// matches the expected value. Prevents races where a user destroys a sandbox
// while the create/resume goroutine is still in-flight.
func (q *Queries) UpdateSandboxRunningIf(ctx context.Context, arg UpdateSandboxRunningIfParams) (Sandbox, error) {
    row := q.db.QueryRow(ctx, updateSandboxRunningIf,
        arg.ID,
        arg.Status,
        arg.HostIp,
        arg.GuestIp,
        arg.StartedAt,
    )
    var i Sandbox
    err := row.Scan(
        &i.ID,
        &i.TeamID,
        &i.HostID,
        &i.Template,
        &i.Status,
        &i.Vcpus,
        &i.MemoryMb,
        &i.TimeoutSec,
        &i.DiskSizeMb,
        &i.GuestIp,
        &i.HostIp,
        &i.CreatedAt,
        &i.StartedAt,
        &i.LastActiveAt,
        &i.LastUpdated,
        &i.TemplateID,
        &i.TemplateTeamID,
        &i.Metadata,
    )
    return i, err
}

const updateSandboxStatus = `-- name: UpdateSandboxStatus :one
UPDATE sandboxes
SET status = $2,

@@ -563,46 +508,3 @@ func (q *Queries) UpdateSandboxStatus(ctx context.Context, arg UpdateSandboxStat
    )
    return i, err
}

const updateSandboxStatusIf = `-- name: UpdateSandboxStatusIf :one
UPDATE sandboxes
SET status = $3,
    last_updated = NOW()
WHERE id = $1 AND status = $2
RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata
`

type UpdateSandboxStatusIfParams struct {
    ID       pgtype.UUID `json:"id"`
    Status   string      `json:"status"`
    Status_2 string      `json:"status_2"`
}

// Atomically update status only when the current status matches the expected value.
// Prevents background goroutines from overwriting a status that has since changed
// (e.g. user destroyed a sandbox while Create was in-flight).
func (q *Queries) UpdateSandboxStatusIf(ctx context.Context, arg UpdateSandboxStatusIfParams) (Sandbox, error) {
    row := q.db.QueryRow(ctx, updateSandboxStatusIf, arg.ID, arg.Status, arg.Status_2)
    var i Sandbox
    err := row.Scan(
        &i.ID,
        &i.TeamID,
        &i.HostID,
        &i.Template,
        &i.Status,
        &i.Vcpus,
        &i.MemoryMb,
        &i.TimeoutSec,
        &i.DiskSizeMb,
        &i.GuestIp,
        &i.HostIp,
        &i.CreatedAt,
        &i.StartedAt,
        &i.LastActiveAt,
        &i.LastUpdated,
        &i.TemplateID,
        &i.TemplateTeamID,
        &i.Metadata,
    )
    return i, err
}

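The comment on UpdateSandboxStatusIf describes a compare-and-set: the UPDATE only applies while the row is still in the state the caller expects, so a stale background goroutine cannot clobber a newer status. A toy in-memory illustration of the same idea (not the generated sqlc code):

// Illustration only; the real version is the SQL query above.
package main

import (
    "fmt"
    "sync"
)

type store struct {
    mu       sync.Mutex
    statuses map[string]string
}

// updateStatusIf mirrors "UPDATE ... SET status = $3 WHERE id = $1 AND status = $2":
// it reports false when the row was not in the expected state.
func (s *store) updateStatusIf(id, expected, next string) bool {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.statuses[id] != expected {
        return false
    }
    s.statuses[id] = next
    return true
}

func main() {
    s := &store{statuses: map[string]string{"sbx_123": "starting"}}

    // A user destroys the sandbox while the create goroutine is still in flight.
    s.updateStatusIf("sbx_123", "starting", "stopping")

    // The create goroutine's "starting" -> "running" transition now loses the race.
    if !s.updateStatusIf("sbx_123", "starting", "running") {
        fmt.Println("status changed concurrently; leaving it alone")
    }
}
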
@@ -18,27 +18,12 @@ import (
    pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen"
)

// SandboxEventPublisher writes sandbox lifecycle events to the Redis stream.
type SandboxEventPublisher func(ctx context.Context, event SandboxStateEvent)

// SandboxStateEvent is the event payload published to the Redis stream.
type SandboxStateEvent struct {
    Event     string            `json:"event"`
    SandboxID string            `json:"sandbox_id"`
    HostID    string            `json:"host_id"`
    HostIP    string            `json:"host_ip,omitempty"`
    Metadata  map[string]string `json:"metadata,omitempty"`
    Error     string            `json:"error,omitempty"`
    Timestamp int64             `json:"timestamp"`
}

// SandboxService provides sandbox lifecycle operations shared between the
// REST API and the dashboard.
type SandboxService struct {
    DB           *db.Queries
    Pool         *lifecycle.HostClientPool
    Scheduler    scheduler.HostScheduler
    PublishEvent SandboxEventPublisher
    DB        *db.Queries
    Pool      *lifecycle.HostClientPool
    Scheduler scheduler.HostScheduler
}

// SandboxCreateParams holds the parameters for creating a sandbox.

@@ -68,12 +53,6 @@ func (s *SandboxService) agentForSandbox(ctx context.Context, sandboxID pgtype.U
    return agent, sb, nil
}

func (s *SandboxService) publishEvent(ctx context.Context, event SandboxStateEvent) {
    if s.PublishEvent != nil {
        s.PublishEvent(ctx, event)
    }
}

// hostagentClient is a local alias to avoid the full package path in signatures.
type hostagentClient = interface {
    CreateSandbox(ctx context.Context, req *connect.Request[pb.CreateSandboxRequest]) (*connect.Response[pb.CreateSandboxResponse], error)

@@ -85,10 +64,8 @@ type hostagentClient = interface {
    FlushSandboxMetrics(ctx context.Context, req *connect.Request[pb.FlushSandboxMetricsRequest]) (*connect.Response[pb.FlushSandboxMetricsResponse], error)
}

// Create creates a new sandbox asynchronously: picks a host, inserts a
// "starting" DB record, fires the agent RPC in a background goroutine, and
// returns the sandbox immediately. The background goroutine publishes a
// sandbox event to the Redis stream when the operation completes.
// Create creates a new sandbox: picks a host via the scheduler, inserts a pending
// DB record, calls the host agent, and updates the record to running.
func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db.Sandbox, error) {
    if p.Template == "" {
        p.Template = "minimal"

@@ -119,9 +96,11 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db.
        templateTeamID = tmpl.TeamID
        templateID = tmpl.ID
        templateDefaultUser = tmpl.DefaultUser
        // Parse default_env JSONB into a map.
        if len(tmpl.DefaultEnv) > 0 {
            _ = json.Unmarshal(tmpl.DefaultEnv, &templateDefaultEnv)
        }
        // If the template is a snapshot, use its baked-in vcpus/memory.
        if tmpl.Type == "snapshot" {
            p.VCPUs = tmpl.Vcpus
            p.MemoryMB = tmpl.MemoryMb

@@ -132,11 +111,13 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db.
        return db.Sandbox{}, fmt.Errorf("invalid request: team_id is required")
    }

    // Determine whether this team uses BYOC hosts or platform hosts.
    team, err := s.DB.GetTeam(ctx, p.TeamID)
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("team not found: %w", err)
    }

    // Pick a host for this sandbox.
    host, err := s.Scheduler.SelectHost(ctx, p.TeamID, team.IsByoc, p.MemoryMB, p.DiskSizeMB)
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("select host: %w", err)

@@ -149,14 +130,13 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db.

    sandboxID := id.NewSandboxID()
    sandboxIDStr := id.FormatSandboxID(sandboxID)
    hostIDStr := id.FormatHostID(host.ID)

    sb, err := s.DB.InsertSandbox(ctx, db.InsertSandboxParams{
    if _, err := s.DB.InsertSandbox(ctx, db.InsertSandboxParams{
        ID:         sandboxID,
        TeamID:     p.TeamID,
        HostID:     host.ID,
        Template:   p.Template,
        Status:     "starting",
        Status:     "pending",
        Vcpus:      p.VCPUs,
        MemoryMb:   p.MemoryMB,
        TimeoutSec: p.TimeoutSec,

@@ -164,26 +144,11 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db.
        TemplateID:     templateID,
        TemplateTeamID: templateTeamID,
        Metadata:       []byte("{}"),
    })
    if err != nil {
    }); err != nil {
        return db.Sandbox{}, fmt.Errorf("insert sandbox: %w", err)
    }

    go s.createInBackground(sandboxID, sandboxIDStr, hostIDStr, agent, p, templateTeamID, templateID, templateDefaultUser, templateDefaultEnv)

    return sb, nil
}

func (s *SandboxService) createInBackground(
    sandboxID pgtype.UUID, sandboxIDStr, hostIDStr string,
    agent hostagentClient, p SandboxCreateParams,
    templateTeamID, templateID pgtype.UUID,
    defaultUser string, defaultEnv map[string]string,
) {
    bgCtx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()

    resp, err := agent.CreateSandbox(bgCtx, connect.NewRequest(&pb.CreateSandboxRequest{
    resp, err := agent.CreateSandbox(ctx, connect.NewRequest(&pb.CreateSandboxRequest{
        SandboxId: sandboxIDStr,
        Template:  p.Template,
        TeamId:    id.UUIDString(templateTeamID),

@@ -192,52 +157,45 @@ func (s *SandboxService) createInBackground(
        MemoryMb:    p.MemoryMB,
        TimeoutSec:  p.TimeoutSec,
        DiskSizeMb:  p.DiskSizeMB,
        DefaultUser: defaultUser,
        DefaultEnv:  defaultEnv,
        DefaultUser: templateDefaultUser,
        DefaultEnv:  templateDefaultEnv,
    }))
    if err != nil {
        slog.Warn("background create failed", "sandbox_id", sandboxIDStr, "error", err)
        errCtx, errCancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer errCancel()
        if _, dbErr := s.DB.UpdateSandboxStatusIf(errCtx, db.UpdateSandboxStatusIfParams{
            ID: sandboxID, Status: "starting", Status_2: "error",
        if _, dbErr := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{
            ID: sandboxID, Status: "error",
        }); dbErr != nil {
            slog.Warn("failed to update sandbox to error after create failure", "id", sandboxIDStr, "error", dbErr)
            slog.Warn("failed to update sandbox status to error", "id", sandboxIDStr, "error", dbErr)
        }
        s.publishEvent(errCtx, SandboxStateEvent{
            Event: "sandbox.failed", SandboxID: sandboxIDStr, HostID: hostIDStr,
            Error: err.Error(), Timestamp: time.Now().Unix(),
        })
        return
        return db.Sandbox{}, fmt.Errorf("agent create: %w", err)
    }

    now := time.Now()
    if _, dbErr := s.DB.UpdateSandboxRunningIf(bgCtx, db.UpdateSandboxRunningIfParams{
        ID:     sandboxID,
        Status: "starting",
        HostIp: resp.Msg.HostIp,
    sb, err := s.DB.UpdateSandboxRunning(ctx, db.UpdateSandboxRunningParams{
        ID:      sandboxID,
        HostIp:  resp.Msg.HostIp,
        GuestIp: "",
        StartedAt: pgtype.Timestamptz{
            Time:  now,
            Valid: true,
        },
    }); dbErr != nil {
        slog.Warn("failed to update sandbox running after create", "id", sandboxIDStr, "error", dbErr)
    })
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("update sandbox running: %w", err)
    }

    // Store runtime metadata from the agent (envd/kernel/firecracker/agent versions).
    if meta := resp.Msg.Metadata; len(meta) > 0 {
        metaJSON, _ := json.Marshal(meta)
        if err := s.DB.UpdateSandboxMetadata(bgCtx, db.UpdateSandboxMetadataParams{
            ID: sandboxID, Metadata: metaJSON,
        if err := s.DB.UpdateSandboxMetadata(ctx, db.UpdateSandboxMetadataParams{
            ID:       sandboxID,
            Metadata: metaJSON,
        }); err != nil {
            slog.Warn("failed to store sandbox metadata", "id", sandboxIDStr, "error", err)
        }
        sb.Metadata = metaJSON
    }

    s.publishEvent(bgCtx, SandboxStateEvent{
        Event: "sandbox.started", SandboxID: sandboxIDStr, HostID: hostIDStr,
        HostIP: resp.Msg.HostIp, Metadata: resp.Msg.Metadata,
        Timestamp: now.Unix(),
    })
    return sb, nil
}

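In the asynchronous variant of Create above, the caller gets a record back while the sandbox is still "starting", so it has to poll (or consume the lifecycle events from the Redis stream) until the record reaches "running" or a terminal state. A small polling sketch; getStatus is a stand-in for whatever lookup the caller already has, such as a GET on the sandbox resource:

// Illustration only, not code from the diff.
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// waitForRunning polls until the sandbox reports "running" or a terminal status.
func waitForRunning(ctx context.Context, getStatus func(context.Context, string) (string, error), id string, interval time.Duration) error {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        status, err := getStatus(ctx, id)
        if err != nil {
            return err
        }
        switch status {
        case "running":
            return nil
        case "error", "stopped":
            return errors.New("sandbox did not start: " + status)
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
        }
    }
}

func main() {
    // Fake status source that flips to running after a couple of polls.
    calls := 0
    getStatus := func(context.Context, string) (string, error) {
        calls++
        if calls < 3 {
            return "starting", nil
        }
        return "running", nil
    }
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()
    fmt.Println(waitForRunning(ctx, getStatus, "sbx_123", 100*time.Millisecond))
}
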
// List returns active sandboxes (excludes stopped/error) belonging to the given team.

@@ -250,9 +208,7 @@ func (s *SandboxService) Get(ctx context.Context, sandboxID, teamID pgtype.UUID)
    return s.DB.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: teamID})
}

// Pause snapshots and freezes a running sandbox to disk asynchronously.
// Pre-marks the DB status as "pausing" and fires the agent RPC in a
// background goroutine.
// Pause snapshots and freezes a running sandbox to disk.
func (s *SandboxService) Pause(ctx context.Context, sandboxID, teamID pgtype.UUID) (db.Sandbox, error) {
    sb, err := s.DB.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: teamID})
    if err != nil {

@@ -268,29 +224,25 @@ func (s *SandboxService) Pause(ctx context.Context, sandboxID, teamID pgtype.UUI
    }

    sandboxIDStr := id.FormatSandboxID(sandboxID)
    hostIDStr := id.FormatHostID(sb.HostID)

    sb, err = s.DB.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
        ID: sandboxID, Status: "running", Status_2: "pausing",
    })
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("sandbox status changed concurrently")
    // Pre-mark as "paused" in DB before the RPC so the reconciler does not
    // mark the sandbox "stopped" while the host agent processes the pause.
    if _, err := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{
        ID: sandboxID, Status: "paused",
    }); err != nil {
        return db.Sandbox{}, fmt.Errorf("pre-mark paused: %w", err)
    }

    go s.pauseInBackground(sandboxID, sandboxIDStr, hostIDStr, agent)
    // Flush all metrics tiers before pausing so data survives in DB.
    s.flushAndPersistMetrics(ctx, agent, sandboxID, true)

    return sb, nil
}

func (s *SandboxService) pauseInBackground(sandboxID pgtype.UUID, sandboxIDStr, hostIDStr string, agent hostagentClient) {
    bgCtx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()

    s.flushAndPersistMetrics(bgCtx, agent, sandboxID, true)

    if _, err := agent.PauseSandbox(bgCtx, connect.NewRequest(&pb.PauseSandboxRequest{
    if _, err := agent.PauseSandbox(ctx, connect.NewRequest(&pb.PauseSandboxRequest{
        SandboxId: sandboxIDStr,
    })); err != nil {
        // Check if the agent still has this sandbox. If it was destroyed
        // (e.g. frozen VM couldn't be resumed), mark as "error" instead of
        // reverting to "running" — which would create a ghost record.
        // Use a fresh context since the original ctx may already be expired.
        revertStatus := "running"
        pingCtx, pingCancel := context.WithTimeout(context.Background(), 10*time.Second)
        if _, pingErr := agent.PingSandbox(pingCtx, connect.NewRequest(&pb.PingSandboxRequest{

@@ -301,37 +253,23 @@ func (s *SandboxService) pauseInBackground(sandboxID pgtype.UUID, sandboxIDStr,
        }
        pingCancel()
        dbCtx, dbCancel := context.WithTimeout(context.Background(), 5*time.Second)
        if _, dbErr := s.DB.UpdateSandboxStatusIf(dbCtx, db.UpdateSandboxStatusIfParams{
            ID: sandboxID, Status: "pausing", Status_2: revertStatus,
        if _, dbErr := s.DB.UpdateSandboxStatus(dbCtx, db.UpdateSandboxStatusParams{
            ID: sandboxID, Status: revertStatus,
        }); dbErr != nil {
            slog.Warn("failed to revert sandbox status after pause error", "sandbox_id", sandboxIDStr, "error", dbErr)
        }
        dbCancel()

        evtCtx, evtCancel := context.WithTimeout(context.Background(), 5*time.Second)
        s.publishEvent(evtCtx, SandboxStateEvent{
            Event: "sandbox.failed", SandboxID: sandboxIDStr, HostID: hostIDStr,
            Error: err.Error(), Timestamp: time.Now().Unix(),
        })
        evtCancel()
        return
        return db.Sandbox{}, fmt.Errorf("agent pause: %w", err)
    }

    if _, err := s.DB.UpdateSandboxStatusIf(bgCtx, db.UpdateSandboxStatusIfParams{
        ID: sandboxID, Status: "pausing", Status_2: "paused",
    }); err != nil {
        slog.Warn("failed to update sandbox to paused", "sandbox_id", sandboxIDStr, "error", err)
    sb, err = s.DB.GetSandbox(ctx, sandboxID)
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("get sandbox after pause: %w", err)
    }

    s.publishEvent(bgCtx, SandboxStateEvent{
        Event: "sandbox.paused", SandboxID: sandboxIDStr, HostID: hostIDStr,
        Timestamp: time.Now().Unix(),
    })
    return sb, nil
}

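Read together, the two versions of Pause imply a small status machine: the asynchronous variant goes running to pausing to paused, reverting pausing back to running (or to error when the agent has lost the VM) on failure, while the synchronous variant jumps straight to paused. A compact sketch of the asynchronous transitions, written purely as an illustration rather than code from the diff:

// Illustration of the intended transitions only.
package main

import "fmt"

var allowed = map[string][]string{
    "running": {"pausing"},
    "pausing": {"paused", "running", "error"},
    "paused":  {"resuming"},
}

// canTransition reports whether the pause/resume flow permits from -> to.
func canTransition(from, to string) bool {
    for _, next := range allowed[from] {
        if next == to {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(canTransition("running", "pausing")) // true
    fmt.Println(canTransition("running", "paused"))  // false: must go through pausing
    fmt.Println(canTransition("pausing", "error"))   // true: agent lost the VM
}
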
// Resume restores a paused sandbox from snapshot asynchronously.
// Pre-marks the DB status as "resuming" and fires the agent RPC in a
// background goroutine.
// Resume restores a paused sandbox from snapshot.
func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UUID) (db.Sandbox, error) {
    sb, err := s.DB.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: teamID})
    if err != nil {

@@ -347,8 +285,8 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU
    }

    sandboxIDStr := id.FormatSandboxID(sandboxID)
    hostIDStr := id.FormatHostID(sb.HostID)

    // Look up template defaults for resume.
    var resumeDefaultUser string
    var resumeDefaultEnv map[string]string
    if sb.TemplateID.Valid {

@@ -361,6 +299,7 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU
        }
    }

    // Extract kernel version hint from existing sandbox metadata.
    var kernelVersion string
    if len(sb.Metadata) > 0 {
        var meta map[string]string

@@ -369,88 +308,52 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU
        }
    }

    sb, err = s.DB.UpdateSandboxStatusIf(ctx, db.UpdateSandboxStatusIfParams{
        ID: sandboxID, Status: "paused", Status_2: "resuming",
    })
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("sandbox status changed concurrently")
    }

    go s.resumeInBackground(sandboxID, sandboxIDStr, hostIDStr, agent, sb.TimeoutSec, resumeDefaultUser, resumeDefaultEnv, kernelVersion)

    return sb, nil
}

func (s *SandboxService) resumeInBackground(
    sandboxID pgtype.UUID, sandboxIDStr, hostIDStr string,
    agent hostagentClient, timeoutSec int32,
    defaultUser string, defaultEnv map[string]string, kernelVersion string,
) {
    bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    defer cancel()

    resp, err := agent.ResumeSandbox(bgCtx, connect.NewRequest(&pb.ResumeSandboxRequest{
    resp, err := agent.ResumeSandbox(ctx, connect.NewRequest(&pb.ResumeSandboxRequest{
        SandboxId:   sandboxIDStr,
        TimeoutSec:  timeoutSec,
        DefaultUser: defaultUser,
        DefaultEnv:  defaultEnv,
        TimeoutSec:    sb.TimeoutSec,
        DefaultUser:   resumeDefaultUser,
        DefaultEnv:    resumeDefaultEnv,
        KernelVersion: kernelVersion,
    }))
    if err != nil {
        slog.Warn("background resume failed", "sandbox_id", sandboxIDStr, "error", err)
        errCtx, errCancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer errCancel()
        if _, dbErr := s.DB.UpdateSandboxStatusIf(errCtx, db.UpdateSandboxStatusIfParams{
            ID: sandboxID, Status: "resuming", Status_2: "paused",
        }); dbErr != nil {
            slog.Warn("failed to revert sandbox to paused after resume failure", "id", sandboxIDStr, "error", dbErr)
        }
        s.publishEvent(errCtx, SandboxStateEvent{
            Event: "sandbox.failed", SandboxID: sandboxIDStr, HostID: hostIDStr,
            Error: err.Error(), Timestamp: time.Now().Unix(),
        })
        return
        return db.Sandbox{}, fmt.Errorf("agent resume: %w", err)
    }

    now := time.Now()
    if _, dbErr := s.DB.UpdateSandboxRunningIf(bgCtx, db.UpdateSandboxRunningIfParams{
        ID:     sandboxID,
        Status: "resuming",
        HostIp: resp.Msg.HostIp,
    sb, err = s.DB.UpdateSandboxRunning(ctx, db.UpdateSandboxRunningParams{
        ID:      sandboxID,
        HostIp:  resp.Msg.HostIp,
        GuestIp: "",
        StartedAt: pgtype.Timestamptz{
            Time:  now,
            Valid: true,
        },
    }); dbErr != nil {
        slog.Warn("failed to update sandbox running after resume", "id", sandboxIDStr, "error", dbErr)
    })
    if err != nil {
        return db.Sandbox{}, fmt.Errorf("update status: %w", err)
    }

    // Update metadata with actual versions used after resume.
    if meta := resp.Msg.Metadata; len(meta) > 0 {
        metaJSON, _ := json.Marshal(meta)
        if err := s.DB.UpdateSandboxMetadata(bgCtx, db.UpdateSandboxMetadataParams{
            ID: sandboxID, Metadata: metaJSON,
        if err := s.DB.UpdateSandboxMetadata(ctx, db.UpdateSandboxMetadataParams{
            ID:       sandboxID,
            Metadata: metaJSON,
        }); err != nil {
            slog.Warn("failed to update sandbox metadata after resume", "id", sandboxIDStr, "error", err)
        }
        sb.Metadata = metaJSON
    }

    s.publishEvent(bgCtx, SandboxStateEvent{
        Event: "sandbox.resumed", SandboxID: sandboxIDStr, HostID: hostIDStr,
        HostIP: resp.Msg.HostIp, Metadata: resp.Msg.Metadata,
        Timestamp: now.Unix(),
    })
    return sb, nil
}

// Destroy stops a sandbox asynchronously. Pre-marks the DB status as
// "stopping" and fires the agent RPC in a background goroutine.
// Destroy stops a sandbox and marks it as stopped.
func (s *SandboxService) Destroy(ctx context.Context, sandboxID, teamID pgtype.UUID) error {
    sb, err := s.DB.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: teamID})
    if err != nil {
        return fmt.Errorf("sandbox not found: %w", err)
    }
    if sb.Status == "stopped" || sb.Status == "error" {
        return nil
    }

    agent, _, err := s.agentForSandbox(ctx, sandboxID)
    if err != nil {

@@ -458,53 +361,35 @@ func (s *SandboxService) Destroy(ctx context.Context, sandboxID, teamID pgtype.U
    }

    sandboxIDStr := id.FormatSandboxID(sandboxID)
    hostIDStr := id.FormatHostID(sb.HostID)
    prevStatus := sb.Status

    if _, err := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{
        ID: sandboxID, Status: "stopping",
    }); err != nil {
        return fmt.Errorf("pre-mark stopping: %w", err)
    // If running, flush 24h tier metrics for analytics before destroying.
    if sb.Status == "running" {
        s.flushAndPersistMetrics(ctx, agent, sandboxID, false)
    }

    go s.destroyInBackground(sandboxID, sandboxIDStr, hostIDStr, agent, prevStatus)

    return nil
}

func (s *SandboxService) destroyInBackground(sandboxID pgtype.UUID, sandboxIDStr, hostIDStr string, agent hostagentClient, prevStatus string) {
    bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    defer cancel()

    if prevStatus == "running" || prevStatus == "pausing" {
        s.flushAndPersistMetrics(bgCtx, agent, sandboxID, false)
    }

    if _, err := agent.DestroySandbox(bgCtx, connect.NewRequest(&pb.DestroySandboxRequest{
    // Destroy on host agent. A not-found response is fine — sandbox is already gone.
    if _, err := agent.DestroySandbox(ctx, connect.NewRequest(&pb.DestroySandboxRequest{
        SandboxId: sandboxIDStr,
    })); err != nil && connect.CodeOf(err) != connect.CodeNotFound {
        slog.Warn("background destroy failed", "sandbox_id", sandboxIDStr, "error", err)
        return fmt.Errorf("agent destroy: %w", err)
    }

    if prevStatus == "paused" {
        _ = s.DB.DeleteSandboxMetricPointsByTier(bgCtx, db.DeleteSandboxMetricPointsByTierParams{
    // For a paused sandbox, only keep 24h tier; remove the finer-grained tiers.
    if sb.Status == "paused" {
        _ = s.DB.DeleteSandboxMetricPointsByTier(ctx, db.DeleteSandboxMetricPointsByTierParams{
            SandboxID: sandboxID, Tier: "10m",
        })
        _ = s.DB.DeleteSandboxMetricPointsByTier(bgCtx, db.DeleteSandboxMetricPointsByTierParams{
        _ = s.DB.DeleteSandboxMetricPointsByTier(ctx, db.DeleteSandboxMetricPointsByTierParams{
            SandboxID: sandboxID, Tier: "2h",
        })
    }

    if _, err := s.DB.UpdateSandboxStatusIf(bgCtx, db.UpdateSandboxStatusIfParams{
        ID: sandboxID, Status: "stopping", Status_2: "stopped",
    if _, err := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{
        ID: sandboxID, Status: "stopped",
    }); err != nil {
        slog.Warn("failed to update sandbox to stopped", "sandbox_id", sandboxIDStr, "error", err)
        return fmt.Errorf("update status: %w", err)
    }

    s.publishEvent(bgCtx, SandboxStateEvent{
        Event: "sandbox.stopped", SandboxID: sandboxIDStr, HostID: hostIDStr,
        Timestamp: time.Now().Unix(),
    })
    return nil
}

// flushAndPersistMetrics calls FlushSandboxMetrics on the agent and stores

@@ -759,9 +759,7 @@ type ExecRequest struct {
    Cmd  string   `protobuf:"bytes,2,opt,name=cmd,proto3" json:"cmd,omitempty"`
    Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
    // Timeout for the command in seconds (default: 30).
    TimeoutSec int32             `protobuf:"varint,4,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"`
    Envs       map[string]string `protobuf:"bytes,5,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    Cwd        string            `protobuf:"bytes,6,opt,name=cwd,proto3" json:"cwd,omitempty"`
    TimeoutSec    int32 `protobuf:"varint,4,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"`
    unknownFields protoimpl.UnknownFields
    sizeCache     protoimpl.SizeCache
}

@@ -824,20 +822,6 @@ func (x *ExecRequest) GetTimeoutSec() int32 {
    return 0
}

func (x *ExecRequest) GetEnvs() map[string]string {
    if x != nil {
        return x.Envs
    }
    return nil
}

func (x *ExecRequest) GetCwd() string {
    if x != nil {
        return x.Cwd
    }
    return ""
}

type ExecResponse struct {
    state  protoimpl.MessageState `protogen:"open.v1"`
    Stdout []byte                 `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"`

@@ -1320,8 +1304,6 @@ type ExecStreamRequest struct {
    Cmd  string   `protobuf:"bytes,2,opt,name=cmd,proto3" json:"cmd,omitempty"`
    Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
    TimeoutSec int32             `protobuf:"varint,4,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"`
    Envs       map[string]string `protobuf:"bytes,5,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    Cwd        string            `protobuf:"bytes,6,opt,name=cwd,proto3" json:"cwd,omitempty"`
    unknownFields protoimpl.UnknownFields
    sizeCache     protoimpl.SizeCache
}

@@ -1384,20 +1366,6 @@ func (x *ExecStreamRequest) GetTimeoutSec() int32 {
    return 0
}

func (x *ExecStreamRequest) GetEnvs() map[string]string {
    if x != nil {
        return x.Envs
    }
    return nil
}

func (x *ExecStreamRequest) GetCwd() string {
    if x != nil {
        return x.Cwd
    }
    return ""
}

type ExecStreamResponse struct {
    state protoimpl.MessageState `protogen:"open.v1"`
    // Types that are valid to be assigned to Event:

@ -4228,19 +4196,14 @@ const file_hostagent_proto_rawDesc = "" +
|
||||
"\ateam_id\x18\x02 \x01(\tR\x06teamId\x12\x1f\n" +
|
||||
"\vtemplate_id\x18\x03 \x01(\tR\n" +
|
||||
"templateId\"\x18\n" +
|
||||
"\x16DeleteSnapshotResponse\"\xf7\x01\n" +
|
||||
"\x16DeleteSnapshotResponse\"s\n" +
|
||||
"\vExecRequest\x12\x1d\n" +
|
||||
"\n" +
|
||||
"sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" +
|
||||
"\x03cmd\x18\x02 \x01(\tR\x03cmd\x12\x12\n" +
|
||||
"\x04args\x18\x03 \x03(\tR\x04args\x12\x1f\n" +
|
||||
"\vtimeout_sec\x18\x04 \x01(\x05R\n" +
|
||||
"timeoutSec\x127\n" +
|
||||
"\x04envs\x18\x05 \x03(\v2#.hostagent.v1.ExecRequest.EnvsEntryR\x04envs\x12\x10\n" +
|
||||
"\x03cwd\x18\x06 \x01(\tR\x03cwd\x1a7\n" +
|
||||
"\tEnvsEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
|
||||
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"[\n" +
|
||||
"timeoutSec\"[\n" +
|
||||
"\fExecResponse\x12\x16\n" +
|
||||
"\x06stdout\x18\x01 \x01(\fR\x06stdout\x12\x16\n" +
|
||||
"\x06stderr\x18\x02 \x01(\fR\x06stderr\x12\x1b\n" +
|
||||
@ -4280,19 +4243,14 @@ const file_hostagent_proto_rawDesc = "" +
|
||||
"sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" +
|
||||
"\x04path\x18\x02 \x01(\tR\x04path\",\n" +
|
||||
"\x10ReadFileResponse\x12\x18\n" +
|
||||
"\acontent\x18\x01 \x01(\fR\acontent\"\x83\x02\n" +
|
||||
"\acontent\x18\x01 \x01(\fR\acontent\"y\n" +
|
||||
"\x11ExecStreamRequest\x12\x1d\n" +
|
||||
"\n" +
|
||||
"sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" +
|
||||
"\x03cmd\x18\x02 \x01(\tR\x03cmd\x12\x12\n" +
|
||||
"\x04args\x18\x03 \x03(\tR\x04args\x12\x1f\n" +
|
||||
"\vtimeout_sec\x18\x04 \x01(\x05R\n" +
|
||||
"timeoutSec\x12=\n" +
|
||||
"\x04envs\x18\x05 \x03(\v2).hostagent.v1.ExecStreamRequest.EnvsEntryR\x04envs\x12\x10\n" +
|
||||
"\x03cwd\x18\x06 \x01(\tR\x03cwd\x1a7\n" +
|
||||
"\tEnvsEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
|
||||
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb9\x01\n" +
|
||||
"timeoutSec\"\xb9\x01\n" +
|
||||
"\x12ExecStreamResponse\x125\n" +
|
||||
"\x05start\x18\x01 \x01(\v2\x1d.hostagent.v1.ExecStreamStartH\x00R\x05start\x122\n" +
|
||||
"\x04data\x18\x02 \x01(\v2\x1c.hostagent.v1.ExecStreamDataH\x00R\x04data\x12/\n" +
|
||||
@ -4528,7 +4486,7 @@ func file_hostagent_proto_rawDescGZIP() []byte {
|
||||
return file_hostagent_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 78)
|
||||
var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 76)
|
||||
var file_hostagent_proto_goTypes = []any{
|
||||
(*CreateSandboxRequest)(nil), // 0: hostagent.v1.CreateSandboxRequest
|
||||
(*CreateSandboxResponse)(nil), // 1: hostagent.v1.CreateSandboxResponse
|
||||
@ -4603,103 +4561,99 @@ var file_hostagent_proto_goTypes = []any{
|
||||
nil, // 70: hostagent.v1.CreateSandboxResponse.MetadataEntry
|
||||
nil, // 71: hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry
|
||||
nil, // 72: hostagent.v1.ResumeSandboxResponse.MetadataEntry
|
||||
nil, // 73: hostagent.v1.ExecRequest.EnvsEntry
|
||||
nil, // 74: hostagent.v1.SandboxInfo.MetadataEntry
|
||||
nil, // 75: hostagent.v1.ExecStreamRequest.EnvsEntry
|
||||
nil, // 76: hostagent.v1.PtyAttachRequest.EnvsEntry
|
||||
nil, // 77: hostagent.v1.StartBackgroundRequest.EnvsEntry
|
||||
nil, // 73: hostagent.v1.SandboxInfo.MetadataEntry
|
||||
nil, // 74: hostagent.v1.PtyAttachRequest.EnvsEntry
|
||||
nil, // 75: hostagent.v1.StartBackgroundRequest.EnvsEntry
|
||||
}
|
||||
var file_hostagent_proto_depIdxs = []int32{
|
||||
69, // 0: hostagent.v1.CreateSandboxRequest.default_env:type_name -> hostagent.v1.CreateSandboxRequest.DefaultEnvEntry
|
||||
70, // 1: hostagent.v1.CreateSandboxResponse.metadata:type_name -> hostagent.v1.CreateSandboxResponse.MetadataEntry
|
||||
71, // 2: hostagent.v1.ResumeSandboxRequest.default_env:type_name -> hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry
|
||||
72, // 3: hostagent.v1.ResumeSandboxResponse.metadata:type_name -> hostagent.v1.ResumeSandboxResponse.MetadataEntry
|
||||
73, // 4: hostagent.v1.ExecRequest.envs:type_name -> hostagent.v1.ExecRequest.EnvsEntry
|
||||
16, // 5: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo
|
||||
74, // 6: hostagent.v1.SandboxInfo.metadata:type_name -> hostagent.v1.SandboxInfo.MetadataEntry
|
||||
75, // 7: hostagent.v1.ExecStreamRequest.envs:type_name -> hostagent.v1.ExecStreamRequest.EnvsEntry
|
||||
23, // 8: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart
|
||||
24, // 9: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData
|
||||
25, // 10: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd
|
||||
27, // 11: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta
|
||||
33, // 12: hostagent.v1.ListDirResponse.entries:type_name -> hostagent.v1.FileEntry
|
||||
33, // 13: hostagent.v1.MakeDirResponse.entry:type_name -> hostagent.v1.FileEntry
|
||||
42, // 14: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 15: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 16: hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 17: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint
|
||||
76, // 18: hostagent.v1.PtyAttachRequest.envs:type_name -> hostagent.v1.PtyAttachRequest.EnvsEntry
|
||||
51, // 19: hostagent.v1.PtyAttachResponse.started:type_name -> hostagent.v1.PtyStarted
|
||||
52, // 20: hostagent.v1.PtyAttachResponse.output:type_name -> hostagent.v1.PtyOutput
|
||||
53, // 21: hostagent.v1.PtyAttachResponse.exited:type_name -> hostagent.v1.PtyExited
|
||||
77, // 22: hostagent.v1.StartBackgroundRequest.envs:type_name -> hostagent.v1.StartBackgroundRequest.EnvsEntry
|
||||
63, // 23: hostagent.v1.ListProcessesResponse.processes:type_name -> hostagent.v1.ProcessEntry
|
||||
23, // 24: hostagent.v1.ConnectProcessResponse.start:type_name -> hostagent.v1.ExecStreamStart
|
||||
24, // 25: hostagent.v1.ConnectProcessResponse.data:type_name -> hostagent.v1.ExecStreamData
|
||||
25, // 26: hostagent.v1.ConnectProcessResponse.end:type_name -> hostagent.v1.ExecStreamEnd
|
||||
0, // 27: hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest
|
||||
2, // 28: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest
|
||||
4, // 29: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest
|
||||
6, // 30: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest
|
||||
12, // 31: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest
|
||||
14, // 32: hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest
|
||||
17, // 33: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest
|
||||
19, // 34: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest
|
||||
31, // 35: hostagent.v1.HostAgentService.ListDir:input_type -> hostagent.v1.ListDirRequest
|
||||
34, // 36: hostagent.v1.HostAgentService.MakeDir:input_type -> hostagent.v1.MakeDirRequest
|
||||
36, // 37: hostagent.v1.HostAgentService.RemovePath:input_type -> hostagent.v1.RemovePathRequest
|
||||
8, // 38: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest
|
||||
10, // 39: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest
|
||||
21, // 40: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest
|
||||
26, // 41: hostagent.v1.HostAgentService.WriteFileStream:input_type -> hostagent.v1.WriteFileStreamRequest
|
||||
29, // 42: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest
|
||||
38, // 43: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest
|
||||
40, // 44: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest
|
||||
43, // 45: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest
|
||||
45, // 46: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest
|
||||
47, // 47: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest
|
||||
49, // 48: hostagent.v1.HostAgentService.PtyAttach:input_type -> hostagent.v1.PtyAttachRequest
|
||||
54, // 49: hostagent.v1.HostAgentService.PtySendInput:input_type -> hostagent.v1.PtySendInputRequest
|
||||
56, // 50: hostagent.v1.HostAgentService.PtyResize:input_type -> hostagent.v1.PtyResizeRequest
|
||||
58, // 51: hostagent.v1.HostAgentService.PtyKill:input_type -> hostagent.v1.PtyKillRequest
|
||||
60, // 52: hostagent.v1.HostAgentService.StartBackground:input_type -> hostagent.v1.StartBackgroundRequest
|
||||
62, // 53: hostagent.v1.HostAgentService.ListProcesses:input_type -> hostagent.v1.ListProcessesRequest
|
||||
65, // 54: hostagent.v1.HostAgentService.KillProcess:input_type -> hostagent.v1.KillProcessRequest
|
||||
67, // 55: hostagent.v1.HostAgentService.ConnectProcess:input_type -> hostagent.v1.ConnectProcessRequest
|
||||
1, // 56: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse
|
||||
3, // 57: hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse
|
||||
5, // 58: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse
|
||||
7, // 59: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse
|
||||
13, // 60: hostagent.v1.HostAgentService.Exec:output_type -> hostagent.v1.ExecResponse
|
||||
15, // 61: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse
|
||||
18, // 62: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse
|
||||
20, // 63: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse
|
||||
32, // 64: hostagent.v1.HostAgentService.ListDir:output_type -> hostagent.v1.ListDirResponse
|
||||
35, // 65: hostagent.v1.HostAgentService.MakeDir:output_type -> hostagent.v1.MakeDirResponse
|
||||
37, // 66: hostagent.v1.HostAgentService.RemovePath:output_type -> hostagent.v1.RemovePathResponse
|
||||
9, // 67: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse
|
||||
11, // 68: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse
|
||||
22, // 69: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse
|
||||
28, // 70: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse
|
||||
30, // 71: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse
|
||||
39, // 72: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse
|
||||
41, // 73: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse
|
||||
44, // 74: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse
|
||||
46, // 75: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse
|
||||
48, // 76: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse
|
||||
50, // 77: hostagent.v1.HostAgentService.PtyAttach:output_type -> hostagent.v1.PtyAttachResponse
|
||||
55, // 78: hostagent.v1.HostAgentService.PtySendInput:output_type -> hostagent.v1.PtySendInputResponse
|
||||
57, // 79: hostagent.v1.HostAgentService.PtyResize:output_type -> hostagent.v1.PtyResizeResponse
|
||||
59, // 80: hostagent.v1.HostAgentService.PtyKill:output_type -> hostagent.v1.PtyKillResponse
|
||||
61, // 81: hostagent.v1.HostAgentService.StartBackground:output_type -> hostagent.v1.StartBackgroundResponse
|
||||
64, // 82: hostagent.v1.HostAgentService.ListProcesses:output_type -> hostagent.v1.ListProcessesResponse
|
||||
66, // 83: hostagent.v1.HostAgentService.KillProcess:output_type -> hostagent.v1.KillProcessResponse
|
||||
68, // 84: hostagent.v1.HostAgentService.ConnectProcess:output_type -> hostagent.v1.ConnectProcessResponse
|
||||
56, // [56:85] is the sub-list for method output_type
|
||||
27, // [27:56] is the sub-list for method input_type
|
||||
27, // [27:27] is the sub-list for extension type_name
|
||||
27, // [27:27] is the sub-list for extension extendee
|
||||
0, // [0:27] is the sub-list for field type_name
|
||||
16, // 4: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo
|
||||
73, // 5: hostagent.v1.SandboxInfo.metadata:type_name -> hostagent.v1.SandboxInfo.MetadataEntry
|
||||
23, // 6: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart
|
||||
24, // 7: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData
|
||||
25, // 8: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd
|
||||
27, // 9: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta
|
||||
33, // 10: hostagent.v1.ListDirResponse.entries:type_name -> hostagent.v1.FileEntry
|
||||
33, // 11: hostagent.v1.MakeDirResponse.entry:type_name -> hostagent.v1.FileEntry
|
||||
42, // 12: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 13: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 14: hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint
|
||||
42, // 15: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint
|
||||
74, // 16: hostagent.v1.PtyAttachRequest.envs:type_name -> hostagent.v1.PtyAttachRequest.EnvsEntry
|
||||
51, // 17: hostagent.v1.PtyAttachResponse.started:type_name -> hostagent.v1.PtyStarted
|
||||
52, // 18: hostagent.v1.PtyAttachResponse.output:type_name -> hostagent.v1.PtyOutput
|
||||
53, // 19: hostagent.v1.PtyAttachResponse.exited:type_name -> hostagent.v1.PtyExited
|
||||
75, // 20: hostagent.v1.StartBackgroundRequest.envs:type_name -> hostagent.v1.StartBackgroundRequest.EnvsEntry
|
||||
63, // 21: hostagent.v1.ListProcessesResponse.processes:type_name -> hostagent.v1.ProcessEntry
|
||||
23, // 22: hostagent.v1.ConnectProcessResponse.start:type_name -> hostagent.v1.ExecStreamStart
|
||||
24, // 23: hostagent.v1.ConnectProcessResponse.data:type_name -> hostagent.v1.ExecStreamData
|
||||
25, // 24: hostagent.v1.ConnectProcessResponse.end:type_name -> hostagent.v1.ExecStreamEnd
|
||||
0, // 25: hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest
|
||||
2, // 26: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest
|
||||
4, // 27: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest
|
||||
6, // 28: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest
|
||||
12, // 29: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest
|
||||
14, // 30: hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest
|
||||
17, // 31: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest
|
||||
19, // 32: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest
|
||||
31, // 33: hostagent.v1.HostAgentService.ListDir:input_type -> hostagent.v1.ListDirRequest
|
||||
34, // 34: hostagent.v1.HostAgentService.MakeDir:input_type -> hostagent.v1.MakeDirRequest
|
||||
36, // 35: hostagent.v1.HostAgentService.RemovePath:input_type -> hostagent.v1.RemovePathRequest
|
||||
8, // 36: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest
|
||||
10, // 37: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest
|
||||
21, // 38: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest
|
||||
26, // 39: hostagent.v1.HostAgentService.WriteFileStream:input_type -> hostagent.v1.WriteFileStreamRequest
|
||||
29, // 40: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest
|
||||
38, // 41: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest
|
||||
40, // 42: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest
|
||||
43, // 43: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest
|
||||
45, // 44: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest
|
||||
47, // 45: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest
|
||||
49, // 46: hostagent.v1.HostAgentService.PtyAttach:input_type -> hostagent.v1.PtyAttachRequest
|
||||
54, // 47: hostagent.v1.HostAgentService.PtySendInput:input_type -> hostagent.v1.PtySendInputRequest
|
||||
56, // 48: hostagent.v1.HostAgentService.PtyResize:input_type -> hostagent.v1.PtyResizeRequest
|
||||
58, // 49: hostagent.v1.HostAgentService.PtyKill:input_type -> hostagent.v1.PtyKillRequest
|
||||
60, // 50: hostagent.v1.HostAgentService.StartBackground:input_type -> hostagent.v1.StartBackgroundRequest
|
||||
62, // 51: hostagent.v1.HostAgentService.ListProcesses:input_type -> hostagent.v1.ListProcessesRequest
|
||||
65, // 52: hostagent.v1.HostAgentService.KillProcess:input_type -> hostagent.v1.KillProcessRequest
|
||||
67, // 53: hostagent.v1.HostAgentService.ConnectProcess:input_type -> hostagent.v1.ConnectProcessRequest
|
||||
1, // 54: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse
|
||||
3, // 55: hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse
|
||||
5, // 56: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse
|
||||
7, // 57: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse
|
||||
13, // 58: hostagent.v1.HostAgentService.Exec:output_type -> hostagent.v1.ExecResponse
|
||||
15, // 59: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse
|
||||
18, // 60: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse
|
||||
20, // 61: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse
|
||||
32, // 62: hostagent.v1.HostAgentService.ListDir:output_type -> hostagent.v1.ListDirResponse
|
||||
35, // 63: hostagent.v1.HostAgentService.MakeDir:output_type -> hostagent.v1.MakeDirResponse
|
||||
37, // 64: hostagent.v1.HostAgentService.RemovePath:output_type -> hostagent.v1.RemovePathResponse
|
||||
9, // 65: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse
|
||||
11, // 66: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse
|
||||
22, // 67: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse
|
||||
28, // 68: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse
|
||||
30, // 69: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse
|
||||
39, // 70: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse
|
||||
41, // 71: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse
|
||||
44, // 72: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse
|
||||
46, // 73: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse
|
||||
48, // 74: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse
|
||||
50, // 75: hostagent.v1.HostAgentService.PtyAttach:output_type -> hostagent.v1.PtyAttachResponse
|
||||
55, // 76: hostagent.v1.HostAgentService.PtySendInput:output_type -> hostagent.v1.PtySendInputResponse
|
||||
57, // 77: hostagent.v1.HostAgentService.PtyResize:output_type -> hostagent.v1.PtyResizeResponse
|
||||
59, // 78: hostagent.v1.HostAgentService.PtyKill:output_type -> hostagent.v1.PtyKillResponse
|
||||
61, // 79: hostagent.v1.HostAgentService.StartBackground:output_type -> hostagent.v1.StartBackgroundResponse
|
||||
64, // 80: hostagent.v1.HostAgentService.ListProcesses:output_type -> hostagent.v1.ListProcessesResponse
|
||||
66, // 81: hostagent.v1.HostAgentService.KillProcess:output_type -> hostagent.v1.KillProcessResponse
|
||||
68, // 82: hostagent.v1.HostAgentService.ConnectProcess:output_type -> hostagent.v1.ConnectProcessResponse
|
||||
54, // [54:83] is the sub-list for method output_type
|
||||
25, // [25:54] is the sub-list for method input_type
|
||||
25, // [25:25] is the sub-list for extension type_name
|
||||
25, // [25:25] is the sub-list for extension extendee
|
||||
0, // [0:25] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_hostagent_proto_init() }
|
||||
@ -4745,7 +4699,7 @@ func file_hostagent_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_hostagent_proto_rawDesc), len(file_hostagent_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 78,
|
||||
NumMessages: 76,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
|
||||
@@ -222,8 +222,6 @@ message ExecRequest {
  repeated string args = 3;
  // Timeout for the command in seconds (default: 30).
  int32 timeout_sec = 4;
  map<string, string> envs = 5;
  string cwd = 6;
}

message ExecResponse {

@@ -284,8 +282,6 @@ message ExecStreamRequest {
  string cmd = 2;
  repeated string args = 3;
  int32 timeout_sec = 4;
  map<string, string> envs = 5;
  string cwd = 6;
}

message ExecStreamResponse {

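With envs and cwd present on ExecRequest, a control-plane or CLI caller can set the working directory and environment per command. A hedged sketch of such a call using connect-go; the genconnect import path, the client constructor name, and the connectrpc.com/connect module path are assumptions, and only the message fields come from the proto above:

// Sketch only; verify the generated connect package path before using.
package main

import (
    "context"
    "fmt"
    "net/http"

    "connectrpc.com/connect"

    pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen"
    pbconnect "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/genconnect" // assumed path
)

func main() {
    // Assumed constructor generated by protoc-gen-connect-go for HostAgentService.
    client := pbconnect.NewHostAgentServiceClient(http.DefaultClient, "http://host-agent:8080")

    resp, err := client.Exec(context.Background(), connect.NewRequest(&pb.ExecRequest{
        SandboxId:  "sbx_123",
        Cmd:        "npm",
        Args:       []string{"run", "build"},
        TimeoutSec: 120,
        Envs:       map[string]string{"NODE_ENV": "production"},
        Cwd:        "/workspace/app",
    }))
    if err != nil {
        fmt.Println("exec failed:", err)
        return
    }
    fmt.Printf("stdout: %s\n", resp.Msg.GetStdout())
}
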