diff --git a/.env.example b/.env.example index f9318cc..7446591 100644 --- a/.env.example +++ b/.env.example @@ -1,3 +1,7 @@ +# Shared (applies to both control plane and host agent) +WRENN_DIR=/var/lib/wrenn +LOG_LEVEL=info + # Database DATABASE_URL=postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable @@ -5,24 +9,14 @@ DATABASE_URL=postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable REDIS_URL=redis://localhost:6379/0 # Control Plane -WRENN_CP_LISTEN_ADDR=:8080 +WRENN_CP_LISTEN_ADDR=:9725 # Host Agent WRENN_HOST_LISTEN_ADDR=:50051 -WRENN_DIR=/var/lib/wrenn WRENN_HOST_INTERFACE=eth0 -WRENN_CP_URL=http://localhost:8080 - -# Lago (billing — external service) -LAGO_API_URL=http://localhost:3000 -LAGO_API_KEY= - -# Object Storage (hibernate snapshots — Hetzner Object Storage, S3-compatible) -S3_BUCKET=wrenn-snapshots -S3_REGION=fsn1 -S3_ENDPOINT=https://fsn1.your-objectstorage.com -AWS_ACCESS_KEY_ID= -AWS_SECRET_ACCESS_KEY= +WRENN_CP_URL=http://localhost:9725 +WRENN_DEFAULT_ROOTFS_SIZE=5Gi +WRENN_FIRECRACKER_BIN=/usr/local/bin/firecracker # Auth JWT_SECRET= @@ -43,4 +37,11 @@ WRENN_ENCRYPTION_KEY= OAUTH_GITHUB_CLIENT_ID= OAUTH_GITHUB_CLIENT_SECRET= OAUTH_REDIRECT_URL=https://app.wrenn.dev -CP_PUBLIC_URL=https://api.wrenn.dev +CP_PUBLIC_URL=https://app.wrenn.dev + +# SMTP — transactional email (optional; omit SMTP_HOST to disable) +SMTP_HOST= +SMTP_PORT=587 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_FROM_EMAIL=noreply@wrenn.dev diff --git a/.gitignore b/.gitignore index 96b55a4..4be2db8 100644 --- a/.gitignore +++ b/.gitignore @@ -47,4 +47,5 @@ frontend/build/ ## Dashboard embedded static (built from frontend, not committed) internal/dashboard/static/* -!internal/dashboard/static/.gitkeep \ No newline at end of file +!internal/dashboard/static/.gitkeep +.dual-graph/ diff --git a/CLAUDE.md b/CLAUDE.md index d3cfa02..56fdbbc 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -12,10 +12,10 @@ All commands go through the Makefile. 
Never use raw `go build` or `go run`. ```bash make build # Build all binaries → builds/ -make build-cp # Control plane only (builds frontend first) +make build-cp # Control plane only make build-agent # Host agent only make build-envd # envd static binary (verified statically linked) -make build-frontend # SvelteKit dashboard → internal/dashboard/static/ +make build-frontend # SvelteKit dashboard → frontend/build/ (served by Caddy) make dev # Full local dev: infra + migrate + control plane make dev-infra # Start PostgreSQL + Prometheus + Grafana (Docker) @@ -55,7 +55,7 @@ User SDK → HTTPS/WS → Control Plane → Connect RPC → Host Agent → HTTP/ | Binary | Module | Entry point | Runs as | |--------|--------|-------------|---------| | wrenn-cp | `git.omukk.dev/wrenn/wrenn` | `cmd/control-plane/main.go` | Unprivileged | -| wrenn-agent | `git.omukk.dev/wrenn/wrenn` | `cmd/host-agent/main.go` | Root (NET_ADMIN + /dev/kvm) | +| wrenn-agent | `git.omukk.dev/wrenn/wrenn` | `cmd/host-agent/main.go` | `wrenn` user with capabilities (SYS_ADMIN, NET_ADMIN, NET_RAW, SYS_PTRACE, KILL, DAC_OVERRIDE, MKNOD) via setcap; also accepts root | | envd | `git.omukk.dev/wrenn/wrenn/envd` (standalone `envd/go.mod`) | `envd/main.go` | PID 1 inside guest VM | envd is a **completely independent Go module**. It is never imported by the main module. The only connection is the protobuf contract. It compiles to a static binary baked into rootfs images. @@ -64,21 +64,31 @@ envd is a **completely independent Go module**. It is never imported by the main ### Control Plane -**Packages:** `internal/api/`, `internal/dashboard/`, `internal/auth/`, `internal/scheduler/`, `internal/lifecycle/`, `internal/config/`, `internal/db/` +**Internal packages:** `internal/api/`, `internal/email/` -Startup (`cmd/control-plane/main.go`) wires: config (env vars) → pgxpool → `db.Queries` (sqlc-generated) → Connect RPC client to host agent → `api.Server`. Everything flows through constructor injection. 
+**Public packages (importable by cloud repo):** `pkg/config/`, `pkg/db/`, `pkg/auth/`, `pkg/auth/oauth/`, `pkg/scheduler/`, `pkg/lifecycle/`, `pkg/channels/`, `pkg/audit/`, `pkg/service/`, `pkg/events/`, `pkg/id/`, `pkg/validate/` -- **API Server** (`internal/api/server.go`): chi router with middleware. Creates handler structs (`sandboxHandler`, `execHandler`, `filesHandler`, etc.) injected with `db.Queries` and the host agent Connect RPC client. Routes under `/v1/sandboxes/*`. +**Extension framework:** `pkg/cpextension/` (shared `Extension` interface + `ServerContext`), `pkg/cpserver/` (exported `Run()` entrypoint with functional options for cloud `main.go`) + +The cloud repo imports this module as a Go dependency and calls `cpserver.Run(cpserver.WithExtensions(myExt))`. Each extension implements two methods: `RegisterRoutes(r chi.Router, sctx ServerContext)` to add HTTP routes, and `BackgroundWorkers(sctx ServerContext) []func(context.Context)` to add long-running goroutines. `ServerContext` carries all OSS services (DB, scheduler, auth, etc.) so extensions can use them without reimplementing anything. To expose a new OSS service to extensions, add it to `ServerContext` in `pkg/cpextension/extension.go` and populate it in `pkg/cpserver/run.go`. + +**pkg/ vs internal/ decision rule:** A package belongs in `pkg/` only if the cloud repo needs to import it directly. Everything else stays in `internal/`. New OSS services (e.g. email, notifications) go in `internal/` — the cloud repo accesses them through `ServerContext`, not by importing the package. Do not put a service in `pkg/` just because the cloud repo uses it. + +Startup (`cmd/control-plane/main.go`) is a thin wrapper: `cpserver.Run(cpserver.WithVersion(...))`. All 20 initialization steps live in `pkg/cpserver/run.go`: config → pgxpool → `db.Queries` → Redis → mTLS CA → host client pool → scheduler → OAuth → channels → audit logger → `api.New()` → background workers → HTTP server. 
Everything flows through constructor injection. + +- **API Server** (`internal/api/server.go`): chi router with middleware. Creates handler structs (`sandboxHandler`, `execHandler`, `filesHandler`, etc.) injected with `db.Queries` and the host agent Connect RPC client. Routes under `/v1/capsules/*`. Accepts `[]cpextension.Extension` — each extension's `RegisterRoutes()` is called after all core routes are registered. - **Reconciler** (`internal/api/reconciler.go`): background goroutine (every 30s) that compares DB records against `agent.ListSandboxes()` RPC. Marks orphaned DB entries as "stopped". -- **Dashboard** (SvelteKit + Tailwind + Bits UI, statically built and embedded via `go:embed`, served as catch-all at root) -- **Database**: PostgreSQL via pgx/v5. Queries generated by sqlc from `db/queries/sandboxes.sql`. Migrations in `db/migrations/` (goose, plain SQL). -- **Config** (`internal/config/config.go`): purely environment variables (`DATABASE_URL`, `CP_LISTEN_ADDR`, `CP_HOST_AGENT_ADDR`), no YAML/file config. +- **Dashboard** (SvelteKit + Tailwind + Bits UI, built to static files in `frontend/build/`, served by Caddy as a reverse proxy) +- **Database**: PostgreSQL via pgx/v5. Queries generated by sqlc from `db/queries/*.sql` → `pkg/db/`. Migrations in `db/migrations/` (goose, plain SQL). `db/migrations/embed.go` exposes `migrations.FS` so the cloud repo can run OSS migrations via `go:embed`. +- **Config** (`pkg/config/config.go`): purely environment variables (`DATABASE_URL`, `CP_LISTEN_ADDR`, `CP_HOST_AGENT_ADDR`), no YAML/file config. 
### Host Agent **Packages:** `internal/hostagent/`, `internal/sandbox/`, `internal/vm/`, `internal/network/`, `internal/devicemapper/`, `internal/envdclient/`, `internal/snapshot/` -Startup (`cmd/host-agent/main.go`) wires: root check → enable IP forwarding → clean up stale dm devices → `sandbox.Manager` (containing `vm.Manager` + `network.SlotAllocator` + `devicemapper.LoopRegistry`) → `hostagent.Server` (Connect RPC handler) → HTTP server. +**Production deployment:** `scripts/prepare-wrenn-user.sh` creates the `wrenn` system user, sets Linux capabilities (setcap) on wrenn-agent and all child binaries (iptables, losetup, dmsetup, etc.), installs an apt hook to restore capabilities after package updates, configures udev rules for `/dev/net/tun`, loads required kernel modules, and writes systemd unit files for both services. No sudo grants — all privilege is via capabilities. + +Startup (`cmd/host-agent/main.go`) wires: root/capabilities check → enable IP forwarding → clean up stale dm devices → `sandbox.Manager` (containing `vm.Manager` + `network.SlotAllocator` + `devicemapper.LoopRegistry`) → `hostagent.Server` (Connect RPC handler) → HTTP server. - **RPC Server** (`internal/hostagent/server.go`): implements `hostagentv1connect.HostAgentServiceHandler`. Thin wrapper — every method delegates to `sandbox.Manager`. Maps Connect error codes on return. - **Sandbox Manager** (`internal/sandbox/manager.go`): the core orchestration layer. Maintains in-memory state in `boxes map[string]*sandboxState` (protected by `sync.RWMutex`). Each `sandboxState` holds a `models.Sandbox`, a `*network.Slot`, and an `*envdclient.Client`. Runs a TTL reaper (every 10s) that auto-destroys timed-out sandboxes. 
@@ -105,8 +115,8 @@ Runs as PID 1 inside the microVM via `wrenn-init.sh` (mounts procfs/sysfs/dev, s - **Package manager**: pnpm - **Routing**: SvelteKit file-based routing under `frontend/src/routes/` - **Routing layout**: `/login` and `/signup` at root, authenticated pages under `/dashboard/*` (e.g. `/dashboard/capsules`, `/dashboard/keys`) -- **Build output**: `frontend/build/` → copied to `internal/dashboard/static/` → embedded via `go:embed` into the control plane binary -- **Serving**: `internal/dashboard/dashboard.go` registers a `NotFound` catch-all SPA handler with fallback to `index.html`. API routes (`/v1/*`, `/openapi.yaml`, `/docs`) are registered first and take priority +- **Build output**: `frontend/build/` — static files served by Caddy +- **Serving**: Caddy reverse-proxies API requests to the control plane and serves the SvelteKit SPA directly. The control plane does not serve frontend assets. - **Dev workflow**: `make dev-frontend` runs Vite dev server on port 5173 with HMR. API calls proxy to `http://localhost:8000` - **Fonts**: Manrope (UI), Instrument Serif (headings), JetBrains Mono (code), Alice (brand wordmark) — all self-hosted via `@fontsource` - **Dark mode**: class-based (`.dark` on ``) with system preference detection + localStorage persistence @@ -147,19 +157,19 @@ HIBERNATED → RUNNING (cold snapshot resume, slower) ### Key Request Flows -**Sandbox creation** (`POST /v1/sandboxes`): +**Sandbox creation** (`POST /v1/capsules`): 1. API handler generates sandbox ID, inserts into DB as "pending" 2. RPC `CreateSandbox` → host agent → `sandbox.Manager.Create()` 3. Manager: resolve base rootfs → acquire shared loop device → create dm-snapshot (sparse CoW file) → allocate network slot → `CreateNetwork()` (netns + veth + tap + NAT) → `vm.Create()` (start Firecracker with `/dev/mapper/wrenn-{id}`, configure via HTTP API, boot) → `envdclient.WaitUntilReady()` (poll /health) → store in-memory state 4. 
API handler updates DB to "running" with host_ip -**Command execution** (`POST /v1/sandboxes/{id}/exec`): +**Command execution** (`POST /v1/capsules/{id}/exec`): 1. API handler verifies sandbox is "running" in DB 2. RPC `Exec` → host agent → `sandbox.Manager.Exec()` → `envdclient.Exec()` 3. envd client opens bidirectional Connect RPC stream (`process.Start`), collects stdout/stderr/exit_code 4. API handler checks UTF-8 validity (base64-encodes if binary), updates last_active_at, returns result -**Streaming exec** (`WS /v1/sandboxes/{id}/exec/stream`): +**Streaming exec** (`WS /v1/capsules/{id}/exec/stream`): 1. WebSocket upgrade, read first message for cmd/args 2. RPC `ExecStream` → host agent → `sandbox.Manager.ExecStream()` → `envdclient.ExecStream()` 3. envd client returns a channel of events; host agent forwards events through the RPC stream @@ -189,7 +199,7 @@ To add a new RPC method: edit the `.proto` file → `make proto` → implement t ### sqlc -Config: `sqlc.yaml` (project root). Reads queries from `db/queries/*.sql`, reads schema from `db/migrations/`, outputs to `internal/db/`. +Config: `sqlc.yaml` (project root). Reads queries from `db/queries/*.sql`, reads schema from `db/migrations/`, outputs to `pkg/db/`. To add a new query: add it to the appropriate `.sql` file in `db/queries/` → `make generate` → use the new method on `*db.Queries`. @@ -201,7 +211,7 @@ To add a new query: add it to the appropriate `.sql` file in `db/queries/` → ` - **TAP networking** (not vsock) for host-to-envd communication - **Device-mapper snapshots** for rootfs CoW — shared read-only loop device per base template, per-sandbox sparse CoW file, Firecracker gets `/dev/mapper/wrenn-{id}` - **PostgreSQL** via pgx/v5 + sqlc (type-safe query generation). Goose for migrations (plain SQL, up/down) -- **Dashboard**: SvelteKit (Svelte 5, adapter-static) + Tailwind CSS v4 + Bits UI. 
Built to static files, embedded into the Go binary via `go:embed`, served as catch-all at root +- **Dashboard**: SvelteKit (Svelte 5, adapter-static) + Tailwind CSS v4 + Bits UI. Built to static files in `frontend/build/`, served by Caddy (not embedded in the Go binary) - **Lago** for billing (external service, not in this codebase) ## Coding Conventions @@ -233,7 +243,9 @@ The main module (`go.mod`) and envd (`envd/go.mod`) are fully independent. `make ## Design Context ### Users -Developers across the full spectrum — solo engineers building side projects, startup teams integrating sandboxed execution into products, and platform/infra engineers at larger organizations. The interface must feel at home for all three: approachable enough not to intimidate a hacker, precise enough to earn the trust of a production ops team. Never condescend, never oversimplify. Trust the user to understand what they're looking at. +Developers across the full spectrum — solo engineers building side projects, startup teams integrating sandboxed execution into products, and platform/infra engineers at larger organizations running production workloads on Firecracker microVMs. They arrive with context: they know what a process is, what a rootfs is, what a TTY means. The interface must feel at home for all three: approachable enough not to intimidate a hacker, precise enough to earn the trust of a production ops team. Never condescend, never oversimplify. Trust the user to understand what they're looking at. + +**Primary job to be done:** Understand what's running, act on it confidently, and get back to code. ### Brand Personality **Precise. Warm. Uncompromising.** @@ -243,9 +255,9 @@ Wrenn is an engineer's favorite tool — built with visible care, not assembled Emotional goal: **in control.** Users leave a session with full confidence in what's running, what happened, and what comes next. Nothing is hidden, nothing is ambiguous. 
### Aesthetic Direction -**Dark-first, industrial-warm, data-forward.** +**Dark-only (permanently), industrial-warm, data-forward.** -The near-black-green background palette (`#0a0c0b` through `#2a302d`) reads as "black with intention" — not pitch black (cold) and not charcoal (dated). The sage green accent (`#5e8c58`) is muted and organic, a meaningful departure from the startup-green neon that saturates the developer tool space. +No light mode planned. All design decisions should optimize for dark. The near-black-green background palette (`#0a0c0b` through `#2a302d`) reads as "black with intention" — not pitch black (cold) and not charcoal (dated). The sage green accent (`#5e8c58`) is muted and organic, a meaningful departure from the startup-green neon that saturates the developer tool space. **Anti-references:** - **Supabase**: avoid the friendly, approachable startup-green energy — too generic, too eager to please @@ -259,30 +271,95 @@ The near-black-green background palette (`#0a0c0b` through `#2a302d`) reads as " ### Type System Four fonts with strict roles — this is the design system's strongest personality trait and must be respected: -| Font | Role | When to use | -|------|------|-------------| -| **Manrope** (variable, sans) | UI workhorse | All body copy, nav, labels, buttons, form text | -| **Instrument Serif** | Display / editorial | Page titles (h1), dialog headings, metric values, hero moments | -| **JetBrains Mono** (variable) | Data / code | IDs, timestamps, key prefixes, file paths, terminal output, metrics | -| **Alice** | Brand wordmark | "Wrenn" in sidebar and login only — nowhere else | +| Font | CSS Class | Role | When to use | +|------|-----------|------|-------------| +| **Manrope** (variable, sans) | `font-sans` | UI workhorse | All body copy, nav, labels, buttons, form text | +| **Instrument Serif** | `font-serif` | Display / editorial | Page titles (h1), dialog headings, metric values, hero moments | +| **JetBrains Mono** (variable) | 
`font-mono` | Data / code | IDs, timestamps, key prefixes, file paths, terminal output, metrics | +| **Alice** | brand wordmark only | Brand wordmark | "Wrenn" in sidebar and login only — nowhere else | Instrument Serif at scale creates the signature editorial moments. Mono provides the precision signal for technical data. Never swap these roles. +**Tracking overrides (app.css):** +- `.font-serif` — `letter-spacing: 0.015em` (positive tracking; Instrument Serif reads less condensed at display sizes) +- `.font-mono` — `font-variant-numeric: tabular-nums` (numbers align in tables and metric displays) + +**Type scale (root: 87.5% = 14px base):** +| Token | Value | Use | +|---|---|---| +| `--text-display` | 2.571rem (~36px) | Auth section headings | +| `--text-page` | 2rem (~28px) | Page h1 titles | +| `--text-heading` | 1.429rem (~20px) | Dialog headings, empty states | +| `--text-body` | 1rem (~14px) | Primary body, buttons, inputs | +| `--text-ui` | 0.929rem (~13px) | Nav labels, table cells | +| `--text-meta` | 0.857rem (~12px) | Key prefixes, minor info | +| `--text-label` | 0.786rem (~11px) | Uppercase section labels | +| `--text-badge` | 0.714rem (~10px) | Live badges, tiny indicators | + ### Color System -``` -Backgrounds: bg-0 (#0a0c0b) through bg-5 (#2a302d) — 6 steps -Text: bright > primary > secondary > tertiary > muted — 5 levels -Accent: accent (#5e8c58) / accent-mid / accent-bright / glow / glow-mid -Status: amber (#d4a73c) / red (#cf8172) / blue (#5a9fd4) -``` -Use accent sparingly. It should feel earned — reserved for live/active state indicators, primary CTAs, focus rings, and active nav. When accent appears, it should register. +All values are CSS custom properties in `frontend/src/app.css`. -### Upcoming Surfaces (design must accommodate) -- **Terminal / shell output**: streaming exec output, TTY sessions. Needs strong mono treatment, high contrast for long sessions. -- **File browser**: filesystem tree inside capsule. 
Density matters — breadcrumbs, file icons, permission bits. -- **SDK / docs embedding**: code samples, quickstart flows inline in dashboard. Code blocks must feel premium, not afterthought. -- **Billing / usage charts**: pool consumption, cost curves, usage over time. Instrument Serif at large scale for metrics; chart containers should feel like instruments, not dashboards. +**Backgrounds (6-step near-black-green scale):** +| Token | Value | Use | +|---|---|---| +| `--color-bg-0` | `#0a0c0b` | Page base, sidebar deepest layer | +| `--color-bg-1` | `#0f1211` | Sidebar surface | +| `--color-bg-2` | `#141817` | Card backgrounds | +| `--color-bg-3` | `#1a1e1c` | Table headers, elevated surfaces | +| `--color-bg-4` | `#212624` | Hover states, inputs | +| `--color-bg-5` | `#2a302d` | Highlighted items, selected rows | + +**Text (5-level hierarchy):** +| Token | Value | Use | +|---|---|---| +| `--color-text-bright` | `#eae7e2` | H1s, dialog headings | +| `--color-text-primary` | `#d0cdc6` | Body copy, primary labels | +| `--color-text-secondary` | `#9b9790` | Secondary labels, descriptions | +| `--color-text-tertiary` | `#6b6862` | Hints, placeholders | +| `--color-text-muted` | `#454340` | Dividers as text, ultra-subtle | + +**Accent (sage green — use sparingly, must feel earned):** +| Token | Value | Use | +|---|---|---| +| `--color-accent` | `#5e8c58` | Primary CTA, live indicators, focus rings, active nav | +| `--color-accent-mid` | `#89a785` | Hover accent text | +| `--color-accent-bright` | `#a4c89f` | Accent on dark backgrounds | +| `--color-accent-glow` | `rgba(94,140,88,0.07)` | Subtle tinted backgrounds | +| `--color-accent-glow-mid` | `rgba(94,140,88,0.14)` | Hover tint on accent items | + +**Status semantics:** +| Token | Value | Use | +|---|---|---| +| `--color-amber` | `#d4a73c` | Warning, paused state | +| `--color-red` | `#cf8172` | Error, destructive actions | +| `--color-blue` | `#5a9fd4` | Info, neutral system states | + +**Borders:** `--color-border` 
(`#1f2321`) default; `--color-border-mid` (`#2a2f2c`) for inputs/hover. + +### Component Patterns + +**Buttons:** +- Primary: solid sage green (`--color-accent`), hover brightness boost + micro-lift (`-translate-y-px`) +- Secondary: bordered (`--color-border-mid`), text transitions to accent on hover +- Danger: red text + subtle red background on hover +- All: `transition-all duration-150` + +**Inputs:** +- Border `--color-border`, background `--color-bg-2`; focus transitions border and icon to accent +- Group focus pattern: `group` wrapper + `group-focus-within:text-[var(--color-accent)]` on icon + +**Tables / data lists:** +- Grid layout; header `bg-3` + uppercase `--text-label`; row hover `hover:bg-[var(--color-bg-3)]` +- Status stripe: left border color matches sandbox state + +**Status indicators:** Running = animated ping + sage green dot; Paused = amber dot; Stopped = muted gray. Color is never the sole differentiator. + +**Modals & dialogs:** Border + shadow only — no accent gradient bars/strips. `fadeUp` 0.35s entrance. + +**Empty states:** Large icon with glow, Instrument Serif heading, secondary body text, CTA below, `iconFloat` 4s animation. + +**Animations (always respect `prefers-reduced-motion`):** `fadeUp` (entrance), `status-ping` (live indicator), `iconFloat` (empty states), `spin-once` (refresh), staggered `animation-delay` on lists. 
### Design Principles diff --git a/Makefile b/Makefile index 2dbcc76..e80869c 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,10 @@ DATABASE_URL ?= postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable GOBIN := $(shell pwd)/builds ENVD_DIR := envd +COMMIT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") +VERSION_CP := $(shell cat VERSION_CP 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") +VERSION_AGENT := $(shell cat VERSION_AGENT 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") +VERSION_ENVD := $(shell cat envd/VERSION 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") LDFLAGS := -s -w # ═══════════════════════════════════════════════════ @@ -17,14 +21,14 @@ build-frontend: cd frontend && pnpm install --frozen-lockfile && pnpm build build-cp: - go build -v -ldflags="$(LDFLAGS)" -o $(GOBIN)/wrenn-cp ./cmd/control-plane + go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_CP) -X main.commit=$(COMMIT)" -o $(GOBIN)/wrenn-cp ./cmd/control-plane build-agent: - go build -v -ldflags="$(LDFLAGS)" -o $(GOBIN)/wrenn-agent ./cmd/host-agent + go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_AGENT) -X main.commit=$(COMMIT)" -o $(GOBIN)/wrenn-agent ./cmd/host-agent build-envd: cd $(ENVD_DIR) && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ - go build -ldflags="$(LDFLAGS)" -o $(GOBIN)/envd . + go build -ldflags="$(LDFLAGS) -X main.Version=$(VERSION_ENVD) -X main.commitSHA=$(COMMIT)" -o $(GOBIN)/envd . @file $(GOBIN)/envd | grep -q "statically linked" || \ (echo "ERROR: envd is not statically linked!" 
&& exit 1) diff --git a/README.md b/README.md index c312494..765be5a 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,16 @@ Secure infrastructure for AI -## Deployment - -### Prerequisites +## Prerequisites - Linux host with `/dev/kvm` access (bare metal or nested virt) - Firecracker binary at `/usr/local/bin/firecracker` - PostgreSQL - Go 1.25+ +- pnpm (for frontend) +- Docker (for dev infra and rootfs builds) -### Build +## Build ```bash make build # outputs to builds/ @@ -19,30 +19,77 @@ make build # outputs to builds/ Produces three binaries: `wrenn-cp` (control plane), `wrenn-agent` (host agent), `envd` (guest agent). -### Host setup +## Host setup -The host agent machine needs: +The host agent needs a kernel, a minimal rootfs image, and working directories on the host machine. -```bash -# Kernel for guest VMs -mkdir -p /var/lib/wrenn/kernels -# Place a vmlinux kernel at /var/lib/wrenn/kernels/vmlinux +### Directory structure -# Rootfs images -mkdir -p /var/lib/wrenn/images -# Build or place .ext4 rootfs images (e.g., minimal.ext4) - -# Sandbox working directory -mkdir -p /var/lib/wrenn/sandboxes - -# Snapshots directory -mkdir -p /var/lib/wrenn/snapshots - -# Enable IP forwarding -sysctl -w net.ipv4.ip_forward=1 +``` +/var/lib/wrenn/ +├── kernels/ +│ └── vmlinux # uncompressed Linux kernel (not bzImage) +├── images/ +│ └── minimal/ +│ └── rootfs.ext4 # base rootfs (all other templates snapshot from this) +├── sandboxes/ # per-sandbox CoW files (created at runtime) +└── snapshots/ # pause/hibernate snapshot files (created at runtime) ``` -### Configure +Create the directories: + +```bash +sudo mkdir -p /var/lib/wrenn/{kernels,images/minimal,sandboxes,snapshots} +``` + +### Kernel + +Place an uncompressed `vmlinux` kernel at `/var/lib/wrenn/kernels/vmlinux`. Versioned kernels (`vmlinux-{semver}`) are also supported — the agent picks the latest by semver. 
+ +### Minimal rootfs + +The minimal rootfs is the base image that all other templates (Python, Node, etc.) are built on top of via device-mapper snapshots. It must contain: + +| Package | Why | +|---------|-----| +| `socat` | Bidirectional relay for port forwarding | +| `chrony` | Time sync from KVM PTP clock (`/dev/ptp0`) | +| `tini` | PID 1 zombie reaper (injected by build script, not apt) | +| `sudo` | User privilege management inside the guest | +| `wget` | HTTP fetching | +| `curl` | HTTP client | +| `ca-certificates` | TLS certificate verification | + +**To build a rootfs from a Docker container:** + +1. Create and configure a container with the required packages: + ```bash + docker run -it --name wrenn-minimal debian:bookworm bash + # Inside the container: + apt update && apt install -y socat chrony sudo wget curl ca-certificates + exit + ``` + +2. Export to a rootfs image (builds envd, injects wrenn-init + tini, shrinks to minimum size): + ```bash + sudo bash scripts/rootfs-from-container.sh wrenn-minimal minimal + ``` + +**To update an existing rootfs** after changing envd or `wrenn-init.sh`: + +```bash +bash scripts/update-minimal-rootfs.sh +``` + +This rebuilds envd via `make build-envd` and copies the fresh binaries into the mounted rootfs image. + +### IP forwarding + +```bash +sudo sysctl -w net.ipv4.ip_forward=1 +``` + +## Configure Copy `.env.example` to `.env` and edit: @@ -59,25 +106,21 @@ WRENN_HOST_LISTEN_ADDR=:50051 WRENN_DIR=/var/lib/wrenn ``` -### Run +## Development ```bash -# Apply database migrations -make migrate-up - -# Start control plane -./builds/wrenn-cp +make dev # Start PostgreSQL (Docker), run migrations, start control plane +make dev-agent # Start host agent (separate terminal, sudo) +make dev-frontend # Vite dev server with HMR (port 5173) +make check # fmt + vet + lint + test ``` -Control plane listens on `WRENN_CP_LISTEN_ADDR` (default `:8000`). 
- ### Host registration Hosts must be registered with the control plane before they can serve sandboxes. 1. **Create a host record** (via API or dashboard): ```bash - # As an admin (JWT auth) curl -X POST http://localhost:8000/v1/hosts \ -H "Authorization: Bearer $JWT_TOKEN" \ -H "Content-Type: application/json" \ @@ -87,17 +130,16 @@ Hosts must be registered with the control plane before they can serve sandboxes. 2. **Start the host agent** with the registration token and its externally-reachable address: ```bash - sudo WRENN_CP_URL=http://cp-host:8000 \ + sudo WRENN_CP_URL=http://localhost:8000 \ ./builds/wrenn-agent \ --register \ - --address 10.0.1.5:50051 + --address :50051 ``` On first startup the agent sends its specs (arch, CPU, memory, disk) to the control plane, receives a long-lived host JWT, and saves it to `$WRENN_DIR/host-token`. 3. **Subsequent startups** don't need `--register` — the agent loads the saved JWT automatically: ```bash - sudo WRENN_CP_URL=http://cp-host:8000 \ - ./builds/wrenn-agent --address 10.0.1.5:50051 + sudo ./builds/wrenn-agent --address :50051 ``` 4. **If registration fails** (e.g., network error after token was consumed), regenerate a token: @@ -107,23 +149,6 @@ Hosts must be registered with the control plane before they can serve sandboxes. ``` Then restart the agent with the new token. -The agent sends heartbeats to the control plane every 30 seconds. Host agent listens on `WRENN_HOST_LISTEN_ADDR` (default `:50051`). - -### Rootfs images - -envd must be baked into every rootfs image. After building: - -```bash -make build-envd -bash scripts/update-debug-rootfs.sh /var/lib/wrenn/images/minimal.ext4 -``` - -## Development - -```bash -make dev # Start PostgreSQL (Docker), run migrations, start control plane -make dev-agent # Start host agent (separate terminal, sudo) -make check # fmt + vet + lint + test -``` +The agent sends heartbeats to the control plane every 30 seconds. See `CLAUDE.md` for full architecture documentation. 
diff --git a/VERSION_AGENT b/VERSION_AGENT new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/VERSION_AGENT @@ -0,0 +1 @@ +0.1.0 diff --git a/VERSION_CP b/VERSION_CP new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/VERSION_CP @@ -0,0 +1 @@ +0.1.0 diff --git a/cmd/control-plane/main.go b/cmd/control-plane/main.go index d7ae18d..26c57be 100644 --- a/cmd/control-plane/main.go +++ b/cmd/control-plane/main.go @@ -1,191 +1,15 @@ package main -import ( - "context" - "log/slog" - "net/http" - "os" - "os/signal" - "strings" - "syscall" - "time" +import "git.omukk.dev/wrenn/wrenn/pkg/cpserver" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/redis/go-redis/v9" - - "git.omukk.dev/wrenn/wrenn/internal/api" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/auth/oauth" - "git.omukk.dev/wrenn/wrenn/internal/channels" - "git.omukk.dev/wrenn/wrenn/internal/config" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" - "git.omukk.dev/wrenn/wrenn/internal/scheduler" +// Set via -ldflags at build time. +var ( + version = "dev" + commit = "unknown" ) func main() { - slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ - Level: slog.LevelDebug, - }))) - - cfg := config.Load() - - if len(cfg.JWTSecret) < 32 { - slog.Error("JWT_SECRET must be at least 32 characters") - os.Exit(1) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Database connection pool. - pool, err := pgxpool.New(ctx, cfg.DatabaseURL) - if err != nil { - slog.Error("failed to connect to database", "error", err) - os.Exit(1) - } - defer pool.Close() - - if err := pool.Ping(ctx); err != nil { - slog.Error("failed to ping database", "error", err) - os.Exit(1) - } - slog.Info("connected to database") - - queries := db.New(pool) - - // Redis client. 
- redisOpts, err := redis.ParseURL(cfg.RedisURL) - if err != nil { - slog.Error("failed to parse REDIS_URL", "error", err) - os.Exit(1) - } - rdb := redis.NewClient(redisOpts) - defer rdb.Close() - - if err := rdb.Ping(ctx).Err(); err != nil { - slog.Error("failed to ping redis", "error", err) - os.Exit(1) - } - slog.Info("connected to redis") - - // mTLS is mandatory — parse internal CA for CP↔agent communication. - if cfg.CACert == "" || cfg.CAKey == "" { - slog.Error("WRENN_CA_CERT and WRENN_CA_KEY are required — mTLS is mandatory for CP↔agent communication") - os.Exit(1) - } - ca, err := auth.ParseCA(cfg.CACert, cfg.CAKey) - if err != nil { - slog.Error("failed to parse mTLS CA from environment", "error", err) - os.Exit(1) - } - slog.Info("mTLS enabled: CA loaded") - - // Host client pool — manages Connect RPC clients to host agents. - cpCertStore, err := auth.NewCPCertStore(ca) - if err != nil { - slog.Error("failed to issue CP client certificate", "error", err) - os.Exit(1) - } - // Renew the CP client certificate periodically so it never expires - // while the control plane is running (TTL = 24h, renewal = every 12h). - go func() { - ticker := time.NewTicker(auth.CPCertRenewInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if err := cpCertStore.Refresh(); err != nil { - slog.Error("failed to renew CP client certificate", "error", err) - } else { - slog.Info("CP client certificate renewed") - } - } - } - }() - hostPool := lifecycle.NewHostClientPoolTLS(auth.CPClientTLSConfig(ca, cpCertStore)) - slog.Info("host client pool: mTLS enabled") - - // Scheduler — picks a host for each new sandbox (round-robin for now). - hostScheduler := scheduler.NewRoundRobinScheduler(queries) - - // OAuth provider registry. 
- oauthRegistry := oauth.NewRegistry() - if cfg.OAuthGitHubClientID != "" && cfg.OAuthGitHubClientSecret != "" { - if cfg.CPPublicURL == "" { - slog.Error("CP_PUBLIC_URL must be set when OAuth providers are configured") - os.Exit(1) - } - callbackURL := strings.TrimRight(cfg.CPPublicURL, "/") + "/auth/oauth/github/callback" - ghProvider := oauth.NewGitHubProvider(cfg.OAuthGitHubClientID, cfg.OAuthGitHubClientSecret, callbackURL) - oauthRegistry.Register(ghProvider) - slog.Info("registered OAuth provider", "provider", "github") - } - - // Channels: publisher, service, dispatcher. - if len(cfg.EncryptionKeyHex) != 64 { - slog.Error("WRENN_ENCRYPTION_KEY must be a hex-encoded 32-byte key (64 hex chars)") - os.Exit(1) - } - channelPub := channels.NewPublisher(rdb) - channelSvc := &channels.Service{DB: queries, EncKey: cfg.EncryptionKey} - channelDispatcher := channels.NewDispatcher(rdb, queries, cfg.EncryptionKey) - - // Shared audit logger with event publishing. - al := audit.NewWithPublisher(queries, channelPub) - - // API server. - srv := api.New(queries, hostPool, hostScheduler, pool, rdb, []byte(cfg.JWTSecret), oauthRegistry, cfg.OAuthRedirectURL, ca, al, channelSvc) - - // Start template build workers (2 concurrent). - stopBuildWorkers := srv.BuildSvc.StartWorkers(ctx, 2) - defer stopBuildWorkers() - - // Start channel event dispatcher. - channelDispatcher.Start(ctx) - - // Start host monitor (passive + active reconciliation every 30s). - monitor := api.NewHostMonitor(queries, hostPool, al, 30*time.Second) - monitor.Start(ctx) - - // Start metrics sampler (records per-team sandbox stats every 10s). - sampler := api.NewMetricsSampler(queries, 10*time.Second) - sampler.Start(ctx) - - // Wrap the API handler with the sandbox proxy so that requests with - // {port}-{sandbox_id}.{domain} Host headers are routed to the sandbox's - // host agent. All other requests pass through to the normal API router. 
- proxyWrapper := api.NewSandboxProxyWrapper(srv.Handler(), queries, hostPool) - - httpServer := &http.Server{ - Addr: cfg.ListenAddr, - Handler: proxyWrapper, - } - - // Graceful shutdown on signal. - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - go func() { - sig := <-sigCh - slog.Info("received signal, shutting down", "signal", sig) - cancel() - - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer shutdownCancel() - - if err := httpServer.Shutdown(shutdownCtx); err != nil { - slog.Error("http server shutdown error", "error", err) - } - }() - - slog.Info("control plane starting", "addr", cfg.ListenAddr) - if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - slog.Error("http server error", "error", err) - os.Exit(1) - } - - slog.Info("control plane stopped") + cpserver.Run( + cpserver.WithVersion(version, commit), + ) } diff --git a/cmd/host-agent/main.go b/cmd/host-agent/main.go index 4e5d8ed..5896c2c 100644 --- a/cmd/host-agent/main.go +++ b/cmd/host-agent/main.go @@ -1,28 +1,40 @@ package main import ( + "bufio" "context" "crypto/tls" "flag" + "fmt" "log/slog" "net/http" "os" "os/signal" "path/filepath" + "strconv" + "strings" "sync" "syscall" "time" "github.com/joho/godotenv" - "git.omukk.dev/wrenn/wrenn/internal/auth" "git.omukk.dev/wrenn/wrenn/internal/devicemapper" "git.omukk.dev/wrenn/wrenn/internal/hostagent" + "git.omukk.dev/wrenn/wrenn/internal/layout" "git.omukk.dev/wrenn/wrenn/internal/network" "git.omukk.dev/wrenn/wrenn/internal/sandbox" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/logging" "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/hostagentv1connect" ) +// Set via -ldflags at build time. +var ( + version = "dev" + commit = "unknown" +) + func main() { // Best-effort load — missing .env file is fine. 
_ = godotenv.Load() @@ -31,18 +43,24 @@ func main() { advertiseAddr := flag.String("address", "", "Externally-reachable address (ip:port) for this host agent") flag.Parse() - slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ - Level: slog.LevelDebug, - }))) + rootDir := envOrDefault("WRENN_DIR", "/var/lib/wrenn") + cleanupLog := logging.Setup(filepath.Join(rootDir, "logs"), "host-agent") + defer cleanupLog() - if os.Geteuid() != 0 { - slog.Error("host agent must run as root") + if err := checkPrivileges(); err != nil { + slog.Error("insufficient privileges", "error", err) os.Exit(1) } - // Enable IP forwarding (required for NAT). + // Enable IP forwarding (required for NAT). The write may fail if running + // as non-root without DAC_OVERRIDE on this path — that's OK if the systemd + // unit's ExecStartPre already set it. We verify the value regardless. if err := os.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1"), 0644); err != nil { - slog.Warn("failed to enable ip_forward", "error", err) + slog.Warn("failed to enable ip_forward (may have been set by systemd unit)", "error", err) + } + if b, err := os.ReadFile("/proc/sys/net/ipv4/ip_forward"); err != nil || strings.TrimSpace(string(b)) != "1" { + slog.Error("ip_forward is not enabled — sandbox networking will be broken", "error", err) + os.Exit(1) } // Clean up stale resources from a previous crash. @@ -50,7 +68,6 @@ func main() { network.CleanupStaleNamespaces() listenAddr := envOrDefault("WRENN_HOST_LISTEN_ADDR", ":50051") - rootDir := envOrDefault("WRENN_DIR", "/var/lib/wrenn") cpURL := os.Getenv("WRENN_CP_URL") credsFile := filepath.Join(rootDir, "host-credentials.json") @@ -63,15 +80,50 @@ func main() { os.Exit(1) } - // Expand base images to the standard disk size (sparse, no extra physical + // Parse default rootfs size from env (e.g. "5G", "2Gi", "1000M"). 
+ defaultRootfsSizeMB := sandbox.DefaultDiskSizeMB + if sizeStr := os.Getenv("WRENN_DEFAULT_ROOTFS_SIZE"); sizeStr != "" { + parsed, err := sandbox.ParseSizeToMB(sizeStr) + if err != nil { + slog.Error("invalid WRENN_DEFAULT_ROOTFS_SIZE", "value", sizeStr, "error", err) + os.Exit(1) + } + defaultRootfsSizeMB = parsed + slog.Info("using custom rootfs size", "size_mb", defaultRootfsSizeMB) + } + + // Expand base images to the configured disk size (sparse, no extra physical // disk). This ensures dm-snapshot sandboxes see the full size from boot. - if err := sandbox.EnsureImageSizes(rootDir, sandbox.DefaultDiskSizeMB); err != nil { + if err := sandbox.EnsureImageSizes(rootDir, defaultRootfsSizeMB); err != nil { slog.Error("failed to expand base images", "error", err) os.Exit(1) } + // Resolve latest kernel version. + kernelPath, kernelVersion, err := layout.LatestKernel(rootDir) + if err != nil { + slog.Error("failed to find kernel", "error", err) + os.Exit(1) + } + slog.Info("resolved kernel", "version", kernelVersion, "path", kernelPath) + + // Detect firecracker version. 
+ fcBin := envOrDefault("WRENN_FIRECRACKER_BIN", "/usr/local/bin/firecracker") + fcVersion, err := sandbox.DetectFirecrackerVersion(fcBin) + if err != nil { + slog.Error("failed to detect firecracker version", "error", err) + os.Exit(1) + } + slog.Info("resolved firecracker", "version", fcVersion, "path", fcBin) + cfg := sandbox.Config{ - WrennDir: rootDir, + WrennDir: rootDir, + DefaultRootfsSizeMB: defaultRootfsSizeMB, + KernelPath: kernelPath, + KernelVersion: kernelVersion, + FirecrackerBin: fcBin, + FirecrackerVersion: fcVersion, + AgentVersion: version, } mgr := sandbox.New(cfg) @@ -128,6 +180,7 @@ func main() { shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) defer shutdownCancel() mgr.Shutdown(shutdownCtx) + sandbox.ShrinkMinimalImage(rootDir) if err := httpServer.Shutdown(shutdownCtx); err != nil { slog.Error("http server shutdown error", "error", err) } @@ -180,7 +233,7 @@ func main() { doShutdown("signal: " + sig.String()) }() - slog.Info("host agent starting", "addr", listenAddr, "host_id", creds.HostID) + slog.Info("host agent starting", "addr", listenAddr, "host_id", creds.HostID, "version", version, "commit", commit) // TLSConfig is always set (mTLS is mandatory). Create the TLS listener // manually because ListenAndServeTLS requires on-disk cert/key paths // but we use GetCertificate callback for hot-swap support. @@ -203,3 +256,63 @@ func envOrDefault(key, def string) string { } return def } + +// checkPrivileges verifies the process has the required Linux capabilities. +// Always reads CapEff — even for root — because a root process inside a +// restricted container (e.g. docker --cap-drop=all) may not have all caps. +func checkPrivileges() error { + capEff, err := readEffectiveCaps() + if err != nil { + return fmt.Errorf("read capabilities: %w", err) + } + + // All capabilities required by the host agent at runtime. 
+ required := []struct { + bit uint + name string + }{ + {1, "CAP_DAC_OVERRIDE"}, // /dev/loop*, /dev/mapper/*, /dev/net/tun + {5, "CAP_KILL"}, // SIGTERM/SIGKILL to Firecracker processes + {12, "CAP_NET_ADMIN"}, // netlink, iptables, routing, TAP/veth + {13, "CAP_NET_RAW"}, // raw sockets (iptables) + {19, "CAP_SYS_PTRACE"}, // reading /proc/self/ns/net (netns.Get) + {21, "CAP_SYS_ADMIN"}, // netns, mount ns, losetup, dmsetup + {27, "CAP_MKNOD"}, // device-mapper node creation + } + + var missing []string + for _, cap := range required { + if capEff&(1<<cap.bit) == 0 { + missing = append(missing, cap.name) + } + } + + if len(missing) > 0 { + return fmt.Errorf("missing capabilities: %s — run as root or apply setcap to the binary", + strings.Join(missing, ", ")) + } + + return nil +} + +// readEffectiveCaps parses the CapEff bitmask from /proc/self/status. +func readEffectiveCaps() (uint64, error) { + f, err := os.Open("/proc/self/status") + if err != nil { + return 0, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + if hexStr, ok := strings.CutPrefix(line, "CapEff:"); ok { + return strconv.ParseUint(strings.TrimSpace(hexStr), 16, 64) + } + } + + if err := scanner.Err(); err != nil { + return 0, fmt.Errorf("read /proc/self/status: %w", err) + } + return 0, fmt.Errorf("CapEff not found in /proc/self/status") +} diff --git a/db/migrations/20260310094104_initial.sql b/db/migrations/20260310094104_initial.sql index 6c8afc4..22544a5 100644 --- a/db/migrations/20260310094104_initial.sql +++ b/db/migrations/20260310094104_initial.sql @@ -171,7 +171,7 @@ CREATE TABLE audit_logs ( metadata JSONB NOT NULL DEFAULT '{}', created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); -CREATE INDEX idx_audit_logs_team_time ON audit_logs(team_id, created_at DESC); +CREATE INDEX idx_audit_logs_team_time ON audit_logs(team_id, created_at DESC, id DESC); CREATE INDEX idx_audit_logs_team_resource ON audit_logs(team_id, resource_type, created_at DESC); -- sandbox_metrics_snapshots diff --git 
a/db/migrations/20260411182550_template_defaults.sql b/db/migrations/20260411182550_template_defaults.sql new file mode 100644 index 0000000..3378453 --- /dev/null +++ b/db/migrations/20260411182550_template_defaults.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE templates + ADD COLUMN default_user TEXT NOT NULL DEFAULT 'root', + ADD COLUMN default_env JSONB NOT NULL DEFAULT '{}'; + +ALTER TABLE template_builds + ADD COLUMN default_user TEXT NOT NULL DEFAULT 'root', + ADD COLUMN default_env JSONB NOT NULL DEFAULT '{}'; + +-- +goose Down +ALTER TABLE template_builds + DROP COLUMN default_env, + DROP COLUMN default_user; + +ALTER TABLE templates + DROP COLUMN default_env, + DROP COLUMN default_user; diff --git a/db/migrations/20260412213141_seed_platform_team.sql b/db/migrations/20260412213141_seed_platform_team.sql new file mode 100644 index 0000000..751a0cc --- /dev/null +++ b/db/migrations/20260412213141_seed_platform_team.sql @@ -0,0 +1,12 @@ +-- +goose Up + +-- Seed the platform team row. This is the sentinel team (all-zeros UUID) that +-- owns platform-wide resources: global templates, admin-created capsules, etc. +-- No user can become a member of this team — it exists solely to satisfy +-- foreign key constraints and to act as a namespace for platform resources. 
+INSERT INTO teams (id, name, slug) +VALUES ('00000000-0000-0000-0000-000000000000', 'Platform', 'platform') +ON CONFLICT (id) DO NOTHING; + +-- +goose Down +DELETE FROM teams WHERE id = '00000000-0000-0000-0000-000000000000'; diff --git a/db/migrations/20260414213729_add_user_active_deleted.sql b/db/migrations/20260414213729_add_user_active_deleted.sql new file mode 100644 index 0000000..b54b588 --- /dev/null +++ b/db/migrations/20260414213729_add_user_active_deleted.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE users ADD COLUMN is_active BOOLEAN NOT NULL DEFAULT TRUE; +ALTER TABLE users ADD COLUMN deleted_at TIMESTAMPTZ; + +-- +goose Down +ALTER TABLE users DROP COLUMN deleted_at; +ALTER TABLE users DROP COLUMN is_active; diff --git a/db/migrations/20260415134310_add_metadata.sql b/db/migrations/20260415134310_add_metadata.sql new file mode 100644 index 0000000..ada4e35 --- /dev/null +++ b/db/migrations/20260415134310_add_metadata.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE sandboxes ADD COLUMN metadata JSONB NOT NULL DEFAULT '{}'; +ALTER TABLE templates ADD COLUMN metadata JSONB NOT NULL DEFAULT '{}'; +ALTER TABLE template_builds ADD COLUMN metadata JSONB NOT NULL DEFAULT '{}'; + +-- +goose Down +ALTER TABLE sandboxes DROP COLUMN metadata; +ALTER TABLE templates DROP COLUMN metadata; +ALTER TABLE template_builds DROP COLUMN metadata; diff --git a/db/migrations/20260415215033_replace_is_active_with_status.sql b/db/migrations/20260415215033_replace_is_active_with_status.sql new file mode 100644 index 0000000..2ea091a --- /dev/null +++ b/db/migrations/20260415215033_replace_is_active_with_status.sql @@ -0,0 +1,15 @@ +-- +goose Up +ALTER TABLE users ADD COLUMN status TEXT NOT NULL DEFAULT 'active'; + +-- Backfill from existing columns. 
+UPDATE users SET status = 'deleted' WHERE deleted_at IS NOT NULL; +UPDATE users SET status = 'disabled' WHERE is_active = false AND deleted_at IS NULL; + +ALTER TABLE users DROP COLUMN is_active; + +-- +goose Down +ALTER TABLE users ADD COLUMN is_active BOOLEAN NOT NULL DEFAULT TRUE; + +UPDATE users SET is_active = false WHERE status IN ('inactive', 'disabled', 'deleted'); + +ALTER TABLE users DROP COLUMN status; diff --git a/db/migrations/20260415221116_cascade_user_delete.sql b/db/migrations/20260415221116_cascade_user_delete.sql new file mode 100644 index 0000000..ac01674 --- /dev/null +++ b/db/migrations/20260415221116_cascade_user_delete.sql @@ -0,0 +1,72 @@ +-- +goose Up + +-- users_teams: remove membership when user is deleted +ALTER TABLE users_teams DROP CONSTRAINT users_teams_user_id_fkey; +ALTER TABLE users_teams ADD CONSTRAINT users_teams_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; + +-- oauth_providers: remove auth links when user is deleted +ALTER TABLE oauth_providers DROP CONSTRAINT oauth_providers_user_id_fkey; +ALTER TABLE oauth_providers ADD CONSTRAINT oauth_providers_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; + +-- admin_permissions: remove permissions when user is deleted +ALTER TABLE admin_permissions DROP CONSTRAINT admin_permissions_user_id_fkey; +ALTER TABLE admin_permissions ADD CONSTRAINT admin_permissions_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; + +-- team_api_keys.created_by: make nullable, SET NULL on user delete +ALTER TABLE team_api_keys ALTER COLUMN created_by DROP NOT NULL; +ALTER TABLE team_api_keys DROP CONSTRAINT team_api_keys_created_by_fkey; +ALTER TABLE team_api_keys ADD CONSTRAINT team_api_keys_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE SET NULL; + +-- hosts.created_by: make nullable, SET NULL on user delete +ALTER TABLE hosts ALTER COLUMN created_by DROP NOT NULL; +ALTER TABLE hosts DROP 
CONSTRAINT hosts_created_by_fkey; +ALTER TABLE hosts ADD CONSTRAINT hosts_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE SET NULL; + +-- host_tokens.created_by: make nullable, SET NULL on user delete +ALTER TABLE host_tokens ALTER COLUMN created_by DROP NOT NULL; +ALTER TABLE host_tokens DROP CONSTRAINT host_tokens_created_by_fkey; +ALTER TABLE host_tokens ADD CONSTRAINT host_tokens_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE SET NULL; + +-- +goose Down + +-- Revert host_tokens.created_by +ALTER TABLE host_tokens DROP CONSTRAINT host_tokens_created_by_fkey; +UPDATE host_tokens SET created_by = '00000000-0000-0000-0000-000000000000' WHERE created_by IS NULL; +ALTER TABLE host_tokens ALTER COLUMN created_by SET NOT NULL; +ALTER TABLE host_tokens ADD CONSTRAINT host_tokens_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id); + +-- Revert hosts.created_by +ALTER TABLE hosts DROP CONSTRAINT hosts_created_by_fkey; +UPDATE hosts SET created_by = '00000000-0000-0000-0000-000000000000' WHERE created_by IS NULL; +ALTER TABLE hosts ALTER COLUMN created_by SET NOT NULL; +ALTER TABLE hosts ADD CONSTRAINT hosts_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id); + +-- Revert team_api_keys.created_by +ALTER TABLE team_api_keys DROP CONSTRAINT team_api_keys_created_by_fkey; +UPDATE team_api_keys SET created_by = '00000000-0000-0000-0000-000000000000' WHERE created_by IS NULL; +ALTER TABLE team_api_keys ALTER COLUMN created_by SET NOT NULL; +ALTER TABLE team_api_keys ADD CONSTRAINT team_api_keys_created_by_fkey + FOREIGN KEY (created_by) REFERENCES users(id); + +-- Revert admin_permissions +ALTER TABLE admin_permissions DROP CONSTRAINT admin_permissions_user_id_fkey; +ALTER TABLE admin_permissions ADD CONSTRAINT admin_permissions_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id); + +-- Revert oauth_providers +ALTER TABLE oauth_providers DROP CONSTRAINT oauth_providers_user_id_fkey; +ALTER 
TABLE oauth_providers ADD CONSTRAINT oauth_providers_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id); + +-- Revert users_teams +ALTER TABLE users_teams DROP CONSTRAINT users_teams_user_id_fkey; +ALTER TABLE users_teams ADD CONSTRAINT users_teams_user_id_fkey + FOREIGN KEY (user_id) REFERENCES users(id); diff --git a/db/migrations/embed.go b/db/migrations/embed.go new file mode 100644 index 0000000..d5bbfcb --- /dev/null +++ b/db/migrations/embed.go @@ -0,0 +1,10 @@ +// Package migrations embeds the SQL migration files so that external modules +// (such as the enterprise edition) can access them programmatically. +package migrations + +import "embed" + +// FS contains all SQL migration files. +// +//go:embed *.sql +var FS embed.FS diff --git a/db/queries/api_keys.sql b/db/queries/api_keys.sql index 7ea9645..f57bf5b 100644 --- a/db/queries/api_keys.sql +++ b/db/queries/api_keys.sql @@ -13,7 +13,7 @@ SELECT * FROM team_api_keys WHERE team_id = $1 ORDER BY created_at DESC; SELECT k.id, k.team_id, k.name, k.key_hash, k.key_prefix, k.created_by, k.created_at, k.last_used, u.email AS creator_email FROM team_api_keys k -JOIN users u ON u.id = k.created_by +LEFT JOIN users u ON u.id = k.created_by WHERE k.team_id = $1 ORDER BY k.created_at DESC; @@ -22,3 +22,9 @@ DELETE FROM team_api_keys WHERE id = $1 AND team_id = $2; -- name: UpdateAPIKeyLastUsed :exec UPDATE team_api_keys SET last_used = NOW() WHERE id = $1; + +-- name: DeleteAPIKeysByTeam :exec +DELETE FROM team_api_keys WHERE team_id = $1; + +-- name: DeleteAPIKeysByCreator :exec +DELETE FROM team_api_keys WHERE created_by = $1; diff --git a/db/queries/channels.sql b/db/queries/channels.sql index 5772c99..6df0449 100644 --- a/db/queries/channels.sql +++ b/db/queries/channels.sql @@ -22,6 +22,9 @@ RETURNING *; -- name: DeleteChannelByTeam :exec DELETE FROM channels WHERE id = $1 AND team_id = $2; +-- name: DeleteAllChannelsByTeam :exec +DELETE FROM channels WHERE team_id = $1; + -- name: ListChannelsForEvent 
:many SELECT * FROM channels WHERE team_id = $1 diff --git a/db/queries/hosts.sql b/db/queries/hosts.sql index 0a5a150..3e133cb 100644 --- a/db/queries/hosts.sql +++ b/db/queries/hosts.sql @@ -81,6 +81,41 @@ SELECT * FROM hosts WHERE id = $1 AND team_id = $2; -- Returns all hosts that have completed registration (not pending/offline). SELECT * FROM hosts WHERE status NOT IN ('pending', 'offline') ORDER BY created_at; +-- name: GetHostsWithLoad :many +-- Returns all online hosts with raw per-host sandbox resource consumption. +-- Separates running and paused sandbox totals so the caller can apply its own formulas. +SELECT + h.id, + h.type, + h.team_id, + h.provider, + h.availability_zone, + h.arch, + h.cpu_cores, + h.memory_mb, + h.disk_gb, + h.address, + h.status, + h.last_heartbeat_at, + h.metadata, + h.created_by, + h.created_at, + h.updated_at, + h.cert_fingerprint, + h.cert_expires_at, + COALESCE(SUM(s.vcpus) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_vcpus, + COALESCE(SUM(s.memory_mb) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_memory_mb, + COALESCE(SUM(s.disk_size_mb) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_disk_mb, + COALESCE(SUM(s.memory_mb) FILTER (WHERE s.status = 'paused'), 0)::int AS paused_memory_mb, + COALESCE(SUM(s.disk_size_mb) FILTER (WHERE s.status = 'paused'), 0)::int AS paused_disk_mb +FROM hosts h +LEFT JOIN sandboxes s ON s.host_id = h.id + AND s.status IN ('running', 'paused', 'starting', 'pending') +WHERE h.status = 'online' + AND h.address != '' +GROUP BY h.id +ORDER BY h.created_at; + -- name: UpdateHostHeartbeatAndStatus :execrows -- Updates last_heartbeat_at and transitions unreachable hosts back to online. -- Returns 0 if no host was found (deleted), which the caller treats as 404. 
diff --git a/db/queries/metrics.sql b/db/queries/metrics.sql index f58d480..6c612c6 100644 --- a/db/queries/metrics.sql +++ b/db/queries/metrics.sql @@ -51,6 +51,13 @@ WHERE sandbox_id = $1 AND tier = $2; DELETE FROM sandbox_metric_points WHERE ts < EXTRACT(EPOCH FROM NOW() - INTERVAL '30 days')::BIGINT; +-- name: DeleteMetricsSnapshotsByTeam :exec +DELETE FROM sandbox_metrics_snapshots WHERE team_id = $1; + +-- name: DeleteMetricPointsByTeam :exec +DELETE FROM sandbox_metric_points +WHERE sandbox_id IN (SELECT id FROM sandboxes WHERE team_id = $1); + -- name: SampleSandboxMetrics :many -- Aggregates per-team resource usage from the live sandboxes table. -- Groups by all teams that have any sandbox row (including stopped) so that diff --git a/db/queries/oauth.sql b/db/queries/oauth.sql index 31b1ff8..9dc7929 100644 --- a/db/queries/oauth.sql +++ b/db/queries/oauth.sql @@ -5,3 +5,9 @@ VALUES ($1, $2, $3, $4); -- name: GetOAuthProvider :one SELECT * FROM oauth_providers WHERE provider = $1 AND provider_id = $2; + +-- name: GetOAuthProvidersByUserID :many +SELECT * FROM oauth_providers WHERE user_id = $1; + +-- name: DeleteOAuthProvider :exec +DELETE FROM oauth_providers WHERE user_id = $1 AND provider = $2; diff --git a/db/queries/sandboxes.sql b/db/queries/sandboxes.sql index 2b19574..2bf5db7 100644 --- a/db/queries/sandboxes.sql +++ b/db/queries/sandboxes.sql @@ -1,6 +1,6 @@ -- name: InsertSandbox :one -INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, template_id, template_team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) +INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, template_id, template_team_id, metadata) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING *; -- name: GetSandbox :one @@ -15,7 +15,7 @@ SELECT * FROM sandboxes WHERE id = $1 AND team_id = $2; SELECT s.status, h.address AS host_address FROM 
sandboxes s JOIN hosts h ON h.id = s.host_id -WHERE s.id = $1 AND s.team_id = $2; +WHERE s.id = $1; -- name: ListSandboxes :many SELECT * FROM sandboxes ORDER BY created_at DESC; @@ -62,7 +62,7 @@ WHERE id = ANY($1::uuid[]); -- name: ListActiveSandboxesByTeam :many SELECT * FROM sandboxes -WHERE team_id = $1 AND status IN ('running', 'paused', 'starting') +WHERE team_id = $1 AND status IN ('running', 'paused', 'starting', 'hibernated') ORDER BY created_at DESC; -- name: MarkSandboxesMissingByHost :exec @@ -74,6 +74,12 @@ SET status = 'missing', last_updated = NOW() WHERE host_id = $1 AND status IN ('running', 'starting', 'pending'); +-- name: UpdateSandboxMetadata :exec +UPDATE sandboxes +SET metadata = $2, + last_updated = NOW() +WHERE id = $1; + -- name: BulkRestoreRunning :exec -- Called by the reconciler when a host comes back online and its sandboxes are -- confirmed alive. Restores only sandboxes that are in 'missing' state. diff --git a/db/queries/teams.sql b/db/queries/teams.sql index 2117e95..f4de808 100644 --- a/db/queries/teams.sql +++ b/db/queries/teams.sql @@ -53,3 +53,48 @@ UPDATE users_teams SET role = $3 WHERE team_id = $1 AND user_id = $2; -- name: DeleteTeamMember :exec DELETE FROM users_teams WHERE team_id = $1 AND user_id = $2; + +-- name: ListTeamsAdmin :many +SELECT + t.id, + t.name, + t.slug, + t.is_byoc, + t.created_at, + t.deleted_at, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.team_id = t.id)::int AS member_count, + COALESCE(owner_u.name, '') AS owner_name, + COALESCE(owner_u.email, '') AS owner_email, + (SELECT COUNT(*) FROM sandboxes s WHERE s.team_id = t.id AND s.status IN ('running', 'paused', 'starting'))::int AS active_sandbox_count, + (SELECT COUNT(*) FROM channels c WHERE c.team_id = t.id)::int AS channel_count +FROM teams t +LEFT JOIN users_teams owner_ut ON owner_ut.team_id = t.id AND owner_ut.role = 'owner' +LEFT JOIN users owner_u ON owner_u.id = owner_ut.user_id +WHERE t.id != '00000000-0000-0000-0000-000000000000' +ORDER 
BY t.deleted_at ASC NULLS FIRST, t.created_at DESC +LIMIT $1 OFFSET $2; + +-- name: ListSoleOwnedTeams :many +-- Returns teams where the user is the owner and no other members exist. +SELECT t.id FROM teams t +JOIN users_teams ut ON ut.team_id = t.id +WHERE ut.user_id = $1 + AND ut.role = 'owner' + AND t.deleted_at IS NULL + AND NOT EXISTS ( + SELECT 1 FROM users_teams ut2 + WHERE ut2.team_id = t.id AND ut2.user_id <> $1 + ); + +-- name: GetOwnedTeamIDs :many +-- Returns team IDs where the given user has the 'owner' role. +SELECT t.id FROM teams t +JOIN users_teams ut ON ut.team_id = t.id +WHERE ut.user_id = $1 + AND ut.role = 'owner' + AND t.deleted_at IS NULL; + +-- name: CountTeamsAdmin :one +SELECT COUNT(*)::int AS total +FROM teams +WHERE id != '00000000-0000-0000-0000-000000000000'; diff --git a/db/queries/template_builds.sql b/db/queries/template_builds.sql index 1fb07be..69eebc5 100644 --- a/db/queries/template_builds.sql +++ b/db/queries/template_builds.sql @@ -31,3 +31,8 @@ WHERE id = $1; UPDATE template_builds SET error = $2, status = 'failed', completed_at = NOW() WHERE id = $1; + +-- name: UpdateBuildDefaults :exec +UPDATE template_builds +SET default_user = $2, default_env = $3, metadata = $4 +WHERE id = $1; diff --git a/db/queries/templates.sql b/db/queries/templates.sql index de4d6f2..7c50ea6 100644 --- a/db/queries/templates.sql +++ b/db/queries/templates.sql @@ -1,6 +1,6 @@ -- name: InsertTemplate :one -INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7) +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id, default_user, default_env, metadata) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING *; -- name: GetTemplate :one diff --git a/db/queries/users.sql b/db/queries/users.sql index a244fc9..eb41d00 100644 --- a/db/queries/users.sql +++ b/db/queries/users.sql @@ -4,16 +4,21 @@ VALUES ($1, $2, $3, $4) RETURNING *; -- name: GetUserByEmail :one -SELECT * 
FROM users WHERE email = $1; +SELECT * FROM users WHERE email = $1 AND status != 'deleted'; -- name: GetUserByID :one -SELECT * FROM users WHERE id = $1; +SELECT * FROM users WHERE id = $1 AND status != 'deleted'; -- name: InsertUserOAuth :one INSERT INTO users (id, email, name) VALUES ($1, $2, $3) RETURNING *; +-- name: InsertUserInactive :one +INSERT INTO users (id, email, password_hash, name, status) +VALUES ($1, $2, $3, $4, 'inactive') +RETURNING *; + -- name: SetUserAdmin :exec UPDATE users SET is_admin = $2, updated_at = NOW() WHERE id = $1; @@ -35,8 +40,59 @@ SELECT EXISTS( SELECT 1 FROM admin_permissions WHERE user_id = $1 AND permission = $2 ) AS has_permission; +-- name: CountUsers :one +SELECT COUNT(*) FROM users; + +-- name: CountActiveUsers :one +SELECT COUNT(*) FROM users WHERE status = 'active'; + -- name: SearchUsersByEmailPrefix :many SELECT id, email FROM users WHERE email LIKE $1 || '%' ORDER BY email LIMIT 10; -- name: UpdateUserName :exec UPDATE users SET name = $2, updated_at = NOW() WHERE id = $1; + +-- name: ListUsersAdmin :many +SELECT + u.id, + u.email, + u.name, + u.is_admin, + u.status, + u.created_at, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.user_id = u.id)::int AS teams_joined, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.user_id = u.id AND ut.role = 'owner')::int AS teams_owned +FROM users u +WHERE u.status != 'deleted' +ORDER BY u.created_at DESC +LIMIT $1 OFFSET $2; + +-- name: CountUsersAdmin :one +SELECT COUNT(*)::int AS total +FROM users +WHERE status != 'deleted'; + +-- name: SetUserStatus :exec +UPDATE users SET status = $2, updated_at = NOW() WHERE id = $1; + +-- name: UpdateUserPassword :exec +UPDATE users SET password_hash = $2, updated_at = NOW() WHERE id = $1; + +-- name: SoftDeleteUser :exec +UPDATE users SET deleted_at = NOW(), status = 'deleted', updated_at = NOW() WHERE id = $1; + +-- name: CountUserOwnedTeamsWithOtherMembers :one +SELECT COUNT(DISTINCT ut.team_id)::int +FROM users_teams ut +WHERE ut.user_id 
= $1 + AND ut.role = 'owner' + AND EXISTS ( + SELECT 1 FROM users_teams ut2 + WHERE ut2.team_id = ut.team_id AND ut2.user_id <> $1 + ); + +-- name: HardDeleteExpiredUsers :exec +DELETE FROM users WHERE deleted_at IS NOT NULL AND deleted_at < NOW() - INTERVAL '15 days'; + +-- name: HardDeleteUser :exec +DELETE FROM users WHERE id = $1; diff --git a/deploy/Caddyfile.dev b/deploy/Caddyfile.dev index 789f8df..bbeb7d7 100644 --- a/deploy/Caddyfile.dev +++ b/deploy/Caddyfile.dev @@ -8,7 +8,7 @@ # Option 2: Use dnsmasq: address=/.localhost/127.0.0.1 # Option 3: Use systemd-resolved (Ubuntu default — *.localhost resolves to 127.0.0.1) http://*.localhost { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } # Main entry point: API + frontend @@ -16,21 +16,21 @@ http://localhost { # API routes — strip /api prefix and proxy to the control plane. # The frontend calls /api/v1/... which becomes /v1/... at the CP. handle_path /api/* { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } # Backend routes served directly (SDK clients, OAuth initiation) handle /v1/* { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } handle /openapi.yaml { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } handle /docs { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } handle /auth/oauth/* { - reverse_proxy host.docker.internal:8080 + reverse_proxy host.docker.internal:9725 } # Everything else — proxy to the frontend dev server diff --git a/deploy/ansible/playbook.yml b/deploy/ansible/playbook.yml deleted file mode 100644 index e69de29..0000000 diff --git a/deploy/logrotate/wrenn b/deploy/logrotate/wrenn new file mode 100644 index 0000000..f05a606 --- /dev/null +++ b/deploy/logrotate/wrenn @@ -0,0 +1,19 @@ +/var/lib/wrenn/logs/control-plane.log +/var/lib/wrenn/logs/host-agent.log +{ + daily + rotate 3 + missingok + notifempty + 
dateext + dateformat -%Y-%m-%d + compress + delaycompress + sharedscripts + postrotate + # Signal the processes to reopen their log files. + # Use SIGHUP — both binaries handle it gracefully. + pkill -HUP -f wrenn-cp || true + pkill -HUP -f wrenn-agent || true + endscript +} diff --git a/deploy/systemd/wrenn-control-plane.service b/deploy/systemd/wrenn-control-plane.service deleted file mode 100644 index e69de29..0000000 diff --git a/deploy/systemd/wrenn-host-agent.service b/deploy/systemd/wrenn-host-agent.service deleted file mode 100644 index e69de29..0000000 diff --git a/envd/VERSION b/envd/VERSION new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/envd/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/envd/go.mod b/envd/go.mod index c739bc6..200e8ef 100644 --- a/envd/go.mod +++ b/envd/go.mod @@ -19,7 +19,7 @@ require ( github.com/shirou/gopsutil/v4 v4.26.2 github.com/stretchr/testify v1.11.1 github.com/txn2/txeh v1.8.0 - golang.org/x/sys v0.42.0 + golang.org/x/sys v0.43.0 google.golang.org/protobuf v1.36.11 ) @@ -37,6 +37,6 @@ require ( github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - golang.org/x/crypto v0.41.0 // indirect + golang.org/x/crypto v0.50.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/envd/go.sum b/envd/go.sum index a051cf5..3e857f5 100644 --- a/envd/go.sum +++ b/envd/go.sum @@ -72,15 +72,15 @@ github.com/txn2/txeh v1.8.0 h1:G1vZgom6+P/xWwU53AMOpcZgC5ni382ukcPP1TDVYHk= github.com/txn2/txeh v1.8.0/go.mod h1:rRI3Egi3+AFmEXQjft051YdYbxeCT3nFmBLsNCZZaxM= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.50.0 
h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= diff --git a/envd/internal/api/api.gen.go b/envd/internal/api/api.gen.go index 512747b..257326d 100644 --- a/envd/internal/api/api.gen.go +++ b/envd/internal/api/api.gen.go @@ -1,6 +1,6 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. package api import ( @@ -23,6 +23,16 @@ const ( File EntryInfoType = "file" ) +// Valid indicates whether the value is a known member of the EntryInfoType enum. +func (e EntryInfoType) Valid() bool { + switch e { + case File: + return true + default: + return false + } +} + // EntryInfo defines model for EntryInfo. 
type EntryInfo struct { // Name Name of the file @@ -193,6 +203,9 @@ type ServerInterface interface { // Get the stats of the service // (GET /metrics) GetMetrics(w http.ResponseWriter, r *http.Request) + // Quiesce continuous goroutines before Firecracker snapshot + // (POST /snapshot/prepare) + PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) } // Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint. @@ -235,6 +248,12 @@ func (_ Unimplemented) GetMetrics(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } +// Quiesce continuous goroutines before Firecracker snapshot +// (POST /snapshot/prepare) +func (_ Unimplemented) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + // ServerInterfaceWrapper converts contexts to parameters. type ServerInterfaceWrapper struct { Handler ServerInterface @@ -280,7 +299,7 @@ func (siw *ServerInterfaceWrapper) GetFiles(w http.ResponseWriter, r *http.Reque // ------------- Optional query parameter "path" ------------- - err = runtime.BindQueryParameter("form", true, false, "path", r.URL.Query(), ¶ms.Path) + err = runtime.BindQueryParameterWithOptions("form", true, false, "path", r.URL.Query(), ¶ms.Path, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "path", Err: err}) return @@ -288,7 +307,7 @@ func (siw *ServerInterfaceWrapper) GetFiles(w http.ResponseWriter, r *http.Reque // ------------- Optional query parameter "username" ------------- - err = runtime.BindQueryParameter("form", true, false, "username", r.URL.Query(), ¶ms.Username) + err = runtime.BindQueryParameterWithOptions("form", true, false, "username", r.URL.Query(), ¶ms.Username, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "username", Err: 
err}) return @@ -296,7 +315,7 @@ func (siw *ServerInterfaceWrapper) GetFiles(w http.ResponseWriter, r *http.Reque // ------------- Optional query parameter "signature" ------------- - err = runtime.BindQueryParameter("form", true, false, "signature", r.URL.Query(), ¶ms.Signature) + err = runtime.BindQueryParameterWithOptions("form", true, false, "signature", r.URL.Query(), ¶ms.Signature, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature", Err: err}) return @@ -304,7 +323,7 @@ func (siw *ServerInterfaceWrapper) GetFiles(w http.ResponseWriter, r *http.Reque // ------------- Optional query parameter "signature_expiration" ------------- - err = runtime.BindQueryParameter("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration) + err = runtime.BindQueryParameterWithOptions("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration, runtime.BindQueryParameterOptions{Type: "integer", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature_expiration", Err: err}) return @@ -337,7 +356,7 @@ func (siw *ServerInterfaceWrapper) PostFiles(w http.ResponseWriter, r *http.Requ // ------------- Optional query parameter "path" ------------- - err = runtime.BindQueryParameter("form", true, false, "path", r.URL.Query(), ¶ms.Path) + err = runtime.BindQueryParameterWithOptions("form", true, false, "path", r.URL.Query(), ¶ms.Path, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "path", Err: err}) return @@ -345,7 +364,7 @@ func (siw *ServerInterfaceWrapper) PostFiles(w http.ResponseWriter, r *http.Requ // ------------- Optional query parameter "username" ------------- - err = runtime.BindQueryParameter("form", true, false, "username", r.URL.Query(), ¶ms.Username) + err = 
runtime.BindQueryParameterWithOptions("form", true, false, "username", r.URL.Query(), ¶ms.Username, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "username", Err: err}) return @@ -353,7 +372,7 @@ func (siw *ServerInterfaceWrapper) PostFiles(w http.ResponseWriter, r *http.Requ // ------------- Optional query parameter "signature" ------------- - err = runtime.BindQueryParameter("form", true, false, "signature", r.URL.Query(), ¶ms.Signature) + err = runtime.BindQueryParameterWithOptions("form", true, false, "signature", r.URL.Query(), ¶ms.Signature, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature", Err: err}) return @@ -361,7 +380,7 @@ func (siw *ServerInterfaceWrapper) PostFiles(w http.ResponseWriter, r *http.Requ // ------------- Optional query parameter "signature_expiration" ------------- - err = runtime.BindQueryParameter("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration) + err = runtime.BindQueryParameterWithOptions("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration, runtime.BindQueryParameterOptions{Type: "integer", Format: ""}) if err != nil { siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature_expiration", Err: err}) return @@ -432,6 +451,20 @@ func (siw *ServerInterfaceWrapper) GetMetrics(w http.ResponseWriter, r *http.Req handler.ServeHTTP(w, r) } +// PostSnapshotPrepare operation middleware +func (siw *ServerInterfaceWrapper) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.PostSnapshotPrepare(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + type 
UnescapedCookieParamError struct { ParamName string Err error @@ -563,6 +596,9 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/metrics", wrapper.GetMetrics) }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/snapshot/prepare", wrapper.PostSnapshotPrepare) + }) return r } diff --git a/envd/internal/api/auth.go b/envd/internal/api/auth.go index b626f5a..fc6b97c 100644 --- a/envd/internal/api/auth.go +++ b/envd/internal/api/auth.go @@ -1,4 +1,5 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package api @@ -30,6 +31,7 @@ var authExcludedPaths = []string{ "GET/files", "POST/files", "POST/init", + "POST/snapshot/prepare", } func (a *API) WithAuthorization(handler http.Handler) http.Handler { diff --git a/envd/internal/api/download.go b/envd/internal/api/download.go index b90a8ac..0a2119e 100644 --- a/envd/internal/api/download.go +++ b/envd/internal/api/download.go @@ -1,4 +1,5 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package api @@ -106,6 +107,17 @@ func (a *API) GetFiles(w http.ResponseWriter, r *http.Request, params GetFilesPa return } + // Reject anything that isn't a regular file (devices, pipes, sockets, etc.). + // Reading device files like /dev/zero or /dev/urandom produces infinite data + // and will exhaust memory on all layers of the stack. 
+ if !stat.Mode().IsRegular() { + errMsg = fmt.Errorf("path '%s' is not a regular file", resolvedPath) + errorCode = http.StatusBadRequest + jsonError(w, errorCode, errMsg) + + return + } + // Validate Accept-Encoding header encoding, err := parseAcceptEncoding(r) if err != nil { diff --git a/envd/internal/api/download_test.go b/envd/internal/api/download_test.go index 235a613..a4379cc 100644 --- a/envd/internal/api/download_test.go +++ b/envd/internal/api/download_test.go @@ -1,10 +1,12 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package api import ( "bytes" "compress/gzip" + "context" "io" "mime/multipart" "net/http" @@ -97,7 +99,7 @@ func TestGetFilesContentDisposition(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -146,7 +148,7 @@ func TestGetFilesContentDispositionWithNestedPath(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -189,7 +191,7 @@ func TestGetFiles_GzipEncoding_ExplicitIdentityOffWithRange(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -230,7 +232,7 @@ func TestGetFiles_GzipDownload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: 
currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) req.Header.Set("Accept-Encoding", "gzip") @@ -295,7 +297,7 @@ func TestPostFiles_GzipUpload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") req := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) req.Header.Set("Content-Type", mpWriter.FormDataContentType()) @@ -355,7 +357,7 @@ func TestGzipUploadThenGzipDownload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") uploadReq := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) uploadReq.Header.Set("Content-Type", mpWriter.FormDataContentType()) diff --git a/envd/internal/api/init.go b/envd/internal/api/init.go index 301400c..3b2be4b 100644 --- a/envd/internal/api/init.go +++ b/envd/internal/api/init.go @@ -150,6 +150,13 @@ func (a *API) PostInit(w http.ResponseWriter, r *http.Request) { host.PollForMMDSOpts(ctx, a.mmdsChan, a.defaults.EnvVars) }() + // Start the port scanner and forwarder if they were stopped by a + // pre-snapshot prepare call. Start is a no-op if already running, + // so this is safe on first boot and only takes effect after restore. 
+ if a.portSubsystem != nil { + a.portSubsystem.Start(a.rootCtx) + } + w.Header().Set("Cache-Control", "no-store") w.Header().Set("Content-Type", "") diff --git a/envd/internal/api/init_test.go b/envd/internal/api/init_test.go index f3db361..18ee203 100644 --- a/envd/internal/api/init_test.go +++ b/envd/internal/api/init_test.go @@ -79,7 +79,7 @@ func newTestAPI(accessToken *SecureToken, mmdsClient MMDSClient) *API { defaults := &execcontext.Defaults{ EnvVars: utils.NewMap[string, string](), } - api := New(&logger, defaults, nil, false) + api := New(&logger, defaults, nil, false, context.Background(), nil, "test") if accessToken != nil { api.accessToken.TakeFrom(accessToken) } diff --git a/envd/internal/api/snapshot.go b/envd/internal/api/snapshot.go new file mode 100644 index 0000000..d9e2edd --- /dev/null +++ b/envd/internal/api/snapshot.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk + +package api + +import ( + "net/http" +) + +// PostSnapshotPrepare quiesces continuous goroutines (port scanner, forwarder) +// and forces a GC cycle before Firecracker takes a VM snapshot. This ensures +// the Go runtime's page allocator is in a consistent state when vCPUs are frozen. +// +// Called by the host agent as a best-effort signal before vm.Pause(). 
+func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if a.portSubsystem != nil { + a.portSubsystem.Stop() + a.logger.Info().Msg("snapshot/prepare: port subsystem quiesced") + } + + w.Header().Set("Cache-Control", "no-store") + w.WriteHeader(http.StatusNoContent) +} diff --git a/envd/internal/api/store.go b/envd/internal/api/store.go index 088222a..ca97957 100644 --- a/envd/internal/api/store.go +++ b/envd/internal/api/store.go @@ -1,4 +1,5 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package api @@ -12,6 +13,7 @@ import ( "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" "git.omukk.dev/wrenn/sandbox/envd/internal/host" + publicport "git.omukk.dev/wrenn/sandbox/envd/internal/port" "git.omukk.dev/wrenn/sandbox/envd/internal/utils" ) @@ -32,6 +34,7 @@ type API struct { logger *zerolog.Logger accessToken *SecureToken defaults *execcontext.Defaults + version string mmdsChan chan *host.MMDSOpts hyperloopLock sync.Mutex @@ -39,17 +42,25 @@ type API struct { lastSetTime *utils.AtomicMax initLock sync.Mutex + + // rootCtx is the parent context from main(), used to restart + // long-lived goroutines after snapshot restore. 
+ rootCtx context.Context + portSubsystem *publicport.PortSubsystem } -func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool) *API { +func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool, rootCtx context.Context, portSubsystem *publicport.PortSubsystem, version string) *API { return &API{ - logger: l, - defaults: defaults, - mmdsChan: mmdsChan, - isNotFC: isNotFC, - mmdsClient: &DefaultMMDSClient{}, - lastSetTime: utils.NewAtomicMax(), - accessToken: &SecureToken{}, + logger: l, + defaults: defaults, + mmdsChan: mmdsChan, + isNotFC: isNotFC, + mmdsClient: &DefaultMMDSClient{}, + lastSetTime: utils.NewAtomicMax(), + accessToken: &SecureToken{}, + rootCtx: rootCtx, + portSubsystem: portSubsystem, + version: version, } } @@ -59,9 +70,11 @@ func (a *API) GetHealth(w http.ResponseWriter, r *http.Request) { a.logger.Trace().Msg("Health check") w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Content-Type", "") + w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(map[string]string{ + "version": a.version, + }) } func (a *API) GetMetrics(w http.ResponseWriter, r *http.Request) { diff --git a/envd/internal/port/forward.go b/envd/internal/port/forward.go index bf516ff..cc71a41 100644 --- a/envd/internal/port/forward.go +++ b/envd/internal/port/forward.go @@ -1,4 +1,5 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk // portf (port forward) periodaically scans opened TCP ports on the 127.0.0.1 (or localhost) // and launches `socat` process for every such port in the background. @@ -80,8 +81,16 @@ func (f *Forwarder) StartForwarding(ctx context.Context) { } for { - // procs is an array of currently opened ports. 
- if procs, ok := <-f.scannerSubscriber.Messages; ok { + select { + case <-ctx.Done(): + f.stopAllForwarding() + return + case procs, ok := <-f.scannerSubscriber.Messages: + if !ok { + f.stopAllForwarding() + return + } + // Now we are going to refresh all ports that are being forwarded in the `ports` map. Maybe add new ones // and maybe remove some. @@ -133,11 +142,22 @@ func (f *Forwarder) StartForwarding(ctx context.Context) { } } -func (f *Forwarder) startPortForwarding(ctx context.Context, p *PortToForward) { +func (f *Forwarder) stopAllForwarding() { + for _, p := range f.ports { + f.stopPortForwarding(p) + } + f.ports = make(map[string]*PortToForward) +} + +func (f *Forwarder) startPortForwarding(_ context.Context, p *PortToForward) { // https://unix.stackexchange.com/questions/311492/redirect-application-listening-on-localhost-to-listening-on-external-interface // socat -d -d TCP4-LISTEN:4000,bind=169.254.0.21,fork TCP4:localhost:4000 // reuseaddr is used to fix the "Address already in use" error when restarting socat quickly. - cmd := exec.CommandContext(ctx, + // + // We use exec.Command (not CommandContext) because stopAllForwarding kills + // socat via SIGKILL to the process group. CommandContext would also call + // cmd.Wait() on context cancellation, racing with the wait goroutine below. + cmd := exec.Command( "socat", "-d", "-d", "-d", fmt.Sprintf("TCP4-LISTEN:%v,bind=%s,reuseaddr,fork", p.port, f.sourceIP.To4()), fmt.Sprintf("TCP%d:localhost:%v", p.family, p.port), diff --git a/envd/internal/port/scan.go b/envd/internal/port/scan.go index 2b15523..878b361 100644 --- a/envd/internal/port/scan.go +++ b/envd/internal/port/scan.go @@ -1,8 +1,10 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package port import ( + "context" "sync" "time" @@ -10,8 +12,7 @@ import ( ) type Scanner struct { - scanExit chan struct{} - period time.Duration + period time.Duration // Plain mutex-protected map instead of concurrent-map. 
The concurrent-map // library's Items() spawns goroutines and uses a WaitGroup internally, @@ -20,15 +21,10 @@ type Scanner struct { subs map[string]*ScannerSubscriber } -func (s *Scanner) Destroy() { - close(s.scanExit) -} - func NewScanner(period time.Duration) *Scanner { return &Scanner{ - period: period, - subs: make(map[string]*ScannerSubscriber), - scanExit: make(chan struct{}), + period: period, + subs: make(map[string]*ScannerSubscriber), } } @@ -51,7 +47,8 @@ func (s *Scanner) Unsubscribe(sub *ScannerSubscriber) { } // ScanAndBroadcast starts scanning open TCP ports and broadcasts every open port to all subscribers. -func (s *Scanner) ScanAndBroadcast() { +// It exits when ctx is cancelled. +func (s *Scanner) ScanAndBroadcast(ctx context.Context) { for { // Read directly from /proc/net/tcp and /proc/net/tcp6 instead of // using gopsutil's net.Connections(), which walks /proc/{pid}/fd @@ -60,15 +57,14 @@ func (s *Scanner) ScanAndBroadcast() { s.mu.RLock() for _, sub := range s.subs { - sub.Signal(conns) + sub.Signal(ctx, conns) } s.mu.RUnlock() select { - case <-s.scanExit: + case <-ctx.Done(): return - default: - time.Sleep(s.period) + case <-time.After(s.period): } } } diff --git a/envd/internal/port/scanSubscriber.go b/envd/internal/port/scanSubscriber.go index bad9908..312f8d2 100644 --- a/envd/internal/port/scanSubscriber.go +++ b/envd/internal/port/scanSubscriber.go @@ -1,8 +1,11 @@ // SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk package port import ( + "context" + "github.com/rs/zerolog" ) @@ -33,19 +36,26 @@ func (ss *ScannerSubscriber) Destroy() { close(ss.Messages) } -func (ss *ScannerSubscriber) Signal(conns []ConnStat) { - // Filter isn't specified. Accept everything. +// Signal sends the (filtered) connection list to the subscriber. It respects +// ctx cancellation so the scanner goroutine is never stuck waiting for a +// consumer that has already exited. 
+func (ss *ScannerSubscriber) Signal(ctx context.Context, conns []ConnStat) { + var payload []ConnStat + if ss.filter == nil { - ss.Messages <- conns + payload = conns } else { filtered := []ConnStat{} for i := range conns { - // We need to access the list directly otherwise there will be implicit memory aliasing - // If the filter matched a connection, we will send it to a channel. if ss.filter.Match(&conns[i]) { filtered = append(filtered, conns[i]) } } - ss.Messages <- filtered + payload = filtered + } + + select { + case ss.Messages <- payload: + case <-ctx.Done(): } } diff --git a/envd/internal/port/subsystem.go b/envd/internal/port/subsystem.go new file mode 100644 index 0000000..094b2c4 --- /dev/null +++ b/envd/internal/port/subsystem.go @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: Apache-2.0 +// Modifications by M/S Omukk + +package port + +import ( + "context" + "runtime" + "runtime/debug" + "sync" + "time" + + "github.com/rs/zerolog" + + "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" +) + +// PortSubsystem owns the port scanner and forwarder lifecycle. +// It supports stop/restart across Firecracker snapshot/restore cycles. +type PortSubsystem struct { + logger *zerolog.Logger + cgroupManager cgroups.Manager + period time.Duration + + mu sync.Mutex + cancel context.CancelFunc + wg *sync.WaitGroup // per-cycle WaitGroup; nil when not running + running bool +} + +// NewPortSubsystem creates a new PortSubsystem. Call Start() to begin scanning. +func NewPortSubsystem(logger *zerolog.Logger, cgroupManager cgroups.Manager, period time.Duration) *PortSubsystem { + return &PortSubsystem{ + logger: logger, + cgroupManager: cgroupManager, + period: period, + } +} + +// Start creates a fresh scanner and forwarder, launching their goroutines. +// Safe to call multiple times; does nothing if already running. 
+func (p *PortSubsystem) Start(parentCtx context.Context) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.running { + return + } + + ctx, cancel := context.WithCancel(parentCtx) + p.cancel = cancel + p.running = true + + // Allocate a fresh WaitGroup for this lifecycle so a concurrent Stop + // on the previous cycle's WaitGroup cannot interfere. + wg := &sync.WaitGroup{} + p.wg = wg + + scanner := NewScanner(p.period) + forwarder := NewForwarder(p.logger, scanner, p.cgroupManager) + + wg.Add(2) + + go func() { + defer wg.Done() + forwarder.StartForwarding(ctx) + }() + + go func() { + defer wg.Done() + scanner.ScanAndBroadcast(ctx) + }() +} + +// Stop quiesces the scanner and forwarder goroutines and forces a GC cycle +// to put the Go runtime's page allocator in a consistent state before snapshot. +// Blocks until both goroutines have exited. Safe to call if already stopped. +func (p *PortSubsystem) Stop() { + p.mu.Lock() + if !p.running { + p.mu.Unlock() + return + } + cancelFn := p.cancel + wg := p.wg + p.cancel = nil + p.wg = nil + p.running = false + p.mu.Unlock() + + cancelFn() + wg.Wait() + + // Force two GC cycles to ensure all spans are swept and the page + // allocator summary tree is fully consistent before the VM is frozen. + runtime.GC() + runtime.GC() + debug.FreeOSMemory() +} + +// Restart stops the subsystem (if running) and starts it again with a fresh +// scanner and forwarder. Used after snapshot restore via PostInit. 
+func (p *PortSubsystem) Restart(parentCtx context.Context) { + p.Stop() + p.Start(parentCtx) +} diff --git a/envd/internal/services/process/handler/multiplex.go b/envd/internal/services/process/handler/multiplex.go index 4fe696e..88f0916 100644 --- a/envd/internal/services/process/handler/multiplex.go +++ b/envd/internal/services/process/handler/multiplex.go @@ -25,34 +25,38 @@ func NewMultiplexedChannel[T any](buffer int) *MultiplexedChannel[T] { c.mu.RLock() for _, cons := range c.channels { - cons <- v + select { + case cons <- v: + default: + // Consumer not reading — skip to prevent deadlock + } } c.mu.RUnlock() } + c.mu.Lock() c.exited.Store(true) - for _, cons := range c.channels { close(cons) } + c.mu.Unlock() }() return c } func (m *MultiplexedChannel[T]) Fork() (chan T, func()) { - if m.exited.Load() { - ch := make(chan T) - close(ch) - - return ch, func() {} - } - m.mu.Lock() defer m.mu.Unlock() - consumer := make(chan T) + if m.exited.Load() { + ch := make(chan T) + close(ch) + return ch, func() {} + } + + consumer := make(chan T, 4096) m.channels = append(m.channels, consumer) diff --git a/envd/internal/services/process/service.go b/envd/internal/services/process/service.go index e00f345..9b89521 100644 --- a/envd/internal/services/process/service.go +++ b/envd/internal/services/process/service.go @@ -62,16 +62,15 @@ func (s *Service) getProcess(selector *rpc.ProcessSelector) (*handler.Handler, e s.processes.Range(func(_ uint32, value *handler.Handler) bool { if value.Tag == nil { - return true + return true // no tag, keep looking } if *value.Tag == tag { proc = value - - return true + return false // found, stop iterating } - return false + return true // different tag, keep looking }) if proc == nil { diff --git a/envd/main.go b/envd/main.go index 751788d..1cd9403 100644 --- a/envd/main.go +++ b/envd/main.go @@ -50,7 +50,7 @@ const ( ) var ( - Version = "0.5.4" + Version = "0.1.0" commitSHA string @@ -190,7 +190,14 @@ func main() { processLogger := 
l.With().Str("logger", "process").Logger() processService := processRpc.Handle(m, &processLogger, defaults, cgroupManager) - service := api.New(&envLogger, defaults, mmdsChan, isNotFC) + // Port scanner and forwarder are managed by PortSubsystem, which + // supports stop/restart across Firecracker snapshot/restore cycles. + portLogger := l.With().Str("logger", "port-forwarder").Logger() + portSubsystem := publicport.NewPortSubsystem(&portLogger, cgroupManager, portScannerInterval) + portSubsystem.Start(ctx) + defer portSubsystem.Stop() + + service := api.New(&envLogger, defaults, mmdsChan, isNotFC, ctx, portSubsystem, Version) handler := api.HandlerFromMux(service, m) middleware := authn.NewMiddleware(permissions.AuthenticateUsername) @@ -229,16 +236,6 @@ func main() { } } - // Bind all open ports on 127.0.0.1 and localhost to the eth0 interface - portScanner := publicport.NewScanner(portScannerInterval) - defer portScanner.Destroy() - - portLogger := l.With().Str("logger", "port-forwarder").Logger() - portForwarder := publicport.NewForwarder(&portLogger, portScanner, cgroupManager) - go portForwarder.StartForwarding(ctx) - - go portScanner.ScanAndBroadcast() - err := s.ListenAndServe() if err != nil { log.Fatalf("error starting server: %v", err) diff --git a/envd/spec/envd.yaml b/envd/spec/envd.yaml index b86d563..5160ab7 100644 --- a/envd/spec/envd.yaml +++ b/envd/spec/envd.yaml @@ -1,4 +1,5 @@ # SPDX-License-Identifier: Apache-2.0 +# Modifications by M/S Omukk openapi: 3.0.0 info: @@ -70,6 +71,13 @@ paths: "204": description: Env vars set, the time and metadata is synced with the host + /snapshot/prepare: + post: + summary: Quiesce continuous goroutines before Firecracker snapshot + responses: + "204": + description: Goroutines quiesced, safe to snapshot + /envs: get: summary: Get the environment variables diff --git a/frontend/package.json b/frontend/package.json index 85030ec..73dd6b7 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -18,16 
+18,20 @@ "@fontsource/instrument-serif": "^5.2.8", "@sveltejs/adapter-static": "^3.0.10", "@sveltejs/kit": "^2.50.2", - "@sveltejs/vite-plugin-svelte": "^6.2.4", + "@sveltejs/vite-plugin-svelte": "^7.0.0", "@tailwindcss/vite": "^4.2.1", "bits-ui": "^2.16.3", "svelte": "^5.51.0", "svelte-check": "^4.4.2", "tailwindcss": "^4.2.1", - "typescript": "^5.9.3", - "vite": "^7.3.1" + "typescript": "^6.0.2", + "vite": "^8.0.8" }, "dependencies": { - "chart.js": "^4.5.1" + "@xterm/addon-fit": "^0.11.0", + "@xterm/addon-web-links": "^0.12.0", + "@xterm/xterm": "^6.0.0", + "chart.js": "^4.5.1", + "shiki": "^4.0.2" } } diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 5b60992..3521570 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -8,9 +8,21 @@ importers: .: dependencies: + '@xterm/addon-fit': + specifier: ^0.11.0 + version: 0.11.0 + '@xterm/addon-web-links': + specifier: ^0.12.0 + version: 0.12.0 + '@xterm/xterm': + specifier: ^6.0.0 + version: 6.0.0 chart.js: specifier: ^4.5.1 version: 4.5.1 + shiki: + specifier: ^4.0.2 + version: 4.0.2 devDependencies: '@fontsource-variable/jetbrains-mono': specifier: ^5.2.8 @@ -26,192 +38,45 @@ importers: version: 5.2.8 '@sveltejs/adapter-static': specifier: ^3.0.10 - version: 3.0.10(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1))) + version: 3.0.10(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1))) '@sveltejs/kit': specifier: ^2.50.2 - version: 2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + version: 
2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)) '@sveltejs/vite-plugin-svelte': - specifier: ^6.2.4 - version: 6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + specifier: ^7.0.0 + version: 7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)) '@tailwindcss/vite': specifier: ^4.2.1 - version: 4.2.1(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + version: 4.2.2(vite@8.0.8(jiti@2.6.1)) bits-ui: specifier: ^2.16.3 - version: 2.16.3(@internationalized/date@3.12.0)(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12) + version: 2.17.3(@internationalized/date@3.12.0)(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3) svelte: specifier: ^5.51.0 - version: 5.53.12 + version: 5.55.3 svelte-check: specifier: ^4.4.2 - version: 4.4.5(picomatch@4.0.3)(svelte@5.53.12)(typescript@5.9.3) + version: 4.4.6(picomatch@4.0.4)(svelte@5.55.3)(typescript@6.0.2) tailwindcss: specifier: ^4.2.1 - version: 4.2.1 + version: 4.2.2 typescript: - specifier: ^5.9.3 - version: 5.9.3 + specifier: ^6.0.2 + version: 6.0.2 vite: - specifier: ^7.3.1 - version: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) + specifier: ^8.0.8 + version: 8.0.8(jiti@2.6.1) packages: - '@esbuild/aix-ppc64@0.27.4': - resolution: {integrity: sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [aix] + '@emnapi/core@1.9.2': + resolution: {integrity: sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==} - '@esbuild/android-arm64@0.27.4': - resolution: {integrity: 
sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw==} - engines: {node: '>=18'} - cpu: [arm64] - os: [android] + '@emnapi/runtime@1.9.2': + resolution: {integrity: sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==} - '@esbuild/android-arm@0.27.4': - resolution: {integrity: sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ==} - engines: {node: '>=18'} - cpu: [arm] - os: [android] - - '@esbuild/android-x64@0.27.4': - resolution: {integrity: sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw==} - engines: {node: '>=18'} - cpu: [x64] - os: [android] - - '@esbuild/darwin-arm64@0.27.4': - resolution: {integrity: sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ==} - engines: {node: '>=18'} - cpu: [arm64] - os: [darwin] - - '@esbuild/darwin-x64@0.27.4': - resolution: {integrity: sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw==} - engines: {node: '>=18'} - cpu: [x64] - os: [darwin] - - '@esbuild/freebsd-arm64@0.27.4': - resolution: {integrity: sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw==} - engines: {node: '>=18'} - cpu: [arm64] - os: [freebsd] - - '@esbuild/freebsd-x64@0.27.4': - resolution: {integrity: sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ==} - engines: {node: '>=18'} - cpu: [x64] - os: [freebsd] - - '@esbuild/linux-arm64@0.27.4': - resolution: {integrity: sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA==} - engines: {node: '>=18'} - cpu: [arm64] - os: [linux] - - '@esbuild/linux-arm@0.27.4': - resolution: {integrity: sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg==} - engines: {node: '>=18'} - cpu: [arm] - os: 
[linux] - - '@esbuild/linux-ia32@0.27.4': - resolution: {integrity: sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA==} - engines: {node: '>=18'} - cpu: [ia32] - os: [linux] - - '@esbuild/linux-loong64@0.27.4': - resolution: {integrity: sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA==} - engines: {node: '>=18'} - cpu: [loong64] - os: [linux] - - '@esbuild/linux-mips64el@0.27.4': - resolution: {integrity: sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw==} - engines: {node: '>=18'} - cpu: [mips64el] - os: [linux] - - '@esbuild/linux-ppc64@0.27.4': - resolution: {integrity: sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [linux] - - '@esbuild/linux-riscv64@0.27.4': - resolution: {integrity: sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw==} - engines: {node: '>=18'} - cpu: [riscv64] - os: [linux] - - '@esbuild/linux-s390x@0.27.4': - resolution: {integrity: sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA==} - engines: {node: '>=18'} - cpu: [s390x] - os: [linux] - - '@esbuild/linux-x64@0.27.4': - resolution: {integrity: sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA==} - engines: {node: '>=18'} - cpu: [x64] - os: [linux] - - '@esbuild/netbsd-arm64@0.27.4': - resolution: {integrity: sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q==} - engines: {node: '>=18'} - cpu: [arm64] - os: [netbsd] - - '@esbuild/netbsd-x64@0.27.4': - resolution: {integrity: sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg==} - engines: {node: '>=18'} - cpu: [x64] - os: [netbsd] - - '@esbuild/openbsd-arm64@0.27.4': - resolution: {integrity: 
sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow==} - engines: {node: '>=18'} - cpu: [arm64] - os: [openbsd] - - '@esbuild/openbsd-x64@0.27.4': - resolution: {integrity: sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ==} - engines: {node: '>=18'} - cpu: [x64] - os: [openbsd] - - '@esbuild/openharmony-arm64@0.27.4': - resolution: {integrity: sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg==} - engines: {node: '>=18'} - cpu: [arm64] - os: [openharmony] - - '@esbuild/sunos-x64@0.27.4': - resolution: {integrity: sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g==} - engines: {node: '>=18'} - cpu: [x64] - os: [sunos] - - '@esbuild/win32-arm64@0.27.4': - resolution: {integrity: sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg==} - engines: {node: '>=18'} - cpu: [arm64] - os: [win32] - - '@esbuild/win32-ia32@0.27.4': - resolution: {integrity: sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw==} - engines: {node: '>=18'} - cpu: [ia32] - os: [win32] - - '@esbuild/win32-x64@0.27.4': - resolution: {integrity: sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg==} - engines: {node: '>=18'} - cpu: [x64] - os: [win32] + '@emnapi/wasi-threads@1.2.1': + resolution: {integrity: sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==} '@floating-ui/core@1.7.5': resolution: {integrity: sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==} @@ -256,133 +121,140 @@ packages: '@kurkle/color@0.3.4': resolution: {integrity: sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==} + '@napi-rs/wasm-runtime@1.1.3': + resolution: {integrity: 
sha512-xK9sGVbJWYb08+mTJt3/YV24WxvxpXcXtP6B172paPZ+Ts69Re9dAr7lKwJoeIx8OoeuimEiRZ7umkiUVClmmQ==} + peerDependencies: + '@emnapi/core': ^1.7.1 + '@emnapi/runtime': ^1.7.1 + + '@oxc-project/types@0.124.0': + resolution: {integrity: sha512-VBFWMTBvHxS11Z5Lvlr3IWgrwhMTXV+Md+EQF0Xf60+wAdsGFTBx7X7K/hP4pi8N7dcm1RvcHwDxZ16Qx8keUg==} + '@polka/url@1.0.0-next.29': resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==} - '@rollup/rollup-android-arm-eabi@4.59.0': - resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} - cpu: [arm] - os: [android] - - '@rollup/rollup-android-arm64@4.59.0': - resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} + '@rolldown/binding-android-arm64@1.0.0-rc.15': + resolution: {integrity: sha512-YYe6aWruPZDtHNpwu7+qAHEMbQ/yRl6atqb/AhznLTnD3UY99Q1jE7ihLSahNWkF4EqRPVC4SiR4O0UkLK02tA==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.59.0': - resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} + '@rolldown/binding-darwin-arm64@1.0.0-rc.15': + resolution: {integrity: sha512-oArR/ig8wNTPYsXL+Mzhs0oxhxfuHRfG7Ikw7jXsw8mYOtk71W0OkF2VEVh699pdmzjPQsTjlD1JIOoHkLP1Fg==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.59.0': - resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} + '@rolldown/binding-darwin-x64@1.0.0-rc.15': + resolution: {integrity: sha512-YzeVqOqjPYvUbJSWJ4EDL8ahbmsIXQpgL3JVipmN+MX0XnXMeWomLN3Fb+nwCmP/jfyqte5I3XRSm7OfQrbyxw==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@rollup/rollup-freebsd-arm64@4.59.0': - resolution: {integrity: 
sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} - cpu: [arm64] - os: [freebsd] - - '@rollup/rollup-freebsd-x64@4.59.0': - resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} + '@rolldown/binding-freebsd-x64@1.0.0-rc.15': + resolution: {integrity: sha512-9Erhx956jeQ0nNTyif1+QWAXDRD38ZNjr//bSHrt6wDwB+QkAfl2q6Mn1k6OBPerznjRmbM10lgRb1Pli4xZPw==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.15': + resolution: {integrity: sha512-cVwk0w8QbZJGTnP/AHQBs5yNwmpgGYStL88t4UIaqcvYJWBfS0s3oqVLZPwsPU6M0zlW4GqjP0Zq5MnAGwFeGA==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.59.0': - resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} - cpu: [arm] - os: [linux] - - '@rollup/rollup-linux-arm64-gnu@4.59.0': - resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.15': + resolution: {integrity: sha512-eBZ/u8iAK9SoHGanqe/jrPnY0JvBN6iXbVOsbO38mbz+ZJsaobExAm1Iu+rxa4S1l2FjG0qEZn4Rc6X8n+9M+w==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.59.0': - resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.15': + resolution: {integrity: sha512-ZvRYMGrAklV9PEkgt4LQM6MjQX2P58HPAuecwYObY2DhS2t35R0I810bKi0wmaYORt6m/2Sm+Z+nFgb0WhXNcQ==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-loong64-gnu@4.59.0': 
- resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} - cpu: [loong64] - os: [linux] - - '@rollup/rollup-linux-loong64-musl@4.59.0': - resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} - cpu: [loong64] - os: [linux] - - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.15': + resolution: {integrity: sha512-VDpgGBzgfg5hLg+uBpCLoFG5kVvEyafmfxGUV0UHLcL5irxAK7PKNeC2MwClgk6ZAiNhmo9FLhRYgvMmedLtnQ==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-ppc64-musl@4.59.0': - resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} - cpu: [ppc64] - os: [linux] - - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} - cpu: [riscv64] - os: [linux] - - '@rollup/rollup-linux-riscv64-musl@4.59.0': - resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} - cpu: [riscv64] - os: [linux] - - '@rollup/rollup-linux-s390x-gnu@4.59.0': - resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.15': + resolution: {integrity: sha512-y1uXY3qQWCzcPgRJATPSOUP4tCemh4uBdY7e3EZbVwCJTY3gLJWnQABgeUetvED+bt1FQ01OeZwvhLS2bpNrAQ==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.59.0': - resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.15': + resolution: 
{integrity: sha512-023bTPBod7J3Y/4fzAN6QtpkSABR0rigtrwaP+qSEabUh5zf6ELr9Nc7GujaROuPY3uwdSIXWrvhn1KxOvurWA==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.59.0': - resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} + '@rolldown/binding-linux-x64-musl@1.0.0-rc.15': + resolution: {integrity: sha512-witB2O0/hU4CgfOOKUoeFgQ4GktPi1eEbAhaLAIpgD6+ZnhcPkUtPsoKKHRzmOoWPZue46IThdSgdo4XneOLYw==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rollup/rollup-openbsd-x64@4.59.0': - resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} - cpu: [x64] - os: [openbsd] - - '@rollup/rollup-openharmony-arm64@4.59.0': - resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} + '@rolldown/binding-openharmony-arm64@1.0.0-rc.15': + resolution: {integrity: sha512-UCL68NJ0Ud5zRipXZE9dF5PmirzJE4E4BCIOOssEnM7wLDsxjc6Qb0sGDxTNRTP53I6MZpygyCpY8Aa8sPfKPg==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@rollup/rollup-win32-arm64-msvc@4.59.0': - resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} + '@rolldown/binding-wasm32-wasi@1.0.0-rc.15': + resolution: {integrity: sha512-ApLruZq/ig+nhaE7OJm4lDjayUnOHVUa77zGeqnqZ9pn0ovdVbbNPerVibLXDmWeUZXjIYIT8V3xkT58Rm9u5Q==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.15': + resolution: {integrity: sha512-KmoUoU7HnN+Si5YWJigfTws1jz1bKBYDQKdbLspz0UaqjjFkddHsqorgiW1mxcAj88lYUE6NC/zJNwT+SloqtA==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.59.0': - resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} - cpu: [ia32] - os: 
[win32] - - '@rollup/rollup-win32-x64-gnu@4.59.0': - resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.15': + resolution: {integrity: sha512-3P2A8L+x75qavWLe/Dll3EYBJLQmtkJN8rfh+U/eR3MqMgL/h98PhYI+JFfXuDPgPeCB7iZAKiqii5vqOvnA0g==} + engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.59.0': - resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} - cpu: [x64] - os: [win32] + '@rolldown/pluginutils@1.0.0-rc.15': + resolution: {integrity: sha512-UromN0peaE53IaBRe9W7CjrZgXl90fqGpK+mIZbA3qSTeYqg3pqpROBdIPvOG3F5ereDHNwoHBI2e50n1BDr1g==} + + '@shikijs/core@4.0.2': + resolution: {integrity: sha512-hxT0YF4ExEqB8G/qFdtJvpmHXBYJ2lWW7qTHDarVkIudPFE6iCIrqdgWxGn5s+ppkGXI0aEGlibI0PAyzP3zlw==} + engines: {node: '>=20'} + + '@shikijs/engine-javascript@4.0.2': + resolution: {integrity: sha512-7PW0Nm49DcoUIQEXlJhNNBHyoGMjalRETTCcjMqEaMoJRLljy1Bi/EGV3/qLBgLKQejdspiiYuHGQW6dX94Nag==} + engines: {node: '>=20'} + + '@shikijs/engine-oniguruma@4.0.2': + resolution: {integrity: sha512-UpCB9Y2sUKlS9z8juFSKz7ZtysmeXCgnRF0dlhXBkmQnek7lAToPte8DkxmEYGNTMii72zU/lyXiCB6StuZeJg==} + engines: {node: '>=20'} + + '@shikijs/langs@4.0.2': + resolution: {integrity: sha512-KaXby5dvoeuZzN0rYQiPMjFoUrz4hgwIE+D6Du9owcHcl6/g16/yT5BQxSW5cGt2MZBz6Hl0YuRqf12omRfUUg==} + engines: {node: '>=20'} + + '@shikijs/primitive@4.0.2': + resolution: {integrity: sha512-M6UMPrSa3fN5ayeJwFVl9qWofl273wtK1VG8ySDZ1mQBfhCpdd8nEx7nPZ/tk7k+TYcpqBZzj/AnwxT9lO+HJw==} + engines: {node: '>=20'} + + '@shikijs/themes@4.0.2': + resolution: {integrity: sha512-mjCafwt8lJJaVSsQvNVrJumbnnj1RI8jbUKrPKgE6E3OvQKxnuRoBaYC51H4IGHePsGN/QtALglWBU7DoKDFnA==} + engines: {node: '>=20'} + + '@shikijs/types@4.0.2': + resolution: {integrity: 
sha512-qzbeRooUTPnLE+sHD/Z8DStmaDgnbbc/pMrU203950aRqjX/6AFHeDYT+j00y2lPdz0ywJKx7o/7qnqTivtlXg==} + engines: {node: '>=20'} + + '@shikijs/vscode-textmate@10.0.2': + resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} '@standard-schema/spec@1.1.0': resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} @@ -397,15 +269,15 @@ packages: peerDependencies: '@sveltejs/kit': ^2.0.0 - '@sveltejs/kit@2.55.0': - resolution: {integrity: sha512-MdFRjevVxmAknf2NbaUkDF16jSIzXMWd4Nfah0Qp8TtQVoSp3bV4jKt8mX7z7qTUTWvgSaxtR0EG5WJf53gcuA==} + '@sveltejs/kit@2.57.1': + resolution: {integrity: sha512-VRdSbB96cI1EnRh09CqmnQqP/YJvET5buj8S6k7CxaJqBJD4bw4fRKDjcarAj/eX9k2eHifQfDH8NtOh+ZxxPw==} engines: {node: '>=18.13'} hasBin: true peerDependencies: '@opentelemetry/api': ^1.0.0 '@sveltejs/vite-plugin-svelte': ^3.0.0 || ^4.0.0-next.1 || ^5.0.0 || ^6.0.0-next.0 || ^7.0.0 svelte: ^4.0.0 || ^5.0.0-next.0 - typescript: ^5.3.3 + typescript: ^5.3.3 || ^6.0.0 vite: ^5.0.3 || ^6.0.0 || ^7.0.0-beta.0 || ^8.0.0 peerDependenciesMeta: '@opentelemetry/api': @@ -413,83 +285,75 @@ packages: typescript: optional: true - '@sveltejs/vite-plugin-svelte-inspector@5.0.2': - resolution: {integrity: sha512-TZzRTcEtZffICSAoZGkPSl6Etsj2torOVrx6Uw0KpXxrec9Gg6jFWQ60Q3+LmNGfZSxHRCZL7vXVZIWmuV50Ig==} + '@sveltejs/vite-plugin-svelte@7.0.0': + resolution: {integrity: sha512-ILXmxC7HAsnkK2eslgPetrqqW1BKSL7LktsFgqzNj83MaivMGZzluWq32m25j2mDOjmSKX7GGWahePhuEs7P/g==} engines: {node: ^20.19 || ^22.12 || >=24} peerDependencies: - '@sveltejs/vite-plugin-svelte': ^6.0.0-next.0 - svelte: ^5.0.0 - vite: ^6.3.0 || ^7.0.0 + svelte: ^5.46.4 + vite: ^8.0.0-beta.7 || ^8.0.0 - '@sveltejs/vite-plugin-svelte@6.2.4': - resolution: {integrity: sha512-ou/d51QSdTyN26D7h6dSpusAKaZkAiGM55/AKYi+9AGZw7q85hElbjK3kEyzXHhLSnRISHOYzVge6x0jRZ7DXA==} - engines: {node: ^20.19 || ^22.12 || >=24} - peerDependencies: - svelte: 
^5.0.0 - vite: ^6.3.0 || ^7.0.0 + '@swc/helpers@0.5.21': + resolution: {integrity: sha512-jI/VAmtdjB/RnI8GTnokyX7Ug8c+g+ffD6QRLa6XQewtnGyukKkKSk3wLTM3b5cjt1jNh9x0jfVlagdN2gDKQg==} - '@swc/helpers@0.5.19': - resolution: {integrity: sha512-QamiFeIK3txNjgUTNppE6MiG3p7TdninpZu0E0PbqVh1a9FNLT2FRhisaa4NcaX52XVhA5l7Pk58Ft7Sqi/2sA==} + '@tailwindcss/node@4.2.2': + resolution: {integrity: sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==} - '@tailwindcss/node@4.2.1': - resolution: {integrity: sha512-jlx6sLk4EOwO6hHe1oCGm1Q4AN/s0rSrTTPBGPM0/RQ6Uylwq17FuU8IeJJKEjtc6K6O07zsvP+gDO6MMWo7pg==} - - '@tailwindcss/oxide-android-arm64@4.2.1': - resolution: {integrity: sha512-eZ7G1Zm5EC8OOKaesIKuw77jw++QJ2lL9N+dDpdQiAB/c/B2wDh0QPFHbkBVrXnwNugvrbJFk1gK2SsVjwWReg==} + '@tailwindcss/oxide-android-arm64@4.2.2': + resolution: {integrity: sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==} engines: {node: '>= 20'} cpu: [arm64] os: [android] - '@tailwindcss/oxide-darwin-arm64@4.2.1': - resolution: {integrity: sha512-q/LHkOstoJ7pI1J0q6djesLzRvQSIfEto148ppAd+BVQK0JYjQIFSK3JgYZJa+Yzi0DDa52ZsQx2rqytBnf8Hw==} + '@tailwindcss/oxide-darwin-arm64@4.2.2': + resolution: {integrity: sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==} engines: {node: '>= 20'} cpu: [arm64] os: [darwin] - '@tailwindcss/oxide-darwin-x64@4.2.1': - resolution: {integrity: sha512-/f/ozlaXGY6QLbpvd/kFTro2l18f7dHKpB+ieXz+Cijl4Mt9AI2rTrpq7V+t04nK+j9XBQHnSMdeQRhbGyt6fw==} + '@tailwindcss/oxide-darwin-x64@4.2.2': + resolution: {integrity: sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==} engines: {node: '>= 20'} cpu: [x64] os: [darwin] - '@tailwindcss/oxide-freebsd-x64@4.2.1': - resolution: {integrity: sha512-5e/AkgYJT/cpbkys/OU2Ei2jdETCLlifwm7ogMC7/hksI2fC3iiq6OcXwjibcIjPung0kRtR3TxEITkqgn0TcA==} + '@tailwindcss/oxide-freebsd-x64@4.2.2': + 
resolution: {integrity: sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==} engines: {node: '>= 20'} cpu: [x64] os: [freebsd] - '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.1': - resolution: {integrity: sha512-Uny1EcVTTmerCKt/1ZuKTkb0x8ZaiuYucg2/kImO5A5Y/kBz41/+j0gxUZl+hTF3xkWpDmHX+TaWhOtba2Fyuw==} + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': + resolution: {integrity: sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==} engines: {node: '>= 20'} cpu: [arm] os: [linux] - '@tailwindcss/oxide-linux-arm64-gnu@4.2.1': - resolution: {integrity: sha512-CTrwomI+c7n6aSSQlsPL0roRiNMDQ/YzMD9EjcR+H4f0I1SQ8QqIuPnsVp7QgMkC1Qi8rtkekLkOFjo7OlEFRQ==} + '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': + resolution: {integrity: sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==} engines: {node: '>= 20'} cpu: [arm64] os: [linux] - '@tailwindcss/oxide-linux-arm64-musl@4.2.1': - resolution: {integrity: sha512-WZA0CHRL/SP1TRbA5mp9htsppSEkWuQ4KsSUumYQnyl8ZdT39ntwqmz4IUHGN6p4XdSlYfJwM4rRzZLShHsGAQ==} + '@tailwindcss/oxide-linux-arm64-musl@4.2.2': + resolution: {integrity: sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==} engines: {node: '>= 20'} cpu: [arm64] os: [linux] - '@tailwindcss/oxide-linux-x64-gnu@4.2.1': - resolution: {integrity: sha512-qMFzxI2YlBOLW5PhblzuSWlWfwLHaneBE0xHzLrBgNtqN6mWfs+qYbhryGSXQjFYB1Dzf5w+LN5qbUTPhW7Y5g==} + '@tailwindcss/oxide-linux-x64-gnu@4.2.2': + resolution: {integrity: sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==} engines: {node: '>= 20'} cpu: [x64] os: [linux] - '@tailwindcss/oxide-linux-x64-musl@4.2.1': - resolution: {integrity: sha512-5r1X2FKnCMUPlXTWRYpHdPYUY6a1Ar/t7P24OuiEdEOmms5lyqjDRvVY1yy9Rmioh+AunQ0rWiOTPE8F9A3v5g==} + '@tailwindcss/oxide-linux-x64-musl@4.2.2': + resolution: {integrity: 
sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==} engines: {node: '>= 20'} cpu: [x64] os: [linux] - '@tailwindcss/oxide-wasm32-wasi@4.2.1': - resolution: {integrity: sha512-MGFB5cVPvshR85MTJkEvqDUnuNoysrsRxd6vnk1Lf2tbiqNlXpHYZqkqOQalydienEWOHHFyyuTSYRsLfxFJ2Q==} + '@tailwindcss/oxide-wasm32-wasi@4.2.2': + resolution: {integrity: sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==} engines: {node: '>=14.0.0'} cpu: [wasm32] bundledDependencies: @@ -500,26 +364,29 @@ packages: - '@emnapi/wasi-threads' - tslib - '@tailwindcss/oxide-win32-arm64-msvc@4.2.1': - resolution: {integrity: sha512-YlUEHRHBGnCMh4Nj4GnqQyBtsshUPdiNroZj8VPkvTZSoHsilRCwXcVKnG9kyi0ZFAS/3u+qKHBdDc81SADTRA==} + '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': + resolution: {integrity: sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==} engines: {node: '>= 20'} cpu: [arm64] os: [win32] - '@tailwindcss/oxide-win32-x64-msvc@4.2.1': - resolution: {integrity: sha512-rbO34G5sMWWyrN/idLeVxAZgAKWrn5LiR3/I90Q9MkA67s6T1oB0xtTe+0heoBvHSpbU9Mk7i6uwJnpo4u21XQ==} + '@tailwindcss/oxide-win32-x64-msvc@4.2.2': + resolution: {integrity: sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==} engines: {node: '>= 20'} cpu: [x64] os: [win32] - '@tailwindcss/oxide@4.2.1': - resolution: {integrity: sha512-yv9jeEFWnjKCI6/T3Oq50yQEOqmpmpfzG1hcZsAOaXFQPfzWprWrlHSdGPEF3WQTi8zu8ohC9Mh9J470nT5pUw==} + '@tailwindcss/oxide@4.2.2': + resolution: {integrity: sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==} engines: {node: '>= 20'} - '@tailwindcss/vite@4.2.1': - resolution: {integrity: sha512-TBf2sJjYeb28jD2U/OhwdW0bbOsxkWPwQ7SrqGf9sVcoYwZj7rkXljroBO9wKBut9XnmQLXanuDUeqQK0lGg/w==} + '@tailwindcss/vite@4.2.2': + resolution: {integrity: 
sha512-mEiF5HO1QqCLXoNEfXVA1Tzo+cYsrqV7w9Juj2wdUFyW07JRenqMG225MvPwr3ZD9N1bFQj46X7r33iHxLUW0w==} peerDependencies: - vite: ^5.2.0 || ^6 || ^7 + vite: ^5.2.0 || ^6 || ^7 || ^8 + + '@tybys/wasm-util@0.10.1': + resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} '@types/cookie@0.6.0': resolution: {integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==} @@ -527,12 +394,29 @@ packages: '@types/estree@1.0.8': resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/hast@3.0.4': + resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} + + '@types/mdast@4.0.4': + resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} + '@types/trusted-types@2.0.7': resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} - '@typescript-eslint/types@8.57.1': - resolution: {integrity: sha512-S29BOBPJSFUiblEl6RzPPjJt6w25A6XsBqRVDt53tA/tlL8q7ceQNZHTjPeONt/3S7KRI4quk+yP9jK2WjBiPQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@types/unist@3.0.3': + resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + '@xterm/addon-fit@0.11.0': + resolution: {integrity: sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==} + + '@xterm/addon-web-links@0.12.0': + resolution: {integrity: sha512-4Smom3RPyVp7ZMYOYDoC/9eGJJJqYhnPLGGqJ6wOBfB8VxPViJNSKdgRYb8NpaM6YSelEKbA2SStD7lGyqaobw==} + + '@xterm/xterm@6.0.0': + resolution: {integrity: 
sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==} acorn@8.16.0: resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} @@ -547,13 +431,22 @@ packages: resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} engines: {node: '>= 0.4'} - bits-ui@2.16.3: - resolution: {integrity: sha512-5hJ5dEhf5yPzkRFcxzgQHScGodeo0gK0MUUXrdLlRHWaBOBGZiacWLG96j/wwFatKwZvouw7q+sn14i0fx3RIg==} + bits-ui@2.17.3: + resolution: {integrity: sha512-Bef41uY9U2jaBJHPhcPvmBNkGec5Wx2z6eioDsTmsaR2vH4QoaOcPi75gzCG3+/2TNr6v/qBwzgWNPYCxNtrEA==} engines: {node: '>=20'} peerDependencies: '@internationalized/date': ^3.8.1 svelte: ^5.33.0 + ccount@2.0.1: + resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} + + character-entities-html4@2.1.0: + resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==} + + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} + chart.js@4.5.1: resolution: {integrity: sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==} engines: {pnpm: '>=8'} @@ -566,6 +459,9 @@ packages: resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} engines: {node: '>=6'} + comma-separated-tokens@2.0.3: + resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + cookie@0.6.0: resolution: {integrity: sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==} engines: {node: '>= 0.6'} @@ -582,23 +478,26 @@ packages: resolution: {integrity: 
sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} engines: {node: '>=8'} - devalue@5.6.4: - resolution: {integrity: sha512-Gp6rDldRsFh/7XuouDbxMH3Mx8GMCcgzIb1pDTvNyn8pZGQ22u+Wa+lGV9dQCltFQ7uVw0MhRyb8XDskNFOReA==} + devalue@5.7.1: + resolution: {integrity: sha512-MUbZ586EgQqdRnC4yDrlod3BEdyvE4TapGYHMW2CiaW+KkkFmWEFqBUaLltEZCGi0iFXCEjRF0OjF0DV2QHjOA==} + + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} enhanced-resolve@5.20.1: resolution: {integrity: sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==} engines: {node: '>=10.13.0'} - esbuild@0.27.4: - resolution: {integrity: sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ==} - engines: {node: '>=18'} - hasBin: true - esm-env@1.2.2: resolution: {integrity: sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==} - esrap@2.2.4: - resolution: {integrity: sha512-suICpxAmZ9A8bzJjEl/+rLJiDKC0X4gYWUxT6URAWBLvlXmtbZd5ySMu/N2ZGEtMCAmflUDPSehrP9BQcsGcSg==} + esrap@2.2.5: + resolution: {integrity: sha512-/yLB1538mag+dn0wsePTe8C0rDIjUOaJpMs2McodSzmM2msWcZsBSdRtg6HOBt0A/r82BN+Md3pgwSc/uWt2Ig==} + peerDependencies: + '@typescript-eslint/types': ^8.2.0 + peerDependenciesMeta: + '@typescript-eslint/types': + optional: true fdir@6.5.0: resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} @@ -617,6 +516,15 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + hast-util-to-html@9.0.5: + resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} + + hast-util-whitespace@3.0.0: + resolution: {integrity: 
sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==} + + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + inline-style-parser@0.2.7: resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} @@ -631,74 +539,74 @@ packages: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} - lightningcss-android-arm64@1.31.1: - resolution: {integrity: sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==} + lightningcss-android-arm64@1.32.0: + resolution: {integrity: sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [android] - lightningcss-darwin-arm64@1.31.1: - resolution: {integrity: sha512-02uTEqf3vIfNMq3h/z2cJfcOXnQ0GRwQrkmPafhueLb2h7mqEidiCzkE4gBMEH65abHRiQvhdcQ+aP0D0g67sg==} + lightningcss-darwin-arm64@1.32.0: + resolution: {integrity: sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [darwin] - lightningcss-darwin-x64@1.31.1: - resolution: {integrity: sha512-1ObhyoCY+tGxtsz1lSx5NXCj3nirk0Y0kB/g8B8DT+sSx4G9djitg9ejFnjb3gJNWo7qXH4DIy2SUHvpoFwfTA==} + lightningcss-darwin-x64@1.32.0: + resolution: {integrity: sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [darwin] - lightningcss-freebsd-x64@1.31.1: - resolution: {integrity: sha512-1RINmQKAItO6ISxYgPwszQE1BrsVU5aB45ho6O42mu96UiZBxEXsuQ7cJW4zs4CEodPUioj/QrXW1r9pLUM74A==} + lightningcss-freebsd-x64@1.32.0: + resolution: {integrity: 
sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [freebsd] - lightningcss-linux-arm-gnueabihf@1.31.1: - resolution: {integrity: sha512-OOCm2//MZJ87CdDK62rZIu+aw9gBv4azMJuA8/KB74wmfS3lnC4yoPHm0uXZ/dvNNHmnZnB8XLAZzObeG0nS1g==} + lightningcss-linux-arm-gnueabihf@1.32.0: + resolution: {integrity: sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==} engines: {node: '>= 12.0.0'} cpu: [arm] os: [linux] - lightningcss-linux-arm64-gnu@1.31.1: - resolution: {integrity: sha512-WKyLWztD71rTnou4xAD5kQT+982wvca7E6QoLpoawZ1gP9JM0GJj4Tp5jMUh9B3AitHbRZ2/H3W5xQmdEOUlLg==} + lightningcss-linux-arm64-gnu@1.32.0: + resolution: {integrity: sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-arm64-musl@1.31.1: - resolution: {integrity: sha512-mVZ7Pg2zIbe3XlNbZJdjs86YViQFoJSpc41CbVmKBPiGmC4YrfeOyz65ms2qpAobVd7WQsbW4PdsSJEMymyIMg==} + lightningcss-linux-arm64-musl@1.32.0: + resolution: {integrity: sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-x64-gnu@1.31.1: - resolution: {integrity: sha512-xGlFWRMl+0KvUhgySdIaReQdB4FNudfUTARn7q0hh/V67PVGCs3ADFjw+6++kG1RNd0zdGRlEKa+T13/tQjPMA==} + lightningcss-linux-x64-gnu@1.32.0: + resolution: {integrity: sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-linux-x64-musl@1.31.1: - resolution: {integrity: sha512-eowF8PrKHw9LpoZii5tdZwnBcYDxRw2rRCyvAXLi34iyeYfqCQNA9rmUM0ce62NlPhCvof1+9ivRaTY6pSKDaA==} + lightningcss-linux-x64-musl@1.32.0: + resolution: {integrity: sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==} engines: 
{node: '>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-win32-arm64-msvc@1.31.1: - resolution: {integrity: sha512-aJReEbSEQzx1uBlQizAOBSjcmr9dCdL3XuC/6HLXAxmtErsj2ICo5yYggg1qOODQMtnjNQv2UHb9NpOuFtYe4w==} + lightningcss-win32-arm64-msvc@1.32.0: + resolution: {integrity: sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [win32] - lightningcss-win32-x64-msvc@1.31.1: - resolution: {integrity: sha512-I9aiFrbd7oYHwlnQDqr1Roz+fTz61oDDJX7n9tYF9FJymH1cIN1DtKw3iYt6b8WZgEjoNwVSncwF4wx/ZedMhw==} + lightningcss-win32-x64-msvc@1.32.0: + resolution: {integrity: sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [win32] - lightningcss@1.31.1: - resolution: {integrity: sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==} + lightningcss@1.32.0: + resolution: {integrity: sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==} engines: {node: '>= 12.0.0'} locate-character@3.0.0: @@ -711,6 +619,24 @@ packages: magic-string@0.30.21: resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + mdast-util-to-hast@13.2.1: + resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==} + + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} + + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + + micromark-util-symbol@2.0.1: + resolution: 
{integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + + micromark-util-types@2.0.2: + resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==} + mri@1.2.0: resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} engines: {node: '>=4'} @@ -727,24 +653,42 @@ packages: obug@2.1.1: resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.5: + resolution: {integrity: sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ==} + picocolors@1.1.1: resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} - picomatch@4.0.3: - resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + picomatch@4.0.4: + resolution: {integrity: sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==} engines: {node: '>=12'} - postcss@8.5.8: - resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} + postcss@8.5.9: + resolution: {integrity: sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==} engines: {node: ^10 || ^12 || >=14} + property-information@7.1.0: + resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + readdirp@4.1.2: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} - rollup@4.59.0: - 
resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} - engines: {node: '>=18.0.0', npm: '>=8.0.0'} + regex-recursion@6.0.2: + resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} + + regex-utilities@2.3.0: + resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@6.1.0: + resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==} + + rolldown@1.0.0-rc.15: + resolution: {integrity: sha512-Ff31guA5zT6WjnGp0SXw76X6hzGRk/OQq2hE+1lcDe+lJdHSgnSX6nK3erbONHyCbpSj9a9E+uX/OvytZoWp2g==} + engines: {node: ^20.19.0 || >=22.12.0} hasBin: true runed@0.35.1: @@ -760,8 +704,12 @@ packages: resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==} engines: {node: '>=6'} - set-cookie-parser@3.0.1: - resolution: {integrity: sha512-n7Z7dXZhJbwuAHhNzkTti6Aw9QDDjZtm3JTpTGATIdNzdQz5GuFs22w90BcvF4INfnrL5xrX3oGsuqO5Dx3A1Q==} + set-cookie-parser@3.1.0: + resolution: {integrity: sha512-kjnC1DXBHcxaOaOXBHBeRtltsDG2nUiUni+jP92M9gYdW12rsmx92UsfpH7o5tDRs7I1ZZPSQJQGv3UaRfCiuw==} + + shiki@4.0.2: + resolution: {integrity: sha512-eAVKTMedR5ckPo4xne/PjYQYrU3qx78gtJZ+sHlXEg5IHhhoQhMfZVzetTYuaJS0L2Ef3AcCRzCHV8T0WI6nIQ==} + engines: {node: '>=20'} sirv@3.0.2: resolution: {integrity: sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g==} @@ -771,11 +719,17 @@ packages: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} + space-separated-tokens@2.0.2: + resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} + + stringify-entities@4.0.4: + resolution: {integrity: 
sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} + style-to-object@1.0.14: resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==} - svelte-check@4.4.5: - resolution: {integrity: sha512-1bSwIRCvvmSHrlK52fOlZmVtUZgil43jNL/2H18pRpa+eQjzGt6e3zayxhp1S7GajPFKNM/2PMCG+DZFHlG9fw==} + svelte-check@4.4.6: + resolution: {integrity: sha512-kP1zG81EWaFe9ZyTv4ZXv44Csi6Pkdpb7S3oj6m+K2ec/IcDg/a8LsFsnVLqm2nxtkSwsd5xPj/qFkTBgXHXjg==} engines: {node: '>= 18.0.0'} hasBin: true peerDependencies: @@ -788,45 +742,70 @@ packages: peerDependencies: svelte: ^5.30.2 - svelte@5.53.12: - resolution: {integrity: sha512-4x/uk4rQe/d7RhfvS8wemTfNjQ0bJbKvamIzRBfTe2eHHjzBZ7PZicUQrC2ryj83xxEacfA1zHKd1ephD1tAxA==} + svelte@5.55.3: + resolution: {integrity: sha512-dS1N+i3bA1v+c4UDb750MlN5vCO82G6vxh8HeTsPsTdJ1BLsN1zxSyDlIdBBqUjqZ/BxEwM8UrFf98aaoVnZFQ==} engines: {node: '>=18'} tabbable@6.4.0: resolution: {integrity: sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg==} - tailwindcss@4.2.1: - resolution: {integrity: sha512-/tBrSQ36vCleJkAOsy9kbNTgaxvGbyOamC30PRePTQe/o1MFwEKHQk4Cn7BNGaPtjp+PuUrByJehM1hgxfq4sw==} + tailwindcss@4.2.2: + resolution: {integrity: sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==} - tapable@2.3.0: - resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} + tapable@2.3.2: + resolution: {integrity: sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==} engines: {node: '>=6'} - tinyglobby@0.2.15: - resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + tinyglobby@0.2.16: + resolution: {integrity: sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==} engines: {node: 
'>=12.0.0'} totalist@3.0.1: resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} engines: {node: '>=6'} + trim-lines@3.0.1: + resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} + tslib@2.8.1: resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} - typescript@5.9.3: - resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + typescript@6.0.2: + resolution: {integrity: sha512-bGdAIrZ0wiGDo5l8c++HWtbaNCWTS4UTv7RaTH/ThVIgjkveJt83m74bBHMJkuCbslY8ixgLBVZJIOiQlQTjfQ==} engines: {node: '>=14.17'} hasBin: true - vite@7.3.1: - resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + unist-util-is@6.0.1: + resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==} + + unist-util-position@5.0.0: + resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==} + + unist-util-stringify-position@4.0.0: + resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} + + unist-util-visit-parents@6.0.2: + resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==} + + unist-util-visit@5.1.0: + resolution: {integrity: sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==} + + vfile-message@4.0.3: + resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==} + + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} + + vite@8.0.8: + 
resolution: {integrity: sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: '@types/node': ^20.19.0 || >=22.12.0 + '@vitejs/devtools': ^0.1.0 + esbuild: ^0.27.0 || ^0.28.0 jiti: '>=1.21.0' less: ^4.0.0 - lightningcss: ^1.21.0 sass: ^1.70.0 sass-embedded: ^1.70.0 stylus: '>=0.54.8' @@ -837,12 +816,14 @@ packages: peerDependenciesMeta: '@types/node': optional: true + '@vitejs/devtools': + optional: true + esbuild: + optional: true jiti: optional: true less: optional: true - lightningcss: - optional: true sass: optional: true sass-embedded: @@ -858,10 +839,10 @@ packages: yaml: optional: true - vitefu@1.1.2: - resolution: {integrity: sha512-zpKATdUbzbsycPFBN71nS2uzBUQiVnFoOrr2rvqv34S1lcAgMKKkjWleLGeiJlZ8lwCXvtWaRn7R3ZC16SYRuw==} + vitefu@1.1.3: + resolution: {integrity: sha512-ub4okH7Z5KLjb6hDyjqrGXqWtWvoYdU3IGm/NorpgHncKoLTCfRIbvlhBm7r0YstIaQRYlp4yEbFqDcKSzXSSg==} peerDependencies: - vite: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-beta.0 + vite: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0 peerDependenciesMeta: vite: optional: true @@ -869,84 +850,25 @@ packages: zimmerframe@1.1.4: resolution: {integrity: sha512-B58NGBEoc8Y9MWWCQGl/gq9xBCe4IiKM0a2x7GZdQKOW5Exr8S1W24J6OgM1njK8xCRGvAJIL/MxXHf6SkmQKQ==} + zwitch@2.0.4: + resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} + snapshots: - '@esbuild/aix-ppc64@0.27.4': + '@emnapi/core@1.9.2': + dependencies: + '@emnapi/wasi-threads': 1.2.1 + tslib: 2.8.1 optional: true - '@esbuild/android-arm64@0.27.4': + '@emnapi/runtime@1.9.2': + dependencies: + tslib: 2.8.1 optional: true - '@esbuild/android-arm@0.27.4': - optional: true - - '@esbuild/android-x64@0.27.4': - optional: true - - '@esbuild/darwin-arm64@0.27.4': - optional: true - - '@esbuild/darwin-x64@0.27.4': - optional: true - - '@esbuild/freebsd-arm64@0.27.4': - 
optional: true - - '@esbuild/freebsd-x64@0.27.4': - optional: true - - '@esbuild/linux-arm64@0.27.4': - optional: true - - '@esbuild/linux-arm@0.27.4': - optional: true - - '@esbuild/linux-ia32@0.27.4': - optional: true - - '@esbuild/linux-loong64@0.27.4': - optional: true - - '@esbuild/linux-mips64el@0.27.4': - optional: true - - '@esbuild/linux-ppc64@0.27.4': - optional: true - - '@esbuild/linux-riscv64@0.27.4': - optional: true - - '@esbuild/linux-s390x@0.27.4': - optional: true - - '@esbuild/linux-x64@0.27.4': - optional: true - - '@esbuild/netbsd-arm64@0.27.4': - optional: true - - '@esbuild/netbsd-x64@0.27.4': - optional: true - - '@esbuild/openbsd-arm64@0.27.4': - optional: true - - '@esbuild/openbsd-x64@0.27.4': - optional: true - - '@esbuild/openharmony-arm64@0.27.4': - optional: true - - '@esbuild/sunos-x64@0.27.4': - optional: true - - '@esbuild/win32-arm64@0.27.4': - optional: true - - '@esbuild/win32-ia32@0.27.4': - optional: true - - '@esbuild/win32-x64@0.27.4': + '@emnapi/wasi-threads@1.2.1': + dependencies: + tslib: 2.8.1 optional: true '@floating-ui/core@1.7.5': @@ -970,7 +892,7 @@ snapshots: '@internationalized/date@3.12.0': dependencies: - '@swc/helpers': 0.5.19 + '@swc/helpers': 0.5.21 '@jridgewell/gen-mapping@0.3.13': dependencies: @@ -993,82 +915,107 @@ snapshots: '@kurkle/color@0.3.4': {} + '@napi-rs/wasm-runtime@1.1.3(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2)': + dependencies: + '@emnapi/core': 1.9.2 + '@emnapi/runtime': 1.9.2 + '@tybys/wasm-util': 0.10.1 + optional: true + + '@oxc-project/types@0.124.0': {} + '@polka/url@1.0.0-next.29': {} - '@rollup/rollup-android-arm-eabi@4.59.0': + '@rolldown/binding-android-arm64@1.0.0-rc.15': optional: true - '@rollup/rollup-android-arm64@4.59.0': + '@rolldown/binding-darwin-arm64@1.0.0-rc.15': optional: true - '@rollup/rollup-darwin-arm64@4.59.0': + '@rolldown/binding-darwin-x64@1.0.0-rc.15': optional: true - '@rollup/rollup-darwin-x64@4.59.0': + '@rolldown/binding-freebsd-x64@1.0.0-rc.15': optional: 
true - '@rollup/rollup-freebsd-arm64@4.59.0': + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.15': optional: true - '@rollup/rollup-freebsd-x64@4.59.0': + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.59.0': + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-arm64-gnu@4.59.0': + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-arm64-musl@4.59.0': + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-loong64-gnu@4.59.0': + '@rolldown/binding-linux-x64-musl@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-loong64-musl@4.59.0': + '@rolldown/binding-openharmony-arm64@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-ppc64-gnu@4.59.0': + '@rolldown/binding-wasm32-wasi@1.0.0-rc.15': + dependencies: + '@emnapi/core': 1.9.2 + '@emnapi/runtime': 1.9.2 + '@napi-rs/wasm-runtime': 1.1.3(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) optional: true - '@rollup/rollup-linux-ppc64-musl@4.59.0': + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-riscv64-gnu@4.59.0': + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.15': optional: true - '@rollup/rollup-linux-riscv64-musl@4.59.0': - optional: true + '@rolldown/pluginutils@1.0.0-rc.15': {} - '@rollup/rollup-linux-s390x-gnu@4.59.0': - optional: true + '@shikijs/core@4.0.2': + dependencies: + '@shikijs/primitive': 4.0.2 + '@shikijs/types': 4.0.2 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 - '@rollup/rollup-linux-x64-gnu@4.59.0': - optional: true + '@shikijs/engine-javascript@4.0.2': + dependencies: + '@shikijs/types': 4.0.2 + '@shikijs/vscode-textmate': 10.0.2 + oniguruma-to-es: 4.3.5 - '@rollup/rollup-linux-x64-musl@4.59.0': - optional: true + 
'@shikijs/engine-oniguruma@4.0.2': + dependencies: + '@shikijs/types': 4.0.2 + '@shikijs/vscode-textmate': 10.0.2 - '@rollup/rollup-openbsd-x64@4.59.0': - optional: true + '@shikijs/langs@4.0.2': + dependencies: + '@shikijs/types': 4.0.2 - '@rollup/rollup-openharmony-arm64@4.59.0': - optional: true + '@shikijs/primitive@4.0.2': + dependencies: + '@shikijs/types': 4.0.2 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 - '@rollup/rollup-win32-arm64-msvc@4.59.0': - optional: true + '@shikijs/themes@4.0.2': + dependencies: + '@shikijs/types': 4.0.2 - '@rollup/rollup-win32-ia32-msvc@4.59.0': - optional: true + '@shikijs/types@4.0.2': + dependencies: + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 - '@rollup/rollup-win32-x64-gnu@4.59.0': - optional: true - - '@rollup/rollup-win32-x64-msvc@4.59.0': - optional: true + '@shikijs/vscode-textmate@10.0.2': {} '@standard-schema/spec@1.1.0': {} @@ -1076,126 +1023,139 @@ snapshots: dependencies: acorn: 8.16.0 - '@sveltejs/adapter-static@3.0.10(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))': + '@sveltejs/adapter-static@3.0.10(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))': dependencies: - '@sveltejs/kit': 2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + '@sveltejs/kit': 2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)) - '@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1))': + 
'@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1))': dependencies: '@standard-schema/spec': 1.1.0 '@sveltejs/acorn-typescript': 1.0.9(acorn@8.16.0) - '@sveltejs/vite-plugin-svelte': 6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + '@sveltejs/vite-plugin-svelte': 7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)) '@types/cookie': 0.6.0 acorn: 8.16.0 cookie: 0.6.0 - devalue: 5.6.4 + devalue: 5.7.1 esm-env: 1.2.2 kleur: 4.1.5 magic-string: 0.30.21 mrmime: 2.0.1 - set-cookie-parser: 3.0.1 + set-cookie-parser: 3.1.0 sirv: 3.0.2 - svelte: 5.53.12 - vite: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) + svelte: 5.55.3 + vite: 8.0.8(jiti@2.6.1) optionalDependencies: - typescript: 5.9.3 + typescript: 6.0.2 - '@sveltejs/vite-plugin-svelte-inspector@5.0.2(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1))': + '@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1))': dependencies: - '@sveltejs/vite-plugin-svelte': 6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) - obug: 2.1.1 - svelte: 5.53.12 - vite: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) - - '@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1))': - dependencies: - '@sveltejs/vite-plugin-svelte-inspector': 5.0.2(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) deepmerge: 4.3.1 magic-string: 0.30.21 obug: 2.1.1 - svelte: 5.53.12 - vite: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) - vitefu: 1.1.2(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + svelte: 5.55.3 + vite: 8.0.8(jiti@2.6.1) + vitefu: 1.1.3(vite@8.0.8(jiti@2.6.1)) - '@swc/helpers@0.5.19': + '@swc/helpers@0.5.21': dependencies: tslib: 2.8.1 - '@tailwindcss/node@4.2.1': + 
'@tailwindcss/node@4.2.2': dependencies: '@jridgewell/remapping': 2.3.5 enhanced-resolve: 5.20.1 jiti: 2.6.1 - lightningcss: 1.31.1 + lightningcss: 1.32.0 magic-string: 0.30.21 source-map-js: 1.2.1 - tailwindcss: 4.2.1 + tailwindcss: 4.2.2 - '@tailwindcss/oxide-android-arm64@4.2.1': + '@tailwindcss/oxide-android-arm64@4.2.2': optional: true - '@tailwindcss/oxide-darwin-arm64@4.2.1': + '@tailwindcss/oxide-darwin-arm64@4.2.2': optional: true - '@tailwindcss/oxide-darwin-x64@4.2.1': + '@tailwindcss/oxide-darwin-x64@4.2.2': optional: true - '@tailwindcss/oxide-freebsd-x64@4.2.1': + '@tailwindcss/oxide-freebsd-x64@4.2.2': optional: true - '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.1': + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': optional: true - '@tailwindcss/oxide-linux-arm64-gnu@4.2.1': + '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': optional: true - '@tailwindcss/oxide-linux-arm64-musl@4.2.1': + '@tailwindcss/oxide-linux-arm64-musl@4.2.2': optional: true - '@tailwindcss/oxide-linux-x64-gnu@4.2.1': + '@tailwindcss/oxide-linux-x64-gnu@4.2.2': optional: true - '@tailwindcss/oxide-linux-x64-musl@4.2.1': + '@tailwindcss/oxide-linux-x64-musl@4.2.2': optional: true - '@tailwindcss/oxide-wasm32-wasi@4.2.1': + '@tailwindcss/oxide-wasm32-wasi@4.2.2': optional: true - '@tailwindcss/oxide-win32-arm64-msvc@4.2.1': + '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': optional: true - '@tailwindcss/oxide-win32-x64-msvc@4.2.1': + '@tailwindcss/oxide-win32-x64-msvc@4.2.2': optional: true - '@tailwindcss/oxide@4.2.1': + '@tailwindcss/oxide@4.2.2': optionalDependencies: - '@tailwindcss/oxide-android-arm64': 4.2.1 - '@tailwindcss/oxide-darwin-arm64': 4.2.1 - '@tailwindcss/oxide-darwin-x64': 4.2.1 - '@tailwindcss/oxide-freebsd-x64': 4.2.1 - '@tailwindcss/oxide-linux-arm-gnueabihf': 4.2.1 - '@tailwindcss/oxide-linux-arm64-gnu': 4.2.1 - '@tailwindcss/oxide-linux-arm64-musl': 4.2.1 - '@tailwindcss/oxide-linux-x64-gnu': 4.2.1 - '@tailwindcss/oxide-linux-x64-musl': 4.2.1 - 
'@tailwindcss/oxide-wasm32-wasi': 4.2.1 - '@tailwindcss/oxide-win32-arm64-msvc': 4.2.1 - '@tailwindcss/oxide-win32-x64-msvc': 4.2.1 + '@tailwindcss/oxide-android-arm64': 4.2.2 + '@tailwindcss/oxide-darwin-arm64': 4.2.2 + '@tailwindcss/oxide-darwin-x64': 4.2.2 + '@tailwindcss/oxide-freebsd-x64': 4.2.2 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.2.2 + '@tailwindcss/oxide-linux-arm64-gnu': 4.2.2 + '@tailwindcss/oxide-linux-arm64-musl': 4.2.2 + '@tailwindcss/oxide-linux-x64-gnu': 4.2.2 + '@tailwindcss/oxide-linux-x64-musl': 4.2.2 + '@tailwindcss/oxide-wasm32-wasi': 4.2.2 + '@tailwindcss/oxide-win32-arm64-msvc': 4.2.2 + '@tailwindcss/oxide-win32-x64-msvc': 4.2.2 - '@tailwindcss/vite@4.2.1(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1))': + '@tailwindcss/vite@4.2.2(vite@8.0.8(jiti@2.6.1))': dependencies: - '@tailwindcss/node': 4.2.1 - '@tailwindcss/oxide': 4.2.1 - tailwindcss: 4.2.1 - vite: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) + '@tailwindcss/node': 4.2.2 + '@tailwindcss/oxide': 4.2.2 + tailwindcss: 4.2.2 + vite: 8.0.8(jiti@2.6.1) + + '@tybys/wasm-util@0.10.1': + dependencies: + tslib: 2.8.1 + optional: true '@types/cookie@0.6.0': {} '@types/estree@1.0.8': {} + '@types/hast@3.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/mdast@4.0.4': + dependencies: + '@types/unist': 3.0.3 + '@types/trusted-types@2.0.7': {} - '@typescript-eslint/types@8.57.1': {} + '@types/unist@3.0.3': {} + + '@ungap/structured-clone@1.3.0': {} + + '@xterm/addon-fit@0.11.0': {} + + '@xterm/addon-web-links@0.12.0': {} + + '@xterm/xterm@6.0.0': {} acorn@8.16.0: {} @@ -1203,19 +1163,25 @@ snapshots: axobject-query@4.1.0: {} - bits-ui@2.16.3(@internationalized/date@3.12.0)(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12): + 
bits-ui@2.17.3(@internationalized/date@3.12.0)(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3): dependencies: '@floating-ui/core': 1.7.5 '@floating-ui/dom': 1.7.6 '@internationalized/date': 3.12.0 esm-env: 1.2.2 - runed: 0.35.1(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12) - svelte: 5.53.12 - svelte-toolbelt: 0.10.6(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12) + runed: 0.35.1(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3) + svelte: 5.55.3 + svelte-toolbelt: 0.10.6(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3) tabbable: 6.4.0 transitivePeerDependencies: - '@sveltejs/kit' + ccount@2.0.1: {} + + character-entities-html4@2.1.0: {} + + character-entities-legacy@3.0.0: {} + chart.js@4.5.1: dependencies: '@kurkle/color': 0.3.4 @@ -1226,6 +1192,8 @@ snapshots: clsx@2.1.1: {} + comma-separated-tokens@2.0.3: {} + cookie@0.6.0: {} deepmerge@4.3.1: {} @@ -1234,58 +1202,52 @@ snapshots: detect-libc@2.1.2: {} - devalue@5.6.4: {} + devalue@5.7.1: {} + + devlop@1.1.0: + dependencies: + dequal: 2.0.3 enhanced-resolve@5.20.1: dependencies: graceful-fs: 4.2.11 - tapable: 2.3.0 - - esbuild@0.27.4: - optionalDependencies: - '@esbuild/aix-ppc64': 0.27.4 - '@esbuild/android-arm': 0.27.4 - '@esbuild/android-arm64': 0.27.4 - '@esbuild/android-x64': 0.27.4 - '@esbuild/darwin-arm64': 0.27.4 - 
'@esbuild/darwin-x64': 0.27.4 - '@esbuild/freebsd-arm64': 0.27.4 - '@esbuild/freebsd-x64': 0.27.4 - '@esbuild/linux-arm': 0.27.4 - '@esbuild/linux-arm64': 0.27.4 - '@esbuild/linux-ia32': 0.27.4 - '@esbuild/linux-loong64': 0.27.4 - '@esbuild/linux-mips64el': 0.27.4 - '@esbuild/linux-ppc64': 0.27.4 - '@esbuild/linux-riscv64': 0.27.4 - '@esbuild/linux-s390x': 0.27.4 - '@esbuild/linux-x64': 0.27.4 - '@esbuild/netbsd-arm64': 0.27.4 - '@esbuild/netbsd-x64': 0.27.4 - '@esbuild/openbsd-arm64': 0.27.4 - '@esbuild/openbsd-x64': 0.27.4 - '@esbuild/openharmony-arm64': 0.27.4 - '@esbuild/sunos-x64': 0.27.4 - '@esbuild/win32-arm64': 0.27.4 - '@esbuild/win32-ia32': 0.27.4 - '@esbuild/win32-x64': 0.27.4 + tapable: 2.3.2 esm-env@1.2.2: {} - esrap@2.2.4: + esrap@2.2.5: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 - '@typescript-eslint/types': 8.57.1 - fdir@6.5.0(picomatch@4.0.3): + fdir@6.5.0(picomatch@4.0.4): optionalDependencies: - picomatch: 4.0.3 + picomatch: 4.0.4 fsevents@2.3.3: optional: true graceful-fs@4.2.11: {} + hast-util-to-html@9.0.5: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + + hast-util-whitespace@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + html-void-elements@3.0.0: {} + inline-style-parser@0.2.7: {} is-reference@3.0.3: @@ -1296,54 +1258,54 @@ snapshots: kleur@4.1.5: {} - lightningcss-android-arm64@1.31.1: + lightningcss-android-arm64@1.32.0: optional: true - lightningcss-darwin-arm64@1.31.1: + lightningcss-darwin-arm64@1.32.0: optional: true - lightningcss-darwin-x64@1.31.1: + lightningcss-darwin-x64@1.32.0: optional: true - lightningcss-freebsd-x64@1.31.1: + lightningcss-freebsd-x64@1.32.0: optional: true - lightningcss-linux-arm-gnueabihf@1.31.1: + lightningcss-linux-arm-gnueabihf@1.32.0: 
optional: true - lightningcss-linux-arm64-gnu@1.31.1: + lightningcss-linux-arm64-gnu@1.32.0: optional: true - lightningcss-linux-arm64-musl@1.31.1: + lightningcss-linux-arm64-musl@1.32.0: optional: true - lightningcss-linux-x64-gnu@1.31.1: + lightningcss-linux-x64-gnu@1.32.0: optional: true - lightningcss-linux-x64-musl@1.31.1: + lightningcss-linux-x64-musl@1.32.0: optional: true - lightningcss-win32-arm64-msvc@1.31.1: + lightningcss-win32-arm64-msvc@1.32.0: optional: true - lightningcss-win32-x64-msvc@1.31.1: + lightningcss-win32-x64-msvc@1.32.0: optional: true - lightningcss@1.31.1: + lightningcss@1.32.0: dependencies: detect-libc: 2.1.2 optionalDependencies: - lightningcss-android-arm64: 1.31.1 - lightningcss-darwin-arm64: 1.31.1 - lightningcss-darwin-x64: 1.31.1 - lightningcss-freebsd-x64: 1.31.1 - lightningcss-linux-arm-gnueabihf: 1.31.1 - lightningcss-linux-arm64-gnu: 1.31.1 - lightningcss-linux-arm64-musl: 1.31.1 - lightningcss-linux-x64-gnu: 1.31.1 - lightningcss-linux-x64-musl: 1.31.1 - lightningcss-win32-arm64-msvc: 1.31.1 - lightningcss-win32-x64-msvc: 1.31.1 + lightningcss-android-arm64: 1.32.0 + lightningcss-darwin-arm64: 1.32.0 + lightningcss-darwin-x64: 1.32.0 + lightningcss-freebsd-x64: 1.32.0 + lightningcss-linux-arm-gnueabihf: 1.32.0 + lightningcss-linux-arm64-gnu: 1.32.0 + lightningcss-linux-arm64-musl: 1.32.0 + lightningcss-linux-x64-gnu: 1.32.0 + lightningcss-linux-x64-musl: 1.32.0 + lightningcss-win32-arm64-msvc: 1.32.0 + lightningcss-win32-x64-msvc: 1.32.0 locate-character@3.0.0: {} @@ -1353,6 +1315,35 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 + mdast-util-to-hast@13.2.1: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@ungap/structured-clone': 1.3.0 + devlop: 1.1.0 + micromark-util-sanitize-uri: 2.0.1 + trim-lines: 3.0.1 + unist-util-position: 5.0.0 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + + micromark-util-character@2.1.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 
+ + micromark-util-encode@2.0.1: {} + + micromark-util-sanitize-uri@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 + + micromark-util-symbol@2.0.1: {} + + micromark-util-types@2.0.2: {} + mri@1.2.0: {} mrmime@2.0.1: {} @@ -1361,63 +1352,84 @@ snapshots: obug@2.1.1: {} + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.5: + dependencies: + oniguruma-parser: 0.12.1 + regex: 6.1.0 + regex-recursion: 6.0.2 + picocolors@1.1.1: {} - picomatch@4.0.3: {} + picomatch@4.0.4: {} - postcss@8.5.8: + postcss@8.5.9: dependencies: nanoid: 3.3.11 picocolors: 1.1.1 source-map-js: 1.2.1 + property-information@7.1.0: {} + readdirp@4.1.2: {} - rollup@4.59.0: + regex-recursion@6.0.2: dependencies: - '@types/estree': 1.0.8 - optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.59.0 - '@rollup/rollup-android-arm64': 4.59.0 - '@rollup/rollup-darwin-arm64': 4.59.0 - '@rollup/rollup-darwin-x64': 4.59.0 - '@rollup/rollup-freebsd-arm64': 4.59.0 - '@rollup/rollup-freebsd-x64': 4.59.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.59.0 - '@rollup/rollup-linux-arm-musleabihf': 4.59.0 - '@rollup/rollup-linux-arm64-gnu': 4.59.0 - '@rollup/rollup-linux-arm64-musl': 4.59.0 - '@rollup/rollup-linux-loong64-gnu': 4.59.0 - '@rollup/rollup-linux-loong64-musl': 4.59.0 - '@rollup/rollup-linux-ppc64-gnu': 4.59.0 - '@rollup/rollup-linux-ppc64-musl': 4.59.0 - '@rollup/rollup-linux-riscv64-gnu': 4.59.0 - '@rollup/rollup-linux-riscv64-musl': 4.59.0 - '@rollup/rollup-linux-s390x-gnu': 4.59.0 - '@rollup/rollup-linux-x64-gnu': 4.59.0 - '@rollup/rollup-linux-x64-musl': 4.59.0 - '@rollup/rollup-openbsd-x64': 4.59.0 - '@rollup/rollup-openharmony-arm64': 4.59.0 - '@rollup/rollup-win32-arm64-msvc': 4.59.0 - '@rollup/rollup-win32-ia32-msvc': 4.59.0 - '@rollup/rollup-win32-x64-gnu': 4.59.0 - '@rollup/rollup-win32-x64-msvc': 4.59.0 - fsevents: 2.3.3 + regex-utilities: 2.3.0 - 
runed@0.35.1(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12): + regex-utilities@2.3.0: {} + + regex@6.1.0: + dependencies: + regex-utilities: 2.3.0 + + rolldown@1.0.0-rc.15: + dependencies: + '@oxc-project/types': 0.124.0 + '@rolldown/pluginutils': 1.0.0-rc.15 + optionalDependencies: + '@rolldown/binding-android-arm64': 1.0.0-rc.15 + '@rolldown/binding-darwin-arm64': 1.0.0-rc.15 + '@rolldown/binding-darwin-x64': 1.0.0-rc.15 + '@rolldown/binding-freebsd-x64': 1.0.0-rc.15 + '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.15 + '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.15 + '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.15 + '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.15 + '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.15 + '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.15 + '@rolldown/binding-linux-x64-musl': 1.0.0-rc.15 + '@rolldown/binding-openharmony-arm64': 1.0.0-rc.15 + '@rolldown/binding-wasm32-wasi': 1.0.0-rc.15 + '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.15 + '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.15 + + runed@0.35.1(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3): dependencies: dequal: 2.0.3 esm-env: 1.2.2 lz-string: 1.5.0 - svelte: 5.53.12 + svelte: 5.55.3 optionalDependencies: - '@sveltejs/kit': 2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)) + '@sveltejs/kit': 2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)) sade@1.8.1: dependencies: mri: 1.2.0 - set-cookie-parser@3.0.1: {} + set-cookie-parser@3.1.0: {} + + shiki@4.0.2: + dependencies: + 
'@shikijs/core': 4.0.2 + '@shikijs/engine-javascript': 4.0.2 + '@shikijs/engine-oniguruma': 4.0.2 + '@shikijs/langs': 4.0.2 + '@shikijs/themes': 4.0.2 + '@shikijs/types': 4.0.2 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 sirv@3.0.2: dependencies: @@ -1427,32 +1439,39 @@ snapshots: source-map-js@1.2.1: {} + space-separated-tokens@2.0.2: {} + + stringify-entities@4.0.4: + dependencies: + character-entities-html4: 2.1.0 + character-entities-legacy: 3.0.0 + style-to-object@1.0.14: dependencies: inline-style-parser: 0.2.7 - svelte-check@4.4.5(picomatch@4.0.3)(svelte@5.53.12)(typescript@5.9.3): + svelte-check@4.4.6(picomatch@4.0.4)(svelte@5.55.3)(typescript@6.0.2): dependencies: '@jridgewell/trace-mapping': 0.3.31 chokidar: 4.0.3 - fdir: 6.5.0(picomatch@4.0.3) + fdir: 6.5.0(picomatch@4.0.4) picocolors: 1.1.1 sade: 1.8.1 - svelte: 5.53.12 - typescript: 5.9.3 + svelte: 5.55.3 + typescript: 6.0.2 transitivePeerDependencies: - picomatch - svelte-toolbelt@0.10.6(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12): + svelte-toolbelt@0.10.6(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3): dependencies: clsx: 2.1.1 - runed: 0.35.1(@sveltejs/kit@2.55.0(@sveltejs/vite-plugin-svelte@6.2.4(svelte@5.53.12)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12)(typescript@5.9.3)(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)))(svelte@5.53.12) + runed: 0.35.1(@sveltejs/kit@2.57.1(@sveltejs/vite-plugin-svelte@7.0.0(svelte@5.55.3)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3)(typescript@6.0.2)(vite@8.0.8(jiti@2.6.1)))(svelte@5.55.3) style-to-object: 1.0.14 - svelte: 5.53.12 + svelte: 5.55.3 transitivePeerDependencies: - '@sveltejs/kit' - svelte@5.53.12: + svelte@5.55.3: dependencies: 
'@jridgewell/remapping': 2.3.5 '@jridgewell/sourcemap-codec': 1.5.5 @@ -1463,46 +1482,83 @@ snapshots: aria-query: 5.3.1 axobject-query: 4.1.0 clsx: 2.1.1 - devalue: 5.6.4 + devalue: 5.7.1 esm-env: 1.2.2 - esrap: 2.2.4 + esrap: 2.2.5 is-reference: 3.0.3 locate-character: 3.0.0 magic-string: 0.30.21 zimmerframe: 1.1.4 + transitivePeerDependencies: + - '@typescript-eslint/types' tabbable@6.4.0: {} - tailwindcss@4.2.1: {} + tailwindcss@4.2.2: {} - tapable@2.3.0: {} + tapable@2.3.2: {} - tinyglobby@0.2.15: + tinyglobby@0.2.16: dependencies: - fdir: 6.5.0(picomatch@4.0.3) - picomatch: 4.0.3 + fdir: 6.5.0(picomatch@4.0.4) + picomatch: 4.0.4 totalist@3.0.1: {} + trim-lines@3.0.1: {} + tslib@2.8.1: {} - typescript@5.9.3: {} + typescript@6.0.2: {} - vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1): + unist-util-is@6.0.1: dependencies: - esbuild: 0.27.4 - fdir: 6.5.0(picomatch@4.0.3) - picomatch: 4.0.3 - postcss: 8.5.8 - rollup: 4.59.0 - tinyglobby: 0.2.15 + '@types/unist': 3.0.3 + + unist-util-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-stringify-position@4.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-visit-parents@6.0.2: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-visit@5.1.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + vfile-message@4.0.3: + dependencies: + '@types/unist': 3.0.3 + unist-util-stringify-position: 4.0.0 + + vfile@6.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile-message: 4.0.3 + + vite@8.0.8(jiti@2.6.1): + dependencies: + lightningcss: 1.32.0 + picomatch: 4.0.4 + postcss: 8.5.9 + rolldown: 1.0.0-rc.15 + tinyglobby: 0.2.16 optionalDependencies: fsevents: 2.3.3 jiti: 2.6.1 - lightningcss: 1.31.1 - vitefu@1.1.2(vite@7.3.1(jiti@2.6.1)(lightningcss@1.31.1)): + vitefu@1.1.3(vite@8.0.8(jiti@2.6.1)): optionalDependencies: - vite: 7.3.1(jiti@2.6.1)(lightningcss@1.31.1) + vite: 8.0.8(jiti@2.6.1) zimmerframe@1.1.4: {} + + zwitch@2.0.4: {} 
diff --git a/frontend/src/app.css b/frontend/src/app.css index 9c2e326..d76358b 100644 --- a/frontend/src/app.css +++ b/frontend/src/app.css @@ -92,6 +92,11 @@ body { min-height: 100vh; } +/* Instrument Serif reads less condensed with a touch of positive tracking */ +.font-serif { + letter-spacing: 0.015em; +} + /* Tabular figures on all mono text — numbers align in tables and metric displays */ .font-mono { font-variant-numeric: tabular-nums; @@ -175,6 +180,13 @@ body { 50% { transform: translateY(-6px); } } +/* CSS containment — isolate paint for independent UI regions. + Note: `contain: layout` is omitted because it creates a containing block + that breaks `position: fixed` popups rendered inside
. */ +main { + contain: style; +} + /* Respect user motion preferences — covers both CSS class animations and inline style animations */ @media (prefers-reduced-motion: reduce) { *, diff --git a/frontend/src/lib/api/admin-capsules.ts b/frontend/src/lib/api/admin-capsules.ts new file mode 100644 index 0000000..337ee0a --- /dev/null +++ b/frontend/src/lib/api/admin-capsules.ts @@ -0,0 +1,40 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; +import type { Capsule, CreateCapsuleParams, Snapshot } from '$lib/api/capsules'; +import type { AdminTemplate } from '$lib/api/builds'; + +export async function listAdminCapsules(): Promise> { + return apiFetch('GET', '/api/v1/admin/capsules'); +} + +export async function getAdminCapsule(id: string): Promise> { + return apiFetch('GET', `/api/v1/admin/capsules/${id}`); +} + +export async function createAdminCapsule(params: CreateCapsuleParams): Promise> { + return apiFetch('POST', '/api/v1/admin/capsules', params); +} + +export async function destroyAdminCapsule(id: string): Promise> { + return apiFetch('DELETE', `/api/v1/admin/capsules/${id}`); +} + +export async function snapshotAdminCapsule(id: string, name?: string): Promise> { + return apiFetch('POST', `/api/v1/admin/capsules/${id}/snapshot`, { name }); +} + +/** Fetch platform templates for the admin create dialog. */ +export async function listPlatformTemplates(): Promise> { + const result = await apiFetch('GET', '/api/v1/admin/templates'); + if (!result.ok) return result; + // Map AdminTemplate → Snapshot shape. 
+ const snapshots: Snapshot[] = result.data.map((t) => ({ + name: t.name, + type: t.type, + vcpus: t.vcpus || undefined, + memory_mb: t.memory_mb || undefined, + size_bytes: t.size_bytes, + created_at: t.created_at, + platform: true, + })); + return { ok: true, data: snapshots }; +} diff --git a/frontend/src/lib/api/admin-users.ts b/frontend/src/lib/api/admin-users.ts new file mode 100644 index 0000000..c5dd339 --- /dev/null +++ b/frontend/src/lib/api/admin-users.ts @@ -0,0 +1,28 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type AdminUser = { + id: string; + email: string; + name: string; + is_admin: boolean; + status: string; + created_at: string; + teams_joined: number; + teams_owned: number; +}; + +export type AdminUsersResponse = { + users: AdminUser[]; + total: number; + page: number; + per_page: number; + total_pages: number; +}; + +export async function listAdminUsers(page: number = 1): Promise> { + return apiFetch('GET', `/api/v1/admin/users?page=${page}`); +} + +export async function setUserActive(id: string, active: boolean): Promise> { + return apiFetch('PUT', `/api/v1/admin/users/${id}/active`, { active }); +} diff --git a/frontend/src/lib/api/auth.ts b/frontend/src/lib/api/auth.ts index 845b8a3..1a3ede9 100644 --- a/frontend/src/lib/api/auth.ts +++ b/frontend/src/lib/api/auth.ts @@ -6,17 +6,26 @@ export type AuthResponse = { name: string; }; +export type SignupResponse = { + message: string; +}; + export type AuthResult = { ok: true; data: AuthResponse } | { ok: false; error: string }; +export type SignupResult = { ok: true; data: SignupResponse } | { ok: false; error: string }; export async function apiLogin(email: string, password: string): Promise { return authFetch('/api/v1/auth/login', { email, password }); } -export async function apiSignup(email: string, password: string, name: string): Promise { +export async function apiSignup(email: string, password: string, name: string): Promise { return 
authFetch('/api/v1/auth/signup', { email, password, name }); } -async function authFetch(url: string, body: Record): Promise { +export async function apiActivate(token: string): Promise { + return authFetch('/api/v1/auth/activate', { token }); +} + +async function authFetch(url: string, body: Record): Promise<{ ok: true; data: T } | { ok: false; error: string }> { try { const res = await fetch(url, { method: 'POST', @@ -31,7 +40,7 @@ async function authFetch(url: string, body: Record): Promise; created_at: string; started_at?: string; completed_at?: string; @@ -39,9 +41,18 @@ export type CreateBuildParams = { vcpus?: number; memory_mb?: number; skip_pre_post?: boolean; + archive?: File; }; export async function createBuild(params: CreateBuildParams): Promise> { + if (params.archive) { + // Use multipart when an archive file is provided. + const { archive, ...config } = params; + const formData = new FormData(); + formData.append('config', JSON.stringify(config)); + formData.append('archive', archive); + return apiFetchMultipart('POST', '/api/v1/admin/builds', formData); + } return apiFetch('POST', '/api/v1/admin/builds', params); } diff --git a/frontend/src/lib/api/capsules.ts b/frontend/src/lib/api/capsules.ts index 565f14f..3e8f7f3 100644 --- a/frontend/src/lib/api/capsules.ts +++ b/frontend/src/lib/api/capsules.ts @@ -17,11 +17,11 @@ export type Capsule = { export async function listCapsules(): Promise> { - return apiFetch('GET', '/api/v1/sandboxes'); + return apiFetch('GET', '/api/v1/capsules'); } export async function getCapsule(id: string): Promise> { - return apiFetch('GET', `/api/v1/sandboxes/${id}`); + return apiFetch('GET', `/api/v1/capsules/${id}`); } export type CreateCapsuleParams = { @@ -32,19 +32,19 @@ export type CreateCapsuleParams = { }; export async function createCapsule(params: CreateCapsuleParams): Promise> { - return apiFetch('POST', '/api/v1/sandboxes', params); + return apiFetch('POST', '/api/v1/capsules', params); } export async function 
pauseCapsule(id: string): Promise> { - return apiFetch('POST', `/api/v1/sandboxes/${id}/pause`); + return apiFetch('POST', `/api/v1/capsules/${id}/pause`); } export async function resumeCapsule(id: string): Promise> { - return apiFetch('POST', `/api/v1/sandboxes/${id}/resume`); + return apiFetch('POST', `/api/v1/capsules/${id}/resume`); } export async function destroyCapsule(id: string): Promise> { - return apiFetch('DELETE', `/api/v1/sandboxes/${id}`); + return apiFetch('DELETE', `/api/v1/capsules/${id}`); } export type Snapshot = { @@ -57,8 +57,8 @@ export type Snapshot = { platform: boolean; }; -export async function createSnapshot(sandboxId: string, name?: string): Promise> { - return apiFetch('POST', '/api/v1/snapshots', { sandbox_id: sandboxId, name }); +export async function createSnapshot(capsuleId: string, name?: string): Promise> { + return apiFetch('POST', '/api/v1/snapshots', { sandbox_id: capsuleId, name }); } export async function listSnapshots(typeFilter?: string): Promise> { diff --git a/frontend/src/lib/api/client.ts b/frontend/src/lib/api/client.ts index 00fa381..d6e6459 100644 --- a/frontend/src/lib/api/client.ts +++ b/frontend/src/lib/api/client.ts @@ -22,3 +22,24 @@ export async function apiFetch(method: string, path: string, body?: unknown): return { ok: false, error: 'Unable to connect to the server' }; } } + +export async function apiFetchMultipart(method: string, path: string, formData: FormData): Promise> { + try { + const headers: Record = {}; + if (auth.token) headers['Authorization'] = `Bearer ${auth.token}`; + + const res = await fetch(path, { + method, + headers, + body: formData + }); + + if (res.status === 204) return { ok: true, data: undefined as T }; + + const data = await res.json(); + if (!res.ok) return { ok: false, error: data?.error?.message ?? 
'Something went wrong' }; + return { ok: true, data: data as T }; + } catch { + return { ok: false, error: 'Unable to connect to the server' }; + } +} diff --git a/frontend/src/lib/api/files.ts b/frontend/src/lib/api/files.ts new file mode 100644 index 0000000..7d066ec --- /dev/null +++ b/frontend/src/lib/api/files.ts @@ -0,0 +1,141 @@ +import { auth } from '$lib/auth.svelte'; +import { type ApiResult } from '$lib/api/client'; + +export type FileEntry = { + name: string; + path: string; + type: 'file' | 'directory' | 'symlink' | 'unknown'; + size: number; + mode: number; + permissions: string; + owner: string; + group: string; + modified_at: number; + symlink_target?: string | null; +}; + +export type ListDirResponse = { + entries: FileEntry[]; +}; + +const MAX_READABLE_SIZE = 10 * 1024 * 1024; // 10 MB + +/** + * Whether a file can be previewed as text in the browser. + * Binary/unreadable extensions and files > 10 MB should be downloaded instead. + */ +const BINARY_EXTENSIONS = new Set([ + '.png', '.jpg', '.jpeg', '.gif', '.bmp', '.ico', '.webp', '.avif', '.svg', + '.mp3', '.mp4', '.wav', '.ogg', '.flac', '.avi', '.mkv', '.mov', '.webm', + '.zip', '.tar', '.gz', '.bz2', '.xz', '.7z', '.rar', '.zst', + '.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx', + '.exe', '.dll', '.so', '.dylib', '.bin', '.o', '.a', '.class', '.pyc', + '.woff', '.woff2', '.ttf', '.otf', '.eot', + '.db', '.sqlite', '.sqlite3', + '.iso', '.img', '.dmg', +]); + +export function isBinaryFile(name: string): boolean { + const dot = name.lastIndexOf('.'); + if (dot === -1) return false; + return BINARY_EXTENSIONS.has(name.slice(dot).toLowerCase()); +} + +export function isFileTooLarge(size: number): boolean { + return size > MAX_READABLE_SIZE; +} + +export function formatFileSize(bytes: number): string { + if (bytes === 0) return '0 B'; + const units = ['B', 'KB', 'MB', 'GB', 'TB']; + const i = Math.floor(Math.log(bytes) / Math.log(1024)); + const val = bytes / Math.pow(1024, i); + return 
`${val < 10 ? val.toFixed(1) : Math.round(val)} ${units[i]}`; +} + +export async function listDir(capsuleId: string, path: string, depth = 1, basePath = '/api/v1/capsules'): Promise> { + try { + const headers: Record = { 'Content-Type': 'application/json' }; + if (auth.token) headers['Authorization'] = `Bearer ${auth.token}`; + + const res = await fetch(`${basePath}/${capsuleId}/files/list`, { + method: 'POST', + headers, + body: JSON.stringify({ path, depth }), + }); + + const data = await res.json(); + if (!res.ok) return { ok: false, error: data?.error?.message ?? 'Failed to list directory' }; + return { ok: true, data: data as ListDirResponse }; + } catch { + return { ok: false, error: 'Unable to connect to the server' }; + } +} + +export async function readFile( + capsuleId: string, + path: string, + signal?: AbortSignal, + basePath = '/api/v1/capsules', +): Promise> { + try { + const headers: Record = { 'Content-Type': 'application/json' }; + if (auth.token) headers['Authorization'] = `Bearer ${auth.token}`; + + const res = await fetch(`${basePath}/${capsuleId}/files/read`, { + method: 'POST', + headers, + body: JSON.stringify({ path }), + signal, + }); + + if (!res.ok) { + try { + const data = await res.json(); + return { ok: false, error: data?.error?.message ?? 
'Failed to read file' }; + } catch { + return { ok: false, error: `HTTP ${res.status}` }; + } + } + + const blob = await res.blob(); + const text = await blob.text(); + return { ok: true, data: text }; + } catch (e) { + if (e instanceof DOMException && e.name === 'AbortError') { + return { ok: false, error: 'Request aborted' }; + } + return { ok: false, error: 'Unable to connect to the server' }; + } +} + +export async function downloadFile( + capsuleId: string, + path: string, + filename: string, + signal?: AbortSignal, + basePath = '/api/v1/capsules', +): Promise { + const headers: Record = { 'Content-Type': 'application/json' }; + if (auth.token) headers['Authorization'] = `Bearer ${auth.token}`; + + const res = await fetch(`${basePath}/${capsuleId}/files/read`, { + method: 'POST', + headers, + body: JSON.stringify({ path }), + signal, + }); + + if (!res.ok) throw new Error('Download failed'); + + const blob = await res.blob(); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = filename; + document.body.appendChild(a); + a.click(); + a.remove(); + // Delay revocation so the browser has time to start the download + setTimeout(() => URL.revokeObjectURL(url), 5000); +} diff --git a/frontend/src/lib/api/me.ts b/frontend/src/lib/api/me.ts new file mode 100644 index 0000000..1396d9d --- /dev/null +++ b/frontend/src/lib/api/me.ts @@ -0,0 +1,42 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; +import type { AuthResponse } from '$lib/api/auth'; + +export type MeResponse = { + name: string; + email: string; + has_password: boolean; + providers: string[]; +}; + +export type ChangePasswordBody = { + current_password?: string; + new_password: string; + confirm_password?: string; +}; + +export const getMe = (): Promise> => + apiFetch('GET', '/api/v1/me'); + +export const updateName = (name: string): Promise> => + apiFetch('PATCH', '/api/v1/me', { name }); + +export const changePassword = (body: 
ChangePasswordBody): Promise> => + apiFetch('POST', '/api/v1/me/password', body); + +export const requestPasswordReset = (email: string): Promise> => + apiFetch('POST', '/api/v1/me/password/reset', { email }); + +export const confirmPasswordReset = ( + token: string, + new_password: string +): Promise> => + apiFetch('POST', '/api/v1/me/password/reset/confirm', { token, new_password }); + +export const getProviderConnectURL = (provider: string): Promise> => + apiFetch('GET', `/api/v1/me/providers/${provider}/connect`); + +export const disconnectProvider = (provider: string): Promise> => + apiFetch('DELETE', `/api/v1/me/providers/${provider}`); + +export const deleteAccount = (confirmation: string): Promise> => + apiFetch('DELETE', '/api/v1/me', { confirmation }); diff --git a/frontend/src/lib/api/metrics.ts b/frontend/src/lib/api/metrics.ts index baf9f11..f093bf5 100644 --- a/frontend/src/lib/api/metrics.ts +++ b/frontend/src/lib/api/metrics.ts @@ -15,11 +15,20 @@ export type MetricsResponse = { points: MetricPoint[]; }; -export async function fetchSandboxMetrics(id: string, range: MetricRange): Promise> { - return apiFetch('GET', `/api/v1/sandboxes/${id}/metrics?range=${range}`); +export async function fetchCapsuleMetrics(id: string, range: MetricRange, basePath = '/api/v1/capsules'): Promise> { + return apiFetch('GET', `${basePath}/${id}/metrics?range=${range}`); } export const METRIC_RANGES: MetricRange[] = ['5m', '10m', '1h', '6h', '24h']; -// All ranges poll every 10 seconds. +// Poll interval varies by range — shorter ranges need fresher data. 
+export const METRIC_POLL_INTERVALS: Record = { + '5m': 10_000, + '10m': 10_000, + '1h': 30_000, + '6h': 60_000, + '24h': 120_000, +}; + +/** @deprecated Use METRIC_POLL_INTERVALS instead */ export const METRIC_POLL_INTERVAL = 10_000; diff --git a/frontend/src/lib/api/stats.ts b/frontend/src/lib/api/stats.ts index 3f85483..948ae12 100644 --- a/frontend/src/lib/api/stats.ts +++ b/frontend/src/lib/api/stats.ts @@ -24,7 +24,7 @@ export type StatsResponse = { }; export async function fetchStats(range: TimeRange): Promise> { - return apiFetch('GET', `/api/v1/sandboxes/stats?range=${range}`); + return apiFetch('GET', `/api/v1/capsules/stats?range=${range}`); } export const POLL_INTERVALS: Record = { diff --git a/frontend/src/lib/api/team.ts b/frontend/src/lib/api/team.ts index 0ffc4ed..2cebb8e 100644 --- a/frontend/src/lib/api/team.ts +++ b/frontend/src/lib/api/team.ts @@ -83,3 +83,39 @@ export async function leaveTeam(id: string): Promise> { export async function searchUsers(email: string): Promise> { return apiFetch('GET', `/api/v1/users/search?email=${encodeURIComponent(email)}`); } + +// Admin team types and API functions + +export type AdminTeam = { + id: string; + name: string; + slug: string; + is_byoc: boolean; + created_at: string; + deleted_at: string | null; + member_count: number; + owner_name: string; + owner_email: string; + active_sandbox_count: number; + channel_count: number; +}; + +export type AdminTeamsResponse = { + teams: AdminTeam[]; + total: number; + page: number; + per_page: number; + total_pages: number; +}; + +export async function listAdminTeams(page: number = 1): Promise> { + return apiFetch('GET', `/api/v1/admin/teams?page=${page}`); +} + +export async function adminSetBYOC(id: string, enabled: boolean): Promise> { + return apiFetch('PUT', `/api/v1/admin/teams/${id}/byoc`, { enabled }); +} + +export async function adminDeleteTeam(id: string): Promise> { + return apiFetch('DELETE', `/api/v1/admin/teams/${id}`); +} diff --git 
a/frontend/src/lib/components/AdminSidebar.svelte b/frontend/src/lib/components/AdminSidebar.svelte index ebf4b64..e7421b0 100644 --- a/frontend/src/lib/components/AdminSidebar.svelte +++ b/frontend/src/lib/components/AdminSidebar.svelte @@ -3,14 +3,17 @@ import { auth } from '$lib/auth.svelte'; import { IconServer, - IconTemplate, + IconBox, + IconMonitor, IconSettings, IconLogout, IconSidebar, IconBell, IconDocs, IconChevron, - IconShield + IconShield, + IconMembers, + IconUser } from './icons'; let { collapsed = $bindable(false) }: { collapsed: boolean } = $props(); @@ -22,8 +25,14 @@ }; const managementItems: NavItem[] = [ - { label: 'Hosts', icon: IconServer, href: '/admin/hosts' }, - { label: 'Templates', icon: IconTemplate, href: '/admin/templates' } + { label: 'Users', icon: IconUser, href: '/admin/users' }, + { label: 'Teams', icon: IconMembers, href: '/admin/teams' } + ]; + + const platformItems: NavItem[] = [ + { label: 'Templates', icon: IconBox, href: '/admin/templates' }, + { label: 'Capsules', icon: IconMonitor, href: '/admin/capsules' }, + { label: 'Hosts', icon: IconServer, href: '/admin/hosts' } ]; function isActive(href: string): boolean { @@ -96,43 +105,8 @@ @@ -184,3 +158,43 @@ + +{#snippet navSection(title: string, items: NavItem[])} +
+ {#if !collapsed} +
+ {title} +
+ {:else} +
+ {/if} + {#each items as item} + {#if isActive(item.href)} + + {#if !collapsed} +
+ {/if} + + {#if !collapsed} + {item.label} + {/if} +
+ {:else} + + + {#if !collapsed} + {item.label} + {/if} + + {/if} + {/each} +
+{/snippet} diff --git a/frontend/src/lib/components/AuthModal.svelte b/frontend/src/lib/components/AuthModal.svelte deleted file mode 100644 index 2f23a5c..0000000 --- a/frontend/src/lib/components/AuthModal.svelte +++ /dev/null @@ -1,210 +0,0 @@ - - - - - - - - - - - -
- -
- - {title} - - - {subtitle} - -
- - - - - -
-
- or -
-
- - -
- {#if mode === 'signup'} -
-
- -
- -
- {/if} - -
-
- -
- -
- -
-
- -
- - -
- - {#if mode === 'signin'} -
- -
- {/if} - - -
- - -

- {switchText} - -

-
-
-
-
- - diff --git a/frontend/src/lib/components/CopyButton.svelte b/frontend/src/lib/components/CopyButton.svelte new file mode 100644 index 0000000..97ca2be --- /dev/null +++ b/frontend/src/lib/components/CopyButton.svelte @@ -0,0 +1,112 @@ + + + + + diff --git a/frontend/src/lib/components/CreateCapsuleDialog.svelte b/frontend/src/lib/components/CreateCapsuleDialog.svelte index 2bd027d..36d893f 100644 --- a/frontend/src/lib/components/CreateCapsuleDialog.svelte +++ b/frontend/src/lib/components/CreateCapsuleDialog.svelte @@ -1,23 +1,126 @@ + +{#if open} +
+ +
{ if (e.key === 'Escape') handleClose(); }} + >
+
+
+

Destroy Capsule

+

+ Terminate {capsuleId} and destroy all data inside it. This cannot be undone. +

+ + {#if error} +
+ {error} +
+ {/if} + +
+ + +
+
+
+
+{/if} diff --git a/frontend/src/lib/components/FilesTab.svelte b/frontend/src/lib/components/FilesTab.svelte new file mode 100644 index 0000000..46c2d4a --- /dev/null +++ b/frontend/src/lib/components/FilesTab.svelte @@ -0,0 +1,856 @@ + + + + +{#if !isRunning} +
+
+
+ +
+
+ Capsule not running + Start or resume the capsule to browse files +
+
+
+{:else} +
+ + +
+ + +
+
+ + + (pathInputFocused = true)} + onblur={() => (pathInputFocused = false)} + onkeydown={handleKeydown} + placeholder="/home/user or ~/file.txt" + spellcheck="false" + autocomplete="off" + class="flex-1 bg-transparent font-mono text-meta text-[var(--color-text-primary)] outline-none placeholder:text-[var(--color-text-muted)]" + /> + +
+
+ + +
+ + + + {#each breadcrumbs as crumb, i} + {#if i > 0} + + + + {/if} + + {/each} +
+ + +
+ {#if dirLoading} +
+
+ + + + Loading... +
+
+ {:else if dirError} +
+
+ + + + {dirError} +
+
+ {:else if entries.length === 0} +
+
+ + + +
+ Empty directory +
+ {:else} + {#each sortedEntries as entry, idx (entry.path)} + + {/each} + {/if} +
+ + + {#if !dirLoading && !dirError && entries.length > 0} +
+ {#if dirCount > 0} + + {dirCount} dir{dirCount !== 1 ? 's' : ''} + + {/if} + {#if fileCount > 0} + + {fileCount} file{fileCount !== 1 ? 's' : ''} + + {/if} +
+ {/if} +
+ + + {#if !treeOnly && (!compact || selectedFile)} +
+ {#if !selectedFile} + +
+
+
+ + + + +
+
+ Select a file to preview + Click a file in the tree or type a path above +
+
+
+ {:else} + +
+
+ {#if isBinaryFile(selectedFile.name) || isFileTooLarge(selectedFile.size)} + + + + + {:else} + + + + + {/if} + {selectedFile.path} + {#if extLabel(selectedFile.name)} + + {extLabel(selectedFile.name)} + + {/if} +
+
+ {formatFileSize(selectedFile.size)} + +
+
+ + +
+ {#if fileLoading} +
+
+ + + + Reading file... +
+
+ {:else if fileError} +
+
+ + + + {fileError} +
+
+ {:else if isSpecialFile} + +
+
+
+ + + + +
+
+ Not a regular file + + {selectedFile.name} + is a device, socket, or pipe + + + These file types can't be read or downloaded. + +
+
+
+ {:else if !isDownloadable} + +
+
+
+ + + + +
+
+ Symlink + {#if selectedFile.symlink_target} + + Points to {selectedFile.symlink_target} + + {/if} + + Open the target path to view or download its contents. + +
+
+
+ {:else if isBinaryFile(selectedFile.name) || isFileTooLarge(selectedFile.size) || (selectedFile && fileContent === null && !fileLoading)} + +
+
+
+ {#if isFileTooLarge(selectedFile.size)} + + + + + + {:else} + + + + + {/if} +
+
+ {#if isFileTooLarge(selectedFile.size)} + File too large to preview + + {formatFileSize(selectedFile.size)} exceeds the 10 MB preview limit + + {:else} + Binary file + + This file type can't be displayed as text + + {/if} +
+ +
+
+ {:else if fileContent !== null} + +
+
{#each previewLines.lines as line, i}
{i + 1}{#if highlightedTokens && highlightedTokens[i]}{#each highlightedTokens[i] as token}{token.content}{/each}{:else}{line || ' '}{/if}
{/each}
+
+ {#if previewLines.truncated} +
+ + Showing {MAX_PREVIEW_LINES.toLocaleString()} of {previewLines.totalLines.toLocaleString()} lines + + +
+ {/if} + {/if} +
+ {/if} +
+ {/if} + +
+{/if} diff --git a/frontend/src/lib/components/MetricsPanel.svelte b/frontend/src/lib/components/MetricsPanel.svelte new file mode 100644 index 0000000..38a424d --- /dev/null +++ b/frontend/src/lib/components/MetricsPanel.svelte @@ -0,0 +1,350 @@ + + + + +
+ +
+ {#if layout === 'full'} + {#if !metricsLoading} + + + Live + + {:else} +
+ {/if} + {:else} +
+ {/if} + +
+ {#each METRIC_RANGES as r, i} + + {/each} +
+
+ + {#if metricsError} +
+ + + + Could not load metrics: {metricsError}. Will retry automatically. +
+ {/if} + + +
+ + +
+
+
+ + CPU Usage +
+ {#if latestCpu !== null} +
+ {latestCpu.toFixed(1)} + % +
+ {:else if metricsLoading} + + {/if} +
+
+ +
+
+ + +
+
+
+ + RAM Usage +
+ {#if latestRamMB !== null} +
+ {latestRamMB.toFixed(0)} + MB +
+ {:else if metricsLoading} + + {/if} +
+
+ +
+
+ +
+
diff --git a/frontend/src/lib/components/Sidebar.svelte b/frontend/src/lib/components/Sidebar.svelte index 4111dd8..47d2960 100644 --- a/frontend/src/lib/components/Sidebar.svelte +++ b/frontend/src/lib/components/Sidebar.svelte @@ -49,7 +49,7 @@ const platformItems: NavItem[] = [ { label: 'Capsules', icon: IconMonitor, href: '/dashboard/capsules' }, - { label: 'Templates', icon: IconBox, href: '/dashboard/snapshots' }, + { label: 'Templates', icon: IconBox, href: '/dashboard/templates' }, { label: 'Metrics', icon: IconMetrics, href: '/dashboard/metrics' } ]; @@ -280,13 +280,21 @@ {#if !collapsed}Notifications{/if} -
- - {#if !collapsed}Settings{/if} -
+ {#if isActive('/dashboard/settings') && !collapsed} +
+ {/if} + + {#if !collapsed} + + Settings + + {/if} + @@ -402,7 +410,7 @@ class="relative w-full max-w-[380px] rounded-[var(--radius-card)] border border-[var(--color-border-mid)] bg-[var(--color-bg-2)] p-6" style="animation: fadeUp 0.2s ease both; box-shadow: var(--shadow-dialog)" > -

+

Create Team

diff --git a/frontend/src/lib/components/SnapshotDialog.svelte b/frontend/src/lib/components/SnapshotDialog.svelte new file mode 100644 index 0000000..3db648d --- /dev/null +++ b/frontend/src/lib/components/SnapshotDialog.svelte @@ -0,0 +1,130 @@ + + +{#if open} +

+ +
{ if (e.key === 'Escape') handleClose(); }} + >
+ +
+
+
+ + + + +
+
+

Capture snapshot

+

{capsuleId}

+
+
+ +
+ {#if pauseFirst} +
+ + + + + +

This capsule will be paused first, then its full state (memory + disk) will be captured.

+
+ {:else} +

The capsule's current state (memory + disk) will be captured and stored as a reusable snapshot.

+ {/if} + + {#if error} +
+ {error} +
+ {/if} + +
+
+ + optional +
+ { if (e.key === 'Enter' && !snapshotting) handleConfirm(); }} + /> +

Leave blank to use an auto-generated name.

+
+ +
+ + +
+
+
+
+{/if} diff --git a/frontend/src/lib/components/StatsPanel.svelte b/frontend/src/lib/components/StatsPanel.svelte index d7a2f12..b40d558 100644 --- a/frontend/src/lib/components/StatsPanel.svelte +++ b/frontend/src/lib/components/StatsPanel.svelte @@ -26,6 +26,8 @@ let chartRam: any = null; let pollInterval: ReturnType | null = null; + let lastDataKey = ''; // cheap fingerprint to skip redundant chart redraws + let visibilityHandler: (() => void) | null = null; async function load() { const result = await fetchStats(range); @@ -43,6 +45,10 @@ function updateCharts() { if (!stats) return; + // Skip redraw if data hasn't changed (same length + same last label). + const key = `${stats.series.labels.length}:${stats.series.labels.at(-1) ?? ''}`; + if (key === lastDataKey) return; + lastDataKey = key; // Use Array.from to pass plain JS arrays to Chart.js — Svelte 5 $state // wraps arrays in reactive proxies which Chart.js can't iterate reliably. const labels = formatLabels(Array.from(stats.series.labels), range); @@ -77,14 +83,19 @@ }); } + function stopPolling() { + if (pollInterval) { clearInterval(pollInterval); pollInterval = null; } + } + function restartPolling() { - if (pollInterval) clearInterval(pollInterval); + stopPolling(); load(); pollInterval = setInterval(load, POLL_INTERVALS[range]); } function setRange(r: TimeRange) { range = r; + lastDataKey = ''; // force chart redraw on range switch goto(`?range=${r}`, { replaceState: true, noScroll: true, keepFocus: true }); restartPolling(); } @@ -185,7 +196,7 @@ ...BASE_CHART_OPTIONS.scales.y, ticks: { ...BASE_CHART_OPTIONS.scales.y.ticks, - callback: (v: number) => `${v}`, + callback: (v: string | number) => `${v}`, }, }, }, @@ -215,7 +226,8 @@ tooltip: { ...BASE_CHART_OPTIONS.plugins.tooltip, callbacks: { - label: (ctx: { parsed: { y: number } }) => ` ${ctx.parsed.y.toFixed(1)} GB`, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + label: (ctx: any) => ` ${ctx.parsed.y.toFixed(1)} GB`, }, }, }, 
@@ -225,7 +237,7 @@ ...BASE_CHART_OPTIONS.scales.y, ticks: { ...BASE_CHART_OPTIONS.scales.y.ticks, - callback: (v: number) => `${(+v).toFixed(1)} GB`, + callback: (v: string | number) => `${(+v).toFixed(1)} GB`, }, }, }, @@ -236,10 +248,21 @@ updateCharts(); restartPolling(); + + // Pause polling when the browser tab is hidden to save bandwidth/CPU. + visibilityHandler = () => { + if (document.hidden) { + stopPolling(); + } else { + restartPolling(); + } + }; + document.addEventListener('visibilitychange', visibilityHandler); }); onDestroy(() => { - if (pollInterval) clearInterval(pollInterval); + stopPolling(); + if (visibilityHandler) document.removeEventListener('visibilitychange', visibilityHandler); chartRunning?.destroy(); chartCpu?.destroy(); chartRam?.destroy(); @@ -312,7 +335,7 @@
Peak · 30d
-
+
{loading ? '—' : (stats?.peaks.running_count ?? 0)}
@@ -334,7 +357,7 @@
Peak · 30d
-
+
{loading ? '—' : (stats?.peaks.vcpus ?? 0)}
@@ -356,7 +379,7 @@
Peak · 30d
-
+
{loading ? '—' : fmtGB(stats?.peaks.memory_mb ?? 0)}
diff --git a/frontend/src/lib/components/TerminalTab.svelte b/frontend/src/lib/components/TerminalTab.svelte new file mode 100644 index 0000000..e7eadf4 --- /dev/null +++ b/frontend/src/lib/components/TerminalTab.svelte @@ -0,0 +1,602 @@ + + + + +
+ {#if !isRunning} +
+
+
+ +
+
+ Terminal unavailable + Start the capsule to connect +
+
+
+ {:else} + +
+
+ {#each sessions as session (session.id)} + +
switchTo(session.id)} + onkeydown={(e) => { if (e.key === 'Enter' || e.key === ' ') switchTo(session.id); }} + role="tab" + tabindex="0" + aria-selected={session.id === activeSessionId} + class="term-tab group flex shrink-0 cursor-pointer items-center gap-2.5 px-5 py-2.5 text-meta transition-colors + {session.id === activeSessionId + ? 'term-tab-active bg-[var(--color-bg-0)] text-[var(--color-text-primary)]' + : 'bg-[var(--color-bg-1)] text-[var(--color-text-tertiary)] hover:bg-[var(--color-bg-2)] hover:text-[var(--color-text-secondary)] border-b border-b-[var(--color-border)]'}" + > + {#if session.state === 'connected'} + + + + + {:else if session.state === 'connecting'} + + {:else if session.state === 'error'} + + {:else} + + {/if} + + + bash{#if session.ptyPid}:{session.ptyPid}{/if} + + + +
+ {/each} +
+ + + +
+ + {#if activeSession} +
+ {#if activeSession.state === 'error' && activeSession.errorMessage} + {activeSession.errorMessage} + {/if} + + {#if (activeSession.state === 'disconnected' || activeSession.state === 'error') && activeSession.ptyTag} + + {/if} + + {#if activeSession.ptyTag} + {activeSession.ptyTag} + {/if} +
+ {/if} +
+ + +
+ {#each sessions as session (session.id)} +
+ {/each} + + {#if sessions.length === 0} +
+
+
+ +
+
+ No active sessions + All terminal sessions have been closed +
+ +
+
+ {/if} +
+ {/if} +
diff --git a/frontend/src/lib/components/Toaster.svelte b/frontend/src/lib/components/Toaster.svelte index bb846dc..d81917f 100644 --- a/frontend/src/lib/components/Toaster.svelte +++ b/frontend/src/lib/components/Toaster.svelte @@ -13,6 +13,7 @@ {t.message} +
+ + + + +
+ +
+
+ + + + +
+ {filteredCapsules.length} capsule{filteredCapsules.length !== 1 ? 's' : ''} + +
+ + + + + + +
+ + {#if error} +
+ + + + {error}. Try refreshing the page. +
+ {/if} + + +
+ +
+
ID
+ {@render sortableHeader('Template', 'template')} + {@render sortableHeader('CPU', 'vcpus')} + {@render sortableHeader('Memory', 'memory_mb')} + {@render sortableHeader('Started', 'started_at')} + {@render sortableHeader('Status', 'status')} +
Actions
+
+ + {#if loading && capsules.length === 0} +
+
+ + + + Loading capsules... +
+
+ {:else if filteredCapsules.length === 0 && searchQuery} +
+
+
+ + + +
+
+

+ No matching capsules +

+

+ No capsules match "{searchQuery}". +

+ +
+ {:else if filteredCapsules.length === 0} +
+
+
+
+ + + +
+
+

+ No capsules +

+

+ Launch a capsule, configure it interactively, then snapshot it as a platform template. +

+ +
+ {:else} + {#each filteredCapsules as capsule, i (capsule.id)} + {@const stripeColor = capsule.status === 'running' ? 'bg-[var(--color-accent)]' : capsule.status === 'paused' ? 'bg-[var(--color-amber)]' : capsule.status === 'error' ? 'bg-[var(--color-red)]' : 'bg-[var(--color-text-muted)]'} +
+ +
+ + +
+ {#if capsule.status === 'running'} + + + + + {:else if capsule.status === 'paused'} + + {:else if capsule.status === 'error'} + + {:else} + + {/if} + {#if searchQuery && capsule.id.toLowerCase().includes(searchQuery.toLowerCase())} + {@const matchIdx = capsule.id.toLowerCase().indexOf(searchQuery.toLowerCase())} + {capsule.id.slice(0, matchIdx)}{capsule.id.slice(matchIdx, matchIdx + searchQuery.length)}{capsule.id.slice(matchIdx + searchQuery.length)} + {:else} + {capsule.id} + {/if} + +
+ + +
+ {capsule.template} +
+ + +
+ {capsule.vcpus} +
+ + +
+ {capsule.memory_mb}MB +
+ + +
+ {formatTime(capsule.started_at)} + {#if capsule.last_active_at} + active {timeAgo(capsule.last_active_at)} + {/if} +
+ + +
+ + {capsule.status} + +
+ + +
+ {#if capsule.status === 'running' || capsule.status === 'paused'} + + {/if} +
+
+ {/each} + {/if} +
+
+ + +
+
+ + + + + All systems operational +
+
+
+ + { showCreateDialog = false; }} + oncreated={handleCreated} + templateSource="platform" +/> + +{#if destroyTarget} + { destroyTarget = null; }} + ondestroyed={handleDestroyed} + destroyFn={destroyAdminCapsule} + /> +{/if} + +{#snippet sortableHeader(label: string, key: SortKey)} + +{/snippet} diff --git a/frontend/src/routes/admin/capsules/[id]/+page.js b/frontend/src/routes/admin/capsules/[id]/+page.js new file mode 100644 index 0000000..d43d0cd --- /dev/null +++ b/frontend/src/routes/admin/capsules/[id]/+page.js @@ -0,0 +1 @@ +export const prerender = false; diff --git a/frontend/src/routes/admin/capsules/[id]/+page.svelte b/frontend/src/routes/admin/capsules/[id]/+page.svelte new file mode 100644 index 0000000..0c9d587 --- /dev/null +++ b/frontend/src/routes/admin/capsules/[id]/+page.svelte @@ -0,0 +1,323 @@ + + + + Wrenn Admin — {capsuleId} + + +
+ {#if capsuleLoading} +
+
+ + Loading capsule... +
+
+ {:else if capsuleError} +
+
+ + + + {capsuleError} +
+
+ {:else if capsule} + +
+
+ + Capsules + + + + {capsuleId} + + + + + {#if capsule.status === 'running'} + + + + + {/if} + {capsule.status} + + {capsule.template} · {capsule.vcpus}v · {capsule.memory_mb}MB + +
+ {#if capsule.status === 'running' || capsule.status === 'paused'} + + + {/if} +
+
+
+ +
+ + +
+ +
+ +
+ + +
+ {#if metricsAvailable} +
+ +
+ {/if} + +
+ +
+
+
+ {/if} + + +
+
+ + + + + All systems operational +
+
+
+ + +{#if showSnapshot} +
+ +
{ if (!snapshotting) showSnapshot = false; }} + onkeydown={(e) => { if (e.key === 'Escape' && !snapshotting) showSnapshot = false; }} + >
+ +
+
+
+ + + + +
+
+

Snapshot as platform template

+

{capsuleId}

+
+
+ +
+
+ + + + + +

This will pause, snapshot, and destroy the capsule. The snapshot will be available as a platform template for all teams.

+
+ + {#if snapshotError} +
+ {snapshotError} +
+ {/if} + +
+
+ + optional +
+ { if (e.key === 'Enter' && !snapshotting) handleSnapshot(); }} + /> +

Leave blank for an auto-generated name. If the name already exists, it will be overwritten.

+
+ +
+ + +
+
+
+
+{/if} + + { showDestroy = false; }} + ondestroyed={() => { toast.success('Capsule destroyed'); goto('/admin/capsules'); }} + destroyFn={destroyAdminCapsule} +/> diff --git a/frontend/src/routes/admin/hosts/+page.svelte b/frontend/src/routes/admin/hosts/+page.svelte index 16c7476..30b3ad5 100644 --- a/frontend/src/routes/admin/hosts/+page.svelte +++ b/frontend/src/routes/admin/hosts/+page.svelte @@ -1,5 +1,4 @@ -
- +
+ +
+ +
-
- -
-
-
-

- Hosts +
+
+

+ Hosts

-

+

Platform and BYOC compute across all teams.

{#if activeTab === 'platform'} {/if}
- + {#if !loading && !error} -
-
- {totalCount} +
+
+ {totalCount} total
-
- - - +
+ + + - {onlineCount} + {onlineCount} online
{#if pendingCount > 0} -
- {pendingCount} +
+ {pendingCount} pending
{/if} @@ -214,30 +207,32 @@ {/if}

- -
+ +
{#each [['platform', 'Platform', platformHosts.length], ['byoc', 'BYOC', byocHosts.length]] as [id, label, count] (id)} {/each}
-
+
{#if loading} {@render skeletonRows()} {:else if error} @@ -251,16 +246,16 @@ {#if byocHosts.length === 0} {@render emptyState('byoc')} {:else} -
+
{#each byocGroups as group (group.teamId ?? '__none__')} {@const groupPageHosts = byocPageHosts.filter(h => h.team_id === group.teamId || (group.teamId === null && !h.team_id))} {#if groupPageHosts.length > 0}
-
+
{group.teamName} - + {group.hosts.length}
@@ -271,22 +266,22 @@ {#if byocPageCount > 1} -
+
- Page {byocPage + 1} of {byocPageCount} · {byocHosts.length} hosts + Page {byocPage + 1} of {byocPageCount} · {byocHosts.length} hosts
@@ -298,37 +293,46 @@ {/if}
-
+ +
+
+ + + + + All systems operational +
+
{#snippet skeletonRows()} -
+
- - - - - - + + + + + + {#each Array(5) as _, i} - - + - - - - @@ -342,26 +346,28 @@ {#if hosts.length === 0} {@render emptyState('platform')} {:else} -
+
HostStatus
HostStatus
+
+
+
- - - - - - + + + + + + - {#each hosts as host (host.id)} + {#each hosts as host, i (host.id)} - - - - -
HostStatus
HostStatus
-
{host.id}
+
+
{host.id}
{#if host.address}
{host.address}
{/if} @@ -371,31 +377,31 @@ {/if}
- + + {#if host.status === 'online'} - - - + + + {:else} - + {/if} {host.status} + @@ -409,18 +415,31 @@ {/snippet} {#snippet emptyState(type: 'platform' | 'byoc')} -
-
- +
+ +
+
+
+ +
-

- {type === 'platform' ? 'No platform hosts yet.' : 'No BYOC hosts across any team.'} +

+ {type === 'platform' ? 'No platform hosts yet' : 'No BYOC hosts across any team'}

-

+

{type === 'platform' ? 'Add a host to start scheduling capsules onto your own compute.' : 'Teams that register their own compute will appear here.'}

+ {#if type === 'platform'} + + {/if}
{/snippet} @@ -435,10 +454,14 @@ onkeydown={(e) => { if (e.key === 'Escape' && !creating) showCreate = false; }} >
-

+ +
+ +
+

Add Platform Host

@@ -491,7 +514,7 @@

+

{/if} @@ -510,11 +534,15 @@
+ +
+ +
-
+
-

+

Host registered

@@ -558,11 +586,12 @@

+
{/if} @@ -578,10 +607,14 @@ onkeydown={(e) => { if (e.key === 'Escape' && !deleting) deleteTarget = null; }} >
-

+ +
+ +
+

Delete Host

@@ -593,10 +626,10 @@ Checking active capsules…

- {:else if deletePreviewSandboxes.length > 0} + {:else if deletePreviewCapsules.length > 0}

- {deletePreviewSandboxes.length} active capsule{deletePreviewSandboxes.length === 1 ? '' : 's'} will be destroyed. + {deletePreviewCapsules.length} active capsule{deletePreviewCapsules.length === 1 ? '' : 's'} will be destroyed.

All running workloads on this host will be terminated immediately. @@ -621,16 +654,17 @@

+

{/if} @@ -676,4 +710,54 @@ .checkmark-drawn { stroke-dashoffset: 0; } + + /* Stat pill — shared base */ + .stat-pill { + display: flex; + align-items: baseline; + gap: 6px; + border-radius: var(--radius-button); + border-width: 1px; + padding: 6px 12px; + transition: transform 0.15s ease, box-shadow 0.15s ease; + } + .stat-pill:hover { + transform: translateY(-1px); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); + } + + /* Table header */ + .table-header { + padding: 10px 20px; + text-align: left; + font-size: var(--text-label); + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.06em; + color: var(--color-text-tertiary); + } + + /* Staggered row entrance */ + .table-row-animate { + animation: fadeUp 0.25s ease both; + } + + /* Tab button */ + .tab-button { + position: relative; + padding: 14px 20px 14px 0; + font-size: var(--text-ui); + transition: color 0.15s ease; + cursor: pointer; + } + + /* Online host row — subtle left accent */ + .host-row-online { + box-shadow: inset 3px 0 0 var(--color-accent); + } + + /* Empty state icon float */ + .empty-icon-float { + animation: iconFloat 3s ease-in-out infinite; + } diff --git a/frontend/src/routes/admin/teams/+page.svelte b/frontend/src/routes/admin/teams/+page.svelte new file mode 100644 index 0000000..90a8db2 --- /dev/null +++ b/frontend/src/routes/admin/teams/+page.svelte @@ -0,0 +1,498 @@ + + + + Wrenn Admin — Teams + + + + +
+ +
+
+ +
+
+

+ Teams +

+

+ All registered teams, BYOC status, and active capsules. +

+
+
+ + + {#if !loading && !error} +
+
+ {totalTeams} + team{totalTeams !== 1 ? 's' : ''} +
+ {#if byocCount > 0} +
+ {byocCount} + BYOC +
+ {/if} + {#if totalActiveSandboxes > 0} +
+ + + + + {totalActiveSandboxes} + active +
+ {/if} +
+ {/if} +
+ + +
+ {#if error} +
+ + + + {error}. Try refreshing the page. +
+ {/if} + + +
+ +
+
Name
+
Members
+
Owner
+
BYOC
+
Capsules
+
Channels
+
Created
+
Actions
+
+ + {#if loading && teams.length === 0} +
+
+ + + + Loading teams... +
+
+ {:else if teams.length === 0} +
+
+
+
+ + + +
+
+

+ No teams yet +

+

+ Teams are created when users sign up. +

+
+ {:else} + {#each teams as team, i (team.id)} + {@const isDeleted = !!team.deleted_at} +
+ + {#if !isDeleted} +
+ {/if} + + +
+
+ {team.name} + {#if isDeleted} + + Deleted + + {/if} +
+ {team.slug} +
+ + +
+ {team.member_count} +
+ + +
+ {#if team.owner_name || team.owner_email} + {team.owner_name || '\u2014'} + {team.owner_email} + {:else} + + {/if} +
+ + +
+ {#if team.is_byoc} + + Enabled + + {:else if !isDeleted} + + {:else} + + {/if} +
+ + +
+ {#if team.active_sandbox_count > 0} + + + + + + {team.active_sandbox_count} + + {:else} + 0 + {/if} +
+ + +
+ {team.channel_count} +
+ + +
+ {formatDate(team.created_at)} +
+ + +
+ {#if !isDeleted} + + {/if} +
+
+ {/each} + {/if} +
+ + + {#if totalPages > 1} +
+ + Page {currentPage} of {totalPages} + +
+ + +
+
+ {/if} +
+ + +
+
+ + + + + All systems operational +
+
+
+ + +{#if byocTarget} +
+ +
{ if (!enablingByoc) byocTarget = null; }} + onkeydown={(e) => { if (e.key === 'Escape' && !enablingByoc) byocTarget = null; }} + >
+
+
+

+ Enable BYOC +

+

+ Allow {byocTarget.name} to register and run capsules on their own hosts. +

+ +
+ + + + + BYOC cannot be disabled once enabled. + +
+ + {#if byocError} +
+ {byocError} +
+ {/if} + +
+ + +
+
+
+
+{/if} + + +{#if deleteTarget} +
+ +
{ if (!deleting) deleteTarget = null; }} + onkeydown={(e) => { if (e.key === 'Escape' && !deleting) deleteTarget = null; }} + >
+
+
+

+ Delete Team +

+

+ Remove {deleteTarget.name} and stop all its running capsules. Members will lose access immediately. +

+ + {#if deleteTarget.active_sandbox_count > 0} +
+ + + + + {deleteTarget.active_sandbox_count} active capsule{deleteTarget.active_sandbox_count !== 1 ? 's' : ''} will be destroyed immediately. + +
+ {/if} + + {#if deleteError} +
+ {deleteError} +
+ {/if} + +
+ + +
+
+
+
+{/if} diff --git a/frontend/src/routes/admin/templates/+page.svelte b/frontend/src/routes/admin/templates/+page.svelte index 4619e7b..ae678ed 100644 --- a/frontend/src/routes/admin/templates/+page.svelte +++ b/frontend/src/routes/admin/templates/+page.svelte @@ -1,5 +1,5 @@ -
- +
+ +
+ +
-
- -
-
+
-

+

Templates

-

+

Build and manage global templates available to all teams.

- + {#if !templatesLoading && !templatesError} -
-
- {templateCount} +
+
+ {templateCount} templates
-
- {baseCount} +
+ {baseCount} base
-
- {snapshotCount} +
+ {snapshotCount} snapshots
{#if runningBuilds > 0} -
- - - +
+ + + - {runningBuilds} + {runningBuilds} building
{/if} @@ -299,30 +311,32 @@ {/if}
- -
+ +
{#each [['templates', 'Templates', templateCount], ['builds', 'Builds', builds.length]] as [id, label, count] (id)} {/each}
-
+
{#if activeTab === 'templates'} {#if templatesLoading} {@render skeletonRows(5, ['Name', 'Type', 'Specs', 'Size', 'Created', ''])} @@ -349,26 +363,35 @@ {/if} {/if}
-
-
+ + +
+
+ + + + + All systems operational +
+
{#snippet skeletonRows(count: number, headers: string[])} -
+
- + {#each headers as h} - + {/each} {#each Array(count) as _, i} - + {#each headers as _h, j} - {/each} @@ -380,78 +403,99 @@ {/snippet} {#snippet emptyState(type: 'templates' | 'builds')} -
-
- {#if type === 'templates'} - - {:else} - - {/if} +
+ +
+
+
+ {#if type === 'templates'} + + {:else} + + {/if} +
-

- {type === 'templates' ? 'No templates yet.' : 'No builds yet.'} +

+ {type === 'templates' ? 'No templates yet' : 'No builds yet'}

-

+

{type === 'templates' ? 'Create a template to provide pre-configured environments for all teams.' : 'Start a template build to see progress and logs here.'}

+ {#if type === 'templates'} + + {/if}
{/snippet} {#snippet templatesTable()} -
+
{h}{h}
+
- - - - - - - + + + + + + + - {#each templates as tmpl (tmpl.name)} - - + - - - - -
NameType
NameType
- {tmpl.name} + {#each templates as tmpl, i (tmpl.name)} +
+
+ {tmpl.name} + +
+ {#if tmpl.type === 'snapshot'} - + + snapshot {:else} - + + base {/if} + @@ -464,28 +508,30 @@ {/snippet} {#snippet buildsTable()} -
+
- - - - - - - - + + + + + + + + - {#each builds as build (build.id)} + {#each builds as build, i (build.id)} toggleBuildExpand(build.id)} > - - - - - - - @@ -547,7 +595,7 @@ {#if expandedBuildId === build.id}
BuildNameStatus
BuildNameStatus
-
+
+
{build.id}
- {build.name} + + {build.name} - + + {#if build.status === 'running'} - - - + + + {:else if build.status === 'success'} - + {:else if build.status === 'failed'} - + {:else} - + {/if} {build.status}
-
+
{#if build.status === 'pending' || build.status === 'running'}
+ + {:else} + tar, tar.gz, or zip + {/if} +
+
+
+
{/if} @@ -829,18 +917,18 @@ {#if deleteTarget}
+
{ if (!deleting) deleteTarget = null; }} onkeydown={(e) => { if (e.key === 'Escape' && !deleting) deleteTarget = null; }} >
-

+
+

Delete Template

@@ -864,7 +952,7 @@

+

{/if} @@ -899,4 +988,59 @@ background-size: 200% 100%; animation: shimmer 1.4s ease infinite; } + + /* Stat pill — shared base */ + .stat-pill { + display: flex; + align-items: baseline; + gap: 6px; + border-radius: var(--radius-button); + border-width: 1px; + padding: 6px 12px; + transition: transform 0.15s ease, box-shadow 0.15s ease; + } + .stat-pill:hover { + transform: translateY(-1px); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); + } + + /* Table header */ + .table-header { + padding: 10px 20px; + text-align: left; + font-size: var(--text-label); + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.06em; + color: var(--color-text-tertiary); + } + + /* Staggered row entrance */ + .table-row-animate { + animation: fadeUp 0.25s ease both; + } + + /* Tab button */ + .tab-button { + position: relative; + padding: 14px 20px 14px 0; + font-size: var(--text-ui); + transition: color 0.15s ease; + cursor: pointer; + } + + /* Active build row — subtle left accent */ + .build-row-active { + box-shadow: inset 3px 0 0 var(--color-blue); + } + + /* Progress bar glow for running builds */ + .progress-bar-glow { + box-shadow: 0 0 8px rgba(90, 159, 212, 0.4); + } + + /* Empty state icon float */ + .empty-icon-float { + animation: iconFloat 3s ease-in-out infinite; + } diff --git a/frontend/src/routes/admin/users/+page.svelte b/frontend/src/routes/admin/users/+page.svelte new file mode 100644 index 0000000..3630f4f --- /dev/null +++ b/frontend/src/routes/admin/users/+page.svelte @@ -0,0 +1,294 @@ + + + + Wrenn Admin — Users + + + + +
+ +
+
+ +
+
+

+ Users +

+

+ All registered users, team memberships, and account status. +

+
+
+ + + {#if !loading && !error} +
+
+ {totalUsers} + user{totalUsers !== 1 ? 's' : ''} +
+
+ {/if} +
+ + +
+ {#if error} +
+ + + + {error}. Try refreshing the page. +
+ {/if} + + +
+ +
+
Name
+
Email
+
Teams
+
Owned
+
Role
+
Joined
+
Status
+
+ + {#if loading && users.length === 0} +
+
+ + + + Loading users... +
+
+ {:else if users.length === 0} +
+
+
+
+ + + +
+
+

+ No users yet +

+

+ Users appear here when they sign up. +

+
+ {:else} + {#each users as user, i (user.id)} +
+ + {#if user.status === 'active'} +
+ {/if} + + +
+
+ {user.name || '\u2014'} + {#if user.is_admin} + + Admin + + {/if} +
+ {user.id} +
+ + +
+ {user.email} +
+ + +
+ {user.teams_joined} +
+ + +
+ {user.teams_owned} +
+ + +
+ {user.is_admin ? 'Admin' : 'User'} +
+ + +
+ {formatDate(user.created_at)} +
+ + +
+ +
+
+ {/each} + {/if} +
+ + + {#if totalPages > 1} +
+ + Page {currentPage} of {totalPages} + +
+ + +
+
+ {/if} +
+ + +
+
+ + + + + All systems operational +
+
+
diff --git a/frontend/src/routes/dashboard/+layout.svelte b/frontend/src/routes/dashboard/+layout.svelte index 404b561..910fc04 100644 --- a/frontend/src/routes/dashboard/+layout.svelte +++ b/frontend/src/routes/dashboard/+layout.svelte @@ -1,7 +1,19 @@ -{@render children()} +
+ +
+ {@render children()} +
+
diff --git a/frontend/src/routes/dashboard/audit/+page.svelte b/frontend/src/routes/dashboard/audit/+page.svelte index 1cd4e93..3923155 100644 --- a/frontend/src/routes/dashboard/audit/+page.svelte +++ b/frontend/src/routes/dashboard/audit/+page.svelte @@ -1,14 +1,7 @@ Wrenn — Capsules -
- - -
-
- - {#if $page.params.id} - -
-
- - Capsules - - - - {$page.params.id} - -
-
- {:else} - -
-
-
-

- Capsules -

-

- All active and recent capsules across your team. -

-
- -
-
- - - - - {capsuleRunningCount.value} - running now -
-
-
-
- {/if} - - {@render children()} -
- - -
-
- - - +
+ + {#if $page.params.id} + +
+
+ + Capsules + + + + + {$page.params.id} + + - All systems operational
-
+
+ {:else} + +
+
+
+

+ Capsules +

+

+ All active and recent capsules across your team. +

+
+ +
+
+ + + + + {capsuleRunningCount.value} + running now +
+
+
+ +
+
+ {/if} + + {@render children()} + + + +
+
+ + + + + All systems operational
-
+ diff --git a/frontend/src/routes/dashboard/capsules/+page.svelte b/frontend/src/routes/dashboard/capsules/+page.svelte index afb9de0..d7fd84c 100644 --- a/frontend/src/routes/dashboard/capsules/+page.svelte +++ b/frontend/src/routes/dashboard/capsules/+page.svelte @@ -1,5 +1,8 @@ @@ -379,7 +371,7 @@ {/if} -
+
ID
@@ -388,7 +380,7 @@ {@render sortableHeader('Memory', 'memory_mb')} {@render sortableHeader('Idle Timeout', 'timeout_sec')} {@render sortableHeader('Started', 'started_at')} - {@render sortableHeader('Status', 'status')} + {@render sortableHeader('Status', 'status', 'right')}
{#if loading && capsules.length === 0} @@ -400,7 +392,31 @@ Loading capsules...
+ {:else if filteredCapsules.length === 0 && searchQuery} + +
+
+
+ + + +
+
+

+ No matching capsules +

+

+ No capsules match "{searchQuery}". Try a different ID. +

+ +
{:else if filteredCapsules.length === 0} +
@@ -412,7 +428,7 @@
-

+

No capsules yet

@@ -440,7 +456,7 @@

-
+
{#if capsule.status === 'running'} @@ -453,10 +469,11 @@ {/if} {#if searchQuery && capsule.id.toLowerCase().includes(searchQuery.toLowerCase())} {@const matchIdx = capsule.id.toLowerCase().indexOf(searchQuery.toLowerCase())} - {capsule.id.slice(0, matchIdx)}{capsule.id.slice(matchIdx, matchIdx + searchQuery.length)}{capsule.id.slice(matchIdx + searchQuery.length)} + {capsule.id.slice(0, matchIdx)}{capsule.id.slice(matchIdx, matchIdx + searchQuery.length)}{capsule.id.slice(matchIdx + searchQuery.length)} {:else} - {capsule.id} + {capsule.id} {/if} +
@@ -488,7 +505,7 @@
-
+
{#if actionLoading === capsule.id} @@ -503,7 +520,16 @@ openMenuId = null; } else { const rect = (e.currentTarget as HTMLElement).getBoundingClientRect(); - menuPos = { top: rect.bottom + 4, left: rect.right - 180 }; + const menuW = 180; + const menuH = 140; // approximate max menu height + const vw = document.documentElement.clientWidth; + const top = rect.bottom + 4 + menuH > window.innerHeight + ? rect.top - menuH - 4 + : rect.bottom + 4; + // Anchor right edge of menu to right edge of button, clamped within viewport + const idealLeft = rect.right - menuW; + const left = Math.min(Math.max(8, idealLeft), vw - menuW - 8); + menuPos = { top, left }; openMenuId = capsule.id; } }} @@ -577,7 +603,7 @@ {/if}
- -
-
- - + { snapshotTarget = null; }} + onsnapshot={handleSnapshotDone} + /> {/if} - + {#if destroyTarget} -
- -
{ if (!destroying) destroyTarget = null; }} - onkeydown={(e) => { if (e.key === 'Escape' && !destroying) destroyTarget = null; }} - >
-
-

Destroy Capsule

-

- Terminate {destroyTarget.id} and destroy all data inside it. This cannot be undone. -

- - {#if destroyError} -
- {destroyError} -
- {/if} - -
- - -
-
-
+ { destroyTarget = null; }} + ondestroyed={handleDestroyed} + /> {/if} @@ -734,10 +644,10 @@ oncreated={handleCapsuleCreated} /> -{#snippet sortableHeader(label: string, key: SortKey)} +{#snippet sortableHeader(label: string, key: SortKey, align: 'left' | 'right' = 'left')} + {:else} + + {team.name} + + {/if} {/if} @@ -569,7 +560,7 @@

Members

@@ -717,6 +708,7 @@
-
+
+
+ + + + + All systems operational +
+
{#if openDropdownId} @@ -893,10 +891,10 @@

Add Member

@@ -1030,10 +1028,10 @@

Remove Member

@@ -1104,11 +1102,11 @@
{#if myRole === 'owner'}

Delete Team

@@ -1119,7 +1117,7 @@

{:else}

Leave Team

diff --git a/frontend/src/routes/dashboard/snapshots/+page.svelte b/frontend/src/routes/dashboard/templates/+page.svelte similarity index 82% rename from frontend/src/routes/dashboard/snapshots/+page.svelte rename to frontend/src/routes/dashboard/templates/+page.svelte index 2ae201a..f159ed1 100644 --- a/frontend/src/routes/dashboard/snapshots/+page.svelte +++ b/frontend/src/routes/dashboard/templates/+page.svelte @@ -1,5 +1,5 @@ + + + Wrenn — Reset password + + +
+
+ +
+ Wrenn + Wrenn +
+ + {#if submitted} +
+

Check your email

+

+ If an account exists for {email}, you'll receive a reset link shortly. The link expires in 15 minutes. +

+ + Back to sign in + +
+ {:else} +
+

Reset your password

+

+ Enter your email and we'll send you a reset link. +

+ + {#if error} +

{error}

+ {/if} + +
+
+ + +
+ + +
+ + + Back to sign in + +
+ {/if} +
+
diff --git a/frontend/src/routes/login/+page.svelte b/frontend/src/routes/login/+page.svelte index b3b76e8..18c55fa 100644 --- a/frontend/src/routes/login/+page.svelte +++ b/frontend/src/routes/login/+page.svelte @@ -1,6 +1,6 @@ + + + Wrenn — Set new password + + +
+
+ +
+ Wrenn + Wrenn +
+ + {#if done} +
+

All set

+

+ Your password has been updated. Sign in to continue. +

+ + Sign in + +
+ {:else} +
+

Set new password

+

Must be at least 8 characters.

+ + {#if error} +

{error}

+ {/if} + +
+
+
+ +
+ +
+ +
+
+ +
+ +
+ + +
+
+ {/if} + + + Back to sign in + +
+
diff --git a/frontend/static/logo.svg b/frontend/static/logo.svg index 26f2ab1..560d80f 100644 --- a/frontend/static/logo.svg +++ b/frontend/static/logo.svg @@ -1 +1,38 @@ - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index 350070b..76405cc 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -8,7 +8,8 @@ export default defineConfig({ proxy: { '/api': { target: 'http://localhost:8080', - rewrite: (path) => path.replace(/^\/api/, '') + rewrite: (path) => path.replace(/^\/api/, ''), + ws: true } } } diff --git a/go.mod b/go.mod index d37ace7..539abaa 100644 --- a/go.mod +++ b/go.mod @@ -9,14 +9,14 @@ require ( github.com/golang-jwt/jwt/v5 v5.3.1 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 - github.com/jackc/pgx/v5 v5.8.0 + github.com/jackc/pgx/v5 v5.9.1 github.com/joho/godotenv v1.5.1 github.com/redis/go-redis/v9 v9.18.0 - github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f - golang.org/x/crypto v0.49.0 + github.com/vishvananda/netlink v1.3.1 + github.com/vishvananda/netns v0.0.5 + golang.org/x/crypto v0.50.0 golang.org/x/oauth2 v0.36.0 - golang.org/x/sys v0.42.0 + golang.org/x/sys v0.43.0 google.golang.org/protobuf v1.36.11 ) @@ -31,5 +31,5 @@ require ( github.com/mattn/go-isatty v0.0.17 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/sync v0.20.0 // indirect - golang.org/x/text v0.35.0 // indirect + golang.org/x/text v0.36.0 // indirect ) diff --git a/go.sum b/go.sum index 27ca5de..1c183c4 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 
h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= -github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= @@ -65,32 +65,31 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= +github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= 
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= -golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= -golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= 
-golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= -golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= +golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/images/wrenn-init.sh b/images/wrenn-init.sh index d83be1c..8a9e22e 100644 --- a/images/wrenn-init.sh +++ b/images/wrenn-init.sh @@ -17,8 +17,9 @@ mkdir -p /sys/fs/cgroup mount -t cgroup2 cgroup2 /sys/fs/cgroup 2>/dev/null || true echo "+cpu +memory +io" > /sys/fs/cgroup/cgroup.subtree_control 2>/dev/null || true -# Set hostname +# Set hostname and make it resolvable (sudo requires this). hostname sandbox +echo "127.0.0.1 sandbox" >> /etc/hosts # Configure networking if the kernel ip= boot arg did not already set it up. if ! 
ip addr show eth0 2>/dev/null | grep -q "169.254.0.21"; then diff --git a/internal/api/agent_helper.go b/internal/api/agent_helper.go index e4a3545..6a7acf5 100644 --- a/internal/api/agent_helper.go +++ b/internal/api/agent_helper.go @@ -6,8 +6,8 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/hostagentv1connect" ) diff --git a/internal/api/handler_sandbox_proxy.go b/internal/api/handler_sandbox_proxy.go index 9abae06..5e3754d 100644 --- a/internal/api/handler_sandbox_proxy.go +++ b/internal/api/handler_sandbox_proxy.go @@ -17,10 +17,9 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" ) // Sentinel errors returned by proxyTarget, used to map to HTTP status codes @@ -44,7 +43,7 @@ func (e errProxySandboxNotRunning) Error() string { return fmt.Sprintf("sandbox is not running (status: %s)", e.status) } -// proxyCacheEntry caches the resolved agent URL for a (sandbox, team) pair. +// proxyCacheEntry caches the resolved agent URL for a sandbox. // The *httputil.ReverseProxy is built per-request (cheap) so the Director closure // can capture the correct port without the cache key needing to include it. type proxyCacheEntry struct { @@ -52,23 +51,13 @@ type proxyCacheEntry struct { expiresAt time.Time } -// proxyCacheKey is a fixed-size key from two UUIDs, avoids string allocation. 
-type proxyCacheKey [32]byte - -func makeProxyCacheKey(sandboxID, teamID pgtype.UUID) proxyCacheKey { - var k proxyCacheKey - copy(k[:16], sandboxID.Bytes[:]) - copy(k[16:], teamID.Bytes[:]) - return k -} - // SandboxProxyWrapper wraps an existing HTTP handler and intercepts requests // whose Host header matches the {port}-{sandbox_id}.{domain} pattern. Matching // requests are reverse-proxied through the host agent that owns the sandbox. // All other requests are passed through to the inner handler. // -// Authentication is via X-API-Key header only (no JWT). The API key's team -// must own the sandbox. +// No authentication is required — sandbox URLs are unguessable and access is +// scoped to the sandbox ID embedded in the hostname. type SandboxProxyWrapper struct { inner http.Handler db *db.Queries @@ -76,7 +65,7 @@ type SandboxProxyWrapper struct { transport http.RoundTripper cacheMu sync.Mutex - cache map[proxyCacheKey]proxyCacheEntry + cache map[pgtype.UUID]proxyCacheEntry } // NewSandboxProxyWrapper creates a new proxy wrapper. @@ -86,19 +75,15 @@ func NewSandboxProxyWrapper(inner http.Handler, queries *db.Queries, pool *lifec db: queries, pool: pool, transport: pool.Transport(), - cache: make(map[proxyCacheKey]proxyCacheEntry), + cache: make(map[pgtype.UUID]proxyCacheEntry), } } -// proxyTarget looks up the cached agent URL for (sandboxID, teamID). +// proxyTarget looks up the cached agent URL for sandboxID. // On a miss it queries the DB, resolves the address, and populates the cache. -// The *httputil.ReverseProxy is built by the caller so the Director closure -// captures the correct port without the cache key needing to include it. 
-func (h *SandboxProxyWrapper) proxyTarget(ctx context.Context, sandboxID, teamID pgtype.UUID) (*url.URL, error) { - cacheKey := makeProxyCacheKey(sandboxID, teamID) - +func (h *SandboxProxyWrapper) proxyTarget(ctx context.Context, sandboxID pgtype.UUID) (*url.URL, error) { h.cacheMu.Lock() - entry, ok := h.cache[cacheKey] + entry, ok := h.cache[sandboxID] h.cacheMu.Unlock() if ok && time.Now().Before(entry.expiresAt) { @@ -106,10 +91,7 @@ func (h *SandboxProxyWrapper) proxyTarget(ctx context.Context, sandboxID, teamID } // Cache miss or expired — query DB. - target, err := h.db.GetSandboxProxyTarget(ctx, db.GetSandboxProxyTargetParams{ - ID: sandboxID, - TeamID: teamID, - }) + target, err := h.db.GetSandboxProxyTarget(ctx, sandboxID) if err != nil { return nil, errProxySandboxNotFound } @@ -126,7 +108,7 @@ func (h *SandboxProxyWrapper) proxyTarget(ctx context.Context, sandboxID, teamID } h.cacheMu.Lock() - h.cache[cacheKey] = proxyCacheEntry{ + h.cache[sandboxID] = proxyCacheEntry{ agentURL: agentURL, expiresAt: time.Now().Add(proxyCacheTTL), } @@ -135,11 +117,11 @@ func (h *SandboxProxyWrapper) proxyTarget(ctx context.Context, sandboxID, teamID return agentURL, nil } -// evictProxyCache removes the cached entry for a (sandbox, team) pair. +// evictProxyCache removes the cached entry for a sandbox. // Called on 502 so a stopped/moved sandbox is re-resolved on the next request. -func (h *SandboxProxyWrapper) evictProxyCache(sandboxID, teamID pgtype.UUID) { +func (h *SandboxProxyWrapper) evictProxyCache(sandboxID pgtype.UUID) { h.cacheMu.Lock() - delete(h.cache, makeProxyCacheKey(sandboxID, teamID)) + delete(h.cache, sandboxID) h.cacheMu.Unlock() } @@ -166,20 +148,13 @@ func (h *SandboxProxyWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) return } - // Authenticate: require API key or JWT, extract team ID. 
- teamID, err := h.authenticateRequest(r) - if err != nil { - writeError(w, http.StatusUnauthorized, "unauthorized", err.Error()) - return - } - sandboxID, err := id.ParseSandboxID(sandboxIDStr) if err != nil { http.Error(w, "invalid sandbox ID", http.StatusBadRequest) return } - agentURL, err := h.proxyTarget(r.Context(), sandboxID, teamID) + agentURL, err := h.proxyTarget(r.Context(), sandboxID) if err != nil { switch { case errors.Is(err, errProxySandboxNotFound): @@ -206,25 +181,9 @@ func (h *SandboxProxyWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) "port", port, "error", err, ) - h.evictProxyCache(sandboxID, teamID) + h.evictProxyCache(sandboxID) http.Error(w, "proxy error: "+err.Error(), http.StatusBadGateway) }, } proxy.ServeHTTP(w, r) } - -// authenticateRequest validates the request's API key and returns the team ID. -// Only API key authentication is supported for sandbox proxy requests (not JWT). -func (h *SandboxProxyWrapper) authenticateRequest(r *http.Request) (pgtype.UUID, error) { - key := r.Header.Get("X-API-Key") - if key == "" { - return pgtype.UUID{}, fmt.Errorf("X-API-Key header required") - } - - hash := auth.HashAPIKey(key) - row, err := h.db.GetAPIKeyByHash(r.Context(), hash) - if err != nil { - return pgtype.UUID{}, fmt.Errorf("invalid API key") - } - return row.TeamID, nil -} diff --git a/internal/api/handlers_admin_capsules.go b/internal/api/handlers_admin_capsules.go new file mode 100644 index 0000000..13250e5 --- /dev/null +++ b/internal/api/handlers_admin_capsules.go @@ -0,0 +1,248 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "time" + + "connectrpc.com/connect" + "github.com/go-chi/chi/v5" + + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/service" + "git.omukk.dev/wrenn/wrenn/pkg/validate" + pb 
"git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" +) + +type adminCapsuleHandler struct { + svc *service.SandboxService + db *db.Queries + pool *lifecycle.HostClientPool + audit *audit.AuditLogger +} + +func newAdminCapsuleHandler(svc *service.SandboxService, db *db.Queries, pool *lifecycle.HostClientPool, al *audit.AuditLogger) *adminCapsuleHandler { + return &adminCapsuleHandler{svc: svc, db: db, pool: pool, audit: al} +} + +// Create handles POST /v1/admin/capsules. +func (h *adminCapsuleHandler) Create(w http.ResponseWriter, r *http.Request) { + var req createSandboxRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + ac := auth.MustFromContext(r.Context()) + + sb, err := h.svc.Create(r.Context(), service.SandboxCreateParams{ + TeamID: id.PlatformTeamID, + Template: req.Template, + VCPUs: req.VCPUs, + MemoryMB: req.MemoryMB, + TimeoutSec: req.TimeoutSec, + }) + if err != nil { + status, code, msg := serviceErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + h.audit.LogSandboxCreate(r.Context(), ac, sb.ID, sb.Template) + writeJSON(w, http.StatusCreated, sandboxToResponse(sb)) +} + +// List handles GET /v1/admin/capsules. +func (h *adminCapsuleHandler) List(w http.ResponseWriter, r *http.Request) { + sandboxes, err := h.svc.List(r.Context(), id.PlatformTeamID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to list sandboxes") + return + } + + resp := make([]sandboxResponse, len(sandboxes)) + for i, sb := range sandboxes { + resp[i] = sandboxToResponse(sb) + } + + writeJSON(w, http.StatusOK, resp) +} + +// Get handles GET /v1/admin/capsules/{id}. 
+func (h *adminCapsuleHandler) Get(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.svc.Get(r.Context(), sandboxID, id.PlatformTeamID) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + + writeJSON(w, http.StatusOK, sandboxToResponse(sb)) +} + +// Destroy handles DELETE /v1/admin/capsules/{id}. +func (h *adminCapsuleHandler) Destroy(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ac := auth.MustFromContext(r.Context()) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + if err := h.svc.Destroy(r.Context(), sandboxID, id.PlatformTeamID); err != nil { + status, code, msg := serviceErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + h.audit.LogSandboxDestroy(r.Context(), ac, sandboxID) + w.WriteHeader(http.StatusNoContent) +} + +type adminSnapshotRequest struct { + Name string `json:"name"` +} + +// Snapshot handles POST /v1/admin/capsules/{id}/snapshot. +// Pauses the capsule, takes a snapshot as a platform template, then destroys the capsule. 
+func (h *adminCapsuleHandler) Snapshot(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ac := auth.MustFromContext(r.Context()) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + var req adminSnapshotRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + if req.Name == "" { + req.Name = id.NewSnapshotName() + } + if err := validate.SafeName(req.Name); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", fmt.Sprintf("invalid snapshot name: %s", err)) + return + } + + ctx := r.Context() + + // Verify sandbox exists and belongs to platform team BEFORE any + // destructive operations (template overwrite). + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: id.PlatformTeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" && sb.Status != "paused" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox must be running or paused") + return + } + + // Check if name already exists as a platform template. + if existing, err := h.db.GetPlatformTemplateByName(ctx, req.Name); err == nil { + // Delete old snapshot files from all hosts before removing the DB record. 
+ if err := deleteSnapshotBroadcast(ctx, h.db, h.pool, existing.TeamID, existing.ID); err != nil { + writeError(w, http.StatusInternalServerError, "agent_error", "failed to delete existing snapshot files") + return + } + if err := h.db.DeleteTemplateByTeam(ctx, db.DeleteTemplateByTeamParams{Name: req.Name, TeamID: id.PlatformTeamID}); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to remove existing template record") + return + } + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + // Pre-mark sandbox as "paused" to prevent the reconciler from racing. + if sb.Status == "running" { + if _, err := h.db.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{ + ID: sandboxID, Status: "paused", + }); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to update sandbox status") + return + } + } + + // Use a detached context so the snapshot completes even if the client disconnects. + snapCtx, snapCancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer snapCancel() + + newTemplateID := id.NewTemplateID() + + resp, err := agent.CreateSnapshot(snapCtx, connect.NewRequest(&pb.CreateSnapshotRequest{ + SandboxId: sandboxIDStr, + Name: req.Name, + TeamId: formatUUIDForRPC(id.PlatformTeamID), + TemplateId: formatUUIDForRPC(newTemplateID), + })) + if err != nil { + // Snapshot failed — revert status. 
+ if sb.Status == "running" { + if _, dbErr := h.db.UpdateSandboxStatus(snapCtx, db.UpdateSandboxStatusParams{ + ID: sandboxID, Status: "running", + }); dbErr != nil { + slog.Error("failed to revert sandbox status after snapshot error", "sandbox_id", sandboxIDStr, "error", dbErr) + } + } + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + tmpl, err := h.db.InsertTemplate(snapCtx, db.InsertTemplateParams{ + ID: newTemplateID, + Name: req.Name, + Type: "snapshot", + Vcpus: sb.Vcpus, + MemoryMb: sb.MemoryMb, + SizeBytes: resp.Msg.SizeBytes, + TeamID: id.PlatformTeamID, + DefaultUser: "root", + DefaultEnv: []byte("{}"), + Metadata: sb.Metadata, + }) + if err != nil { + slog.Error("failed to insert template record", "name", req.Name, "error", err) + writeError(w, http.StatusInternalServerError, "db_error", "snapshot created but failed to record in database") + return + } + + // Destroy the ephemeral capsule after successful snapshot. + if err := h.svc.Destroy(snapCtx, sandboxID, id.PlatformTeamID); err != nil { + slog.Error("failed to destroy capsule after snapshot", "sandbox_id", sandboxIDStr, "error", err) + // Don't fail the response — the snapshot was created successfully. 
+ } + + h.audit.LogSnapshotCreate(snapCtx, ac, req.Name) + + if ctx.Err() != nil { + slog.Info("snapshot created but client disconnected before response", "name", req.Name) + return + } + writeJSON(w, http.StatusCreated, templateToResponse(tmpl)) +} diff --git a/internal/api/handlers_apikeys.go b/internal/api/handlers_apikeys.go index 440cb59..9fc315d 100644 --- a/internal/api/handlers_apikeys.go +++ b/internal/api/handlers_apikeys.go @@ -6,11 +6,11 @@ import ( "github.com/go-chi/chi/v5" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type apiKeyHandler struct { @@ -63,7 +63,7 @@ func apiKeyWithCreatorToResponse(k db.ListAPIKeysByTeamWithCreatorRow) apiKeyRes Name: k.Name, KeyPrefix: k.KeyPrefix, CreatedBy: id.FormatUserID(k.CreatedBy), - CreatorEmail: k.CreatorEmail, + CreatorEmail: k.CreatorEmail.String, } if k.CreatedAt.Valid { resp.CreatedAt = k.CreatedAt.Time.Format(time.RFC3339) diff --git a/internal/api/handlers_audit.go b/internal/api/handlers_audit.go index 66768ec..feaebb7 100644 --- a/internal/api/handlers_audit.go +++ b/internal/api/handlers_audit.go @@ -8,9 +8,9 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type auditHandler struct { diff --git a/internal/api/handlers_auth.go b/internal/api/handlers_auth.go index 4c24b6e..1768606 100644 --- a/internal/api/handlers_auth.go +++ b/internal/api/handlers_auth.go @@ -2,19 +2,32 @@ package api import 
( "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" "errors" + "fmt" "log/slog" "net/http" "strings" + "time" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" + "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" +) + +const ( + activationKeyPrefix = "wrenn:activation:" + activationTTL = 30 * time.Minute + signupCooldown = 30 * time.Minute ) // loginTeam returns the team and role to stamp into a login JWT. @@ -52,18 +65,89 @@ func loginTeam(ctx context.Context, q *db.Queries, userID pgtype.UUID) (db.Team, }, first.Role, nil } +// ensureDefaultTeam creates a default team for a user if they have none. +// This happens on first login after activation or for edge cases where a user +// has no teams. Returns the team, role, and whether the user was set as admin. +func ensureDefaultTeam(ctx context.Context, qtx *db.Queries, pool *pgxpool.Pool, userID pgtype.UUID, userName string) (db.Team, string, bool, error) { + // Try existing teams first. + team, role, err := loginTeam(ctx, qtx, userID) + if err == nil { + return team, role, false, nil + } + if !errors.Is(err, pgx.ErrNoRows) { + return db.Team{}, "", false, err + } + + // No teams — create default team in a transaction. + tx, err := pool.Begin(ctx) + if err != nil { + return db.Team{}, "", false, fmt.Errorf("begin tx: %w", err) + } + defer tx.Rollback(ctx) //nolint:errcheck + + txq := qtx.WithTx(tx) + + // First active user to have a team becomes admin. 
+ activeCount, err := txq.CountActiveUsers(ctx) + if err != nil { + return db.Team{}, "", false, fmt.Errorf("count active users: %w", err) + } + isFirstUser := activeCount == 1 // only this user is active + + teamID := id.NewTeamID() + teamRow, err := txq.InsertTeam(ctx, db.InsertTeamParams{ + ID: teamID, + Name: userName + "'s Team", + Slug: id.NewTeamSlug(), + }) + if err != nil { + return db.Team{}, "", false, fmt.Errorf("insert team: %w", err) + } + + if err := txq.InsertTeamMember(ctx, db.InsertTeamMemberParams{ + UserID: userID, + TeamID: teamID, + IsDefault: true, + Role: "owner", + }); err != nil { + return db.Team{}, "", false, fmt.Errorf("insert team member: %w", err) + } + + if isFirstUser { + if err := txq.SetUserAdmin(ctx, db.SetUserAdminParams{ID: userID, IsAdmin: true}); err != nil { + return db.Team{}, "", false, fmt.Errorf("set admin: %w", err) + } + } + + if err := tx.Commit(ctx); err != nil { + return db.Team{}, "", false, fmt.Errorf("commit: %w", err) + } + + return db.Team{ + ID: teamRow.ID, + Name: teamRow.Name, + Slug: teamRow.Slug, + IsByoc: teamRow.IsByoc, + CreatedAt: teamRow.CreatedAt, + DeletedAt: teamRow.DeletedAt, + }, "owner", isFirstUser, nil +} + type switchTeamRequest struct { TeamID string `json:"team_id"` } type authHandler struct { - db *db.Queries - pool *pgxpool.Pool - jwtSecret []byte + db *db.Queries + pool *pgxpool.Pool + jwtSecret []byte + mailer email.Mailer + rdb *redis.Client + redirectURL string } -func newAuthHandler(db *db.Queries, pool *pgxpool.Pool, jwtSecret []byte) *authHandler { - return &authHandler{db: db, pool: pool, jwtSecret: jwtSecret} +func newAuthHandler(db *db.Queries, pool *pgxpool.Pool, jwtSecret []byte, mailer email.Mailer, rdb *redis.Client, redirectURL string) *authHandler { + return &authHandler{db: db, pool: pool, jwtSecret: jwtSecret, mailer: mailer, rdb: rdb, redirectURL: strings.TrimRight(redirectURL, "/")} } type signupRequest struct { @@ -77,6 +161,10 @@ type loginRequest struct { Password 
string `json:"password"` } +type activateRequest struct { + Token string `json:"token"` +} + type authResponse struct { Token string `json:"token"` UserID string `json:"user_id"` @@ -85,6 +173,10 @@ type authResponse struct { Name string `json:"name"` } +type signupResponse struct { + Message string `json:"message"` +} + // Signup handles POST /v1/auth/signup. func (h *authHandler) Signup(w http.ResponseWriter, r *http.Request) { var req signupRequest @@ -110,24 +202,41 @@ func (h *authHandler) Signup(w http.ResponseWriter, r *http.Request) { ctx := r.Context() + // Check for existing user with this email. + existing, err := h.db.GetUserByEmail(ctx, req.Email) + if err == nil { + // User exists — decide what to do based on status. + switch existing.Status { + case "inactive": + // Unactivated user — allow re-signup after cooldown. + if time.Since(existing.CreatedAt.Time) < signupCooldown { + writeError(w, http.StatusConflict, "signup_cooldown", + "an activation email was recently sent to this address — please check your inbox or try again later") + return + } + // Cooldown passed — delete the old row and proceed with fresh signup. + if err := h.db.HardDeleteUser(ctx, existing.ID); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to clean up previous signup") + return + } + default: + // active, disabled, deleted — email is taken. + writeError(w, http.StatusConflict, "email_taken", "an account with this email already exists") + return + } + } else if !errors.Is(err, pgx.ErrNoRows) { + writeError(w, http.StatusInternalServerError, "db_error", "failed to look up user") + return + } + passwordHash, err := auth.HashPassword(req.Password) if err != nil { writeError(w, http.StatusInternalServerError, "internal_error", "failed to hash password") return } - // Use a transaction to atomically create user + team + membership. 
- tx, err := h.pool.Begin(ctx) - if err != nil { - writeError(w, http.StatusInternalServerError, "db_error", "failed to begin transaction") - return - } - defer tx.Rollback(ctx) //nolint:errcheck - - qtx := h.db.WithTx(tx) - userID := id.NewUserID() - _, err = qtx.InsertUser(ctx, db.InsertUserParams{ + _, err = h.db.InsertUserInactive(ctx, db.InsertUserInactiveParams{ ID: userID, Email: req.Email, PasswordHash: pgtype.Text{String: passwordHash, Valid: true}, @@ -143,44 +252,111 @@ func (h *authHandler) Signup(w http.ResponseWriter, r *http.Request) { return } - // Create default team. - teamID := id.NewTeamID() - if _, err := qtx.InsertTeam(ctx, db.InsertTeamParams{ - ID: teamID, - Name: req.Name + "'s Team", - Slug: id.NewTeamSlug(), + // Generate activation token and store in Redis. + rawToken := generateActivationToken() + tokenHash := hashActivationToken(rawToken) + redisKey := activationKeyPrefix + tokenHash + + if err := h.rdb.Set(ctx, redisKey, id.FormatUserID(userID), activationTTL).Err(); err != nil { + slog.Error("signup: failed to store activation token in redis", "error", err) + writeError(w, http.StatusInternalServerError, "internal_error", "failed to create activation token") + return + } + + activateURL := h.redirectURL + "/activate?token=" + rawToken + go func() { + sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := h.mailer.Send(sendCtx, req.Email, "Activate your Wrenn account", email.EmailData{ + RecipientName: req.Name, + Message: "Welcome to Wrenn! Click the button below to activate your account. This link expires in 30 minutes.", + Button: &email.Button{Text: "Activate Account", URL: activateURL}, + Closing: "If you didn't create this account, you can safely ignore this email.", + }); err != nil { + slog.Warn("signup: failed to send activation email", "email", req.Email, "error", err) + } + }() + + writeJSON(w, http.StatusCreated, signupResponse{ + Message: "Account created. 
Please check your email to activate your account.", + }) +} + +// Activate handles POST /v1/auth/activate. +func (h *authHandler) Activate(w http.ResponseWriter, r *http.Request) { + var req activateRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + if req.Token == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "token is required") + return + } + + ctx := r.Context() + tokenHash := hashActivationToken(req.Token) + redisKey := activationKeyPrefix + tokenHash + + userIDStr, err := h.rdb.GetDel(ctx, redisKey).Result() + if errors.Is(err, redis.Nil) { + writeError(w, http.StatusBadRequest, "invalid_token", "activation link is invalid or has expired") + return + } + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to verify token") + return + } + + userID, err := id.ParseUserID(userIDStr) + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "invalid stored user ID") + return + } + + user, err := h.db.GetUserByID(ctx, userID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + if user.Status != "inactive" { + writeError(w, http.StatusBadRequest, "already_activated", "this account has already been activated") + return + } + + // Activate the user. 
+ if err := h.db.SetUserStatus(ctx, db.SetUserStatusParams{ + ID: userID, + Status: "active", }); err != nil { - writeError(w, http.StatusInternalServerError, "db_error", "failed to create team") + slog.Error("activate: failed to set user status", "user_id", id.FormatUserID(userID), "error", err) + writeError(w, http.StatusInternalServerError, "db_error", "failed to activate user") return } - if err := qtx.InsertTeamMember(ctx, db.InsertTeamMemberParams{ - UserID: userID, - TeamID: teamID, - IsDefault: true, - Role: "owner", - }); err != nil { - writeError(w, http.StatusInternalServerError, "db_error", "failed to add user to team") + // Create default team and log them in. + team, role, isFirstUser, err := ensureDefaultTeam(ctx, h.db, h.pool, userID, user.Name) + if err != nil { + slog.Error("activate: failed to create default team", "error", err) + writeError(w, http.StatusInternalServerError, "db_error", "failed to set up account") return } - if err := tx.Commit(ctx); err != nil { - writeError(w, http.StatusInternalServerError, "db_error", "failed to commit signup") - return - } - - token, err := auth.SignJWT(h.jwtSecret, userID, teamID, req.Email, req.Name, "owner", false) + isAdmin := user.IsAdmin || isFirstUser + token, err := auth.SignJWT(h.jwtSecret, userID, team.ID, user.Email, user.Name, role, isAdmin) if err != nil { writeError(w, http.StatusInternalServerError, "internal_error", "failed to generate token") return } - writeJSON(w, http.StatusCreated, authResponse{ + writeJSON(w, http.StatusOK, authResponse{ Token: token, UserID: id.FormatUserID(userID), - TeamID: id.FormatTeamID(teamID), - Email: req.Email, - Name: req.Name, + TeamID: id.FormatTeamID(team.ID), + Email: user.Email, + Name: user.Name, }) } @@ -222,17 +398,36 @@ func (h *authHandler) Login(w http.ResponseWriter, r *http.Request) { return } - team, role, err := loginTeam(ctx, h.db, user.ID) + switch user.Status { + case "active": + // OK — proceed. 
+ case "inactive": + slog.Warn("login failed: account not activated", "email", req.Email, "ip", r.RemoteAddr) + writeError(w, http.StatusForbidden, "account_not_activated", "please check your email and activate your account before signing in") + return + case "disabled": + slog.Warn("login failed: account disabled", "email", req.Email, "ip", r.RemoteAddr) + writeError(w, http.StatusForbidden, "account_disabled", "your account has been deactivated — contact your administrator to regain access") + return + case "deleted": + slog.Warn("login failed: account deleted", "email", req.Email, "ip", r.RemoteAddr) + writeError(w, http.StatusUnauthorized, "unauthorized", "invalid email or password") + return + default: + writeError(w, http.StatusUnauthorized, "unauthorized", "invalid email or password") + return + } + + // Ensure user has a default team (creates one on first login after activation). + team, role, isFirstUser, err := ensureDefaultTeam(ctx, h.db, h.pool, user.ID, user.Name) if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - writeError(w, http.StatusForbidden, "no_team", "user is not a member of any team") - return - } + slog.Error("login: failed to ensure default team", "error", err) writeError(w, http.StatusInternalServerError, "db_error", "failed to look up team") return } - token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, user.IsAdmin) + isAdmin := user.IsAdmin || isFirstUser + token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, isAdmin) if err != nil { writeError(w, http.StatusInternalServerError, "internal_error", "failed to generate token") return @@ -322,3 +517,18 @@ func (h *authHandler) SwitchTeam(w http.ResponseWriter, r *http.Request) { Name: user.Name, }) } + +// --- helpers --- + +func generateActivationToken() string { + b := make([]byte, 16) + if _, err := rand.Read(b); err != nil { + panic(fmt.Sprintf("crypto/rand failed: %v", err)) + } + return hex.EncodeToString(b) +} 
+ +func hashActivationToken(raw string) string { + h := sha256.Sum256([]byte(raw)) + return hex.EncodeToString(h[:]) +} diff --git a/internal/api/handlers_builds.go b/internal/api/handlers_builds.go index bd3260e..5228420 100644 --- a/internal/api/handlers_builds.go +++ b/internal/api/handlers_builds.go @@ -3,19 +3,21 @@ package api import ( "encoding/json" "fmt" + "io" "log/slog" "net/http" + "strings" "time" "connectrpc.com/connect" "github.com/go-chi/chi/v5" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/layout" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" - "git.omukk.dev/wrenn/wrenn/internal/service" - "git.omukk.dev/wrenn/wrenn/internal/validate" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/service" + "git.omukk.dev/wrenn/wrenn/pkg/validate" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -54,6 +56,8 @@ type buildResponse struct { Error *string `json:"error,omitempty"` SandboxID *string `json:"sandbox_id,omitempty"` HostID *string `json:"host_id,omitempty"` + DefaultUser string `json:"default_user"` + DefaultEnv json.RawMessage `json:"default_env"` CreatedAt string `json:"created_at"` StartedAt *string `json:"started_at,omitempty"` CompletedAt *string `json:"completed_at,omitempty"` @@ -71,6 +75,8 @@ func buildToResponse(b db.TemplateBuild) buildResponse { CurrentStep: b.CurrentStep, TotalSteps: b.TotalSteps, Logs: b.Logs, + DefaultUser: b.DefaultUser, + DefaultEnv: b.DefaultEnv, } if b.Healthcheck != "" { resp.Healthcheck = &b.Healthcheck @@ -101,11 +107,54 @@ func buildToResponse(b db.TemplateBuild) buildResponse { } // Create handles POST /v1/admin/builds. +// Accepts either JSON body or multipart/form-data with a "config" JSON part +// and an optional "archive" file part (tar/tar.gz/zip for COPY commands). 
func (h *buildHandler) Create(w http.ResponseWriter, r *http.Request) { var req createBuildRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") - return + var archive []byte + var archiveName string + + ct := r.Header.Get("Content-Type") + if strings.HasPrefix(ct, "multipart/") { + // 100 MB max for multipart (archive + JSON config). + if err := r.ParseMultipartForm(100 << 20); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "failed to parse multipart form") + return + } + + // Parse JSON config from "config" field. + configStr := r.FormValue("config") + if configStr == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "multipart form requires a 'config' JSON field") + return + } + if err := json.Unmarshal([]byte(configStr), &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid config JSON in multipart form") + return + } + + // Read optional archive file (max 100 MB). 
+ file, header, err := r.FormFile("archive") + if err == nil { + defer file.Close() + const maxArchiveSize = 100 << 20 // 100 MB + lr := io.LimitReader(file, maxArchiveSize+1) + archive, err = io.ReadAll(lr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "failed to read archive file") + return + } + if int64(len(archive)) > maxArchiveSize { + writeError(w, http.StatusRequestEntityTooLarge, "invalid_request", "archive exceeds 100 MB limit") + return + } + archiveName = header.Filename + } + } else { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } } if req.Name == "" { @@ -129,6 +178,8 @@ func (h *buildHandler) Create(w http.ResponseWriter, r *http.Request) { VCPUs: req.VCPUs, MemoryMB: req.MemoryMB, SkipPrePost: req.SkipPrePost, + Archive: archive, + ArchiveName: archiveName, }) if err != nil { slog.Error("failed to create build", "error", err) diff --git a/internal/api/handlers_channels.go b/internal/api/handlers_channels.go index 9da20e0..a221e31 100644 --- a/internal/api/handlers_channels.go +++ b/internal/api/handlers_channels.go @@ -8,11 +8,11 @@ import ( "github.com/go-chi/chi/v5" "github.com/jackc/pgx/v5" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/channels" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/channels" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) type channelHandler struct { diff --git a/internal/api/handlers_exec.go b/internal/api/handlers_exec.go index 87abf07..8e94da7 100644 --- a/internal/api/handlers_exec.go +++ b/internal/api/handlers_exec.go @@ -12,10 +12,10 @@ import ( "github.com/go-chi/chi/v5" "github.com/jackc/pgx/v5/pgtype" - 
"git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -29,9 +29,13 @@ func newExecHandler(db *db.Queries, pool *lifecycle.HostClientPool) *execHandler } type execRequest struct { - Cmd string `json:"cmd"` - Args []string `json:"args"` - TimeoutSec int32 `json:"timeout_sec"` + Cmd string `json:"cmd"` + Args []string `json:"args"` + TimeoutSec int32 `json:"timeout_sec"` + Background bool `json:"background"` + Tag string `json:"tag"` + Envs map[string]string `json:"envs"` + Cwd string `json:"cwd"` } type execResponse struct { @@ -45,7 +49,14 @@ type execResponse struct { Encoding string `json:"encoding"` } -// Exec handles POST /v1/sandboxes/{id}/exec. +type backgroundExecResponse struct { + SandboxID string `json:"sandbox_id"` + Cmd string `json:"cmd"` + PID uint32 `json:"pid"` + Tag string `json:"tag"` +} + +// Exec handles POST /v1/capsules/{id}/exec. func (h *execHandler) Exec(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ctx := r.Context() @@ -78,14 +89,54 @@ func (h *execHandler) Exec(w http.ResponseWriter, r *http.Request) { return } - start := time.Now() - agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) if err != nil { writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") return } + // Background mode: start process and return immediately. 
+ if req.Background { + tag := req.Tag + if tag == "" { + tag = "proc-" + id.NewPtyTag() + } + + bgResp, err := agent.StartBackground(ctx, connect.NewRequest(&pb.StartBackgroundRequest{ + SandboxId: sandboxIDStr, + Tag: tag, + Cmd: req.Cmd, + Args: req.Args, + Envs: req.Envs, + Cwd: req.Cwd, + })) + if err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + if err := h.db.UpdateLastActive(ctx, db.UpdateLastActiveParams{ + ID: sandboxID, + LastActiveAt: pgtype.Timestamptz{ + Time: time.Now(), + Valid: true, + }, + }); err != nil { + slog.Warn("failed to update last_active_at", "id", sandboxIDStr, "error", err) + } + + writeJSON(w, http.StatusAccepted, backgroundExecResponse{ + SandboxID: sandboxIDStr, + Cmd: req.Cmd, + PID: bgResp.Msg.Pid, + Tag: bgResp.Msg.Tag, + }) + return + } + + start := time.Now() + resp, err := agent.Exec(ctx, connect.NewRequest(&pb.ExecRequest{ SandboxId: sandboxIDStr, Cmd: req.Cmd, diff --git a/internal/api/handlers_exec_stream.go b/internal/api/handlers_exec_stream.go index 137885c..c8b101f 100644 --- a/internal/api/handlers_exec_stream.go +++ b/internal/api/handlers_exec_stream.go @@ -12,20 +12,21 @@ import ( "github.com/gorilla/websocket" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) type execStreamHandler struct { - db *db.Queries - pool *lifecycle.HostClientPool + db *db.Queries + pool *lifecycle.HostClientPool + jwtSecret []byte } -func newExecStreamHandler(db *db.Queries, pool *lifecycle.HostClientPool) *execStreamHandler { - return &execStreamHandler{db: db, pool: pool} +func newExecStreamHandler(db *db.Queries, 
pool *lifecycle.HostClientPool, jwtSecret []byte) *execStreamHandler { + return &execStreamHandler{db: db, pool: pool, jwtSecret: jwtSecret} } var upgrader = websocket.Upgrader{ @@ -47,11 +48,10 @@ type wsOutMsg struct { ExitCode *int32 `json:"exit_code,omitempty"` // only for "exit" } -// ExecStream handles WS /v1/sandboxes/{id}/exec/stream. +// ExecStream handles WS /v1/capsules/{id}/exec/stream. func (h *execStreamHandler) ExecStream(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ctx := r.Context() - ac := auth.MustFromContext(ctx) sandboxID, err := id.ParseSandboxID(sandboxIDStr) if err != nil { @@ -59,13 +59,31 @@ func (h *execStreamHandler) ExecStream(w http.ResponseWriter, r *http.Request) { return } - sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) - if err != nil { - writeError(w, http.StatusNotFound, "not_found", "sandbox not found") - return - } - if sb.Status != "running" { - writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running (status: "+sb.Status+")") + // Authenticate: use context from middleware (API key) or WS first message (JWT). 
+ ac, hasAuth := auth.FromContext(ctx) + + if !hasAuth { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + slog.Error("websocket upgrade failed", "error", err) + return + } + defer conn.Close() + + var wsAC auth.AuthContext + var authErr error + if isAdminWSRoute(ctx) { + wsAC, authErr = wsAuthenticateAdmin(ctx, conn, h.jwtSecret, h.db) + } else { + wsAC, authErr = wsAuthenticate(ctx, conn, h.jwtSecret, h.db) + } + if authErr != nil { + sendWSError(conn, "authentication failed") + return + } + ac = wsAC + + h.runExecStream(ctx, conn, ac, sandboxID, sandboxIDStr) return } @@ -76,6 +94,20 @@ func (h *execStreamHandler) ExecStream(w http.ResponseWriter, r *http.Request) { } defer conn.Close() + h.runExecStream(ctx, conn, ac, sandboxID, sandboxIDStr) +} + +func (h *execStreamHandler) runExecStream(ctx context.Context, conn *websocket.Conn, ac auth.AuthContext, sandboxID pgtype.UUID, sandboxIDStr string) { + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + sendWSError(conn, "sandbox not found") + return + } + if sb.Status != "running" { + sendWSError(conn, "sandbox is not running (status: "+sb.Status+")") + return + } + // Read the start message. 
var startMsg wsStartMsg if err := conn.ReadJSON(&startMsg); err != nil { diff --git a/internal/api/handlers_files.go b/internal/api/handlers_files.go index 5dd2006..f69c8f1 100644 --- a/internal/api/handlers_files.go +++ b/internal/api/handlers_files.go @@ -9,10 +9,10 @@ import ( "connectrpc.com/connect" "github.com/go-chi/chi/v5" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -25,7 +25,7 @@ func newFilesHandler(db *db.Queries, pool *lifecycle.HostClientPool) *filesHandl return &filesHandler{db: db, pool: pool} } -// Upload handles POST /v1/sandboxes/{id}/files/write. +// Upload handles POST /v1/capsules/{id}/files/write. // Expects multipart/form-data with: // - "path" text field: absolute destination path inside the sandbox // - "file" file field: binary content to write @@ -105,7 +105,7 @@ type readFileRequest struct { Path string `json:"path"` } -// Download handles POST /v1/sandboxes/{id}/files/read. +// Download handles POST /v1/capsules/{id}/files/read. // Accepts JSON body with path, returns raw file content with Content-Disposition. 
func (h *filesHandler) Download(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") diff --git a/internal/api/handlers_files_stream.go b/internal/api/handlers_files_stream.go index 99066b4..88377ae 100644 --- a/internal/api/handlers_files_stream.go +++ b/internal/api/handlers_files_stream.go @@ -10,10 +10,10 @@ import ( "connectrpc.com/connect" "github.com/go-chi/chi/v5" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -26,7 +26,7 @@ func newFilesStreamHandler(db *db.Queries, pool *lifecycle.HostClientPool) *file return &filesStreamHandler{db: db, pool: pool} } -// StreamUpload handles POST /v1/sandboxes/{id}/files/stream/write. +// StreamUpload handles POST /v1/capsules/{id}/files/stream/write. // Expects multipart/form-data with "path" text field and "file" file field. // Streams file content directly from the request body to the host agent without buffering. func (h *filesStreamHandler) StreamUpload(w http.ResponseWriter, r *http.Request) { @@ -150,7 +150,7 @@ func (h *filesStreamHandler) StreamUpload(w http.ResponseWriter, r *http.Request w.WriteHeader(http.StatusNoContent) } -// StreamDownload handles POST /v1/sandboxes/{id}/files/stream/read. +// StreamDownload handles POST /v1/capsules/{id}/files/stream/read. // Accepts JSON body with path, streams file content back without buffering. 
func (h *filesStreamHandler) StreamDownload(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") diff --git a/internal/api/handlers_fs.go b/internal/api/handlers_fs.go new file mode 100644 index 0000000..cfdd6a7 --- /dev/null +++ b/internal/api/handlers_fs.go @@ -0,0 +1,236 @@ +package api + +import ( + "net/http" + + "connectrpc.com/connect" + "github.com/go-chi/chi/v5" + + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" +) + +type fsHandler struct { + db *db.Queries + pool *lifecycle.HostClientPool +} + +func newFSHandler(db *db.Queries, pool *lifecycle.HostClientPool) *fsHandler { + return &fsHandler{db: db, pool: pool} +} + +type listDirRequest struct { + Path string `json:"path"` + Depth uint32 `json:"depth"` +} + +type fileEntryResponse struct { + Name string `json:"name"` + Path string `json:"path"` + Type string `json:"type"` + Size int64 `json:"size"` + Mode uint32 `json:"mode"` + Permissions string `json:"permissions"` + Owner string `json:"owner"` + Group string `json:"group"` + ModifiedAt int64 `json:"modified_at"` + SymlinkTarget *string `json:"symlink_target,omitempty"` +} + +type listDirResponse struct { + Entries []fileEntryResponse `json:"entries"` +} + +type makeDirRequest struct { + Path string `json:"path"` +} + +type makeDirResponse struct { + Entry fileEntryResponse `json:"entry"` +} + +type removeRequest struct { + Path string `json:"path"` +} + +// ListDir handles POST /v1/capsules/{id}/files/list. 
+func (h *fsHandler) ListDir(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ctx := r.Context() + ac := auth.MustFromContext(ctx) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running") + return + } + + var req listDirRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + if req.Path == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "path is required") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + resp, err := agent.ListDir(ctx, connect.NewRequest(&pb.ListDirRequest{ + SandboxId: sandboxIDStr, + Path: req.Path, + Depth: req.Depth, + })) + if err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + entries := make([]fileEntryResponse, 0, len(resp.Msg.Entries)) + for _, e := range resp.Msg.Entries { + entries = append(entries, fileEntryFromPB(e)) + } + + writeJSON(w, http.StatusOK, listDirResponse{Entries: entries}) +} + +// MakeDir handles POST /v1/capsules/{id}/files/mkdir. 
+func (h *fsHandler) MakeDir(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ctx := r.Context() + ac := auth.MustFromContext(ctx) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running") + return + } + + var req makeDirRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + if req.Path == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "path is required") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + resp, err := agent.MakeDir(ctx, connect.NewRequest(&pb.MakeDirRequest{ + SandboxId: sandboxIDStr, + Path: req.Path, + })) + if err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + writeJSON(w, http.StatusOK, makeDirResponse{Entry: fileEntryFromPB(resp.Msg.Entry)}) +} + +// Remove handles POST /v1/capsules/{id}/files/remove. 
+func (h *fsHandler) Remove(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ctx := r.Context() + ac := auth.MustFromContext(ctx) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running") + return + } + + var req removeRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + if req.Path == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "path is required") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + if _, err := agent.RemovePath(ctx, connect.NewRequest(&pb.RemovePathRequest{ + SandboxId: sandboxIDStr, + Path: req.Path, + })); err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +func fileEntryFromPB(e *pb.FileEntry) fileEntryResponse { + if e == nil { + return fileEntryResponse{} + } + resp := fileEntryResponse{ + Name: e.Name, + Path: e.Path, + Type: e.Type, + Size: e.Size, + Mode: e.Mode, + Permissions: e.Permissions, + Owner: e.Owner, + Group: e.Group, + ModifiedAt: e.ModifiedAt, + } + if e.SymlinkTarget != nil { + resp.SymlinkTarget = e.SymlinkTarget + } + return resp +} diff --git a/internal/api/handlers_hosts.go b/internal/api/handlers_hosts.go index 51aa833..9536197 100644 --- a/internal/api/handlers_hosts.go +++ b/internal/api/handlers_hosts.go @@ 
-10,11 +10,11 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type hostHandler struct { diff --git a/internal/api/handlers_me.go b/internal/api/handlers_me.go new file mode 100644 index 0000000..15aa0bb --- /dev/null +++ b/internal/api/handlers_me.go @@ -0,0 +1,585 @@ +package api + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "log/slog" + "net/http" + "strings" + "time" + + "github.com/go-chi/chi/v5" + "github.com/jackc/pgx/v5/pgtype" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/redis/go-redis/v9" + + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/auth/oauth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" +) + +const ( + passwordResetKeyPrefix = "wrenn:password_reset:" + passwordResetTTL = 15 * time.Minute +) + +type meHandler struct { + db *db.Queries + pool *pgxpool.Pool + rdb *redis.Client + jwtSecret []byte + mailer email.Mailer + oauthRegistry *oauth.Registry + redirectURL string + teamSvc *service.TeamService +} + +func newMeHandler( + db *db.Queries, + pool *pgxpool.Pool, + rdb *redis.Client, + jwtSecret []byte, + mailer email.Mailer, + registry *oauth.Registry, + redirectURL string, + teamSvc *service.TeamService, +) *meHandler { + return &meHandler{ + db: db, + pool: pool, + rdb: rdb, + jwtSecret: jwtSecret, + mailer: mailer, + oauthRegistry: registry, + redirectURL: strings.TrimRight(redirectURL, "/"), + teamSvc: teamSvc, + } +} + +type meResponse struct { + 
Name string `json:"name"` + Email string `json:"email"` + HasPassword bool `json:"has_password"` + Providers []string `json:"providers"` +} + +type updateNameRequest struct { + Name string `json:"name"` +} + +type changePasswordRequest struct { + CurrentPassword string `json:"current_password"` + NewPassword string `json:"new_password"` + ConfirmPassword string `json:"confirm_password"` +} + +type requestPasswordResetRequest struct { + Email string `json:"email"` +} + +type confirmPasswordResetRequest struct { + Token string `json:"token"` + NewPassword string `json:"new_password"` +} + +type deleteAccountRequest struct { + Confirmation string `json:"confirmation"` +} + +// GetMe handles GET /v1/me. +func (h *meHandler) GetMe(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + ctx := r.Context() + + user, err := h.db.GetUserByID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + providers, err := h.db.GetOAuthProvidersByUserID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get providers") + return + } + + providerNames := make([]string, 0, len(providers)) + for _, p := range providers { + providerNames = append(providerNames, p.Provider) + } + + writeJSON(w, http.StatusOK, meResponse{ + Name: user.Name, + Email: user.Email, + HasPassword: user.PasswordHash.Valid, + Providers: providerNames, + }) +} + +// UpdateName handles PATCH /v1/me — updates the user's name and re-issues a JWT. 
+func (h *meHandler) UpdateName(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + ctx := r.Context() + + var req updateNameRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + req.Name = strings.TrimSpace(req.Name) + if req.Name == "" || len(req.Name) > 100 { + writeError(w, http.StatusBadRequest, "invalid_request", "name must be between 1 and 100 characters") + return + } + + if err := h.db.UpdateUserName(ctx, db.UpdateUserNameParams{ + ID: ac.UserID, + Name: req.Name, + }); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to update name") + return + } + + user, err := h.db.GetUserByID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + team, role, err := loginTeam(ctx, h.db, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get team") + return + } + + token, err := auth.SignJWT(h.jwtSecret, ac.UserID, team.ID, user.Email, req.Name, role, user.IsAdmin) + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to generate token") + return + } + + writeJSON(w, http.StatusOK, authResponse{ + Token: token, + UserID: id.FormatUserID(ac.UserID), + TeamID: id.FormatTeamID(team.ID), + Email: user.Email, + Name: req.Name, + }) +} + +// ChangePassword handles POST /v1/me/password. +// For users with a password: requires current_password + new_password. +// For OAuth-only users: requires new_password + confirm_password. 
+func (h *meHandler) ChangePassword(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + ctx := r.Context() + + var req changePasswordRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + user, err := h.db.GetUserByID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + if user.PasswordHash.Valid { + // Changing existing password — verify current. + if req.CurrentPassword == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "current_password is required") + return + } + if err := auth.CheckPassword(user.PasswordHash.String, req.CurrentPassword); err != nil { + writeError(w, http.StatusUnauthorized, "wrong_password", "current password is incorrect") + return + } + } else { + // OAuth user adding a password — confirm must match. + if req.ConfirmPassword == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "confirm_password is required") + return + } + if req.NewPassword != req.ConfirmPassword { + writeError(w, http.StatusBadRequest, "invalid_request", "passwords do not match") + return + } + } + + if len(req.NewPassword) < 8 { + writeError(w, http.StatusBadRequest, "invalid_request", "password must be at least 8 characters") + return + } + + hash, err := auth.HashPassword(req.NewPassword) + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to hash password") + return + } + + if err := h.db.UpdateUserPassword(ctx, db.UpdateUserPasswordParams{ + ID: ac.UserID, + PasswordHash: pgtype.Text{String: hash, Valid: true}, + }); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to update password") + return + } + + isAdding := !user.PasswordHash.Valid + go func() { + sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + subject, 
message := "Your Wrenn password was changed", "Your account password was successfully updated. If you did not make this change, reset your password immediately." + if isAdding { + subject = "Password added to your Wrenn account" + message = "A password has been added to your Wrenn account. You can now sign in with your email and password in addition to any connected OAuth providers." + } + if err := h.mailer.Send(sendCtx, user.Email, subject, email.EmailData{ + RecipientName: user.Name, + Message: message, + Closing: "If you didn't make this change, contact support immediately.", + }); err != nil { + slog.Warn("change password: failed to send notification", "email", user.Email, "error", err) + } + }() + + w.WriteHeader(http.StatusNoContent) +} + +// RequestPasswordReset handles POST /v1/me/password/reset (unauthenticated). +// Always returns 200 to avoid leaking account existence. +func (h *meHandler) RequestPasswordReset(w http.ResponseWriter, r *http.Request) { + var req requestPasswordResetRequest + if err := decodeJSON(r, &req); err != nil { + w.WriteHeader(http.StatusNoContent) + return + } + + req.Email = strings.TrimSpace(strings.ToLower(req.Email)) + if req.Email == "" { + w.WriteHeader(http.StatusNoContent) + return + } + + ctx := r.Context() + + user, err := h.db.GetUserByEmail(ctx, req.Email) + if err != nil { + // Don't leak whether the email exists. 
+ w.WriteHeader(http.StatusNoContent) + return + } + + if user.Status != "active" { + w.WriteHeader(http.StatusNoContent) + return + } + + rawToken := generateResetToken() + tokenHash := hashResetToken(rawToken) + redisKey := passwordResetKeyPrefix + tokenHash + + if err := h.rdb.Set(ctx, redisKey, id.FormatUserID(user.ID), passwordResetTTL).Err(); err != nil { + slog.Error("password reset: failed to store token in redis", "error", err) + w.WriteHeader(http.StatusNoContent) + return + } + + resetURL := h.redirectURL + "/reset-password?token=" + rawToken + go func() { + sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := h.mailer.Send(sendCtx, user.Email, "Reset your Wrenn password", email.EmailData{ + RecipientName: user.Name, + Message: "We received a request to reset your password. Click the button below to set a new password. This link expires in 15 minutes.", + Button: &email.Button{Text: "Reset Password", URL: resetURL}, + Closing: "If you didn't request a password reset, you can safely ignore this email.", + }); err != nil { + slog.Error("password reset: failed to send email", "email", user.Email, "error", err) + } + }() + + w.WriteHeader(http.StatusNoContent) +} + +// ConfirmPasswordReset handles POST /v1/me/password/reset/confirm (unauthenticated). 
+func (h *meHandler) ConfirmPasswordReset(w http.ResponseWriter, r *http.Request) { + var req confirmPasswordResetRequest + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + if req.Token == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "token is required") + return + } + if len(req.NewPassword) < 8 { + writeError(w, http.StatusBadRequest, "invalid_request", "password must be at least 8 characters") + return + } + + ctx := r.Context() + tokenHash := hashResetToken(req.Token) + redisKey := passwordResetKeyPrefix + tokenHash + + // GetDel atomically retrieves and removes the token in a single round-trip, + // preventing concurrent requests from both consuming the same token. + userIDStr, err := h.rdb.GetDel(ctx, redisKey).Result() + if errors.Is(err, redis.Nil) { + writeError(w, http.StatusBadRequest, "invalid_token", "reset token is invalid or has expired") + return + } + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to verify token") + return + } + + userID, err := id.ParseUserID(userIDStr) + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "invalid stored user ID") + return + } + + user, err := h.db.GetUserByID(ctx, userID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + hash, err := auth.HashPassword(req.NewPassword) + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to hash password") + return + } + + if err := h.db.UpdateUserPassword(ctx, db.UpdateUserPasswordParams{ + ID: userID, + PasswordHash: pgtype.Text{String: hash, Valid: true}, + }); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to update password") + return + } + + go func() { + sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err 
:= h.mailer.Send(sendCtx, user.Email, "Your Wrenn password was reset", email.EmailData{ + RecipientName: user.Name, + Message: "Your password has been successfully reset. You can now sign in with your new password.", + Closing: "If you didn't request this change, contact support immediately.", + }); err != nil { + slog.Warn("confirm password reset: failed to send notification", "email", user.Email, "error", err) + } + }() + + w.WriteHeader(http.StatusNoContent) +} + +// ConnectProvider handles GET /v1/me/providers/{provider}/connect. +// Sets OAuth state + link cookies and returns the provider auth URL. +func (h *meHandler) ConnectProvider(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + provider := chi.URLParam(r, "provider") + + p, ok := h.oauthRegistry.Get(provider) + if !ok { + writeError(w, http.StatusNotFound, "provider_not_found", "unsupported OAuth provider") + return + } + + state, err := generateState() + if err != nil { + writeError(w, http.StatusInternalServerError, "internal_error", "failed to generate state") + return + } + + mac := computeHMAC(h.jwtSecret, state) + http.SetCookie(w, &http.Cookie{ + Name: "oauth_state", + Value: state + ":" + mac, + Path: "/", + MaxAge: 600, + HttpOnly: true, + SameSite: http.SameSiteLaxMode, + Secure: isSecure(r), + }) + + userIDStr := id.FormatUserID(ac.UserID) + linkMac := computeHMAC(h.jwtSecret, userIDStr) + http.SetCookie(w, &http.Cookie{ + Name: "oauth_link_user_id", + Value: userIDStr + ":" + linkMac, + Path: "/", + MaxAge: 600, + HttpOnly: true, + SameSite: http.SameSiteLaxMode, + Secure: isSecure(r), + }) + + writeJSON(w, http.StatusOK, map[string]string{"auth_url": p.AuthCodeURL(state)}) +} + +// DisconnectProvider handles DELETE /v1/me/providers/{provider}. 
+func (h *meHandler) DisconnectProvider(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + provider := chi.URLParam(r, "provider") + ctx := r.Context() + + user, err := h.db.GetUserByID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get user") + return + } + + providers, err := h.db.GetOAuthProvidersByUserID(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to get providers") + return + } + + // Ensure the user will still have at least one login method after disconnecting. + if !user.PasswordHash.Valid && len(providers) <= 1 { + writeError(w, http.StatusBadRequest, "last_login_method", "cannot disconnect your only login method — add a password first") + return + } + + // Check the provider is actually linked to this user. + found := false + for _, p := range providers { + if p.Provider == provider { + found = true + break + } + } + if !found { + writeError(w, http.StatusNotFound, "not_found", "provider not connected") + return + } + + if err := h.db.DeleteOAuthProvider(ctx, db.DeleteOAuthProviderParams{ + UserID: ac.UserID, + Provider: provider, + }); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to disconnect provider") + return + } + + w.WriteHeader(http.StatusNoContent) +} + +// DeleteAccount handles DELETE /v1/me — soft-deletes the user's account. 
+func (h *meHandler) DeleteAccount(w http.ResponseWriter, r *http.Request) {
+	ac := auth.MustFromContext(r.Context())
+	ctx := r.Context()
+
+	var req deleteAccountRequest
+	if err := decodeJSON(r, &req); err != nil {
+		writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body")
+		return
+	}
+
+	user, err := h.db.GetUserByID(ctx, ac.UserID)
+	if err != nil {
+		writeError(w, http.StatusInternalServerError, "db_error", "failed to get user")
+		return
+	}
+
+	if !strings.EqualFold(strings.TrimSpace(req.Confirmation), user.Email) {
+		writeError(w, http.StatusBadRequest, "invalid_request", "confirmation does not match your email address")
+		return
+	}
+
+	teamsBlocking, err := h.db.CountUserOwnedTeamsWithOtherMembers(ctx, ac.UserID)
+	if err != nil {
+		writeError(w, http.StatusInternalServerError, "db_error", "failed to check team ownership")
+		return
+	}
+	if teamsBlocking > 0 {
+		writeError(w, http.StatusConflict, "owns_team_with_members",
+			fmt.Sprintf("you own %d team(s) with other members — transfer ownership or remove members before deleting your account", teamsBlocking))
+		return
+	}
+
+	// Delete all teams the user solely owns (no other members).
+	// Team deletion involves RPC calls (sandbox destruction) that cannot be
+	// transactional, so we do those first — aborting on failure — then wrap
+	// the DB-only cleanup in a transaction.
+ soleTeams, err := h.db.ListSoleOwnedTeams(ctx, ac.UserID) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to list owned teams") + return + } + for _, teamID := range soleTeams { + if err := h.teamSvc.DeleteTeamInternal(ctx, teamID); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", + fmt.Sprintf("failed to delete sole-owned team %s", id.FormatTeamID(teamID))) + return + } + } + + tx, err := h.pool.Begin(ctx) + if err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to start transaction") + return + } + defer tx.Rollback(ctx) + + qtx := h.db.WithTx(tx) + + if err := qtx.DeleteAPIKeysByCreator(ctx, ac.UserID); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to delete user's API keys") + return + } + + if err := qtx.SoftDeleteUser(ctx, ac.UserID); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to delete account") + return + } + + if err := tx.Commit(ctx); err != nil { + writeError(w, http.StatusInternalServerError, "db_error", "failed to commit account deletion") + return + } + + slog.Info("account soft-deleted", "user_id", id.FormatUserID(ac.UserID), "email", user.Email) + + go func() { + sendCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := h.mailer.Send(sendCtx, user.Email, "Your Wrenn account has been deleted", email.EmailData{ + RecipientName: user.Name, + Message: "Your Wrenn account has been deactivated and is scheduled for permanent deletion in 15 days. 
If this was a mistake, contact support before then to recover your account.", + Closing: "Thank you for using Wrenn.", + }); err != nil { + slog.Warn("delete account: failed to send notification", "email", user.Email, "error", err) + } + }() + + w.WriteHeader(http.StatusNoContent) +} + +// --- helpers --- + +func generateResetToken() string { + b := make([]byte, 16) + if _, err := rand.Read(b); err != nil { + panic(fmt.Sprintf("crypto/rand failed: %v", err)) + } + return hex.EncodeToString(b) +} + +func hashResetToken(raw string) string { + h := sha256.Sum256([]byte(raw)) + return hex.EncodeToString(h[:]) +} diff --git a/internal/api/handlers_metrics.go b/internal/api/handlers_metrics.go index 4d04430..28a2157 100644 --- a/internal/api/handlers_metrics.go +++ b/internal/api/handlers_metrics.go @@ -9,10 +9,10 @@ import ( "github.com/go-chi/chi/v5" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -38,7 +38,7 @@ type metricsResponse struct { Points []metricPointResponse `json:"points"` } -// GetMetrics handles GET /v1/sandboxes/{id}/metrics?range=10m|2h|24h. +// GetMetrics handles GET /v1/capsules/{id}/metrics?range=10m|2h|24h. 
func (h *sandboxMetricsHandler) GetMetrics(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ctx := r.Context() diff --git a/internal/api/handlers_oauth.go b/internal/api/handlers_oauth.go index 037929f..9c9a14a 100644 --- a/internal/api/handlers_oauth.go +++ b/internal/api/handlers_oauth.go @@ -16,10 +16,10 @@ import ( "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgxpool" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/auth/oauth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/auth/oauth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) type oauthHandler struct { @@ -137,6 +137,73 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { email := strings.TrimSpace(strings.ToLower(profile.Email)) + // Check for a link operation initiated from the settings page. + if linkCookie, err := r.Cookie("oauth_link_user_id"); err == nil && linkCookie.Value != "" { + // Clear the link cookie immediately. + http.SetCookie(w, &http.Cookie{ + Name: "oauth_link_user_id", + Value: "", + Path: "/", + MaxAge: -1, + HttpOnly: true, + SameSite: http.SameSiteLaxMode, + Secure: isSecure(r), + }) + + settingsBase := h.redirectURL + "/dashboard/settings" + + // Verify the HMAC to prevent cookie forgery. 
+		linkParts := strings.SplitN(linkCookie.Value, ":", 2)
+		if len(linkParts) != 2 || !hmac.Equal([]byte(computeHMAC(h.jwtSecret, linkParts[0])), []byte(linkParts[1])) {
+			slog.Warn("oauth link: invalid or tampered link cookie")
+			http.Redirect(w, r, settingsBase+"?connect_error=invalid_state", http.StatusFound)
+			return
+		}
+
+		userID, parseErr := id.ParseUserID(linkParts[0])
+		if parseErr != nil {
+			slog.Error("oauth link: invalid user ID in cookie", "error", parseErr)
+			http.Redirect(w, r, settingsBase+"?connect_error=invalid_state", http.StatusFound)
+			return
+		}
+
+		// Ensure the provider account isn't already linked to a different user.
+		existing, lookupErr := h.db.GetOAuthProvider(ctx, db.GetOAuthProviderParams{
+			Provider:   provider,
+			ProviderID: profile.ProviderID,
+		})
+		if lookupErr == nil && existing.UserID != userID {
+			slog.Warn("oauth link: provider already linked to another account", "provider", provider)
+			http.Redirect(w, r, settingsBase+"?connect_error=already_linked", http.StatusFound)
+			return
+		}
+		if lookupErr == nil && existing.UserID == userID {
+			// Already linked to this user — treat as success.
+ http.Redirect(w, r, settingsBase+"?connected="+provider, http.StatusFound) + return + } + if !errors.Is(lookupErr, pgx.ErrNoRows) { + slog.Error("oauth link: db lookup failed", "error", lookupErr) + http.Redirect(w, r, settingsBase+"?connect_error=db_error", http.StatusFound) + return + } + + if insertErr := h.db.InsertOAuthProvider(ctx, db.InsertOAuthProviderParams{ + Provider: provider, + ProviderID: profile.ProviderID, + UserID: userID, + Email: email, + }); insertErr != nil { + slog.Error("oauth link: failed to insert provider", "error", insertErr) + http.Redirect(w, r, settingsBase+"?connect_error=db_error", http.StatusFound) + return + } + + slog.Info("oauth link: provider linked", "provider", provider, "user_id", id.FormatUserID(userID)) + http.Redirect(w, r, settingsBase+"?connected="+provider, http.StatusFound) + return + } + // Check if this OAuth identity already exists. existing, err := h.db.GetOAuthProvider(ctx, db.GetOAuthProviderParams{ Provider: provider, @@ -145,18 +212,29 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { if err == nil { // Existing OAuth user — log them in. 
user, err := h.db.GetUserByID(ctx, existing.UserID) + if errors.Is(err, pgx.ErrNoRows) { + slog.Warn("oauth login: user no longer exists", "user_id", existing.UserID) + redirectWithError(w, r, redirectBase, "account_deactivated") + return + } if err != nil { slog.Error("oauth login: failed to get user", "error", err) redirectWithError(w, r, redirectBase, "db_error") return } - team, role, err := loginTeam(ctx, h.db, user.ID) + if user.Status != "active" { + slog.Warn("oauth login: account not active", "email", user.Email, "status", user.Status) + redirectWithError(w, r, redirectBase, "account_deactivated") + return + } + team, role, isFirstUser, err := ensureDefaultTeam(ctx, h.db, h.pool, user.ID, user.Name) if err != nil { - slog.Error("oauth login: failed to get team", "error", err) + slog.Error("oauth login: failed to ensure team", "error", err) redirectWithError(w, r, redirectBase, "db_error") return } - token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, user.IsAdmin) + isAdmin := user.IsAdmin || isFirstUser + token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, isAdmin) if err != nil { slog.Error("oauth login: failed to sign jwt", "error", err) redirectWithError(w, r, redirectBase, "internal_error") @@ -172,13 +250,21 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { } // New OAuth identity — check for email collision. - _, err = h.db.GetUserByEmail(ctx, email) + existingUser, err := h.db.GetUserByEmail(ctx, email) if err == nil { - // Email already taken by another account. - redirectWithError(w, r, redirectBase, "email_taken") - return - } - if !errors.Is(err, pgx.ErrNoRows) { + if existingUser.Status == "inactive" { + // Unactivated email signup — delete and let OAuth take over. 
+ if delErr := h.db.HardDeleteUser(ctx, existingUser.ID); delErr != nil { + slog.Error("oauth: failed to delete inactive user", "error", delErr) + redirectWithError(w, r, redirectBase, "db_error") + return + } + } else { + // Email already taken by an active/disabled/deleted account. + redirectWithError(w, r, redirectBase, "email_taken") + return + } + } else if !errors.Is(err, pgx.ErrNoRows) { slog.Error("oauth: email check failed", "error", err) redirectWithError(w, r, redirectBase, "db_error") return @@ -195,6 +281,15 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { qtx := h.db.WithTx(tx) + // The first user to sign up becomes a platform admin. + userCount, err := qtx.CountUsers(ctx) + if err != nil { + slog.Error("oauth: failed to count users", "error", err) + redirectWithError(w, r, redirectBase, "db_error") + return + } + isFirstUser := userCount == 0 + userID := id.NewUserID() _, err = qtx.InsertUserOAuth(ctx, db.InsertUserOAuthParams{ ID: userID, @@ -238,6 +333,14 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { return } + if isFirstUser { + if err := qtx.SetUserAdmin(ctx, db.SetUserAdminParams{ID: userID, IsAdmin: true}); err != nil { + slog.Error("oauth: failed to set admin status", "error", err) + redirectWithError(w, r, redirectBase, "db_error") + return + } + } + if err := qtx.InsertOAuthProvider(ctx, db.InsertOAuthProviderParams{ Provider: provider, ProviderID: profile.ProviderID, @@ -255,7 +358,7 @@ func (h *oauthHandler) Callback(w http.ResponseWriter, r *http.Request) { return } - token, err := auth.SignJWT(h.jwtSecret, userID, teamID, email, profile.Name, "owner", false) + token, err := auth.SignJWT(h.jwtSecret, userID, teamID, email, profile.Name, "owner", isFirstUser) if err != nil { slog.Error("oauth: failed to sign jwt", "error", err) redirectWithError(w, r, redirectBase, "internal_error") @@ -279,18 +382,29 @@ func (h *oauthHandler) retryAsLogin(w http.ResponseWriter, r *http.Request, 
prov return } user, err := h.db.GetUserByID(ctx, existing.UserID) + if errors.Is(err, pgx.ErrNoRows) { + slog.Warn("oauth: retry login: user no longer exists", "user_id", existing.UserID) + redirectWithError(w, r, redirectBase, "account_deactivated") + return + } if err != nil { slog.Error("oauth: retry login: failed to get user", "error", err) redirectWithError(w, r, redirectBase, "db_error") return } - team, role, err := loginTeam(ctx, h.db, user.ID) + if user.Status != "active" { + slog.Warn("oauth: retry login: account not active", "email", user.Email, "status", user.Status) + redirectWithError(w, r, redirectBase, "account_deactivated") + return + } + team, role, isFirstUser, err := ensureDefaultTeam(ctx, h.db, h.pool, user.ID, user.Name) if err != nil { - slog.Error("oauth: retry login: failed to get team", "error", err) + slog.Error("oauth: retry login: failed to ensure team", "error", err) redirectWithError(w, r, redirectBase, "db_error") return } - token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, user.IsAdmin) + isAdmin := user.IsAdmin || isFirstUser + token, err := auth.SignJWT(h.jwtSecret, user.ID, team.ID, user.Email, user.Name, role, isAdmin) if err != nil { slog.Error("oauth: retry login: failed to sign jwt", "error", err) redirectWithError(w, r, redirectBase, "internal_error") diff --git a/internal/api/handlers_process.go b/internal/api/handlers_process.go new file mode 100644 index 0000000..2bb6bb9 --- /dev/null +++ b/internal/api/handlers_process.go @@ -0,0 +1,298 @@ +package api + +import ( + "context" + "log/slog" + "net/http" + "strconv" + "time" + + "connectrpc.com/connect" + "github.com/go-chi/chi/v5" + "github.com/gorilla/websocket" + "github.com/jackc/pgx/v5/pgtype" + + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" +) + +type processHandler 
struct { + db *db.Queries + pool *lifecycle.HostClientPool + jwtSecret []byte +} + +func newProcessHandler(db *db.Queries, pool *lifecycle.HostClientPool, jwtSecret []byte) *processHandler { + return &processHandler{db: db, pool: pool, jwtSecret: jwtSecret} +} + +// processResponse is a single entry in the process list. +type processResponse struct { + PID uint32 `json:"pid"` + Tag string `json:"tag,omitempty"` + Cmd string `json:"cmd"` + Args []string `json:"args,omitempty"` +} + +// processListResponse wraps the list of processes. +type processListResponse struct { + Processes []processResponse `json:"processes"` +} + +// ListProcesses handles GET /v1/capsules/{id}/processes. +func (h *processHandler) ListProcesses(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + ctx := r.Context() + ac := auth.MustFromContext(ctx) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running (status: "+sb.Status+")") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + resp, err := agent.ListProcesses(ctx, connect.NewRequest(&pb.ListProcessesRequest{ + SandboxId: sandboxIDStr, + })) + if err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + procs := make([]processResponse, 0, len(resp.Msg.Processes)) + for _, p := range resp.Msg.Processes { + procs = append(procs, processResponse{ + PID: p.Pid, + Tag: p.Tag, + Cmd: p.Cmd, + Args: 
p.Args, + }) + } + + writeJSON(w, http.StatusOK, processListResponse{Processes: procs}) +} + +// KillProcess handles DELETE /v1/capsules/{id}/processes/{selector}. +// The selector can be a numeric PID or a string tag. +func (h *processHandler) KillProcess(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + selectorStr := chi.URLParam(r, "selector") + ctx := r.Context() + ac := auth.MustFromContext(ctx) + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + writeError(w, http.StatusNotFound, "not_found", "sandbox not found") + return + } + if sb.Status != "running" { + writeError(w, http.StatusConflict, "invalid_state", "sandbox is not running (status: "+sb.Status+")") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + writeError(w, http.StatusServiceUnavailable, "host_unavailable", "sandbox host is not reachable") + return + } + + // Build the kill request with PID or tag selector. + killReq := &pb.KillProcessRequest{ + SandboxId: sandboxIDStr, + Signal: "SIGKILL", + } + if sig := r.URL.Query().Get("signal"); sig == "SIGTERM" { + killReq.Signal = "SIGTERM" + } + + if pid, err := strconv.ParseUint(selectorStr, 10, 32); err == nil { + killReq.Selector = &pb.KillProcessRequest_Pid{Pid: uint32(pid)} + } else { + killReq.Selector = &pb.KillProcessRequest_Tag{Tag: selectorStr} + } + + if _, err := agent.KillProcess(ctx, connect.NewRequest(killReq)); err != nil { + status, code, msg := agentErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +// wsProcessOut is the JSON message sent to the WebSocket client. 
+type wsProcessOut struct { + Type string `json:"type"` // "start", "stdout", "stderr", "exit", "error" + PID uint32 `json:"pid,omitempty"` // only for "start" + Data string `json:"data,omitempty"` // only for "stdout", "stderr", "error" + ExitCode *int32 `json:"exit_code,omitempty"` // only for "exit" +} + +// ConnectProcess handles WS /v1/capsules/{id}/processes/{selector}/stream. +func (h *processHandler) ConnectProcess(w http.ResponseWriter, r *http.Request) { + sandboxIDStr := chi.URLParam(r, "id") + selectorStr := chi.URLParam(r, "selector") + ctx := r.Context() + + sandboxID, err := id.ParseSandboxID(sandboxIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID") + return + } + + // Authenticate: use context from middleware (API key) or WS first message (JWT). + ac, hasAuth := auth.FromContext(ctx) + + if !hasAuth { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + slog.Error("process stream websocket upgrade failed", "error", err) + return + } + defer conn.Close() + + var wsAC auth.AuthContext + var authErr error + if isAdminWSRoute(ctx) { + wsAC, authErr = wsAuthenticateAdmin(ctx, conn, h.jwtSecret, h.db) + } else { + wsAC, authErr = wsAuthenticate(ctx, conn, h.jwtSecret, h.db) + } + if authErr != nil { + sendProcessWSError(conn, "authentication failed") + return + } + ac = wsAC + + h.runConnectProcess(ctx, conn, ac, sandboxID, sandboxIDStr, selectorStr) + return + } + + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + slog.Error("process stream websocket upgrade failed", "error", err) + return + } + defer conn.Close() + + h.runConnectProcess(ctx, conn, ac, sandboxID, sandboxIDStr, selectorStr) +} + +func (h *processHandler) runConnectProcess(ctx context.Context, conn *websocket.Conn, ac auth.AuthContext, sandboxID pgtype.UUID, sandboxIDStr, selectorStr string) { + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + 
sendProcessWSError(conn, "sandbox not found") + return + } + if sb.Status != "running" { + sendProcessWSError(conn, "sandbox is not running (status: "+sb.Status+")") + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + sendProcessWSError(conn, "sandbox host is not reachable") + return + } + + // Build the connect request with PID or tag selector. + connectReq := &pb.ConnectProcessRequest{ + SandboxId: sandboxIDStr, + } + if pid, err := strconv.ParseUint(selectorStr, 10, 32); err == nil { + connectReq.Selector = &pb.ConnectProcessRequest_Pid{Pid: uint32(pid)} + } else { + connectReq.Selector = &pb.ConnectProcessRequest_Tag{Tag: selectorStr} + } + + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := agent.ConnectProcess(streamCtx, connect.NewRequest(connectReq)) + if err != nil { + sendProcessWSError(conn, "failed to connect to process: "+err.Error()) + return + } + defer stream.Close() + + // Listen for client disconnect in a goroutine. + go func() { + for { + _, _, err := conn.ReadMessage() + if err != nil { + cancel() + return + } + } + }() + + // Forward stream events to WebSocket. + for stream.Receive() { + resp := stream.Msg() + switch ev := resp.Event.(type) { + case *pb.ConnectProcessResponse_Start: + writeWSJSON(conn, wsProcessOut{Type: "start", PID: ev.Start.Pid}) + + case *pb.ConnectProcessResponse_Data: + switch o := ev.Data.Output.(type) { + case *pb.ExecStreamData_Stdout: + writeWSJSON(conn, wsProcessOut{Type: "stdout", Data: string(o.Stdout)}) + case *pb.ExecStreamData_Stderr: + writeWSJSON(conn, wsProcessOut{Type: "stderr", Data: string(o.Stderr)}) + } + + case *pb.ConnectProcessResponse_End: + exitCode := ev.End.ExitCode + writeWSJSON(conn, wsProcessOut{Type: "exit", ExitCode: &exitCode}) + } + } + + if err := stream.Err(); err != nil { + if streamCtx.Err() == nil { + sendProcessWSError(conn, err.Error()) + } + } + + // Update last active using a fresh context. 
+ updateCtx, updateCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer updateCancel() + if err := h.db.UpdateLastActive(updateCtx, db.UpdateLastActiveParams{ + ID: sandboxID, + LastActiveAt: pgtype.Timestamptz{ + Time: time.Now(), + Valid: true, + }, + }); err != nil { + slog.Warn("failed to update last active after process stream", "sandbox_id", sandboxIDStr, "error", err) + } +} + +func sendProcessWSError(conn *websocket.Conn, msg string) { + writeWSJSON(conn, wsProcessOut{Type: "error", Data: msg}) +} diff --git a/internal/api/handlers_pty.go b/internal/api/handlers_pty.go new file mode 100644 index 0000000..181fc9d --- /dev/null +++ b/internal/api/handlers_pty.go @@ -0,0 +1,400 @@ +package api + +import ( + "context" + "encoding/base64" + "encoding/json" + "log/slog" + "net/http" + "sync" + "time" + + "connectrpc.com/connect" + "github.com/go-chi/chi/v5" + "github.com/gorilla/websocket" + "github.com/jackc/pgx/v5/pgtype" + + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" + "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/hostagentv1connect" +) + +const ( + ptyKeepaliveInterval = 30 * time.Second + ptyDefaultCmd = "/bin/bash" + ptyDefaultCols = 80 + ptyDefaultRows = 24 +) + +type ptyHandler struct { + db *db.Queries + pool *lifecycle.HostClientPool + jwtSecret []byte +} + +func newPtyHandler(db *db.Queries, pool *lifecycle.HostClientPool, jwtSecret []byte) *ptyHandler { + return &ptyHandler{db: db, pool: pool, jwtSecret: jwtSecret} +} + +// --- WebSocket message types --- + +// wsPtyIn is the inbound message from the client. 
+type wsPtyIn struct {
+	Type string            `json:"type"`           // "start", "connect", "input", "resize", "kill"
+	Cmd  string            `json:"cmd,omitempty"`  // for "start"
+	Args []string          `json:"args,omitempty"` // for "start"
+	Cols uint32            `json:"cols,omitempty"` // for "start", "resize"
+	Rows uint32            `json:"rows,omitempty"` // for "start", "resize"
+	Envs map[string]string `json:"envs,omitempty"` // for "start"
+	Cwd  string            `json:"cwd,omitempty"`  // for "start"
+	User string            `json:"user,omitempty"` // for "start"
+	Tag  string            `json:"tag,omitempty"`  // for "connect"
+	Data string            `json:"data,omitempty"` // for "input" (base64)
+}
+
+// wsPtyOut is the outbound message to the client.
+type wsPtyOut struct {
+	Type     string `json:"type"`                // "started", "output", "exit", "error", "ping"
+	Tag      string `json:"tag,omitempty"`       // for "started"
+	PID      uint32 `json:"pid,omitempty"`       // for "started"
+	Data     string `json:"data,omitempty"`      // for "output" (base64), "error"
+	ExitCode *int32 `json:"exit_code,omitempty"` // for "exit"
+	Fatal    bool   `json:"fatal,omitempty"`     // for "error"
+}
+
+// wsWriter wraps a websocket.Conn with a mutex for concurrent writes.
+type wsWriter struct {
+	conn *websocket.Conn
+	mu   sync.Mutex
+}
+
+func (w *wsWriter) writeJSON(v any) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if err := w.conn.WriteJSON(v); err != nil {
+		slog.Debug("pty websocket write error", "error", err)
+	}
+}
+
+// PtySession handles WS /v1/capsules/{id}/pty.
+func (h *ptyHandler) PtySession(w http.ResponseWriter, r *http.Request) {
+	sandboxIDStr := chi.URLParam(r, "id")
+	ctx := r.Context()
+
+	sandboxID, err := id.ParseSandboxID(sandboxIDStr)
+	if err != nil {
+		writeError(w, http.StatusBadRequest, "invalid_request", "invalid sandbox ID")
+		return
+	}
+
+	// API key auth is handled by middleware (sets context).
+	// For browser JWT auth, we authenticate after upgrade via first WS message.
+ ac, hasAuth := auth.FromContext(ctx) + + if !hasAuth { + // No pre-upgrade auth — upgrade first, then authenticate via WS message. + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + slog.Error("pty websocket upgrade failed", "error", err) + return + } + defer conn.Close() + + ws := &wsWriter{conn: conn} + + var wsAC auth.AuthContext + if isAdminWSRoute(ctx) { + wsAC, err = wsAuthenticateAdmin(ctx, conn, h.jwtSecret, h.db) + } else { + wsAC, err = wsAuthenticate(ctx, conn, h.jwtSecret, h.db) + } + if err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "authentication failed", Fatal: true}) + return + } + ac = wsAC + + h.runPtySession(ctx, ws, conn, ac, sandboxID, sandboxIDStr) + return + } + + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + slog.Error("pty websocket upgrade failed", "error", err) + return + } + defer conn.Close() + + ws := &wsWriter{conn: conn} + h.runPtySession(ctx, ws, conn, ac, sandboxID, sandboxIDStr) +} + +func (h *ptyHandler) runPtySession(ctx context.Context, ws *wsWriter, conn *websocket.Conn, ac auth.AuthContext, sandboxID pgtype.UUID, sandboxIDStr string) { + sb, err := h.db.GetSandboxByTeam(ctx, db.GetSandboxByTeamParams{ID: sandboxID, TeamID: ac.TeamID}) + if err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "sandbox not found", Fatal: true}) + return + } + if sb.Status != "running" { + ws.writeJSON(wsPtyOut{Type: "error", Data: "sandbox is not running (status: " + sb.Status + ")", Fatal: true}) + return + } + + // Read the first message to determine start vs connect. 
+ var firstMsg wsPtyIn + if err := conn.ReadJSON(&firstMsg); err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "failed to read first message: " + err.Error(), Fatal: true}) + return + } + + agent, err := agentForHost(ctx, h.db, h.pool, sb.HostID) + if err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "sandbox host is not reachable", Fatal: true}) + return + } + + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + + switch firstMsg.Type { + case "start": + h.handleStart(streamCtx, cancel, ws, agent, sandboxIDStr, firstMsg) + case "connect": + h.handleConnect(streamCtx, cancel, ws, agent, sandboxIDStr, firstMsg) + default: + ws.writeJSON(wsPtyOut{Type: "error", Data: "first message must be type 'start' or 'connect'", Fatal: true}) + } + + // Update last active using a fresh context. + updateCtx, updateCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer updateCancel() + if err := h.db.UpdateLastActive(updateCtx, db.UpdateLastActiveParams{ + ID: sandboxID, + LastActiveAt: pgtype.Timestamptz{ + Time: time.Now(), + Valid: true, + }, + }); err != nil { + slog.Warn("failed to update last active after pty session", "sandbox_id", sandboxIDStr, "error", err) + } +} + +func (h *ptyHandler) handleStart( + ctx context.Context, + cancel context.CancelFunc, + ws *wsWriter, + agent hostagentv1connect.HostAgentServiceClient, + sandboxIDStr string, + msg wsPtyIn, +) { + cmd := msg.Cmd + if cmd == "" { + cmd = ptyDefaultCmd + } + cols := msg.Cols + if cols == 0 { + cols = ptyDefaultCols + } + rows := msg.Rows + if rows == 0 { + rows = ptyDefaultRows + } + + tag := newPtyTag() + + stream, err := agent.PtyAttach(ctx, connect.NewRequest(&pb.PtyAttachRequest{ + SandboxId: sandboxIDStr, + Tag: tag, + Cmd: cmd, + Args: msg.Args, + Cols: cols, + Rows: rows, + Envs: msg.Envs, + Cwd: msg.Cwd, + User: msg.User, + })) + if err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "failed to start pty: " + err.Error(), Fatal: true}) + return + } 
+ defer stream.Close() + + // Wait for the started event and forward it. + if !stream.Receive() { + if err := stream.Err(); err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "pty stream failed: " + err.Error(), Fatal: true}) + } + return + } + resp := stream.Msg() + started, ok := resp.Event.(*pb.PtyAttachResponse_Started) + if !ok { + ws.writeJSON(wsPtyOut{Type: "error", Data: "expected started event from host agent", Fatal: true}) + return + } + ws.writeJSON(wsPtyOut{Type: "started", Tag: started.Started.Tag, PID: started.Started.Pid}) + + runPtyLoop(ctx, cancel, ws, stream, agent, sandboxIDStr, tag) +} + +func (h *ptyHandler) handleConnect( + ctx context.Context, + cancel context.CancelFunc, + ws *wsWriter, + agent hostagentv1connect.HostAgentServiceClient, + sandboxIDStr string, + msg wsPtyIn, +) { + if msg.Tag == "" { + ws.writeJSON(wsPtyOut{Type: "error", Data: "connect requires a 'tag' field", Fatal: true}) + return + } + + stream, err := agent.PtyAttach(ctx, connect.NewRequest(&pb.PtyAttachRequest{ + SandboxId: sandboxIDStr, + Tag: msg.Tag, + })) + if err != nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: "failed to connect to pty: " + err.Error(), Fatal: true}) + return + } + defer stream.Close() + + runPtyLoop(ctx, cancel, ws, stream, agent, sandboxIDStr, msg.Tag) +} + +// runPtyLoop drives the bidirectional communication between the WebSocket +// and the host agent PTY stream. +func runPtyLoop( + ctx context.Context, + cancel context.CancelFunc, + ws *wsWriter, + stream *connect.ServerStreamForClient[pb.PtyAttachResponse], + agent hostagentv1connect.HostAgentServiceClient, + sandboxID string, + tag string, +) { + var wg sync.WaitGroup + + // Output pump: read from Connect stream, write to WebSocket. + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + + for stream.Receive() { + resp := stream.Msg() + switch ev := resp.Event.(type) { + case *pb.PtyAttachResponse_Started: + // Already handled before the loop for "start" mode. 
+ // For "connect" mode this won't appear. + ws.writeJSON(wsPtyOut{Type: "started", Tag: ev.Started.Tag, PID: ev.Started.Pid}) + + case *pb.PtyAttachResponse_Output: + ws.writeJSON(wsPtyOut{ + Type: "output", + Data: base64.StdEncoding.EncodeToString(ev.Output.Data), + }) + + case *pb.PtyAttachResponse_Exited: + exitCode := ev.Exited.ExitCode + ws.writeJSON(wsPtyOut{Type: "exit", ExitCode: &exitCode}) + return + } + } + + if err := stream.Err(); err != nil && ctx.Err() == nil { + ws.writeJSON(wsPtyOut{Type: "error", Data: err.Error()}) + } + }() + + // Input pump: read from WebSocket, dispatch to host agent. + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + + for { + _, raw, err := ws.conn.ReadMessage() + if err != nil { + return + } + + var msg wsPtyIn + if json.Unmarshal(raw, &msg) != nil { + continue + } + + // Use a background context for unary RPCs so they complete + // even if the stream context is being cancelled. + rpcCtx, rpcCancel := context.WithTimeout(context.Background(), 5*time.Second) + + switch msg.Type { + case "input": + data, err := base64.StdEncoding.DecodeString(msg.Data) + if err != nil { + rpcCancel() + continue + } + if _, err := agent.PtySendInput(rpcCtx, connect.NewRequest(&pb.PtySendInputRequest{ + SandboxId: sandboxID, + Tag: tag, + Data: data, + })); err != nil { + slog.Debug("pty send input error", "error", err) + } + + case "resize": + cols := msg.Cols + rows := msg.Rows + if cols > 0 && rows > 0 { + if _, err := agent.PtyResize(rpcCtx, connect.NewRequest(&pb.PtyResizeRequest{ + SandboxId: sandboxID, + Tag: tag, + Cols: cols, + Rows: rows, + })); err != nil { + slog.Debug("pty resize error", "error", err) + } + } + + case "kill": + if _, err := agent.PtyKill(rpcCtx, connect.NewRequest(&pb.PtyKillRequest{ + SandboxId: sandboxID, + Tag: tag, + })); err != nil { + slog.Debug("pty kill error", "error", err) + } + } + + rpcCancel() + } + }() + + // Keepalive pump: send periodic pings to prevent idle WS closure. 
+ wg.Add(1) + go func() { + defer wg.Done() + ticker := time.NewTicker(ptyKeepaliveInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + ws.writeJSON(wsPtyOut{Type: "ping"}) + case <-ctx.Done(): + return + } + } + }() + + wg.Wait() +} + +// newPtyTag returns a PTY session tag: "pty-" + 8 random hex chars. +func newPtyTag() string { + return "pty-" + id.NewPtyTag() +} diff --git a/internal/api/handlers_sandbox.go b/internal/api/handlers_sandbox.go index 18d5305..badb3d0 100644 --- a/internal/api/handlers_sandbox.go +++ b/internal/api/handlers_sandbox.go @@ -7,11 +7,11 @@ import ( "github.com/go-chi/chi/v5" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type sandboxHandler struct { @@ -31,18 +31,19 @@ type createSandboxRequest struct { } type sandboxResponse struct { - ID string `json:"id"` - Status string `json:"status"` - Template string `json:"template"` - VCPUs int32 `json:"vcpus"` - MemoryMB int32 `json:"memory_mb"` - TimeoutSec int32 `json:"timeout_sec"` - GuestIP string `json:"guest_ip,omitempty"` - HostIP string `json:"host_ip,omitempty"` - CreatedAt string `json:"created_at"` - StartedAt *string `json:"started_at,omitempty"` - LastActiveAt *string `json:"last_active_at,omitempty"` - LastUpdated string `json:"last_updated"` + ID string `json:"id"` + Status string `json:"status"` + Template string `json:"template"` + VCPUs int32 `json:"vcpus"` + MemoryMB int32 `json:"memory_mb"` + TimeoutSec int32 `json:"timeout_sec"` + GuestIP string `json:"guest_ip,omitempty"` + HostIP string `json:"host_ip,omitempty"` + CreatedAt string `json:"created_at"` + StartedAt *string 
`json:"started_at,omitempty"` + LastActiveAt *string `json:"last_active_at,omitempty"` + LastUpdated string `json:"last_updated"` + Metadata map[string]string `json:"metadata,omitempty"` } func sandboxToResponse(sb db.Sandbox) sandboxResponse { @@ -56,6 +57,12 @@ func sandboxToResponse(sb db.Sandbox) sandboxResponse { GuestIP: sb.GuestIp, HostIP: sb.HostIp, } + if len(sb.Metadata) > 0 { + var meta map[string]string + if err := json.Unmarshal(sb.Metadata, &meta); err == nil && len(meta) > 0 { + resp.Metadata = meta + } + } if sb.CreatedAt.Valid { resp.CreatedAt = sb.CreatedAt.Time.Format(time.RFC3339) } @@ -73,7 +80,7 @@ func sandboxToResponse(sb db.Sandbox) sandboxResponse { return resp } -// Create handles POST /v1/sandboxes. +// Create handles POST /v1/capsules. func (h *sandboxHandler) Create(w http.ResponseWriter, r *http.Request) { var req createSandboxRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { @@ -104,7 +111,7 @@ func (h *sandboxHandler) Create(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusCreated, sandboxToResponse(sb)) } -// List handles GET /v1/sandboxes. +// List handles GET /v1/capsules. func (h *sandboxHandler) List(w http.ResponseWriter, r *http.Request) { ac := auth.MustFromContext(r.Context()) sandboxes, err := h.svc.List(r.Context(), ac.TeamID) @@ -121,7 +128,7 @@ func (h *sandboxHandler) List(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, resp) } -// Get handles GET /v1/sandboxes/{id}. +// Get handles GET /v1/capsules/{id}. func (h *sandboxHandler) Get(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ac := auth.MustFromContext(r.Context()) @@ -141,7 +148,7 @@ func (h *sandboxHandler) Get(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, sandboxToResponse(sb)) } -// Pause handles POST /v1/sandboxes/{id}/pause. +// Pause handles POST /v1/capsules/{id}/pause. 
func (h *sandboxHandler) Pause(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ac := auth.MustFromContext(r.Context()) @@ -163,7 +170,7 @@ func (h *sandboxHandler) Pause(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, sandboxToResponse(sb)) } -// Resume handles POST /v1/sandboxes/{id}/resume. +// Resume handles POST /v1/capsules/{id}/resume. func (h *sandboxHandler) Resume(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ac := auth.MustFromContext(r.Context()) @@ -185,7 +192,7 @@ func (h *sandboxHandler) Resume(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, sandboxToResponse(sb)) } -// Ping handles POST /v1/sandboxes/{id}/ping. +// Ping handles POST /v1/capsules/{id}/ping. func (h *sandboxHandler) Ping(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ac := auth.MustFromContext(r.Context()) @@ -205,7 +212,7 @@ func (h *sandboxHandler) Ping(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -// Destroy handles DELETE /v1/sandboxes/{id}. +// Destroy handles DELETE /v1/capsules/{id}. 
func (h *sandboxHandler) Destroy(w http.ResponseWriter, r *http.Request) { sandboxIDStr := chi.URLParam(r, "id") ac := auth.MustFromContext(r.Context()) diff --git a/internal/api/handlers_snapshots.go b/internal/api/handlers_snapshots.go index 8855c29..43d3148 100644 --- a/internal/api/handlers_snapshots.go +++ b/internal/api/handlers_snapshots.go @@ -13,14 +13,14 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/layout" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" - "git.omukk.dev/wrenn/wrenn/internal/service" - "git.omukk.dev/wrenn/wrenn/internal/validate" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/service" + "git.omukk.dev/wrenn/wrenn/pkg/validate" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -38,8 +38,8 @@ func newSnapshotHandler(svc *service.TemplateService, db *db.Queries, pool *life // deleteSnapshotBroadcast attempts to delete snapshot files on all online hosts. // Snapshots aren't currently host-tracked in the DB, so we broadcast to all hosts // and ignore NotFound errors. 
-func (h *snapshotHandler) deleteSnapshotBroadcast(ctx context.Context, teamID, templateID pgtype.UUID) error { - hosts, err := h.db.ListActiveHosts(ctx) +func deleteSnapshotBroadcast(ctx context.Context, queries *db.Queries, pool *lifecycle.HostClientPool, teamID, templateID pgtype.UUID) error { + hosts, err := queries.ListActiveHosts(ctx) if err != nil { return fmt.Errorf("list hosts: %w", err) } @@ -47,7 +47,7 @@ func (h *snapshotHandler) deleteSnapshotBroadcast(ctx context.Context, teamID, t if host.Status != "online" { continue } - agent, err := h.pool.GetForHost(host) + agent, err := pool.GetForHost(host) if err != nil { continue } @@ -69,13 +69,14 @@ type createSnapshotRequest struct { } type snapshotResponse struct { - Name string `json:"name"` - Type string `json:"type"` - VCPUs *int32 `json:"vcpus,omitempty"` - MemoryMB *int32 `json:"memory_mb,omitempty"` - SizeBytes int64 `json:"size_bytes"` - CreatedAt string `json:"created_at"` - Platform bool `json:"platform"` + Name string `json:"name"` + Type string `json:"type"` + VCPUs *int32 `json:"vcpus,omitempty"` + MemoryMB *int32 `json:"memory_mb,omitempty"` + SizeBytes int64 `json:"size_bytes"` + CreatedAt string `json:"created_at"` + Platform bool `json:"platform"` + Metadata map[string]string `json:"metadata,omitempty"` } func templateToResponse(t db.Template) snapshotResponse { @@ -94,6 +95,12 @@ func templateToResponse(t db.Template) snapshotResponse { if t.CreatedAt.Valid { resp.CreatedAt = t.CreatedAt.Time.Format(time.RFC3339) } + if len(t.Metadata) > 0 { + var meta map[string]string + if err := json.Unmarshal(t.Metadata, &meta); err == nil && len(meta) > 0 { + resp.Metadata = meta + } + } return resp } @@ -126,7 +133,6 @@ func (h *snapshotHandler) Create(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ac := auth.MustFromContext(ctx) - overwrite := r.URL.Query().Get("overwrite") == "true" // Check for global name collision. 
if _, err := h.db.GetPlatformTemplateByName(ctx, req.Name); err == nil { @@ -135,20 +141,10 @@ func (h *snapshotHandler) Create(w http.ResponseWriter, r *http.Request) { } // Check if name already exists for this team. - if existing, err := h.db.GetTemplateByTeam(ctx, db.GetTemplateByTeamParams{Name: req.Name, TeamID: ac.TeamID}); err == nil { - if !overwrite { - writeError(w, http.StatusConflict, "already_exists", "snapshot name already exists; use ?overwrite=true to replace") - return - } - // Delete old snapshot files from all hosts before removing the DB record. - if err := h.deleteSnapshotBroadcast(ctx, existing.TeamID, existing.ID); err != nil { - writeError(w, http.StatusInternalServerError, "agent_error", "failed to delete existing snapshot files") - return - } - if err := h.db.DeleteTemplateByTeam(ctx, db.DeleteTemplateByTeamParams{Name: req.Name, TeamID: ac.TeamID}); err != nil { - writeError(w, http.StatusInternalServerError, "db_error", "failed to remove existing template record") - return - } + if _, err := h.db.GetTemplateByTeam(ctx, db.GetTemplateByTeamParams{Name: req.Name, TeamID: ac.TeamID}); err == nil { + writeError(w, http.StatusConflict, "template_name_taken", + "snapshot name already exists; delete the existing snapshot first to reuse this name") + return } // Verify sandbox exists, belongs to team, and is running or paused. 
@@ -210,13 +206,16 @@ func (h *snapshotHandler) Create(w http.ResponseWriter, r *http.Request) { } tmpl, err := h.db.InsertTemplate(snapCtx, db.InsertTemplateParams{ - ID: newTemplateID, - Name: req.Name, - Type: "snapshot", - Vcpus: sb.Vcpus, - MemoryMb: sb.MemoryMb, - SizeBytes: resp.Msg.SizeBytes, - TeamID: ac.TeamID, + ID: newTemplateID, + Name: req.Name, + Type: "snapshot", + Vcpus: sb.Vcpus, + MemoryMb: sb.MemoryMb, + SizeBytes: resp.Msg.SizeBytes, + TeamID: ac.TeamID, + DefaultUser: "root", + DefaultEnv: []byte("{}"), + Metadata: sb.Metadata, }) if err != nil { slog.Error("failed to insert template record", "name", req.Name, "error", err) @@ -277,7 +276,7 @@ func (h *snapshotHandler) Delete(w http.ResponseWriter, r *http.Request) { return } - if err := h.deleteSnapshotBroadcast(ctx, tmpl.TeamID, tmpl.ID); err != nil { + if err := deleteSnapshotBroadcast(ctx, h.db, h.pool, tmpl.TeamID, tmpl.ID); err != nil { writeError(w, http.StatusInternalServerError, "agent_error", "failed to delete snapshot files") return } diff --git a/internal/api/handlers_stats.go b/internal/api/handlers_stats.go index 6c04c7e..1289d68 100644 --- a/internal/api/handlers_stats.go +++ b/internal/api/handlers_stats.go @@ -5,8 +5,8 @@ import ( "net/http" "time" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type statsHandler struct { @@ -43,7 +43,7 @@ type statsResponse struct { Series statsSeriesResponse `json:"series"` } -// GetStats handles GET /v1/sandboxes/stats?range=5m|1h|6h|24h|30d +// GetStats handles GET /v1/capsules/stats?range=5m|1h|6h|24h|30d func (h *statsHandler) GetStats(w http.ResponseWriter, r *http.Request) { ac := auth.MustFromContext(r.Context()) diff --git a/internal/api/handlers_team.go b/internal/api/handlers_team.go index ed23134..bfbe76c 100644 --- a/internal/api/handlers_team.go +++ b/internal/api/handlers_team.go @@ -1,6 +1,8 @@ package 
api import ( + "context" + "fmt" "log/slog" "net/http" "strings" @@ -9,20 +11,22 @@ import ( "github.com/go-chi/chi/v5" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type teamHandler struct { - svc *service.TeamService - audit *audit.AuditLogger + svc *service.TeamService + audit *audit.AuditLogger + mailer email.Mailer } -func newTeamHandler(svc *service.TeamService, al *audit.AuditLogger) *teamHandler { - return &teamHandler{svc: svc, audit: al} +func newTeamHandler(svc *service.TeamService, al *audit.AuditLogger, mailer email.Mailer) *teamHandler { + return &teamHandler{svc: svc, audit: al, mailer: mailer} } // teamResponse is the JSON shape for a team. @@ -131,6 +135,15 @@ func (h *teamHandler) Create(w http.ResponseWriter, r *http.Request) { return } + go func() { + if err := h.mailer.Send(context.Background(), ac.Email, "Your team has been created", email.EmailData{ + RecipientName: ac.Name, + Message: fmt.Sprintf("Your team \"%s\" has been created on Wrenn. 
You can now invite members and start creating sandboxes under this team.", req.Name), + }); err != nil { + slog.Warn("failed to send team created email", "email", ac.Email, "error", err) + } + }() + writeJSON(w, http.StatusCreated, teamWithRoleResponse{ teamResponse: teamToResponse(team.Team), Role: team.Role, @@ -279,6 +292,21 @@ func (h *teamHandler) AddMember(w http.ResponseWriter, r *http.Request) { if parseErr == nil { h.audit.LogMemberAdd(r.Context(), ac, targetUserID, member.Email, member.Role) } + + go func() { + team, err := h.svc.GetTeam(context.Background(), teamID) + teamName := "a team" + if err == nil { + teamName = team.Name + } + if err := h.mailer.Send(context.Background(), member.Email, "You've been added to a team on Wrenn", email.EmailData{ + RecipientName: member.Name, + Message: fmt.Sprintf("%s has added you to the team \"%s\" on Wrenn.", ac.Name, teamName), + }); err != nil { + slog.Warn("failed to send team invitation email", "email", member.Email, "error", err) + } + }() + writeJSON(w, http.StatusCreated, memberInfoToResponse(member)) } @@ -388,3 +416,87 @@ func (h *teamHandler) SetBYOC(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } + +// AdminListTeams handles GET /v1/admin/teams?page=1 +// Returns a paginated list of all teams with member counts, owner info, and active sandbox counts. 
+func (h *teamHandler) AdminListTeams(w http.ResponseWriter, r *http.Request) { + page := 1 + if p := r.URL.Query().Get("page"); p != "" { + if _, err := fmt.Sscanf(p, "%d", &page); err != nil || page < 1 { + page = 1 + } + } + const perPage = 100 + offset := int32((page - 1) * perPage) + + teams, total, err := h.svc.AdminListTeams(r.Context(), perPage, offset) + if err != nil { + status, code, msg := serviceErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + type adminTeamResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + IsByoc bool `json:"is_byoc"` + CreatedAt string `json:"created_at"` + DeletedAt *string `json:"deleted_at"` + MemberCount int32 `json:"member_count"` + OwnerName string `json:"owner_name"` + OwnerEmail string `json:"owner_email"` + ActiveSandboxCount int32 `json:"active_sandbox_count"` + ChannelCount int32 `json:"channel_count"` + } + + resp := make([]adminTeamResponse, len(teams)) + for i, t := range teams { + r := adminTeamResponse{ + ID: id.FormatTeamID(t.ID), + Name: t.Name, + Slug: t.Slug, + IsByoc: t.IsByoc, + CreatedAt: t.CreatedAt.Format(time.RFC3339), + MemberCount: t.MemberCount, + OwnerName: t.OwnerName, + OwnerEmail: t.OwnerEmail, + ActiveSandboxCount: t.ActiveSandboxCount, + ChannelCount: t.ChannelCount, + } + if t.DeletedAt != nil { + s := t.DeletedAt.Format(time.RFC3339) + r.DeletedAt = &s + } + resp[i] = r + } + + totalPages := (total + perPage - 1) / perPage + writeJSON(w, http.StatusOK, map[string]any{ + "teams": resp, + "total": total, + "page": page, + "per_page": perPage, + "total_pages": totalPages, + }) +} + +// AdminDeleteTeam handles DELETE /v1/admin/teams/{id} +// Soft-deletes a team and destroys all its active sandboxes. 
+func (h *teamHandler) AdminDeleteTeam(w http.ResponseWriter, r *http.Request) { + teamIDStr := chi.URLParam(r, "id") + + teamID, err := id.ParseTeamID(teamIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid team ID") + return + } + + if err := h.svc.AdminDeleteTeam(r.Context(), teamID); err != nil { + status, code, msg := serviceErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/internal/api/handlers_users.go b/internal/api/handlers_users.go index 5f9ef6a..f8a8b67 100644 --- a/internal/api/handlers_users.go +++ b/internal/api/handlers_users.go @@ -1,22 +1,27 @@ package api import ( + "fmt" "net/http" "strings" + "time" + "github.com/go-chi/chi/v5" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) type usersHandler struct { - db *db.Queries + db *db.Queries + svc *service.UserService } -func newUsersHandler(db *db.Queries) *usersHandler { - return &usersHandler{db: db} +func newUsersHandler(db *db.Queries, svc *service.UserService) *usersHandler { + return &usersHandler{db: db, svc: svc} } // Search handles GET /v1/users/search?email= @@ -50,3 +55,96 @@ func (h *usersHandler) Search(w http.ResponseWriter, r *http.Request) { } writeJSON(w, http.StatusOK, resp) } + +// AdminListUsers handles GET /v1/admin/users?page=1 +// Returns a paginated list of all users with team counts. 
+func (h *usersHandler) AdminListUsers(w http.ResponseWriter, r *http.Request) { + page := 1 + if p := r.URL.Query().Get("page"); p != "" { + if _, err := fmt.Sscanf(p, "%d", &page); err != nil || page < 1 { + page = 1 + } + } + const perPage = 100 + offset := int32((page - 1) * perPage) + + users, total, err := h.svc.AdminListUsers(r.Context(), perPage, offset) + if err != nil { + status, code, msg := serviceErrToHTTP(err) + writeError(w, status, code, msg) + return + } + + type adminUserResponse struct { + ID string `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + IsAdmin bool `json:"is_admin"` + Status string `json:"status"` + CreatedAt string `json:"created_at"` + TeamsJoined int32 `json:"teams_joined"` + TeamsOwned int32 `json:"teams_owned"` + } + + resp := make([]adminUserResponse, len(users)) + for i, u := range users { + resp[i] = adminUserResponse{ + ID: id.FormatUserID(u.ID), + Email: u.Email, + Name: u.Name, + IsAdmin: u.IsAdmin, + Status: u.Status, + CreatedAt: u.CreatedAt.Format(time.RFC3339), + TeamsJoined: u.TeamsJoined, + TeamsOwned: u.TeamsOwned, + } + } + + totalPages := (total + perPage - 1) / perPage + writeJSON(w, http.StatusOK, map[string]any{ + "users": resp, + "total": total, + "page": page, + "per_page": perPage, + "total_pages": totalPages, + }) +} + +// SetUserActive handles PUT /v1/admin/users/{id}/active +// Enables or disables a user account. Admins cannot deactivate themselves. 
+func (h *usersHandler) SetUserActive(w http.ResponseWriter, r *http.Request) { + ac := auth.MustFromContext(r.Context()) + userIDStr := chi.URLParam(r, "id") + + userID, err := id.ParseUserID(userIDStr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid user ID") + return + } + + var req struct { + Active bool `json:"active"` + } + if err := decodeJSON(r, &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } + + if ac.UserID == userID && !req.Active { + writeError(w, http.StatusBadRequest, "invalid_request", "cannot deactivate your own account") + return + } + + newStatus := "active" + if !req.Active { + newStatus = "disabled" + } + + if err := h.svc.SetUserStatus(r.Context(), userID, newStatus); err != nil { + httpStatus, code, msg := serviceErrToHTTP(err) + writeError(w, httpStatus, code, msg) + return + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/internal/api/helpers_ws.go b/internal/api/helpers_ws.go new file mode 100644 index 0000000..ec1c126 --- /dev/null +++ b/internal/api/helpers_ws.go @@ -0,0 +1,109 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/gorilla/websocket" + + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" +) + +// isWebSocketUpgrade returns true if the request is a WebSocket upgrade. +func isWebSocketUpgrade(r *http.Request) bool { + return strings.EqualFold(r.Header.Get("Upgrade"), "websocket") +} + +// ctxKeyAdminWS is a context key for flagging admin WS routes. +type ctxKeyAdminWS struct{} + +// setAdminWSFlag marks the context as an admin WebSocket route. +func setAdminWSFlag(ctx context.Context) context.Context { + return context.WithValue(ctx, ctxKeyAdminWS{}, true) +} + +// isAdminWSRoute checks if the request context was marked as admin WS. 
+func isAdminWSRoute(ctx context.Context) bool { + v, _ := ctx.Value(ctxKeyAdminWS{}).(bool) + return v +} + +// wsAuthMsg is the first message a browser WS client sends to authenticate. +type wsAuthMsg struct { + Type string `json:"type"` + Token string `json:"token"` +} + +// wsAuthenticate reads a JWT auth message from the WebSocket and returns the +// authenticated context. The caller must send this as the first message after +// connecting. +func wsAuthenticate(ctx context.Context, conn *websocket.Conn, jwtSecret []byte, queries *db.Queries) (auth.AuthContext, error) { + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + + var msg wsAuthMsg + if err := conn.ReadJSON(&msg); err != nil { + return auth.AuthContext{}, fmt.Errorf("read auth message: %w", err) + } + + conn.SetReadDeadline(time.Time{}) // clear deadline + + if msg.Type != "auth" || msg.Token == "" { + return auth.AuthContext{}, fmt.Errorf("first message must be type 'auth' with a token") + } + + claims, err := auth.VerifyJWT(jwtSecret, msg.Token) + if err != nil { + return auth.AuthContext{}, fmt.Errorf("invalid or expired token: %w", err) + } + + teamID, err := id.ParseTeamID(claims.TeamID) + if err != nil { + return auth.AuthContext{}, fmt.Errorf("invalid team ID in token: %w", err) + } + + userID, err := id.ParseUserID(claims.Subject) + if err != nil { + return auth.AuthContext{}, fmt.Errorf("invalid user ID in token: %w", err) + } + + user, err := queries.GetUserByID(ctx, userID) + if err != nil { + return auth.AuthContext{}, fmt.Errorf("user not found") + } + if user.Status != "active" { + return auth.AuthContext{}, fmt.Errorf("account deactivated") + } + + return auth.AuthContext{ + TeamID: teamID, + UserID: userID, + Email: claims.Email, + Name: claims.Name, + Role: claims.Role, + }, nil +} + +// wsAuthenticateAdmin performs WS-based auth and verifies admin status, +// returning an AuthContext with the platform team ID. 
+func wsAuthenticateAdmin(ctx context.Context, conn *websocket.Conn, jwtSecret []byte, queries *db.Queries) (auth.AuthContext, error) { + ac, err := wsAuthenticate(ctx, conn, jwtSecret, queries) + if err != nil { + return auth.AuthContext{}, err + } + + user, err := queries.GetUserByID(ctx, ac.UserID) + if err != nil { + return auth.AuthContext{}, fmt.Errorf("user not found") + } + if !user.IsAdmin { + return auth.AuthContext{}, fmt.Errorf("admin access required") + } + + ac.TeamID = id.PlatformTeamID + return ac, nil +} diff --git a/internal/api/host_monitor.go b/internal/api/host_monitor.go index 0779de8..763555e 100644 --- a/internal/api/host_monitor.go +++ b/internal/api/host_monitor.go @@ -8,10 +8,10 @@ import ( "connectrpc.com/connect" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) diff --git a/internal/api/metrics_sampler.go b/internal/api/metrics_sampler.go index 096c2a2..864a789 100644 --- a/internal/api/metrics_sampler.go +++ b/internal/api/metrics_sampler.go @@ -5,7 +5,7 @@ import ( "log/slog" "time" - "git.omukk.dev/wrenn/wrenn/internal/db" + "git.omukk.dev/wrenn/wrenn/pkg/db" ) // MetricsSampler records per-team sandbox resource usage to diff --git a/internal/api/middleware.go b/internal/api/middleware.go index cf9108f..b1c9f00 100644 --- a/internal/api/middleware.go +++ b/internal/api/middleware.go @@ -14,7 +14,7 @@ import ( "connectrpc.com/connect" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) type errorResponse struct { @@ -50,8 +50,12 @@ func agentErrToHTTP(err error) (int, string, string) { return 
http.StatusNotFound, "not_found", err.Error() case connect.CodeInvalidArgument: return http.StatusBadRequest, "invalid_request", err.Error() - case connect.CodeFailedPrecondition: + case connect.CodeFailedPrecondition, connect.CodeAlreadyExists: return http.StatusConflict, "conflict", err.Error() + case connect.CodePermissionDenied: + return http.StatusForbidden, "forbidden", err.Error() + case connect.CodeUnimplemented: + return http.StatusNotImplemented, "agent_error", err.Error() default: return http.StatusBadGateway, "agent_error", err.Error() } @@ -90,21 +94,25 @@ func serviceErrToHTTP(err error) (int, string, string) { } // Map well-known service error patterns. + // Return generic messages for most cases to avoid leaking internal details. switch { case strings.Contains(msg, "not found"): - return http.StatusNotFound, "not_found", msg - case strings.Contains(msg, "not running"), strings.Contains(msg, "not paused"): - return http.StatusConflict, "invalid_state", msg + return http.StatusNotFound, "not_found", "resource not found" + case strings.Contains(msg, "not running"): + return http.StatusConflict, "invalid_state", "resource is not running" + case strings.Contains(msg, "not paused"): + return http.StatusConflict, "invalid_state", "resource is not paused" case strings.Contains(msg, "conflict:"): - return http.StatusConflict, "conflict", msg + return http.StatusConflict, "conflict", strings.TrimPrefix(msg, "conflict: ") case strings.Contains(msg, "forbidden"): - return http.StatusForbidden, "forbidden", msg + return http.StatusForbidden, "forbidden", "forbidden" case strings.Contains(msg, "invalid or expired"): - return http.StatusUnauthorized, "unauthorized", msg + return http.StatusUnauthorized, "unauthorized", "invalid or expired credentials" case strings.Contains(msg, "invalid"): - return http.StatusBadRequest, "invalid_request", msg + return http.StatusBadRequest, "invalid_request", "invalid request" default: - return http.StatusInternalServerError, 
"internal_error", msg + slog.Error("unhandled service error", "error", err) + return http.StatusInternalServerError, "internal_error", "an internal error occurred" } } diff --git a/internal/api/middleware_admin.go b/internal/api/middleware_admin.go index 0df76d2..670c586 100644 --- a/internal/api/middleware_admin.go +++ b/internal/api/middleware_admin.go @@ -3,19 +3,47 @@ package api import ( "net/http" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) +// injectPlatformTeam overwrites the AuthContext's TeamID with the platform +// sentinel UUID. This lets existing team-scoped handlers (exec, files, pty, +// metrics) work unchanged under admin routes. Must run after requireAdmin. +func injectPlatformTeam() func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if _, ok := auth.FromContext(r.Context()); !ok { + // No auth context yet (WS upgrade); handler will inject platform team after WS auth. + next.ServeHTTP(w, r) + return + } + ac := auth.MustFromContext(r.Context()) + ac.TeamID = id.PlatformTeamID + ctx := auth.WithAuthContext(r.Context(), ac) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } +} + // requireAdmin validates that the authenticated user is a platform admin. // Must run after requireJWT (depends on AuthContext being present). // Re-validates against the DB — the JWT is_admin claim is for UI only; // the DB is the source of truth for admin access. +// WebSocket upgrade requests without auth context are passed through — +// admin WS handlers verify admin status after upgrade via wsAuthenticateAdmin. 
func requireAdmin(queries *db.Queries) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ac, ok := auth.FromContext(r.Context()) if !ok { + if isWebSocketUpgrade(r) { + ctx := r.Context() + ctx = setAdminWSFlag(ctx) + next.ServeHTTP(w, r.WithContext(ctx)) + return + } writeError(w, http.StatusUnauthorized, "unauthorized", "authentication required") return } diff --git a/internal/api/middleware_auth.go b/internal/api/middleware_auth.go index c8e2056..b671047 100644 --- a/internal/api/middleware_auth.go +++ b/internal/api/middleware_auth.go @@ -5,9 +5,9 @@ import ( "net/http" "strings" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // requireAPIKeyOrJWT accepts either X-API-Key header or Authorization: Bearer JWT. @@ -38,9 +38,12 @@ func requireAPIKeyOrJWT(queries *db.Queries, jwtSecret []byte) func(http.Handler return } - // Try JWT bearer token. + // Try JWT bearer token from Authorization header. + tokenStr := "" if header := r.Header.Get("Authorization"); strings.HasPrefix(header, "Bearer ") { - tokenStr := strings.TrimPrefix(header, "Bearer ") + tokenStr = strings.TrimPrefix(header, "Bearer ") + } + if tokenStr != "" { claims, err := auth.VerifyJWT(jwtSecret, tokenStr) if err != nil { slog.Warn("jwt auth failed", "error", err, "ip", r.RemoteAddr) @@ -59,6 +62,18 @@ func requireAPIKeyOrJWT(queries *db.Queries, jwtSecret []byte) func(http.Handler return } + // Verify user is still active in the database. 
+ user, err := queries.GetUserByID(r.Context(), userID) + if err != nil { + slog.Warn("jwt auth: failed to look up user", "user_id", claims.Subject, "error", err) + writeError(w, http.StatusUnauthorized, "unauthorized", "user not found") + return + } + if user.Status != "active" { + writeError(w, http.StatusForbidden, "account_deactivated", "your account has been deactivated — contact your administrator to regain access") + return + } + ctx := auth.WithAuthContext(r.Context(), auth.AuthContext{ TeamID: teamID, UserID: userID, @@ -70,7 +85,15 @@ func requireAPIKeyOrJWT(queries *db.Queries, jwtSecret []byte) func(http.Handler return } + // WebSocket upgrade requests may not carry auth headers (browsers + // cannot set custom headers on WS connections). Pass through — + // the WS handler authenticates via the first message after upgrade. + if isWebSocketUpgrade(r) { + next.ServeHTTP(w, r) + return + } + writeError(w, http.StatusUnauthorized, "unauthorized", "X-API-Key or Authorization: Bearer required") }) } -} +} \ No newline at end of file diff --git a/internal/api/middleware_hosttoken.go b/internal/api/middleware_hosttoken.go index 9f8cfc0..39ebdd9 100644 --- a/internal/api/middleware_hosttoken.go +++ b/internal/api/middleware_hosttoken.go @@ -3,8 +3,8 @@ package api import ( "net/http" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // requireHostToken validates the X-Host-Token header containing a host JWT, diff --git a/internal/api/middleware_jwt.go b/internal/api/middleware_jwt.go index 16852e6..b19c838 100644 --- a/internal/api/middleware_jwt.go +++ b/internal/api/middleware_jwt.go @@ -1,25 +1,37 @@ package api import ( + "log/slog" "net/http" "strings" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + 
"git.omukk.dev/wrenn/wrenn/pkg/id" ) -// requireJWT validates the Authorization: Bearer header, verifies the JWT -// signature and expiry, and stamps UserID + TeamID + Email into the request context. -func requireJWT(secret []byte) func(http.Handler) http.Handler { +// requireJWT validates a JWT from the Authorization: Bearer header. +// It also verifies the user is still active in the database. +// WebSocket upgrade requests without an Authorization header are passed through +// — WS handlers authenticate via the first message after upgrade. +func requireJWT(secret []byte, queries *db.Queries) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - header := r.Header.Get("Authorization") - if !strings.HasPrefix(header, "Bearer ") { + var tokenStr string + if header := r.Header.Get("Authorization"); strings.HasPrefix(header, "Bearer ") { + tokenStr = strings.TrimPrefix(header, "Bearer ") + } + if tokenStr == "" { + // WebSocket upgrade requests may not have an Authorization header + // (browsers cannot set custom headers on WS connections). Let them + // through — the handler authenticates via the first WS message. + if isWebSocketUpgrade(r) { + next.ServeHTTP(w, r) + return + } writeError(w, http.StatusUnauthorized, "unauthorized", "Authorization: Bearer required") return } - - tokenStr := strings.TrimPrefix(header, "Bearer ") claims, err := auth.VerifyJWT(secret, tokenStr) if err != nil { writeError(w, http.StatusUnauthorized, "unauthorized", "invalid or expired token") @@ -37,6 +49,18 @@ func requireJWT(secret []byte) func(http.Handler) http.Handler { return } + // Verify user is still active in the database. 
+ user, err := queries.GetUserByID(r.Context(), userID) + if err != nil { + slog.Warn("jwt auth: failed to look up user", "user_id", claims.Subject, "error", err) + writeError(w, http.StatusUnauthorized, "unauthorized", "user not found") + return + } + if user.Status != "active" { + writeError(w, http.StatusForbidden, "account_deactivated", "your account has been deactivated — contact your administrator to regain access") + return + } + ctx := auth.WithAuthContext(r.Context(), auth.AuthContext{ TeamID: teamID, UserID: userID, diff --git a/internal/api/openapi.yaml b/internal/api/openapi.yaml index 0b4fe74..f4c369d 100644 --- a/internal/api/openapi.yaml +++ b/internal/api/openapi.yaml @@ -1,6 +1,6 @@ openapi: "3.1.0" info: - title: Wrenn Sandbox API + title: Wrenn API description: MicroVM-based code execution platform API. version: "0.1.0" @@ -16,6 +16,10 @@ paths: summary: Create a new account operationId: signup tags: [auth] + description: | + Creates an inactive user account and sends an activation email. + The user must activate their account within 30 minutes. + Does not return a JWT — the user must activate first, then sign in. 
requestBody: required: true content: @@ -24,11 +28,11 @@ paths: $ref: "#/components/schemas/SignupRequest" responses: "201": - description: Account created + description: Account created, activation email sent content: application/json: schema: - $ref: "#/components/schemas/AuthResponse" + $ref: "#/components/schemas/SignupResponse" "400": description: Invalid request (bad email, short password) content: @@ -36,7 +40,39 @@ paths: schema: $ref: "#/components/schemas/Error" "409": - description: Email already registered + description: Email already registered or signup cooldown active + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/auth/activate: + post: + summary: Activate account via email token + operationId: activate + tags: [auth] + description: | + Consumes the activation token sent via email and activates the user account. + Creates a default team and returns a JWT to log the user in. + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [token] + properties: + token: + type: string + responses: + "200": + description: Account activated, JWT issued + content: + application/json: + schema: + $ref: "#/components/schemas/AuthResponse" + "400": + description: Invalid or expired token content: application/json: schema: @@ -175,6 +211,252 @@ paths: "302": description: Redirect to frontend with token or error + /v1/me: + get: + summary: Get current user profile + operationId: getMe + tags: [account] + security: + - bearerAuth: [] + responses: + "200": + description: User profile + content: + application/json: + schema: + $ref: "#/components/schemas/MeResponse" + + patch: + summary: Update display name + operationId: updateName + tags: [account] + security: + - bearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [name] + properties: + name: + type: string + minLength: 1 + maxLength: 100 + responses: + "200": + description: 
Name updated, new JWT issued + content: + application/json: + schema: + $ref: "#/components/schemas/AuthResponse" + "400": + description: Invalid name + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + delete: + summary: Delete current account + operationId: deleteAccount + tags: [account] + security: + - bearerAuth: [] + description: | + Soft-deletes the account (sets status=deleted, deleted_at=now). + The account is permanently removed after 15 days. Blocked if the user + owns any team that has other members. + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [confirmation] + properties: + confirmation: + type: string + description: Must match the user's email address (case-insensitive) + responses: + "204": + description: Account scheduled for deletion + "400": + description: Confirmation does not match email + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: User owns teams with other members + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/me/password: + post: + summary: Change or add password + operationId: changePassword + tags: [account] + security: + - bearerAuth: [] + description: | + For users with an existing password: requires `current_password` and `new_password`. + For OAuth-only users adding a password: requires `new_password` and `confirm_password`. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ChangePasswordRequest" + responses: + "204": + description: Password updated + "400": + description: Invalid request (short password, mismatch, etc.) 
+ content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "401": + description: Current password is incorrect + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/me/password/reset: + post: + summary: Request a password reset email + operationId: requestPasswordReset + tags: [account] + description: | + Sends a password reset link to the given email. Always returns 200 + regardless of whether the email exists, to prevent account enumeration. + The reset token expires in 15 minutes. + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [email] + properties: + email: + type: string + format: email + responses: + "204": + description: Request accepted (email sent if account exists) + + /v1/me/password/reset/confirm: + post: + summary: Confirm password reset + operationId: confirmPasswordReset + tags: [account] + description: | + Consumes a password reset token and sets a new password. The token is + single-use and expires after 15 minutes. + requestBody: + required: true + content: + application/json: + schema: + type: object + required: [token, new_password] + properties: + token: + type: string + description: Raw reset token from the email link + new_password: + type: string + minLength: 8 + responses: + "204": + description: Password reset successful + "400": + description: Invalid or expired token, or password too short + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/me/providers/{provider}/connect: + parameters: + - name: provider + in: path + required: true + schema: + type: string + enum: [github] + description: OAuth provider name + + get: + summary: Initiate OAuth provider link + operationId: connectProvider + tags: [account] + security: + - bearerAuth: [] + description: | + Sets OAuth state and link cookies, then returns the provider's + authorization URL. The frontend navigates to this URL to start the + OAuth flow. 
On callback, the provider is linked to the current account + (not a new registration). + responses: + "200": + description: Authorization URL + content: + application/json: + schema: + type: object + properties: + auth_url: + type: string + format: uri + "404": + description: Provider not found or not configured + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/me/providers/{provider}: + parameters: + - name: provider + in: path + required: true + schema: + type: string + enum: [github] + description: OAuth provider name + + delete: + summary: Disconnect an OAuth provider + operationId: disconnectProvider + tags: [account] + security: + - bearerAuth: [] + description: | + Unlinks the OAuth provider from the current account. Blocked if this + is the user's only login method (no password and no other providers). + responses: + "204": + description: Provider disconnected + "400": + description: Cannot disconnect last login method + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "404": + description: Provider not connected + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /v1/api-keys: post: summary: Create an API key @@ -393,7 +675,7 @@ paths: - bearerAuth: [] description: | Owner only. Soft-deletes the team and destroys all running/paused/starting - sandboxes. All DB records are preserved. The team slug is permanently reserved. + capsules. All DB records are preserved. The team slug is permanently reserved.
responses: "204": description: Team deleted @@ -570,11 +852,11 @@ paths: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes: + /v1/capsules: post: - summary: Create a sandbox - operationId: createSandbox - tags: [sandboxes] + summary: Create a capsule + operationId: createCapsule + tags: [capsules] security: - apiKeyAuth: [] requestBody: @@ -582,14 +864,14 @@ content: application/json: schema: - $ref: "#/components/schemas/CreateSandboxRequest" + $ref: "#/components/schemas/CreateCapsuleRequest" responses: "201": - description: Sandbox created + description: Capsule created content: application/json: schema: - $ref: "#/components/schemas/Sandbox" + $ref: "#/components/schemas/Capsule" "502": description: Host agent error content: @@ -598,26 +880,26 @@ $ref: "#/components/schemas/Error" get: - summary: List sandboxes for your team - operationId: listSandboxes - tags: [sandboxes] + summary: List capsules for your team + operationId: listCapsules + tags: [capsules] security: - apiKeyAuth: [] responses: "200": - description: List of sandboxes + description: List of capsules content: application/json: schema: type: array items: - $ref: "#/components/schemas/Sandbox" + $ref: "#/components/schemas/Capsule" - /v1/sandboxes/stats: + /v1/capsules/stats: get: - summary: Get sandbox usage stats for your team - operationId: getSandboxStats - tags: [sandboxes] + summary: Get capsule usage stats for your team + operationId: getCapsuleStats + tags: [capsules] security: - apiKeyAuth: [] parameters: @@ -631,15 +913,15 @@ description: Time window for the time-series data.
responses: "200": - description: Sandbox stats for the team + description: Capsule stats for the team content: application/json: schema: - $ref: "#/components/schemas/SandboxStats" + $ref: "#/components/schemas/CapsuleStats" "400": $ref: "#/components/responses/BadRequest" - /v1/sandboxes/{id}: + /v1/capsules/{id}: parameters: - name: id in: path @@ -648,36 +930,36 @@ paths: type: string get: - summary: Get sandbox details - operationId: getSandbox - tags: [sandboxes] + summary: Get capsule details + operationId: getCapsule + tags: [capsules] security: - apiKeyAuth: [] responses: "200": - description: Sandbox details + description: Capsule details content: application/json: schema: - $ref: "#/components/schemas/Sandbox" + $ref: "#/components/schemas/Capsule" "404": - description: Sandbox not found + description: Capsule not found content: application/json: schema: $ref: "#/components/schemas/Error" delete: - summary: Destroy a sandbox - operationId: destroySandbox - tags: [sandboxes] + summary: Destroy a capsule + operationId: destroyCapsule + tags: [capsules] security: - apiKeyAuth: [] responses: "204": - description: Sandbox destroyed + description: Capsule destroyed - /v1/sandboxes/{id}/exec: + /v1/capsules/{id}/exec: parameters: - name: id in: path @@ -688,7 +970,7 @@ paths: post: summary: Execute a command operationId: execCommand - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] requestBody: @@ -699,59 +981,31 @@ paths: $ref: "#/components/schemas/ExecRequest" responses: "200": - description: Command output + description: Command output (foreground exec) content: application/json: schema: $ref: "#/components/schemas/ExecResponse" + "202": + description: Background process started + content: + application/json: + schema: + $ref: "#/components/schemas/BackgroundExecResponse" "404": - description: Sandbox not found + description: Capsule not found content: application/json: schema: $ref: "#/components/schemas/Error" "409": - description: Sandbox 
not running + description: Capsule not running content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/ping: - parameters: - - name: id - in: path - required: true - schema: - type: string - - post: - summary: Reset sandbox inactivity timer - operationId: pingSandbox - tags: [sandboxes] - security: - - apiKeyAuth: [] - description: | - Resets the last_active_at timestamp for a running sandbox, preventing - the auto-pause TTL from expiring. Use this as a keepalive for sandboxes - that are idle but should remain running. - responses: - "204": - description: Ping acknowledged, inactivity timer reset - "404": - description: Sandbox not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - "409": - description: Sandbox not running - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - - /v1/sandboxes/{id}/metrics: + /v1/capsules/{id}/processes: parameters: - name: id in: path @@ -760,22 +1014,172 @@ paths: type: string get: - summary: Get per-sandbox resource metrics - operationId: getSandboxMetrics - tags: [sandboxes] + summary: List running processes + operationId: listProcesses + tags: [capsules] + security: + - apiKeyAuth: [] + description: | + Returns all running processes inside the capsule, including background + processes and any processes started by templates or init scripts. 
+ responses: + "200": + description: Process list + content: + application/json: + schema: + $ref: "#/components/schemas/ProcessListResponse" + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/processes/{selector}: + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: selector + in: path + required: true + description: Process PID (numeric) or tag (string) + schema: + type: string + + delete: + summary: Kill a process + operationId: killProcess + tags: [capsules] + security: + - apiKeyAuth: [] + parameters: + - name: signal + in: query + required: false + description: Signal to send (SIGKILL or SIGTERM, default SIGKILL) + schema: + type: string + enum: [SIGKILL, SIGTERM] + default: SIGKILL + responses: + "204": + description: Process killed + "404": + description: Capsule or process not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/processes/{selector}/stream: + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: selector + in: path + required: true + description: Process PID (numeric) or tag (string) + schema: + type: string + + get: + summary: Stream process output via WebSocket + operationId: connectProcess + tags: [capsules] + security: + - apiKeyAuth: [] + description: | + Opens a WebSocket connection to stream stdout/stderr from a running + background process. The selector can be a numeric PID or a string tag. 
+ + Server sends JSON messages: + - `{"type": "start", "pid": 42}` — connected to process + - `{"type": "stdout", "data": "..."}` — stdout output + - `{"type": "stderr", "data": "..."}` — stderr output + - `{"type": "exit", "exit_code": 0}` — process exited + - `{"type": "error", "data": "..."}` — error message + responses: + "101": + description: WebSocket upgrade + + /v1/capsules/{id}/ping: + parameters: + - name: id + in: path + required: true + schema: + type: string + + post: + summary: Reset capsule inactivity timer + operationId: pingCapsule + tags: [capsules] + security: + - apiKeyAuth: [] + description: | + Resets the last_active_at timestamp for a running capsule, preventing + the auto-pause TTL from expiring. Use this as a keepalive for capsules + that are idle but should remain running. + responses: + "204": + description: Ping acknowledged, inactivity timer reset + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/metrics: + parameters: + - name: id + in: path + required: true + schema: + type: string + + get: + summary: Get per-capsule resource metrics + operationId: getCapsuleMetrics + tags: [capsules] security: - apiKeyAuth: [] - bearerAuth: [] description: | - Returns time-series CPU, memory, and disk metrics for a sandbox. + Returns time-series CPU, memory, and disk metrics for a capsule. Three tiers are available with different granularity and retention: - `10m`: 500ms samples, last 10 minutes - `2h`: 30-second averages, last 2 hours - `24h`: 5-minute averages, last 24 hours - For running sandboxes, data comes from the host agent's in-memory - ring buffer. For paused sandboxes, data is read from persisted - snapshots in the database. Stopped/destroyed sandboxes return 404.
+ For running capsules, data comes from the host agent's in-memory + ring buffer. For paused capsules, data is read from persisted + snapshots in the database. Stopped/destroyed capsules return 404. parameters: - name: range in: query @@ -791,7 +1195,7 @@ content: application/json: schema: - $ref: "#/components/schemas/SandboxMetrics" + $ref: "#/components/schemas/CapsuleMetrics" "400": description: Invalid range parameter content: @@ -799,13 +1203,13 @@ schema: $ref: "#/components/schemas/Error" "404": - description: Sandbox not found or metrics not available + description: Capsule not found or metrics not available content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/pause: + /v1/capsules/{id}/pause: parameters: - name: id in: path @@ -814,30 +1218,30 @@ type: string post: - summary: Pause a running sandbox - operationId: pauseSandbox - tags: [sandboxes] + summary: Pause a running capsule + operationId: pauseCapsule + tags: [capsules] security: - apiKeyAuth: [] description: | - Takes a snapshot of the sandbox (VM state + memory + rootfs), then - destroys all running resources. The sandbox exists only as files on + Takes a snapshot of the capsule (VM state + memory + rootfs), then + destroys all running resources. The capsule exists only as files on disk and can be resumed later.
responses: "200": - description: Sandbox paused (snapshot taken, resources released) + description: Capsule paused (snapshot taken, resources released) content: application/json: schema: - $ref: "#/components/schemas/Sandbox" + $ref: "#/components/schemas/Capsule" "409": - description: Sandbox not running + description: Capsule not running content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/resume: + /v1/capsules/{id}/resume: parameters: - name: id in: path @@ -846,24 +1250,24 @@ type: string post: - summary: Resume a paused sandbox - operationId: resumeSandbox - tags: [sandboxes] + summary: Resume a paused capsule + operationId: resumeCapsule + tags: [capsules] security: - apiKeyAuth: [] description: | - Restores a paused sandbox from its snapshot using UFFD for lazy + Restores a paused capsule from its snapshot using UFFD for lazy memory loading. Boots a fresh Firecracker process, sets up a new network slot, and waits for envd to become ready. responses: "200": - description: Sandbox resumed (new VM booted from snapshot) + description: Capsule resumed (new VM booted from snapshot) content: application/json: schema: - $ref: "#/components/schemas/Sandbox" + $ref: "#/components/schemas/Capsule" "409": - description: Sandbox not paused + description: Capsule not paused content: application/json: schema: @@ -877,9 +1281,9 @@ paths: security: - apiKeyAuth: [] description: | - Pauses a running sandbox, takes a full snapshot, copies the snapshot + Pauses a running capsule, takes a full snapshot, copies the snapshot files to the images directory as a reusable template, then destroys - the sandbox. The template can be used to create new sandboxes. + the capsule. The template can be used to create new capsules.
parameters: - name: overwrite in: query @@ -902,7 +1306,7 @@ paths: schema: $ref: "#/components/schemas/Template" "409": - description: Name already exists or sandbox not running + description: Name already exists or capsule not running content: application/json: schema: @@ -957,7 +1361,7 @@ paths: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/files/write: + /v1/capsules/{id}/files/write: parameters: - name: id in: path @@ -968,7 +1372,7 @@ paths: post: summary: Upload a file operationId: uploadFile - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] requestBody: @@ -981,7 +1385,7 @@ paths: properties: path: type: string - description: Absolute destination path inside the sandbox + description: Absolute destination path inside the capsule file: type: string format: binary @@ -990,7 +1394,7 @@ paths: "204": description: File uploaded "409": - description: Sandbox not running + description: Capsule not running content: application/json: schema: @@ -1002,7 +1406,7 @@ paths: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/files/read: + /v1/capsules/{id}/files/read: parameters: - name: id in: path @@ -1013,7 +1417,7 @@ paths: post: summary: Download a file operationId: downloadFile - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] requestBody: @@ -1031,13 +1435,129 @@ paths: type: string format: binary "404": - description: Sandbox or file not found + description: Capsule or file not found content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/exec/stream: + /v1/capsules/{id}/files/list: + parameters: + - name: id + in: path + required: true + schema: + type: string + + post: + summary: List directory contents + operationId: listDir + tags: [capsules] + security: + - apiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ListDirRequest" + responses: + "200": + description: Directory listing + content: + 
application/json: + schema: + $ref: "#/components/schemas/ListDirResponse" + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/files/mkdir: + parameters: + - name: id + in: path + required: true + schema: + type: string + + post: + summary: Create a directory + operationId: makeDir + tags: [capsules] + security: + - apiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/MakeDirRequest" + responses: + "200": + description: Directory created + content: + application/json: + schema: + $ref: "#/components/schemas/MakeDirResponse" + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/files/remove: + parameters: + - name: id + in: path + required: true + schema: + type: string + + post: + summary: Remove a file or directory + operationId: removePath + tags: [capsules] + security: + - apiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/RemoveRequest" + responses: + "204": + description: File or directory removed + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/exec/stream: parameters: - name: id in: path @@ -1048,7 +1568,7 @@ paths: get: summary: Stream command execution via WebSocket operationId: execStream - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] description: | @@ -1078,19 +1598,96 @@ paths: 
"101": description: WebSocket upgrade "404": - description: Sandbox not found + description: Capsule not found content: application/json: schema: $ref: "#/components/schemas/Error" "409": - description: Sandbox not running + description: Capsule not running content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/files/stream/write: + /v1/capsules/{id}/pty: + parameters: + - name: id + in: path + required: true + schema: + type: string + + get: + summary: Interactive PTY session via WebSocket + operationId: ptySession + tags: [capsules] + security: + - apiKeyAuth: [] + description: | + Opens a WebSocket connection for an interactive PTY (terminal) session. + Supports creating new sessions, sending input, resizing, killing, and + reconnecting to existing sessions. + + **Client sends** (first message — start a new PTY): + ```json + { + "type": "start", + "cmd": "/bin/bash", + "args": [], + "cols": 80, + "rows": 24, + "envs": {"TERM": "xterm-256color"}, + "cwd": "/home/user", + "user": "user" + } + ``` + All fields except `type` are optional. Defaults: cmd="/bin/bash", cols=80, rows=24. + + **Client sends** (first message — reconnect to existing PTY): + ```json + {"type": "connect", "tag": "pty-abc123de"} + ``` + + **Client sends** (after session is established): + ```json + {"type": "input", "data": ""} + {"type": "resize", "cols": 120, "rows": 40} + {"type": "kill"} + ``` + + **Server sends**: + ```json + {"type": "started", "tag": "pty-abc123de", "pid": 42} + {"type": "output", "data": ""} + {"type": "exit", "exit_code": 0} + {"type": "error", "data": "description", "fatal": true} + {"type": "ping"} + ``` + + PTY data (input and output) is base64-encoded because it contains raw + terminal bytes (escape sequences, control codes) that are not valid UTF-8. + + Sessions persist across WebSocket disconnections — the process keeps + running in the capsule. Use the `tag` from the "started" response to + reconnect later. 
+ responses: + "101": + description: WebSocket upgrade + "404": + description: Capsule not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + "409": + description: Capsule not running + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /v1/capsules/{id}/files/stream/write: parameters: - name: id in: path @@ -1101,11 +1698,11 @@ paths: post: summary: Upload a file (streaming) operationId: streamUploadFile - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] description: | - Streams file content to the sandbox without buffering in memory. + Streams file content to the capsule without buffering in memory. Suitable for large files. Uses the same multipart/form-data format as the non-streaming upload endpoint. requestBody: @@ -1118,7 +1715,7 @@ paths: properties: path: type: string - description: Absolute destination path inside the sandbox + description: Absolute destination path inside the capsule file: type: string format: binary @@ -1127,19 +1724,19 @@ paths: "204": description: File uploaded "404": - description: Sandbox not found + description: Capsule not found content: application/json: schema: $ref: "#/components/schemas/Error" "409": - description: Sandbox not running + description: Capsule not running content: application/json: schema: $ref: "#/components/schemas/Error" - /v1/sandboxes/{id}/files/stream/read: + /v1/capsules/{id}/files/stream/read: parameters: - name: id in: path @@ -1150,11 +1747,11 @@ paths: post: summary: Download a file (streaming) operationId: streamDownloadFile - tags: [sandboxes] + tags: [capsules] security: - apiKeyAuth: [] description: | - Streams file content from the sandbox without buffering in memory. + Streams file content from the capsule without buffering in memory. Suitable for large files. Returns raw bytes with chunked transfer encoding. 
requestBody: required: true @@ -1171,13 +1768,13 @@ paths: type: string format: binary "404": - description: Sandbox or file not found + description: Capsule or file not found content: application/json: schema: $ref: "#/components/schemas/Error" "409": - description: Sandbox not running + description: Capsule not running content: application/json: schema: @@ -1275,14 +1872,14 @@ paths: description: | Admins can delete any host. Team owners and admins can delete BYOC hosts belonging to their team. Without `?force=true`, returns 409 if the host - has active sandboxes. With `?force=true`, destroys all sandboxes first. + has active capsules. With `?force=true`, destroys all capsules first. parameters: - name: force in: query required: false schema: type: boolean - description: If true, destroy all sandboxes on the host before deleting. + description: If true, destroy all capsules on the host before deleting. responses: "204": description: Host deleted @@ -1293,11 +1890,11 @@ paths: schema: $ref: "#/components/schemas/Error" "409": - description: Host has active sandboxes (only when force is not set) + description: Host has active capsules (only when force is not set) content: application/json: schema: - $ref: "#/components/schemas/HostHasSandboxesError" + $ref: "#/components/schemas/HostHasCapsulesError" /v1/hosts/{id}/token: parameters: @@ -1450,7 +2047,7 @@ paths: security: - bearerAuth: [] description: | - Returns the list of sandbox IDs that would be destroyed if the host + Returns the list of capsule IDs that would be destroyed if the host were deleted with `?force=true`. No state is modified. responses: "200": @@ -1723,7 +2320,7 @@ components: type: apiKey in: header name: X-API-Key - description: API key for sandbox lifecycle operations. Create via POST /v1/api-keys. + description: API key for capsule lifecycle operations. Create via POST /v1/api-keys. 
bearerAuth: type: http @@ -1762,6 +2359,13 @@ components: password: type: string + SignupResponse: + type: object + properties: + message: + type: string + description: Confirmation message instructing user to check email + AuthResponse: type: object properties: @@ -1808,7 +2412,7 @@ components: description: Full plaintext key. Only returned on creation, never again. nullable: true - CreateSandboxRequest: + CreateCapsuleRequest: type: object properties: template: @@ -1824,11 +2428,11 @@ components: type: integer default: 0 description: > - Auto-pause TTL in seconds. The sandbox is automatically paused + Auto-pause TTL in seconds. The capsule is automatically paused after this duration of inactivity (no exec or ping). 0 means no auto-pause. - SandboxStats: + CapsuleStats: type: object properties: range: @@ -1879,7 +2483,7 @@ components: items: type: integer - Sandbox: + Capsule: type: object properties: id: @@ -1920,7 +2524,7 @@ components: properties: sandbox_id: type: string - description: ID of the running sandbox to snapshot. + description: ID of the running capsule to snapshot. name: type: string description: Name for the snapshot template. Auto-generated if omitted. @@ -1959,6 +2563,56 @@ components: timeout_sec: type: integer default: 30 + description: Timeout in seconds (foreground exec only, default 30) + background: + type: boolean + default: false + description: If true, starts the process in the background and returns immediately with a PID and tag (HTTP 202) + tag: + type: string + description: Optional user-chosen tag for the background process. Auto-generated if omitted. Only used when background is true. 
+ envs: + type: object + additionalProperties: + type: string + description: Environment variables for the process (background exec only) + cwd: + type: string + description: Working directory for the process (background exec only) + + BackgroundExecResponse: + type: object + properties: + sandbox_id: + type: string + cmd: + type: string + pid: + type: integer + tag: + type: string + + ProcessEntry: + type: object + properties: + pid: + type: integer + tag: + type: string + cmd: + type: string + args: + type: array + items: + type: string + + ProcessListResponse: + type: object + properties: + processes: + type: array + items: + $ref: "#/components/schemas/ProcessEntry" ExecResponse: type: object @@ -1986,7 +2640,79 @@ components: properties: path: type: string - description: Absolute file path inside the sandbox + description: Absolute file path inside the capsule + + ListDirRequest: + type: object + required: [path] + properties: + path: + type: string + description: Directory path inside the capsule + depth: + type: integer + default: 1 + description: Recursion depth (0 = non-recursive, 1 = immediate children) + + ListDirResponse: + type: object + properties: + entries: + type: array + items: + $ref: "#/components/schemas/FileEntry" + + FileEntry: + type: object + properties: + name: + type: string + path: + type: string + type: + type: string + enum: [file, directory, symlink] + size: + type: integer + format: int64 + mode: + type: integer + permissions: + type: string + description: Human-readable permissions (e.g. 
"-rwxr-xr-x") + owner: + type: string + group: + type: string + modified_at: + type: integer + format: int64 + description: Unix timestamp (seconds) + symlink_target: + type: string + nullable: true + + MakeDirRequest: + type: object + required: [path] + properties: + path: + type: string + description: Directory path to create inside the capsule + + MakeDirResponse: + type: object + properties: + entry: + $ref: "#/components/schemas/FileEntry" + + RemoveRequest: + type: object + required: [path] + properties: + path: + type: string + description: Path to remove inside the capsule CreateHostRequest: type: object @@ -2124,9 +2850,9 @@ components: type: array items: type: string - description: IDs of sandboxes that would be destroyed on force-delete. + description: IDs of capsules that would be destroyed on force-delete. - HostHasSandboxesError: + HostHasCapsulesError: type: object properties: error: @@ -2141,7 +2867,7 @@ components: type: array items: type: string - description: IDs of active sandboxes blocking deletion. + description: IDs of active capsules blocking deletion. AddTagRequest: type: object @@ -2205,7 +2931,7 @@ components: items: $ref: "#/components/schemas/TeamMember" - SandboxMetrics: + CapsuleMetrics: type: object properties: sandbox_id: @@ -2343,6 +3069,37 @@ components: nullable: true description: Webhook secret. Only returned on creation, never again. + MeResponse: + type: object + properties: + name: + type: string + email: + type: string + format: email + has_password: + type: boolean + description: Whether the user has a password set (false for OAuth-only accounts) + providers: + type: array + items: + type: string + description: List of linked OAuth provider names (e.g. 
["github"]) + + ChangePasswordRequest: + type: object + required: [new_password] + properties: + current_password: + type: string + description: Required when changing an existing password + new_password: + type: string + minLength: 8 + confirm_password: + type: string + description: Required when adding a password to an OAuth-only account (must match new_password) + Error: type: object properties: diff --git a/internal/api/server.go b/internal/api/server.go index aeb3625..9e81340 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -9,14 +9,16 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/audit" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/auth/oauth" - "git.omukk.dev/wrenn/wrenn/internal/channels" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" - "git.omukk.dev/wrenn/wrenn/internal/scheduler" - "git.omukk.dev/wrenn/wrenn/internal/service" + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/auth/oauth" + "git.omukk.dev/wrenn/wrenn/pkg/channels" + "git.omukk.dev/wrenn/wrenn/pkg/cpextension" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/scheduler" + "git.omukk.dev/wrenn/wrenn/pkg/service" ) //go:embed openapi.yaml @@ -26,9 +28,12 @@ var openapiYAML []byte type Server struct { router chi.Router BuildSvc *service.BuildService + version string } // New constructs the chi router and registers all routes. +// Extensions are called after core routes are registered, allowing enterprise +// or third-party code to add routes and middleware. 
func New( queries *db.Queries, pool *lifecycle.HostClientPool, @@ -41,6 +46,10 @@ func New( ca *auth.CA, al *audit.AuditLogger, channelSvc *channels.Service, + mailer email.Mailer, + extensions []cpextension.Extension, + sctx cpextension.ServerContext, + version string, ) *Server { r := chi.NewRouter() r.Use(requestLogger()) @@ -51,27 +60,39 @@ func New( templateSvc := &service.TemplateService{DB: queries} hostSvc := &service.HostService{DB: queries, Redis: rdb, JWT: jwtSecret, Pool: pool, CA: ca} teamSvc := &service.TeamService{DB: queries, Pool: pgPool, HostPool: pool} + userSvc := &service.UserService{DB: queries, SandboxSvc: sandboxSvc} auditSvc := &service.AuditService{DB: queries} statsSvc := &service.StatsService{DB: queries, Pool: pgPool} buildSvc := &service.BuildService{DB: queries, Redis: rdb, Pool: pool, Scheduler: sched} sandbox := newSandboxHandler(sandboxSvc, al) exec := newExecHandler(queries, pool) - execStream := newExecStreamHandler(queries, pool) + execStream := newExecStreamHandler(queries, pool, jwtSecret) files := newFilesHandler(queries, pool) filesStream := newFilesStreamHandler(queries, pool) + fsH := newFSHandler(queries, pool) snapshots := newSnapshotHandler(templateSvc, queries, pool, al) - authH := newAuthHandler(queries, pgPool, jwtSecret) + authH := newAuthHandler(queries, pgPool, jwtSecret, mailer, rdb, oauthRedirectURL) oauthH := newOAuthHandler(queries, pgPool, jwtSecret, oauthRegistry, oauthRedirectURL) apiKeys := newAPIKeyHandler(apiKeySvc, al) hostH := newHostHandler(hostSvc, queries, al) - teamH := newTeamHandler(teamSvc, al) - usersH := newUsersHandler(queries) + teamH := newTeamHandler(teamSvc, al, mailer) + usersH := newUsersHandler(queries, userSvc) auditH := newAuditHandler(auditSvc) statsH := newStatsHandler(statsSvc) metricsH := newSandboxMetricsHandler(queries, pool) buildH := newBuildHandler(buildSvc, queries, pool) channelH := newChannelHandler(channelSvc, al) + ptyH := newPtyHandler(queries, pool, jwtSecret) + 
processH := newProcessHandler(queries, pool, jwtSecret) + adminCapsules := newAdminCapsuleHandler(sandboxSvc, queries, pool, al) + meH := newMeHandler(queries, pgPool, rdb, jwtSecret, mailer, oauthRegistry, oauthRedirectURL, teamSvc) + + // Health check. + r.Get("/health", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `{"status":"ok","version":%q}`, version) + }) // OpenAPI spec and docs. r.Get("/openapi.yaml", serveOpenAPI) @@ -80,15 +101,31 @@ func New( // Unauthenticated auth endpoints. r.Post("/v1/auth/signup", authH.Signup) r.Post("/v1/auth/login", authH.Login) + r.Post("/v1/auth/activate", authH.Activate) r.Get("/auth/oauth/{provider}", oauthH.Redirect) r.Get("/auth/oauth/{provider}/callback", oauthH.Callback) + // Unauthenticated: password reset request and confirmation. + r.Post("/v1/me/password/reset", meH.RequestPasswordReset) + r.Post("/v1/me/password/reset/confirm", meH.ConfirmPasswordReset) + + // JWT-authenticated: self-service account management. + r.Route("/v1/me", func(r chi.Router) { + r.Use(requireJWT(jwtSecret, queries)) + r.Get("/", meH.GetMe) + r.Patch("/", meH.UpdateName) + r.Post("/password", meH.ChangePassword) + r.Get("/providers/{provider}/connect", meH.ConnectProvider) + r.Delete("/providers/{provider}", meH.DisconnectProvider) + r.Delete("/", meH.DeleteAccount) + }) + // JWT-authenticated: switch active team. - r.With(requireJWT(jwtSecret)).Post("/v1/auth/switch-team", authH.SwitchTeam) + r.With(requireJWT(jwtSecret, queries)).Post("/v1/auth/switch-team", authH.SwitchTeam) // JWT-authenticated: API key management. r.Route("/v1/api-keys", func(r chi.Router) { - r.Use(requireJWT(jwtSecret)) + r.Use(requireJWT(jwtSecret, queries)) r.Post("/", apiKeys.Create) r.Get("/", apiKeys.List) r.Delete("/{id}", apiKeys.Delete) @@ -96,7 +133,7 @@ func New( // JWT-authenticated: team management. 
r.Route("/v1/teams", func(r chi.Router) { - r.Use(requireJWT(jwtSecret)) + r.Use(requireJWT(jwtSecret, queries)) r.Get("/", teamH.List) r.Post("/", teamH.Create) r.Route("/{id}", func(r chi.Router) { @@ -112,10 +149,12 @@ func New( }) // JWT-authenticated: user search (for add-member UI). - r.With(requireJWT(jwtSecret)).Get("/v1/users/search", usersH.Search) + r.With(requireJWT(jwtSecret, queries)).Get("/v1/users/search", usersH.Search) - // Sandbox lifecycle: accepts API key or JWT bearer token. - r.Route("/v1/sandboxes", func(r chi.Router) { + // Capsule lifecycle: accepts API key or JWT bearer token. + // WebSocket upgrade requests without auth headers are passed through by + // requireAPIKeyOrJWT — the WS handlers authenticate via first message. + r.Route("/v1/capsules", func(r chi.Router) { r.Use(requireAPIKeyOrJWT(queries, jwtSecret)) r.Post("/", sandbox.Create) r.Get("/", sandbox.List) @@ -133,7 +172,14 @@ func New( r.Post("/files/read", files.Download) r.Post("/files/stream/write", filesStream.StreamUpload) r.Post("/files/stream/read", filesStream.StreamDownload) + r.Post("/files/list", fsH.ListDir) + r.Post("/files/mkdir", fsH.MakeDir) + r.Post("/files/remove", fsH.Remove) r.Get("/metrics", metricsH.GetMetrics) + r.Get("/pty", ptyH.PtySession) + r.Get("/processes", processH.ListProcesses) + r.Delete("/processes/{selector}", processH.KillProcess) + r.Get("/processes/{selector}/stream", processH.ConnectProcess) }) }) @@ -158,7 +204,7 @@ func New( // JWT-authenticated: host CRUD and tags. r.Group(func(r chi.Router) { - r.Use(requireJWT(jwtSecret)) + r.Use(requireJWT(jwtSecret, queries)) r.Post("/", hostH.Create) r.Get("/", hostH.List) r.Route("/{id}", func(r chi.Router) { @@ -175,7 +221,7 @@ func New( // JWT-authenticated: notification channels. 
r.Route("/v1/channels", func(r chi.Router) { - r.Use(requireJWT(jwtSecret)) + r.Use(requireJWT(jwtSecret, queries)) r.Post("/", channelH.Create) r.Get("/", channelH.List) r.Post("/test", channelH.Test) @@ -188,22 +234,51 @@ func New( }) // JWT-authenticated: audit log. - r.With(requireJWT(jwtSecret)).Get("/v1/audit-logs", auditH.List) + r.With(requireJWT(jwtSecret, queries)).Get("/v1/audit-logs", auditH.List) // Platform admin routes — require JWT + DB-validated admin status. r.Route("/v1/admin", func(r chi.Router) { - r.Use(requireJWT(jwtSecret)) + r.Use(requireJWT(jwtSecret, queries)) r.Use(requireAdmin(queries)) + r.Get("/teams", teamH.AdminListTeams) r.Put("/teams/{id}/byoc", teamH.SetBYOC) + r.Delete("/teams/{id}", teamH.AdminDeleteTeam) + r.Get("/users", usersH.AdminListUsers) + r.Put("/users/{id}/active", usersH.SetUserActive) r.Get("/templates", buildH.ListTemplates) r.Delete("/templates/{name}", buildH.DeleteTemplate) r.Post("/builds", buildH.Create) r.Get("/builds", buildH.List) r.Get("/builds/{id}", buildH.Get) r.Post("/builds/{id}/cancel", buildH.Cancel) + r.Post("/capsules", adminCapsules.Create) + r.Get("/capsules", adminCapsules.List) + r.Route("/capsules/{id}", func(r chi.Router) { + r.Use(injectPlatformTeam()) + r.Get("/", adminCapsules.Get) + r.Delete("/", adminCapsules.Destroy) + r.Post("/snapshot", adminCapsules.Snapshot) + r.Post("/exec", exec.Exec) + r.Get("/exec/stream", execStream.ExecStream) + r.Post("/files/write", files.Upload) + r.Post("/files/read", files.Download) + r.Post("/files/list", fsH.ListDir) + r.Post("/files/mkdir", fsH.MakeDir) + r.Post("/files/remove", fsH.Remove) + r.Get("/metrics", metricsH.GetMetrics) + r.Get("/pty", ptyH.PtySession) + r.Get("/processes", processH.ListProcesses) + r.Delete("/processes/{selector}", processH.KillProcess) + r.Get("/processes/{selector}/stream", processH.ConnectProcess) + }) }) - return &Server{router: r, BuildSvc: buildSvc} + // Let extensions register their routes after all core routes. 
+ for _, ext := range extensions { + ext.RegisterRoutes(r, sctx) + } + + return &Server{router: r, BuildSvc: buildSvc, version: version} } // Handler returns the HTTP handler. @@ -211,6 +286,11 @@ func (s *Server) Handler() http.Handler { return s.router } +// Router returns the underlying chi.Router for direct access. +func (s *Server) Router() chi.Router { + return s.router +} + func serveOpenAPI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/yaml") _, _ = w.Write(openapiYAML) @@ -223,7 +303,7 @@ func serveDocs(w http.ResponseWriter, r *http.Request) { - Wrenn Sandbox API + Wrenn API + + + + + + + + +
+ + + + + + +
+ + + + + + +
+ Wrenn + + Wrenn +
+
+
+ + + + + + +
+ + +

+ Hello{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ + +

+ {{.Message}} +

+ + + {{if .Button}} + + + + +
+ + + + {{.Button.Text}} + + +
+ +

+ If the button doesn't work, copy and paste this URL into your browser:
+ {{.Button.URL}} +

+ {{end}} + + + {{if .Closing}} +

+ {{.Closing}} +

+ {{end}} + +
+ + + + + + +
+

+ This is a transactional email from Wrenn. +

+
+ +
+ + + diff --git a/internal/email/templates/base.txt b/internal/email/templates/base.txt new file mode 100644 index 0000000..499ec01 --- /dev/null +++ b/internal/email/templates/base.txt @@ -0,0 +1,13 @@ +Hello{{if .RecipientName}} {{.RecipientName}}{{end}}, + +{{.Message}} +{{if .Button}} + +{{.Button.Text}}: {{.Button.URL}} +{{end}}{{if .Closing}} + +{{.Closing}} +{{end}} + +--- +This is a transactional email from Wrenn (https://wrenn.dev). diff --git a/internal/envdclient/client.go b/internal/envdclient/client.go index 3e05f7d..03994b2 100644 --- a/internal/envdclient/client.go +++ b/internal/envdclient/client.go @@ -3,6 +3,7 @@ package envdclient import ( "bytes" "context" + "encoding/json" "fmt" "io" "log/slog" @@ -268,6 +269,82 @@ func (c *Client) ReadFile(ctx context.Context, path string) ([]byte, error) { return data, nil } +// PrepareSnapshot calls envd's POST /snapshot/prepare endpoint, which quiesces +// continuous goroutines (port scanner, forwarder) and forces a GC cycle before +// Firecracker takes a VM snapshot. This ensures the Go runtime's page allocator +// is in a consistent state when vCPUs are frozen. +// +// Best-effort: the caller should log a warning on error but not abort the pause. +func (c *Client) PrepareSnapshot(ctx context.Context) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.base+"/snapshot/prepare", nil) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("prepare snapshot: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + respBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("prepare snapshot: status %d: %s", resp.StatusCode, string(respBody)) + } + + return nil +} + +// PostInit calls envd's POST /init endpoint, which triggers a re-read of +// Firecracker MMDS metadata. 
This updates WRENN_SANDBOX_ID, WRENN_TEMPLATE_ID +// env vars and the corresponding files under /run/wrenn/ inside the guest. +// Must be called after snapshot restore so envd picks up the new sandbox's metadata. +func (c *Client) PostInit(ctx context.Context) error { + return c.PostInitWithDefaults(ctx, "", nil) +} + +// PostInitWithDefaults calls envd's POST /init endpoint with optional default +// user and environment variables. These are applied to envd's defaults so all +// subsequent process executions use them. +func (c *Client) PostInitWithDefaults(ctx context.Context, defaultUser string, envVars map[string]string) error { + var body io.Reader + if defaultUser != "" || len(envVars) > 0 { + payload := make(map[string]any) + if defaultUser != "" { + payload["defaultUser"] = defaultUser + } + if len(envVars) > 0 { + payload["envVars"] = envVars + } + data, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("marshal init body: %w", err) + } + body = bytes.NewReader(data) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.base+"/init", body) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("post init: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + respBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("post init: status %d: %s", resp.StatusCode, string(respBody)) + } + + return nil +} + // ListDir lists directory contents inside the sandbox. func (c *Client) ListDir(ctx context.Context, path string, depth uint32) (*envdpb.ListDirResponse, error) { req := connect.NewRequest(&envdpb.ListDirRequest{ @@ -282,3 +359,30 @@ func (c *Client) ListDir(ctx context.Context, path string, depth uint32) (*envdp return resp.Msg, nil } + +// MakeDir creates a directory inside the sandbox. 
+func (c *Client) MakeDir(ctx context.Context, path string) (*envdpb.MakeDirResponse, error) { + req := connect.NewRequest(&envdpb.MakeDirRequest{ + Path: path, + }) + + resp, err := c.filesystem.MakeDir(ctx, req) + if err != nil { + return nil, fmt.Errorf("make dir: %w", err) + } + + return resp.Msg, nil +} + +// Remove removes a file or directory inside the sandbox. +func (c *Client) Remove(ctx context.Context, path string) error { + req := connect.NewRequest(&envdpb.RemoveRequest{ + Path: path, + }) + + if _, err := c.filesystem.Remove(ctx, req); err != nil { + return fmt.Errorf("remove: %w", err) + } + + return nil +} diff --git a/internal/envdclient/health.go b/internal/envdclient/health.go index dfb7df8..4837051 100644 --- a/internal/envdclient/health.go +++ b/internal/envdclient/health.go @@ -2,7 +2,9 @@ package envdclient import ( "context" + "encoding/json" "fmt" + "io" "log/slog" "net/http" "time" @@ -31,6 +33,38 @@ func (c *Client) WaitUntilReady(ctx context.Context) error { } } +// FetchVersion queries envd's health endpoint and returns the reported version. 
+func (c *Client) FetchVersion(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.healthURL, nil) + if err != nil { + return "", fmt.Errorf("build health request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("fetch envd version: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return "", fmt.Errorf("health check returned %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil || len(body) == 0 { + return "", nil // envd may not support version reporting yet + } + + var data struct { + Version string `json:"version"` + } + if err := json.Unmarshal(body, &data); err != nil { + return "", nil // non-JSON response, old envd + } + + return data.Version, nil +} + // healthCheck sends a single GET /health request to envd. func (c *Client) healthCheck(ctx context.Context) error { req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.healthURL, nil) diff --git a/internal/envdclient/process.go b/internal/envdclient/process.go new file mode 100644 index 0000000..adb807b --- /dev/null +++ b/internal/envdclient/process.go @@ -0,0 +1,187 @@ +package envdclient + +import ( + "context" + "fmt" + "io" + "log/slog" + + "connectrpc.com/connect" + + envdpb "git.omukk.dev/wrenn/wrenn/proto/envd/gen" +) + +// ProcessInfo holds metadata about a running process inside the sandbox. +type ProcessInfo struct { + PID uint32 + Tag string + Cmd string + Args []string +} + +// StartBackground starts a process that runs independently of the RPC stream. +// It opens a Start stream, reads the first StartEvent to obtain the PID, +// then closes the stream. The process continues running inside the VM because +// envd binds it to context.Background(). 
+func (c *Client) StartBackground(ctx context.Context, tag, cmd string, args []string, envs map[string]string, cwd string) (uint32, error) { + stdin := false + cfg := &envdpb.ProcessConfig{ + Cmd: cmd, + Args: args, + Envs: envs, + } + if cwd != "" { + cfg.Cwd = &cwd + } + + req := connect.NewRequest(&envdpb.StartRequest{ + Process: cfg, + Tag: &tag, + Stdin: &stdin, + }) + + stream, err := c.process.Start(ctx, req) + if err != nil { + return 0, fmt.Errorf("start background process: %w", err) + } + defer stream.Close() + + // Read events until we get the StartEvent with the PID. + for stream.Receive() { + msg := stream.Msg() + if msg.Event == nil { + continue + } + if start, ok := msg.Event.GetEvent().(*envdpb.ProcessEvent_Start); ok { + return start.Start.GetPid(), nil + } + } + + if err := stream.Err(); err != nil && err != io.EOF { + return 0, fmt.Errorf("start background process stream: %w", err) + } + + return 0, fmt.Errorf("start background process: no start event received") +} + +// ConnectProcess re-attaches to a running process by PID or tag and returns +// a channel of streaming events. The channel is closed when the process ends +// or the context is cancelled. 
+func (c *Client) ConnectProcess(ctx context.Context, pid uint32, tag string) (<-chan ExecStreamEvent, error) { + var selector *envdpb.ProcessSelector + if tag != "" { + selector = &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + } + } else { + selector = &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Pid{Pid: pid}, + } + } + + stream, err := c.process.Connect(ctx, connect.NewRequest(&envdpb.ConnectRequest{ + Process: selector, + })) + if err != nil { + return nil, fmt.Errorf("connect process: %w", err) + } + + ch := make(chan ExecStreamEvent, 16) + go func() { + defer close(ch) + defer stream.Close() + + for stream.Receive() { + msg := stream.Msg() + if msg.Event == nil { + continue + } + + var ev ExecStreamEvent + switch e := msg.Event.GetEvent().(type) { + case *envdpb.ProcessEvent_Start: + ev = ExecStreamEvent{Type: "start", PID: e.Start.GetPid()} + + case *envdpb.ProcessEvent_Data: + switch o := e.Data.GetOutput().(type) { + case *envdpb.ProcessEvent_DataEvent_Stdout: + ev = ExecStreamEvent{Type: "stdout", Data: o.Stdout} + case *envdpb.ProcessEvent_DataEvent_Stderr: + ev = ExecStreamEvent{Type: "stderr", Data: o.Stderr} + default: + continue + } + + case *envdpb.ProcessEvent_End: + ev = ExecStreamEvent{Type: "end", ExitCode: e.End.GetExitCode()} + if e.End.Error != nil { + ev.Error = e.End.GetError() + } + + case *envdpb.ProcessEvent_Keepalive: + continue + } + + select { + case ch <- ev: + case <-ctx.Done(): + return + } + } + + if err := stream.Err(); err != nil && err != io.EOF { + slog.Debug("connect process stream error", "error", err) + } + }() + + return ch, nil +} + +// ListProcesses returns all running processes inside the sandbox. 
+func (c *Client) ListProcesses(ctx context.Context) ([]ProcessInfo, error) { + resp, err := c.process.List(ctx, connect.NewRequest(&envdpb.ListRequest{})) + if err != nil { + return nil, fmt.Errorf("list processes: %w", err) + } + + procs := make([]ProcessInfo, 0, len(resp.Msg.Processes)) + for _, p := range resp.Msg.Processes { + info := ProcessInfo{ + PID: p.Pid, + } + if p.Tag != nil { + info.Tag = *p.Tag + } + if p.Config != nil { + info.Cmd = p.Config.Cmd + info.Args = p.Config.Args + } + procs = append(procs, info) + } + + return procs, nil +} + +// KillProcess sends a signal to a process identified by PID or tag. +func (c *Client) KillProcess(ctx context.Context, pid uint32, tag string, signal envdpb.Signal) error { + var selector *envdpb.ProcessSelector + if tag != "" { + selector = &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + } + } else { + selector = &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Pid{Pid: pid}, + } + } + + _, err := c.process.SendSignal(ctx, connect.NewRequest(&envdpb.SendSignalRequest{ + Process: selector, + Signal: signal, + })) + if err != nil { + return fmt.Errorf("kill process: %w", err) + } + + return nil +} diff --git a/internal/envdclient/pty.go b/internal/envdclient/pty.go new file mode 100644 index 0000000..7a625fb --- /dev/null +++ b/internal/envdclient/pty.go @@ -0,0 +1,220 @@ +package envdclient + +import ( + "context" + "fmt" + "io" + "log/slog" + + "connectrpc.com/connect" + + envdpb "git.omukk.dev/wrenn/wrenn/proto/envd/gen" +) + +// PtyEvent represents a single event from a PTY output stream. +type PtyEvent struct { + Type string // "started", "output", "end" + PID uint32 + Data []byte + ExitCode int32 + Error string +} + +// PtyStart starts a new PTY process in the guest and returns a channel of events. +// The tag is the stable identifier used to reconnect via PtyConnect. +// The channel is closed when the process ends or ctx is cancelled. 
+// NOTE: The user parameter from PtyAttachRequest is not yet supported by envd's +// ProcessConfig proto. When envd adds user support, thread it through here. +func (c *Client) PtyStart(ctx context.Context, tag, cmd string, args []string, cols, rows uint32, envs map[string]string, cwd string) (<-chan PtyEvent, error) { + stdin := true + cfg := &envdpb.ProcessConfig{ + Cmd: cmd, + Args: args, + Envs: envs, + } + if cwd != "" { + cfg.Cwd = &cwd + } + + req := connect.NewRequest(&envdpb.StartRequest{ + Process: cfg, + Pty: &envdpb.PTY{ + Size: &envdpb.PTY_Size{ + Cols: cols, + Rows: rows, + }, + }, + Tag: &tag, + Stdin: &stdin, + }) + + stream, err := c.process.Start(ctx, req) + if err != nil { + return nil, fmt.Errorf("pty start: %w", err) + } + + return drainPtyStream(ctx, &startStream{s: stream}, true), nil +} + +// PtyConnect re-attaches to an existing PTY process by tag. +// Returns a channel of output events starting from the current point. +func (c *Client) PtyConnect(ctx context.Context, tag string) (<-chan PtyEvent, error) { + req := connect.NewRequest(&envdpb.ConnectRequest{ + Process: &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + }, + }) + + stream, err := c.process.Connect(ctx, req) + if err != nil { + return nil, fmt.Errorf("pty connect: %w", err) + } + + return drainPtyStream(ctx, &connectStream{s: stream}, false), nil +} + +// PtySendInput sends raw bytes to the PTY process identified by tag. +func (c *Client) PtySendInput(ctx context.Context, tag string, data []byte) error { + req := connect.NewRequest(&envdpb.SendInputRequest{ + Process: &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + }, + Input: &envdpb.ProcessInput{ + Input: &envdpb.ProcessInput_Pty{Pty: data}, + }, + }) + + if _, err := c.process.SendInput(ctx, req); err != nil { + return fmt.Errorf("pty send input: %w", err) + } + return nil +} + +// PtyResize updates the terminal dimensions for the PTY process identified by tag. 
+func (c *Client) PtyResize(ctx context.Context, tag string, cols, rows uint32) error { + req := connect.NewRequest(&envdpb.UpdateRequest{ + Process: &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + }, + Pty: &envdpb.PTY{ + Size: &envdpb.PTY_Size{ + Cols: cols, + Rows: rows, + }, + }, + }) + + if _, err := c.process.Update(ctx, req); err != nil { + return fmt.Errorf("pty resize: %w", err) + } + return nil +} + +// PtyKill sends SIGKILL to the PTY process identified by tag. +func (c *Client) PtyKill(ctx context.Context, tag string) error { + req := connect.NewRequest(&envdpb.SendSignalRequest{ + Process: &envdpb.ProcessSelector{ + Selector: &envdpb.ProcessSelector_Tag{Tag: tag}, + }, + Signal: envdpb.Signal_SIGNAL_SIGKILL, + }) + + if _, err := c.process.SendSignal(ctx, req); err != nil { + return fmt.Errorf("pty kill: %w", err) + } + return nil +} + +// eventStream is an interface covering both StartResponse and ConnectResponse streams. +type eventStream interface { + Receive() bool + Err() error + Close() error +} + +type startStream struct { + s *connect.ServerStreamForClient[envdpb.StartResponse] +} + +func (s *startStream) Receive() bool { return s.s.Receive() } +func (s *startStream) Err() error { return s.s.Err() } +func (s *startStream) Close() error { return s.s.Close() } +func (s *startStream) Event() *envdpb.ProcessEvent { + return s.s.Msg().GetEvent() +} + +type connectStream struct { + s *connect.ServerStreamForClient[envdpb.ConnectResponse] +} + +func (s *connectStream) Receive() bool { return s.s.Receive() } +func (s *connectStream) Err() error { return s.s.Err() } +func (s *connectStream) Close() error { return s.s.Close() } +func (s *connectStream) Event() *envdpb.ProcessEvent { + return s.s.Msg().GetEvent() +} + +type eventProvider interface { + eventStream + Event() *envdpb.ProcessEvent +} + +// drainPtyStream reads events from either a Start or Connect stream and maps +// them into PtyEvent values on a channel. 
+func drainPtyStream(ctx context.Context, stream eventProvider, expectStart bool) <-chan PtyEvent { + ch := make(chan PtyEvent, 16) + go func() { + defer close(ch) + defer stream.Close() + + for stream.Receive() { + event := stream.Event() + if event == nil { + continue + } + + var ev PtyEvent + switch e := event.GetEvent().(type) { + case *envdpb.ProcessEvent_Start: + if expectStart { + ev = PtyEvent{Type: "started", PID: e.Start.GetPid()} + } else { + continue + } + + case *envdpb.ProcessEvent_Data: + switch o := e.Data.GetOutput().(type) { + case *envdpb.ProcessEvent_DataEvent_Pty: + ev = PtyEvent{Type: "output", Data: o.Pty} + case *envdpb.ProcessEvent_DataEvent_Stdout: + ev = PtyEvent{Type: "output", Data: o.Stdout} + case *envdpb.ProcessEvent_DataEvent_Stderr: + ev = PtyEvent{Type: "output", Data: o.Stderr} + default: + continue + } + + case *envdpb.ProcessEvent_End: + ev = PtyEvent{Type: "end", ExitCode: e.End.GetExitCode()} + if e.End.Error != nil { + ev.Error = e.End.GetError() + } + + case *envdpb.ProcessEvent_Keepalive: + continue + } + + select { + case ch <- ev: + case <-ctx.Done(): + return + } + } + + if err := stream.Err(); err != nil && err != io.EOF { + slog.Debug("pty stream error", "error", err) + } + }() + + return ch +} diff --git a/internal/hostagent/server.go b/internal/hostagent/server.go index f6a5522..663d2cb 100644 --- a/internal/hostagent/server.go +++ b/internal/hostagent/server.go @@ -15,6 +15,7 @@ import ( "github.com/google/uuid" "github.com/jackc/pgx/v5/pgtype" + envdpb "git.omukk.dev/wrenn/wrenn/proto/envd/gen" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/hostagentv1connect" @@ -68,10 +69,18 @@ func (s *Server) CreateSandbox( return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("create sandbox: %w", err)) } + // Apply template defaults (user, env vars) if provided. 
+ if msg.DefaultUser != "" || len(msg.DefaultEnv) > 0 { + if err := s.mgr.SetDefaults(ctx, sb.ID, msg.DefaultUser, msg.DefaultEnv); err != nil { + slog.Warn("failed to set sandbox defaults", "sandbox", sb.ID, "error", err) + } + } + return connect.NewResponse(&pb.CreateSandboxResponse{ SandboxId: sb.ID, Status: string(sb.Status), HostIp: sb.HostIP.String(), + Metadata: sb.Metadata, }), nil } @@ -99,14 +108,24 @@ func (s *Server) ResumeSandbox( ctx context.Context, req *connect.Request[pb.ResumeSandboxRequest], ) (*connect.Response[pb.ResumeSandboxResponse], error) { - sb, err := s.mgr.Resume(ctx, req.Msg.SandboxId, int(req.Msg.TimeoutSec)) + msg := req.Msg + sb, err := s.mgr.Resume(ctx, msg.SandboxId, int(msg.TimeoutSec), msg.KernelVersion) if err != nil { return nil, connect.NewError(connect.CodeInternal, err) } + + // Apply template defaults (user, env vars) if provided. + if msg.DefaultUser != "" || len(msg.DefaultEnv) > 0 { + if err := s.mgr.SetDefaults(ctx, sb.ID, msg.DefaultUser, msg.DefaultEnv); err != nil { + slog.Warn("failed to set sandbox defaults on resume", "sandbox", sb.ID, "error", err) + } + } + return connect.NewResponse(&pb.ResumeSandboxResponse{ SandboxId: sb.ID, Status: string(sb.Status), HostIp: sb.HostIP.String(), + Metadata: sb.Metadata, }), nil } @@ -252,6 +271,69 @@ func (s *Server) ReadFile( return connect.NewResponse(&pb.ReadFileResponse{Content: content}), nil } +func (s *Server) ListDir( + ctx context.Context, + req *connect.Request[pb.ListDirRequest], +) (*connect.Response[pb.ListDirResponse], error) { + msg := req.Msg + + client, err := s.mgr.GetClient(msg.SandboxId) + if err != nil { + return nil, connect.NewError(connect.CodeNotFound, err) + } + + resp, err := client.ListDir(ctx, msg.Path, msg.Depth) + if err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("list dir: %w", err)) + } + + entries := make([]*pb.FileEntry, 0, len(resp.Entries)) + for _, e := range resp.Entries { + entries = append(entries, 
entryInfoToPB(e)) + } + + return connect.NewResponse(&pb.ListDirResponse{Entries: entries}), nil +} + +func (s *Server) MakeDir( + ctx context.Context, + req *connect.Request[pb.MakeDirRequest], +) (*connect.Response[pb.MakeDirResponse], error) { + msg := req.Msg + + client, err := s.mgr.GetClient(msg.SandboxId) + if err != nil { + return nil, connect.NewError(connect.CodeNotFound, err) + } + + resp, err := client.MakeDir(ctx, msg.Path) + if err != nil { + return nil, fmt.Errorf("make dir: %w", err) + } + + return connect.NewResponse(&pb.MakeDirResponse{ + Entry: entryInfoToPB(resp.Entry), + }), nil +} + +func (s *Server) RemovePath( + ctx context.Context, + req *connect.Request[pb.RemovePathRequest], +) (*connect.Response[pb.RemovePathResponse], error) { + msg := req.Msg + + client, err := s.mgr.GetClient(msg.SandboxId) + if err != nil { + return nil, connect.NewError(connect.CodeNotFound, err) + } + + if err := client.Remove(ctx, msg.Path); err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("remove: %w", err)) + } + + return connect.NewResponse(&pb.RemovePathResponse{}), nil +} + func (s *Server) ExecStream( ctx context.Context, req *connect.Request[pb.ExecStreamRequest], @@ -436,6 +518,16 @@ func (s *Server) ReadFileStream( // Stream file content in 64KB chunks. buf := make([]byte, 64*1024) for { + // Bail out early if the client disconnected or the context was cancelled. 
+ select { + case <-ctx.Done(): + if ctx.Err() == context.DeadlineExceeded { + return connect.NewError(connect.CodeDeadlineExceeded, ctx.Err()) + } + return connect.NewError(connect.CodeCanceled, ctx.Err()) + default: + } + n, err := resp.Body.Read(buf) if n > 0 { chunk := make([]byte, n) @@ -474,6 +566,7 @@ func (s *Server) ListSandboxes( CreatedAtUnix: sb.CreatedAt.Unix(), LastActiveAtUnix: sb.LastActiveAt.Unix(), TimeoutSec: int32(sb.TimeoutSec), + Metadata: sb.Metadata, } } @@ -545,3 +638,269 @@ func metricPointsToPB(pts []sandbox.MetricPoint) []*pb.MetricPoint { } return out } + +func (s *Server) PtyAttach( + ctx context.Context, + req *connect.Request[pb.PtyAttachRequest], + stream *connect.ServerStream[pb.PtyAttachResponse], +) error { + msg := req.Msg + + events, err := s.mgr.PtyAttach(ctx, msg.SandboxId, msg.Tag, msg.Cmd, msg.Args, msg.Cols, msg.Rows, msg.Envs, msg.Cwd) + if err != nil { + return connect.NewError(connect.CodeInternal, fmt.Errorf("pty attach: %w", err)) + } + + for ev := range events { + var resp pb.PtyAttachResponse + switch ev.Type { + case "started": + resp.Event = &pb.PtyAttachResponse_Started{ + Started: &pb.PtyStarted{Pid: ev.PID, Tag: msg.Tag}, + } + case "output": + resp.Event = &pb.PtyAttachResponse_Output{ + Output: &pb.PtyOutput{Data: ev.Data}, + } + case "end": + resp.Event = &pb.PtyAttachResponse_Exited{ + Exited: &pb.PtyExited{ExitCode: ev.ExitCode, Error: ev.Error}, + } + default: + continue + } + if err := stream.Send(&resp); err != nil { + return err + } + } + + return nil +} + +func (s *Server) PtySendInput( + ctx context.Context, + req *connect.Request[pb.PtySendInputRequest], +) (*connect.Response[pb.PtySendInputResponse], error) { + msg := req.Msg + + if err := s.mgr.PtySendInput(ctx, msg.SandboxId, msg.Tag, msg.Data); err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("pty send input: %w", err)) + } + + return connect.NewResponse(&pb.PtySendInputResponse{}), nil +} + +func (s *Server) 
PtyResize( + ctx context.Context, + req *connect.Request[pb.PtyResizeRequest], +) (*connect.Response[pb.PtyResizeResponse], error) { + msg := req.Msg + + if err := s.mgr.PtyResize(ctx, msg.SandboxId, msg.Tag, msg.Cols, msg.Rows); err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("pty resize: %w", err)) + } + + return connect.NewResponse(&pb.PtyResizeResponse{}), nil +} + +func (s *Server) PtyKill( + ctx context.Context, + req *connect.Request[pb.PtyKillRequest], +) (*connect.Response[pb.PtyKillResponse], error) { + msg := req.Msg + + if err := s.mgr.PtyKill(ctx, msg.SandboxId, msg.Tag); err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("pty kill: %w", err)) + } + + return connect.NewResponse(&pb.PtyKillResponse{}), nil +} + +// entryInfoToPB maps an envd EntryInfo to a hostagent FileEntry. +func entryInfoToPB(e *envdpb.EntryInfo) *pb.FileEntry { + if e == nil { + return nil + } + + var fileType string + switch e.Type { + case envdpb.FileType_FILE_TYPE_FILE: + fileType = "file" + case envdpb.FileType_FILE_TYPE_DIRECTORY: + fileType = "directory" + case envdpb.FileType_FILE_TYPE_SYMLINK: + fileType = "symlink" + default: + fileType = "unknown" + } + + entry := &pb.FileEntry{ + Name: e.Name, + Path: e.Path, + Type: fileType, + Size: e.Size, + Mode: e.Mode, + Permissions: e.Permissions, + Owner: e.Owner, + Group: e.Group, + } + + if e.ModifiedTime != nil { + entry.ModifiedAt = e.ModifiedTime.GetSeconds() + } + + if e.SymlinkTarget != nil { + entry.SymlinkTarget = e.SymlinkTarget + } + + return entry +} + +// ── Background Processes ──────────────────────────────────────────── + +func (s *Server) StartBackground( + ctx context.Context, + req *connect.Request[pb.StartBackgroundRequest], +) (*connect.Response[pb.StartBackgroundResponse], error) { + msg := req.Msg + + pid, err := s.mgr.StartBackground(ctx, msg.SandboxId, msg.Tag, msg.Cmd, msg.Args, msg.Envs, msg.Cwd) + if err != nil { + if strings.Contains(err.Error(), 
"not found") { + return nil, connect.NewError(connect.CodeNotFound, err) + } + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("start background: %w", err)) + } + + return connect.NewResponse(&pb.StartBackgroundResponse{ + Pid: pid, + Tag: msg.Tag, + }), nil +} + +func (s *Server) ListProcesses( + ctx context.Context, + req *connect.Request[pb.ListProcessesRequest], +) (*connect.Response[pb.ListProcessesResponse], error) { + procs, err := s.mgr.ListProcesses(ctx, req.Msg.SandboxId) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, connect.NewError(connect.CodeNotFound, err) + } + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("list processes: %w", err)) + } + + entries := make([]*pb.ProcessEntry, 0, len(procs)) + for _, p := range procs { + entries = append(entries, &pb.ProcessEntry{ + Pid: p.PID, + Tag: p.Tag, + Cmd: p.Cmd, + Args: p.Args, + }) + } + + return connect.NewResponse(&pb.ListProcessesResponse{ + Processes: entries, + }), nil +} + +func (s *Server) KillProcess( + ctx context.Context, + req *connect.Request[pb.KillProcessRequest], +) (*connect.Response[pb.KillProcessResponse], error) { + msg := req.Msg + + // Resolve PID/tag selector. + var pid uint32 + var tag string + switch sel := msg.Selector.(type) { + case *pb.KillProcessRequest_Pid: + pid = sel.Pid + case *pb.KillProcessRequest_Tag: + tag = sel.Tag + default: + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("pid or tag is required")) + } + + // Map signal string to envd enum. 
+ var signal envdpb.Signal + switch msg.Signal { + case "", "SIGKILL": + signal = envdpb.Signal_SIGNAL_SIGKILL + case "SIGTERM": + signal = envdpb.Signal_SIGNAL_SIGTERM + default: + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("unsupported signal: %s (use SIGKILL or SIGTERM)", msg.Signal)) + } + + if err := s.mgr.KillProcess(ctx, msg.SandboxId, pid, tag, signal); err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, connect.NewError(connect.CodeNotFound, err) + } + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("kill process: %w", err)) + } + + return connect.NewResponse(&pb.KillProcessResponse{}), nil +} + +func (s *Server) ConnectProcess( + ctx context.Context, + req *connect.Request[pb.ConnectProcessRequest], + stream *connect.ServerStream[pb.ConnectProcessResponse], +) error { + msg := req.Msg + + var pid uint32 + var tag string + switch sel := msg.Selector.(type) { + case *pb.ConnectProcessRequest_Pid: + pid = sel.Pid + case *pb.ConnectProcessRequest_Tag: + tag = sel.Tag + default: + return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("pid or tag is required")) + } + + events, err := s.mgr.ConnectProcess(ctx, msg.SandboxId, pid, tag) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return connect.NewError(connect.CodeNotFound, err) + } + return connect.NewError(connect.CodeInternal, fmt.Errorf("connect process: %w", err)) + } + + for ev := range events { + var resp pb.ConnectProcessResponse + switch ev.Type { + case "start": + resp.Event = &pb.ConnectProcessResponse_Start{ + Start: &pb.ExecStreamStart{Pid: ev.PID}, + } + case "stdout": + resp.Event = &pb.ConnectProcessResponse_Data{ + Data: &pb.ExecStreamData{ + Output: &pb.ExecStreamData_Stdout{Stdout: ev.Data}, + }, + } + case "stderr": + resp.Event = &pb.ConnectProcessResponse_Data{ + Data: &pb.ExecStreamData{ + Output: &pb.ExecStreamData_Stderr{Stderr: ev.Data}, + }, + } + case "end": + resp.Event = 
&pb.ConnectProcessResponse_End{ + End: &pb.ExecStreamEnd{ + ExitCode: ev.ExitCode, + Error: ev.Error, + }, + } + } + if err := stream.Send(&resp); err != nil { + return err + } + } + + return nil +} diff --git a/internal/layout/layout.go b/internal/layout/layout.go index dfe2f9b..fcb11ad 100644 --- a/internal/layout/layout.go +++ b/internal/layout/layout.go @@ -1,11 +1,15 @@ package layout import ( + "fmt" + "os" "path/filepath" + "sort" + "strings" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // IsMinimal reports whether the given team and template IDs represent the @@ -47,6 +51,75 @@ func KernelPath(wrennDir string) string { return filepath.Join(wrennDir, "kernels", "vmlinux") } +// KernelPathVersioned returns the path to a specific kernel version. +func KernelPathVersioned(wrennDir, version string) string { + return filepath.Join(wrennDir, "kernels", "vmlinux-"+version) +} + +// LatestKernel scans the kernels directory for files matching vmlinux-{semver} +// and returns the path and version of the latest one (by semver sort). +func LatestKernel(wrennDir string) (path, version string, err error) { + dir := filepath.Join(wrennDir, "kernels") + return latestVersionedFile(dir, "vmlinux-") +} + +// latestVersionedFile scans dir for files with the given prefix, extracts the +// version suffix, sorts by semver, and returns the path and version of the latest. 
// latestVersionedFile scans dir for files with the given prefix, extracts the
// version suffix, sorts by semver, and returns the path and version of the latest.
func latestVersionedFile(dir, prefix string) (path, version string, err error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return "", "", fmt.Errorf("read directory %s: %w", dir, err)
	}

	// Collect every non-empty version suffix found on a non-directory entry.
	var found []string
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		suffix, ok := strings.CutPrefix(entry.Name(), prefix)
		if !ok || suffix == "" {
			continue
		}
		found = append(found, suffix)
	}

	if len(found) == 0 {
		return "", "", fmt.Errorf("no %s* files found in %s", prefix, dir)
	}

	// Ascending semver order: the newest version ends up last.
	sort.Slice(found, func(a, b int) bool {
		return compareSemver(found[a], found[b]) < 0
	})

	newest := found[len(found)-1]
	return filepath.Join(dir, prefix+newest), newest, nil
}

// compareSemver compares two dotted-numeric version strings.
// Returns -1 if a < b, 0 if equal, 1 if a > b.
func compareSemver(a, b string) int {
	as := strings.Split(a, ".")
	bs := strings.Split(b, ".")

	// Walk both component lists in lockstep; a missing component counts as 0.
	for i := 0; i < max(len(as), len(bs)); i++ {
		var x, y int
		if i < len(as) {
			// Best-effort parse: Sscanf tolerates trailing junk ("3-rc1" → 3)
			// and leaves 0 on a total failure, which is the intended fallback.
			_, _ = fmt.Sscanf(as[i], "%d", &x)
		}
		if i < len(bs) {
			_, _ = fmt.Sscanf(bs[i], "%d", &y)
		}
		switch {
		case x < y:
			return -1
		case x > y:
			return 1
		}
	}
	return 0
}

// ImagesRoot returns the root images directory.
func ImagesRoot(wrennDir string) string { return filepath.Join(wrennDir, "images") diff --git a/internal/layout/layout_test.go b/internal/layout/layout_test.go index 3501ee4..f3e3532 100644 --- a/internal/layout/layout_test.go +++ b/internal/layout/layout_test.go @@ -6,7 +6,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) func TestIsMinimal(t *testing.T) { diff --git a/internal/models/sandbox.go b/internal/models/sandbox.go index ab72cd3..8228679 100644 --- a/internal/models/sandbox.go +++ b/internal/models/sandbox.go @@ -30,4 +30,5 @@ type Sandbox struct { RootfsPath string CreatedAt time.Time LastActiveAt time.Time + Metadata map[string]string } diff --git a/internal/network/allocator.go b/internal/network/allocator.go index b7265e6..6a929d0 100644 --- a/internal/network/allocator.go +++ b/internal/network/allocator.go @@ -24,7 +24,7 @@ func (a *SlotAllocator) Allocate() (int, error) { a.mu.Lock() defer a.mu.Unlock() - for i := 1; i <= 65534; i++ { + for i := 1; i <= 32767; i++ { if !a.inUse[i] { a.inUse[i] = true return i, nil diff --git a/internal/network/setup.go b/internal/network/setup.go index 614235a..3874c79 100644 --- a/internal/network/setup.go +++ b/internal/network/setup.go @@ -131,26 +131,31 @@ type Slot struct { } // NewSlot computes the addressing for the given slot index (1-based). +// Index must be in [1, 32767] so that veth offset (index*2) fits in 16 bits. 
func NewSlot(index int) *Slot { + if index < 1 || index > 32767 { + panic(fmt.Sprintf("slot index %d out of range [1, 32767]", index)) + } + hostBaseIP := net.ParseIP(hostBase).To4() vrtBaseIP := net.ParseIP(vrtBase).To4() hostIP := make(net.IP, 4) copy(hostIP, hostBaseIP) - hostIP[2] += byte(index >> 8) - hostIP[3] += byte(index & 0xFF) + hostIP[2] = hostBaseIP[2] + byte(index>>8) + hostIP[3] = hostBaseIP[3] + byte(index&0xFF) vethOffset := index * vrtAddressesPerSlot vethIP := make(net.IP, 4) copy(vethIP, vrtBaseIP) - vethIP[2] += byte(vethOffset >> 8) - vethIP[3] += byte(vethOffset & 0xFF) + vethIP[2] = vrtBaseIP[2] + byte(vethOffset>>8) + vethIP[3] = vrtBaseIP[3] + byte(vethOffset&0xFF) vpeerOffset := vethOffset + 1 vpeerIP := make(net.IP, 4) copy(vpeerIP, vrtBaseIP) - vpeerIP[2] += byte(vpeerOffset >> 8) - vpeerIP[3] += byte(vpeerOffset & 0xFF) + vpeerIP[2] = vrtBaseIP[2] + byte(vpeerOffset>>8) + vpeerIP[3] = vrtBaseIP[3] + byte(vpeerOffset&0xFF) return &Slot{ Index: index, diff --git a/internal/recipe/context.go b/internal/recipe/context.go index 71cc0bc..3a64059 100644 --- a/internal/recipe/context.go +++ b/internal/recipe/context.go @@ -7,10 +7,11 @@ import ( ) // ExecContext holds mutable state that persists across recipe steps. -// It is initialized empty and updated by ENV and WORKDIR steps. +// It is initialized empty and updated by ENV, WORKDIR, and USER steps. type ExecContext struct { WorkDir string EnvVars map[string]string + User string // Current unix user for command execution. 
} // This regex matches: @@ -25,7 +26,20 @@ var envRegex = regexp.MustCompile(`\$\$|\$\{([a-zA-Z0-9_]*)\}|\$([a-zA-Z0-9_]+)` // If WORKDIR and/or ENV are set, they are prepended as a shell preamble: // // cd '/the/dir' && KEY='val' /bin/sh -c 'original command' +// +// If USER is set to a non-root user, the entire command is wrapped with su: +// +// su -s /bin/sh -c '' func (c *ExecContext) WrappedCommand(cmd string) string { + inner := c.innerCommand(cmd) + if c.User != "" && c.User != "root" { + return "su " + shellescape(c.User) + " -s /bin/sh -c " + shellescape(inner) + } + return inner +} + +// innerCommand builds the command with workdir/env preamble but without user wrapping. +func (c *ExecContext) innerCommand(cmd string) string { prefix := c.shellPrefix() if prefix == "" { return cmd @@ -42,7 +56,11 @@ func (c *ExecContext) WrappedCommand(cmd string) string { // simultaneously before a healthcheck is evaluated. func (c *ExecContext) StartCommand(cmd string) string { prefix := c.shellPrefix() - return prefix + "nohup /bin/sh -c " + shellescape(cmd) + " >/dev/null 2>&1 &" + inner := prefix + "nohup /bin/sh -c " + shellescape(cmd) + " >/dev/null 2>&1 &" + if c.User != "" && c.User != "root" { + return "su " + shellescape(c.User) + " -s /bin/sh -c " + shellescape(inner) + } + return inner } // shellPrefix builds the "cd ... && KEY=val " preamble for a shell command. @@ -97,8 +115,11 @@ func expandEnv(s string, vars map[string]string) string { }) } -// shellescape wraps s in single quotes, escaping any embedded single quotes. +// Shellescape wraps s in single quotes, escaping any embedded single quotes. // This is POSIX-safe for paths, env values, and shell commands. -func shellescape(s string) string { +func Shellescape(s string) string { return "'" + strings.ReplaceAll(s, "'", `'\''`) + "'" } + +// shellescape is the package-internal alias for Shellescape. 
+func shellescape(s string) string { return Shellescape(s) } diff --git a/internal/recipe/executor.go b/internal/recipe/executor.go index 53aaeeb..ffecf04 100644 --- a/internal/recipe/executor.go +++ b/internal/recipe/executor.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log/slog" + "path" "strings" "time" @@ -16,6 +17,10 @@ import ( // explicit --timeout flag. const DefaultStepTimeout = 30 * time.Second +// BuildFilesDir is the directory inside the sandbox where uploaded build +// archives are extracted. COPY instructions reference paths relative to this. +const BuildFilesDir = "/tmp/build-files" + // BuildLogEntry is the per-step record stored in template_builds.logs (JSONB). type BuildLogEntry struct { Step int `json:"step"` @@ -32,13 +37,18 @@ type BuildLogEntry struct { // the method on the hostagent Connect RPC client. type ExecFunc func(ctx context.Context, req *connect.Request[pb.ExecRequest]) (*connect.Response[pb.ExecResponse], error) +// ProgressFunc is called after each step with the current step counter and +// accumulated log entries. Used for per-step DB progress updates. +type ProgressFunc func(step int, entries []BuildLogEntry) + // Execute runs steps sequentially against sandboxID using execFn. // // - phase labels the log entries (e.g., "pre-build", "recipe", "post-build"). // - startStep is the 1-based offset so entries are globally numbered across phases. // - defaultTimeout applies to RUN steps with no per-step --timeout; 0 → 10 minutes. -// - bctx is mutated in place as ENV/WORKDIR steps execute, and carries forward +// - bctx is mutated in place as ENV/WORKDIR/USER steps execute, and carries forward // into subsequent phases when the caller passes the same pointer. +// - onProgress is called after each step for live progress updates (may be nil). // // Returns all log entries appended during this call, the next step counter // value, and whether all steps succeeded. 
On false the last entry contains @@ -53,6 +63,7 @@ func Execute( defaultTimeout time.Duration, bctx *ExecContext, execFn ExecFunc, + onProgress ProgressFunc, ) (entries []BuildLogEntry, nextStep int, ok bool) { if defaultTimeout <= 0 { defaultTimeout = 10 * time.Minute @@ -72,19 +83,30 @@ func Execute( entries = append(entries, BuildLogEntry{Step: step, Phase: phase, Cmd: st.Raw, Ok: true}) case KindWORKDIR: + // Create the directory if it doesn't exist. + mkdirEntry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 10*time.Second, execFn, + "mkdir -p "+shellescape(st.Path)) + if !mkdirEntry.Ok { + entries = append(entries, mkdirEntry) + return entries, step, false + } bctx.WorkDir = st.Path - entries = append(entries, BuildLogEntry{Step: step, Phase: phase, Cmd: st.Raw, Ok: true}) + mkdirEntry.Ok = true + entries = append(entries, mkdirEntry) - case KindUSER, KindCOPY: - verb := strings.ToUpper(strings.Fields(st.Raw)[0]) - entries = append(entries, BuildLogEntry{ - Step: step, - Phase: phase, - Cmd: st.Raw, - Stderr: verb + " is not yet supported", - Ok: false, - }) - return entries, step, false + case KindUSER: + entry, succeeded := execUser(ctx, st, sandboxID, phase, step, bctx, execFn) + entries = append(entries, entry) + if !succeeded { + return entries, step, false + } + + case KindCOPY: + entry, succeeded := execCopy(ctx, st, sandboxID, phase, step, bctx, execFn) + entries = append(entries, entry) + if !succeeded { + return entries, step, false + } case KindSTART: entry, succeeded := execStart(ctx, st, sandboxID, phase, step, bctx, execFn) @@ -104,6 +126,10 @@ func Execute( return entries, step, false } } + + if onProgress != nil { + onProgress(step, entries) + } } return entries, step, true } @@ -145,6 +171,123 @@ func execRun( return entry, entry.Ok } +// execUser creates a unix user (if not exists), grants passwordless sudo, +// and updates bctx.User for subsequent steps. 
+func execUser( + ctx context.Context, + st Step, + sandboxID, phase string, + step int, + bctx *ExecContext, + execFn ExecFunc, +) (BuildLogEntry, bool) { + username := st.Key + // Create user if not exists, with home directory and bash shell. + // Grant passwordless sudo access (E2B convention). + // Uses printf %s to avoid shell injection in the sudoers line. + script := fmt.Sprintf( + "id %s >/dev/null 2>&1 || (adduser --disabled-password --gecos '' --shell /bin/bash %s && printf '%%s ALL=(ALL) NOPASSWD:ALL\\n' %s >> /etc/sudoers)", + shellescape(username), shellescape(username), shellescape(username), + ) + + entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 30*time.Second, execFn, script) + if entry.Ok { + bctx.User = username + // Update HOME so ~ expands correctly in subsequent RUN/WORKDIR steps. + if bctx.EnvVars == nil { + bctx.EnvVars = make(map[string]string) + } + if username == "root" { + bctx.EnvVars["HOME"] = "/root" + } else { + bctx.EnvVars["HOME"] = "/home/" + username + } + } + return entry, entry.Ok +} + +// execCopy copies a file or directory from the build archive (extracted at +// BuildFilesDir) to the destination path inside the sandbox. Ownership is +// set to the current user from bctx. +func execCopy( + ctx context.Context, + st Step, + sandboxID, phase string, + step int, + bctx *ExecContext, + execFn ExecFunc, +) (BuildLogEntry, bool) { + // Validate all source paths: must be relative and not escape the archive directory. + var srcPaths []string + for _, s := range st.Srcs { + cleaned := path.Clean(s) + if strings.HasPrefix(cleaned, "..") || strings.HasPrefix(cleaned, "/") { + return BuildLogEntry{ + Step: step, + Phase: phase, + Cmd: st.Raw, + Stderr: fmt.Sprintf("COPY source must be a relative path within the archive: %q", s), + }, false + } + srcPaths = append(srcPaths, shellescape(BuildFilesDir+"/"+cleaned)) + } + + dst := st.Dst + // Resolve relative destination against the current WORKDIR. 
+ if dst != "" && dst[0] != '/' && bctx.WorkDir != "" { + dst = bctx.WorkDir + "/" + dst + } + owner := "root" + if bctx.User != "" { + owner = bctx.User + } + script := fmt.Sprintf( + "cp -r %s %s && chown -R %s:%s %s", + strings.Join(srcPaths, " "), shellescape(dst), shellescape(owner), shellescape(owner), shellescape(dst), + ) + + entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 60*time.Second, execFn, script) + return entry, entry.Ok +} + +// execRawShell runs a shell command directly (as root) without ExecContext +// wrapping. Used for internal operations like user creation and file copy. +func execRawShell( + ctx context.Context, + raw, sandboxID, phase string, + step int, + timeout time.Duration, + execFn ExecFunc, + shellCmd string, +) BuildLogEntry { + execCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + start := time.Now() + resp, err := execFn(execCtx, connect.NewRequest(&pb.ExecRequest{ + SandboxId: sandboxID, + Cmd: "/bin/sh", + Args: []string{"-c", shellCmd}, + TimeoutSec: int32(timeout.Seconds()), + })) + + entry := BuildLogEntry{ + Step: step, + Phase: phase, + Cmd: raw, + Elapsed: time.Since(start).Milliseconds(), + } + if err != nil { + entry.Stderr = fmt.Sprintf("exec error: %v", err) + return entry + } + entry.Stdout = string(resp.Msg.Stdout) + entry.Stderr = string(resp.Msg.Stderr) + entry.Exit = resp.Msg.ExitCode + entry.Ok = resp.Msg.ExitCode == 0 + return entry +} + func execStart( ctx context.Context, st Step, diff --git a/internal/recipe/step.go b/internal/recipe/step.go index 7d51036..07e167e 100644 --- a/internal/recipe/step.go +++ b/internal/recipe/step.go @@ -24,9 +24,11 @@ type Step struct { Raw string // original string, preserved for logging Shell string // KindRUN, KindSTART: the shell command text Timeout time.Duration // KindRUN: 0 means use caller's default - Key string // KindENV: variable name + Key string // KindENV: variable name; KindUSER: username Value string // KindENV: variable value Path 
string // KindWORKDIR: directory path + Srcs []string // KindCOPY: source paths (relative to build archive) + Dst string // KindCOPY: destination path inside sandbox } // ParseStep parses a single recipe instruction string into a Step. @@ -61,9 +63,9 @@ func ParseStep(s string) (Step, error) { case "WORKDIR": return parseWORKDIR(s, rest) case "USER": - return Step{Kind: KindUSER, Raw: s}, nil + return parseUSER(s, rest) case "COPY": - return Step{Kind: KindCOPY, Raw: s}, nil + return parseCOPY(s, rest) default: return Step{}, fmt.Errorf("unknown instruction %q (expected RUN, START, ENV, WORKDIR, USER, or COPY)", keyword) } @@ -127,3 +129,33 @@ func parseWORKDIR(raw, path string) (Step, error) { } return Step{Kind: KindWORKDIR, Raw: raw, Path: path}, nil } + +func parseUSER(raw, username string) (Step, error) { + if username == "" { + return Step{}, fmt.Errorf("USER requires a username: %q", raw) + } + // Validate: alphanumeric, hyphens, underscores only; must start with a letter or underscore. + for i, c := range username { + if i == 0 && !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_') { + return Step{}, fmt.Errorf("USER username must start with a letter or underscore: %q", raw) + } + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-') { + return Step{}, fmt.Errorf("USER username contains invalid character %q: %q", string(c), raw) + } + } + return Step{Kind: KindUSER, Raw: raw, Key: username}, nil +} + +func parseCOPY(raw, rest string) (Step, error) { + if rest == "" { + return Step{}, fmt.Errorf("COPY requires ... : %q", raw) + } + parts := strings.Fields(rest) + if len(parts) < 2 { + return Step{}, fmt.Errorf("COPY requires ... : %q", raw) + } + // Last argument is the destination, everything before is sources. 
+ dst := parts[len(parts)-1] + srcs := parts[:len(parts)-1] + return Step{Kind: KindCOPY, Raw: raw, Srcs: srcs, Dst: dst}, nil +} diff --git a/internal/recipe/step_test.go b/internal/recipe/step_test.go index 2370bb2..2d0c9e2 100644 --- a/internal/recipe/step_test.go +++ b/internal/recipe/step_test.go @@ -1,6 +1,7 @@ package recipe import ( + "reflect" "testing" "time" ) @@ -111,16 +112,42 @@ func TestParseStep(t *testing.T) { input: "WORKDIR", wantErr: true, }, - // USER and COPY stubs + // USER { - name: "USER stub", + name: "USER basic", input: "USER www-data", - want: Step{Kind: KindUSER, Raw: "USER www-data"}, + want: Step{Kind: KindUSER, Raw: "USER www-data", Key: "www-data"}, }, { - name: "COPY stub", + name: "USER empty", + input: "USER", + wantErr: true, + }, + { + name: "USER invalid chars", + input: "USER bad user", + wantErr: true, + }, + // COPY + { + name: "COPY basic", input: "COPY config.yaml /etc/app/config.yaml", - want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml"}, + want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml", Srcs: []string{"config.yaml"}, Dst: "/etc/app/config.yaml"}, + }, + { + name: "COPY multiple sources", + input: "COPY a.txt b.txt /dest/", + want: Step{Kind: KindCOPY, Raw: "COPY a.txt b.txt /dest/", Srcs: []string{"a.txt", "b.txt"}, Dst: "/dest/"}, + }, + { + name: "COPY missing dst", + input: "COPY config.yaml", + wantErr: true, + }, + { + name: "COPY empty", + input: "COPY", + wantErr: true, }, // Unknown keyword { @@ -148,7 +175,7 @@ func TestParseStep(t *testing.T) { if err != nil { t.Fatalf("ParseStep(%q) unexpected error: %v", tc.input, err) } - if got != tc.want { + if !reflect.DeepEqual(got, tc.want) { t.Errorf("ParseStep(%q)\n got %+v\n want %+v", tc.input, got, tc.want) } }) diff --git a/internal/sandbox/fcversion.go b/internal/sandbox/fcversion.go new file mode 100644 index 0000000..092fbe0 --- /dev/null +++ b/internal/sandbox/fcversion.go @@ -0,0 +1,30 @@ +package sandbox + 
+import ( + "fmt" + "os/exec" + "strings" +) + +// DetectFirecrackerVersion runs the firecracker binary with --version and +// parses the semver from the output (e.g. "Firecracker v1.14.1" → "1.14.1"). +func DetectFirecrackerVersion(binaryPath string) (string, error) { + out, err := exec.Command(binaryPath, "--version").Output() + if err != nil { + return "", fmt.Errorf("run %s --version: %w", binaryPath, err) + } + + // Output is typically "Firecracker v1.14.1\n" or similar. + line := strings.TrimSpace(string(out)) + for _, field := range strings.Fields(line) { + v := strings.TrimPrefix(field, "v") + if v != field || strings.Contains(field, ".") { + // Either had a "v" prefix or contains a dot — likely the version. + if strings.Count(v, ".") >= 1 { + return v, nil + } + } + } + + return "", fmt.Errorf("could not parse version from firecracker output: %q", line) +} diff --git a/internal/sandbox/images.go b/internal/sandbox/images.go index ecee469..b1a848d 100644 --- a/internal/sandbox/images.go +++ b/internal/sandbox/images.go @@ -6,9 +6,11 @@ import ( "os" "os/exec" "path/filepath" + "strconv" + "strings" - "git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/layout" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // DefaultDiskSizeMB is the standard disk size for base images. Images smaller @@ -66,6 +68,73 @@ func EnsureImageSizes(wrennDir string, targetMB int) error { return nil } +// ParseSizeToMB parses a human-readable size string into megabytes. +// Supported suffixes: G, Gi (gibibytes), M, Mi (mebibytes). +// Examples: "5G" → 5120, "2Gi" → 2048, "1000M" → 1000, "512Mi" → 512. +func ParseSizeToMB(s string) (int, error) { + s = strings.TrimSpace(s) + if s == "" { + return 0, fmt.Errorf("empty size string") + } + + // Find where the numeric part ends. + i := 0 + for i < len(s) && (s[i] == '.' 
|| (s[i] >= '0' && s[i] <= '9')) { + i++ + } + if i == 0 { + return 0, fmt.Errorf("invalid size %q: no numeric value", s) + } + + numStr := s[:i] + suffix := strings.TrimSpace(s[i:]) + + num, err := strconv.ParseFloat(numStr, 64) + if err != nil { + return 0, fmt.Errorf("invalid size %q: %w", s, err) + } + + switch suffix { + case "G", "Gi": + return int(num * 1024), nil + case "M", "Mi", "": + return int(num), nil + default: + return 0, fmt.Errorf("invalid size %q: unknown suffix %q (use G, Gi, M, or Mi)", s, suffix) + } +} + +// ShrinkMinimalImage shrinks the built-in minimal rootfs back to its minimum +// size using resize2fs -M. This is the inverse of EnsureImageSizes and should +// be called during graceful shutdown so the image is stored compactly on disk. +func ShrinkMinimalImage(wrennDir string) { + minimalRootfs := layout.TemplateRootfs(wrennDir, id.PlatformTeamID, id.MinimalTemplateID) + shrinkImage(minimalRootfs) +} + +// shrinkImage shrinks a single rootfs image to its minimum size. +func shrinkImage(rootfs string) { + if _, err := os.Stat(rootfs); err != nil { + return + } + + slog.Info("shrinking base image", "path", rootfs) + + if out, err := exec.Command("e2fsck", "-fy", rootfs).CombinedOutput(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() > 1 { + slog.Warn("e2fsck before shrink failed", "path", rootfs, "output", string(out), "error", err) + return + } + } + + if out, err := exec.Command("resize2fs", "-M", rootfs).CombinedOutput(); err != nil { + slog.Warn("resize2fs -M failed", "path", rootfs, "output", string(out), "error", err) + return + } + + slog.Info("base image shrunk", "path", rootfs) +} + // expandImage expands a single rootfs image if it is smaller than targetBytes. 
func expandImage(rootfs string, targetBytes int64, targetMB int) error { info, err := os.Stat(rootfs) diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index e920476..524631d 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -17,19 +17,28 @@ import ( "git.omukk.dev/wrenn/wrenn/internal/devicemapper" "git.omukk.dev/wrenn/wrenn/internal/envdclient" - "git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/layout" "git.omukk.dev/wrenn/wrenn/internal/models" "git.omukk.dev/wrenn/wrenn/internal/network" "git.omukk.dev/wrenn/wrenn/internal/snapshot" "git.omukk.dev/wrenn/wrenn/internal/uffd" "git.omukk.dev/wrenn/wrenn/internal/vm" + "git.omukk.dev/wrenn/wrenn/pkg/id" + envdpb "git.omukk.dev/wrenn/wrenn/proto/envd/gen" ) // Config holds the paths and defaults for the sandbox manager. type Config struct { - WrennDir string // root directory (e.g. /var/lib/wrenn); all sub-paths derived via layout package - EnvdTimeout time.Duration + WrennDir string // root directory (e.g. /var/lib/wrenn); all sub-paths derived via layout package + EnvdTimeout time.Duration + DefaultRootfsSizeMB int // target size for template rootfs images; 0 → DefaultDiskSizeMB + + // Resolved at startup by the host agent. + KernelPath string // path to the latest vmlinux-x.y.z + KernelVersion string // semver extracted from filename + FirecrackerBin string // path to the firecracker binary + FirecrackerVersion string // semver from firecracker --version + AgentVersion string // host agent version (injected via ldflags) } // Manager orchestrates sandbox lifecycle: VM, network, filesystem, envd. @@ -84,6 +93,35 @@ type snapshotParent struct { // preventing the crash. const maxDiffGenerations = 8 +// buildMetadata constructs the metadata map with version information. 
+func (m *Manager) buildMetadata(envdVersion string) map[string]string { + meta := map[string]string{ + "kernel_version": m.cfg.KernelVersion, + "firecracker_version": m.cfg.FirecrackerVersion, + "agent_version": m.cfg.AgentVersion, + } + if envdVersion != "" { + meta["envd_version"] = envdVersion + } + return meta +} + +// resolveKernelPath returns the kernel path for the given version hint. +// If the exact version exists on disk, it is used. Otherwise, falls back to +// the latest kernel (m.cfg.KernelPath). +func (m *Manager) resolveKernelPath(versionHint string) string { + if versionHint == "" { + return m.cfg.KernelPath + } + exact := layout.KernelPathVersioned(m.cfg.WrennDir, versionHint) + if _, err := os.Stat(exact); err == nil { + return exact + } + slog.Warn("requested kernel version not found, using latest", + "requested", versionHint, "latest", m.cfg.KernelVersion) + return m.cfg.KernelPath +} + // New creates a new sandbox manager. func New(cfg Config) *Manager { if cfg.EnvdTimeout == 0 { @@ -173,7 +211,7 @@ func (m *Manager) Create(ctx context.Context, sandboxID string, teamID, template vmCfg := vm.VMConfig{ SandboxID: sandboxID, TemplateID: id.UUIDString(templateID), - KernelPath: layout.KernelPath(m.cfg.WrennDir), + KernelPath: m.cfg.KernelPath, RootfsPath: dmDev.DevicePath, VCPUs: vcpus, MemoryMB: memoryMB, @@ -183,6 +221,7 @@ func (m *Manager) Create(ctx context.Context, sandboxID string, teamID, template GuestIP: slot.GuestIP, GatewayIP: slot.TapIP, NetMask: slot.GuestNetMask, + FirecrackerBin: m.cfg.FirecrackerBin, } if _, err := m.vm.Create(ctx, vmCfg); err != nil { @@ -209,6 +248,9 @@ func (m *Manager) Create(ctx context.Context, sandboxID string, teamID, template return nil, fmt.Errorf("wait for envd: %w", err) } + // Fetch envd version (best-effort). 
+ envdVersion, _ := client.FetchVersion(ctx) + now := time.Now() sb := &sandboxState{ Sandbox: models.Sandbox{ @@ -224,6 +266,7 @@ func (m *Manager) Create(ctx context.Context, sandboxID string, teamID, template RootfsPath: dmDev.DevicePath, CreatedAt: now, LastActiveAt: now, + Metadata: m.buildMetadata(envdVersion), }, slot: slot, client: client, @@ -326,6 +369,20 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { sb.connTracker.Drain(2 * time.Second) slog.Debug("pause: proxy connections drained", "id", sandboxID) + // Step 0b: Signal envd to quiesce continuous goroutines (port scanner, + // forwarder) and run GC before freezing vCPUs. This prevents Go runtime + // page allocator corruption ("bad summary data") on snapshot restore. + // Best-effort: a failure is logged but does not abort the pause. + func() { + prepCtx, prepCancel := context.WithTimeout(ctx, 3*time.Second) + defer prepCancel() + if err := sb.client.PrepareSnapshot(prepCtx); err != nil { + slog.Warn("pause: pre-snapshot quiesce failed (best-effort)", "id", sandboxID, "error", err) + } else { + slog.Debug("pause: envd goroutines quiesced", "id", sandboxID) + } + }() + pauseStart := time.Now() // Step 1: Pause the VM (freeze vCPUs). @@ -542,7 +599,7 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { // Resume restores a paused sandbox from its snapshot using UFFD for // lazy memory loading. The sandbox gets a new network slot. 
-func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int) (*models.Sandbox, error) { +func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, kernelVersion string) (*models.Sandbox, error) { pauseDir := layout.PauseSnapshotDir(m.cfg.WrennDir, sandboxID) if _, err := os.Stat(pauseDir); err != nil { return nil, fmt.Errorf("no snapshot found for sandbox %s", sandboxID) @@ -656,7 +713,7 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int) // Restore VM from snapshot. vmCfg := vm.VMConfig{ SandboxID: sandboxID, - KernelPath: layout.KernelPath(m.cfg.WrennDir), + KernelPath: m.resolveKernelPath(kernelVersion), RootfsPath: dmDev.DevicePath, VCPUs: 1, // Placeholder; overridden by snapshot. MemoryMB: int(header.Metadata.Size / (1024 * 1024)), // Placeholder; overridden by snapshot. @@ -666,6 +723,7 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int) GuestIP: slot.GuestIP, GatewayIP: slot.TapIP, NetMask: slot.GuestNetMask, + FirecrackerBin: m.cfg.FirecrackerBin, } resumeSnapPath := filepath.Join(pauseDir, snapshot.SnapFileName) @@ -697,6 +755,14 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int) return nil, fmt.Errorf("wait for envd: %w", err) } + // Trigger envd to re-read MMDS so it picks up the new sandbox/template IDs. + if err := client.PostInit(waitCtx); err != nil { + slog.Warn("post-init failed after resume, metadata files may be stale", "sandbox", sandboxID, "error", err) + } + + // Fetch envd version (best-effort). 
+ envdVersion, _ := client.FetchVersion(ctx) + now := time.Now() sb := &sandboxState{ Sandbox: models.Sandbox{ @@ -710,6 +776,7 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int) RootfsPath: dmDev.DevicePath, CreatedAt: now, LastActiveAt: now, + Metadata: m.buildMetadata(envdVersion), }, slot: slot, client: client, @@ -880,6 +947,18 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t return 0, fmt.Errorf("sandbox %s not found", sandboxID) } + // Flush guest page cache to disk before stopping the VM. Without this, + // files written by the build (e.g. pip-installed packages) may exist in the + // guest's page cache but not yet on the dm block device — flatten would then + // capture 0-byte files. + func() { + syncCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + if _, err := sb.client.Exec(syncCtx, "/bin/sync"); err != nil { + slog.Warn("flatten: guest sync failed (non-fatal)", "id", sb.ID, "error", err) + } + }() + // Stop the VM but keep the dm device alive for flattening. m.stopSampler(sb) if err := m.vm.Destroy(ctx, sb.ID); err != nil { @@ -919,8 +998,8 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t // Clean up dm device and loop device now that flatten is complete. m.cleanupDM(sb) - // Shrink the flattened image to its minimum size so stored templates are - // compact. EnsureImageSizes will re-expand them on the next agent startup. + // Shrink the flattened image to its minimum size, then re-expand to the + // configured default rootfs size so sandboxes see the full disk from boot. 
if out, err := exec.Command("e2fsck", "-fy", outputPath).CombinedOutput(); err != nil { if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() > 1 { slog.Warn("e2fsck before shrink failed (non-fatal)", "output", string(out), "error", err) @@ -930,6 +1009,15 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t slog.Warn("resize2fs -M failed (non-fatal)", "output", string(out), "error", err) } + // Re-expand to default rootfs size. + targetMB := m.cfg.DefaultRootfsSizeMB + if targetMB <= 0 { + targetMB = DefaultDiskSizeMB + } + if err := expandImage(outputPath, int64(targetMB)*1024*1024, targetMB); err != nil { + slog.Warn("failed to expand template to default size (non-fatal)", "error", err) + } + sizeBytes, err := snapshot.DirSize(flattenDstDir, "") if err != nil { slog.Warn("failed to calculate template size", "error", err) @@ -1057,7 +1145,7 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team vmCfg := vm.VMConfig{ SandboxID: sandboxID, TemplateID: id.UUIDString(templateID), - KernelPath: layout.KernelPath(m.cfg.WrennDir), + KernelPath: m.cfg.KernelPath, RootfsPath: dmDev.DevicePath, VCPUs: vcpus, MemoryMB: memoryMB, @@ -1067,6 +1155,7 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team GuestIP: slot.GuestIP, GatewayIP: slot.TapIP, NetMask: slot.GuestNetMask, + FirecrackerBin: m.cfg.FirecrackerBin, } snapPath := filepath.Join(tmplDir, snapshot.SnapFileName) @@ -1098,6 +1187,14 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team return nil, fmt.Errorf("wait for envd: %w", err) } + // Trigger envd to re-read MMDS so it picks up the new sandbox/template IDs. + if err := client.PostInit(waitCtx); err != nil { + slog.Warn("post-init failed after template restore, metadata files may be stale", "sandbox", sandboxID, "error", err) + } + + // Fetch envd version (best-effort). 
+ envdVersion, _ := client.FetchVersion(ctx) + now := time.Now() sb := &sandboxState{ Sandbox: models.Sandbox{ @@ -1113,6 +1210,7 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team RootfsPath: dmDev.DevicePath, CreatedAt: now, LastActiveAt: now, + Metadata: m.buildMetadata(envdVersion), }, slot: slot, client: client, @@ -1213,6 +1311,155 @@ func (m *Manager) GetClient(sandboxID string) (*envdclient.Client, error) { return sb.client, nil } +// SetDefaults calls envd's PostInit to configure the default user and +// environment variables for a running sandbox. This is called by the host +// agent after sandbox creation or resume when the template specifies defaults. +func (m *Manager) SetDefaults(ctx context.Context, sandboxID, defaultUser string, defaultEnv map[string]string) error { + if defaultUser == "" && len(defaultEnv) == 0 { + return nil + } + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + return sb.client.PostInitWithDefaults(ctx, defaultUser, defaultEnv) +} + +// PtyAttach starts a new PTY process or reconnects to an existing one. +// If cmd is non-empty, starts a new process. If empty, reconnects using tag. +func (m *Manager) PtyAttach(ctx context.Context, sandboxID, tag, cmd string, args []string, cols, rows uint32, envs map[string]string, cwd string) (<-chan envdclient.PtyEvent, error) { + sb, err := m.get(sandboxID) + if err != nil { + return nil, err + } + if sb.Status != models.StatusRunning { + return nil, fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + if cmd != "" { + return sb.client.PtyStart(ctx, tag, cmd, args, cols, rows, envs, cwd) + } + return sb.client.PtyConnect(ctx, tag) +} + +// PtySendInput sends raw bytes to a PTY process in a sandbox. 
+func (m *Manager) PtySendInput(ctx context.Context, sandboxID, tag string, data []byte) error { + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + return sb.client.PtySendInput(ctx, tag, data) +} + +// PtyResize updates the terminal dimensions for a PTY process in a sandbox. +func (m *Manager) PtyResize(ctx context.Context, sandboxID, tag string, cols, rows uint32) error { + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + return sb.client.PtyResize(ctx, tag, cols, rows) +} + +// PtyKill sends SIGKILL to a PTY process in a sandbox. +func (m *Manager) PtyKill(ctx context.Context, sandboxID, tag string) error { + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + return sb.client.PtyKill(ctx, tag) +} + +// StartBackground starts a background process inside a sandbox. +func (m *Manager) StartBackground(ctx context.Context, sandboxID, tag, cmd string, args []string, envs map[string]string, cwd string) (uint32, error) { + sb, err := m.get(sandboxID) + if err != nil { + return 0, err + } + if sb.Status != models.StatusRunning { + return 0, fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + return sb.client.StartBackground(ctx, tag, cmd, args, envs, cwd) +} + +// ConnectProcess re-attaches to a running process inside a sandbox. 
+func (m *Manager) ConnectProcess(ctx context.Context, sandboxID string, pid uint32, tag string) (<-chan envdclient.ExecStreamEvent, error) { + sb, err := m.get(sandboxID) + if err != nil { + return nil, err + } + if sb.Status != models.StatusRunning { + return nil, fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + return sb.client.ConnectProcess(ctx, pid, tag) +} + +// ListProcesses returns all running processes inside a sandbox. +func (m *Manager) ListProcesses(ctx context.Context, sandboxID string) ([]envdclient.ProcessInfo, error) { + sb, err := m.get(sandboxID) + if err != nil { + return nil, err + } + if sb.Status != models.StatusRunning { + return nil, fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + return sb.client.ListProcesses(ctx) +} + +// KillProcess sends a signal to a process inside a sandbox. +func (m *Manager) KillProcess(ctx context.Context, sandboxID string, pid uint32, tag string, signal envdpb.Signal) error { + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + + m.mu.Lock() + sb.LastActiveAt = time.Now() + m.mu.Unlock() + + return sb.client.KillProcess(ctx, pid, tag, signal) +} + // AcquireProxyConn atomically looks up a sandbox by ID and registers an // in-flight proxy connection. Returns the sandbox's host-reachable IP, the // connection tracker, and true on success. 
The caller must call diff --git a/internal/scheduler/least_loaded.go b/internal/scheduler/least_loaded.go deleted file mode 100644 index 6990da0..0000000 --- a/internal/scheduler/least_loaded.go +++ /dev/null @@ -1 +0,0 @@ -package scheduler diff --git a/internal/vm/manager.go b/internal/vm/manager.go index 9e9466f..99dbfe3 100644 --- a/internal/vm/manager.go +++ b/internal/vm/manager.go @@ -5,6 +5,7 @@ import ( "fmt" "log/slog" "os" + "sync" "time" ) @@ -17,6 +18,7 @@ type VM struct { // Manager handles the lifecycle of Firecracker microVMs. type Manager struct { + mu sync.RWMutex // vms tracks running VMs by sandbox ID. vms map[string]*VM } @@ -84,7 +86,9 @@ func (m *Manager) Create(ctx context.Context, cfg VMConfig) (*VM, error) { client: client, } + m.mu.Lock() m.vms[cfg.SandboxID] = vm + m.mu.Unlock() slog.Info("VM started successfully", "sandbox", cfg.SandboxID) @@ -126,7 +130,9 @@ func configureVM(ctx context.Context, client *fcClient, cfg *VMConfig) error { // Pause pauses a running VM. func (m *Manager) Pause(ctx context.Context, sandboxID string) error { + m.mu.RLock() vm, ok := m.vms[sandboxID] + m.mu.RUnlock() if !ok { return fmt.Errorf("VM not found: %s", sandboxID) } @@ -141,7 +147,9 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { // Resume resumes a paused VM. func (m *Manager) Resume(ctx context.Context, sandboxID string) error { + m.mu.RLock() vm, ok := m.vms[sandboxID] + m.mu.RUnlock() if !ok { return fmt.Errorf("VM not found: %s", sandboxID) } @@ -156,10 +164,14 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string) error { // Destroy stops and cleans up a VM. 
func (m *Manager) Destroy(ctx context.Context, sandboxID string) error { + m.mu.Lock() vm, ok := m.vms[sandboxID] if !ok { + m.mu.Unlock() return fmt.Errorf("VM not found: %s", sandboxID) } + delete(m.vms, sandboxID) + m.mu.Unlock() slog.Info("destroying VM", "sandbox", sandboxID) @@ -171,8 +183,6 @@ func (m *Manager) Destroy(ctx context.Context, sandboxID string) error { // Clean up the API socket. os.Remove(vm.Config.SocketPath) - delete(m.vms, sandboxID) - slog.Info("VM destroyed", "sandbox", sandboxID) return nil } @@ -180,7 +190,9 @@ func (m *Manager) Destroy(ctx context.Context, sandboxID string) error { // Snapshot creates a VM snapshot. The VM must already be paused. // snapshotType is "Full" (all memory) or "Diff" (only dirty pages since last resume). func (m *Manager) Snapshot(ctx context.Context, sandboxID, snapPath, memPath, snapshotType string) error { + m.mu.RLock() vm, ok := m.vms[sandboxID] + m.mu.RUnlock() if !ok { return fmt.Errorf("VM not found: %s", sandboxID) } @@ -263,7 +275,9 @@ func (m *Manager) CreateFromSnapshot(ctx context.Context, cfg VMConfig, snapPath client: client, } + m.mu.Lock() m.vms[cfg.SandboxID] = vm + m.mu.Unlock() slog.Info("VM restored from snapshot", "sandbox", cfg.SandboxID) return vm, nil @@ -277,7 +291,9 @@ func (v *VM) PID() int { // Get returns a running VM by sandbox ID. 
func (m *Manager) Get(sandboxID string) (*VM, bool) { + m.mu.RLock() vm, ok := m.vms[sandboxID] + m.mu.RUnlock() return vm, ok } diff --git a/internal/audit/logger.go b/pkg/audit/logger.go similarity index 99% rename from internal/audit/logger.go rename to pkg/audit/logger.go index 2210594..e101b3c 100644 --- a/internal/audit/logger.go +++ b/pkg/audit/logger.go @@ -7,10 +7,10 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/events" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/events" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // AuditLogger writes audit log entries for user-initiated and system events. diff --git a/internal/auth/apikey.go b/pkg/auth/apikey.go similarity index 100% rename from internal/auth/apikey.go rename to pkg/auth/apikey.go diff --git a/internal/auth/cert.go b/pkg/auth/cert.go similarity index 100% rename from internal/auth/cert.go rename to pkg/auth/cert.go diff --git a/internal/auth/context.go b/pkg/auth/context.go similarity index 100% rename from internal/auth/context.go rename to pkg/auth/context.go diff --git a/internal/auth/jwt.go b/pkg/auth/jwt.go similarity index 98% rename from internal/auth/jwt.go rename to pkg/auth/jwt.go index 21cd589..70dd947 100644 --- a/internal/auth/jwt.go +++ b/pkg/auth/jwt.go @@ -7,7 +7,7 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) const jwtExpiry = 6 * time.Hour diff --git a/internal/auth/oauth/github.go b/pkg/auth/oauth/github.go similarity index 100% rename from internal/auth/oauth/github.go rename to pkg/auth/oauth/github.go diff --git a/internal/auth/oauth/provider.go b/pkg/auth/oauth/provider.go similarity index 100% rename from internal/auth/oauth/provider.go rename to 
pkg/auth/oauth/provider.go diff --git a/internal/auth/password.go b/pkg/auth/password.go similarity index 100% rename from internal/auth/password.go rename to pkg/auth/password.go diff --git a/internal/channels/crypto.go b/pkg/channels/crypto.go similarity index 100% rename from internal/channels/crypto.go rename to pkg/channels/crypto.go diff --git a/internal/channels/deliver.go b/pkg/channels/deliver.go similarity index 94% rename from internal/channels/deliver.go rename to pkg/channels/deliver.go index 1f5e333..51e01db 100644 --- a/internal/channels/deliver.go +++ b/pkg/channels/deliver.go @@ -7,7 +7,7 @@ import ( "github.com/containrrr/shoutrrr" - "git.omukk.dev/wrenn/wrenn/internal/events" + "git.omukk.dev/wrenn/wrenn/pkg/events" ) // Deliver sends a notification to a single provider with the given config. diff --git a/internal/channels/dispatcher.go b/pkg/channels/dispatcher.go similarity index 97% rename from internal/channels/dispatcher.go rename to pkg/channels/dispatcher.go index b28b05d..4a24d5a 100644 --- a/internal/channels/dispatcher.go +++ b/pkg/channels/dispatcher.go @@ -8,9 +8,9 @@ import ( "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/events" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/events" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) const ( diff --git a/internal/channels/message.go b/pkg/channels/message.go similarity index 97% rename from internal/channels/message.go rename to pkg/channels/message.go index 2900c40..fe512de 100644 --- a/internal/channels/message.go +++ b/pkg/channels/message.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "git.omukk.dev/wrenn/wrenn/internal/events" + "git.omukk.dev/wrenn/wrenn/pkg/events" ) // FormatMessage produces a human-readable notification string containing diff --git a/internal/channels/publisher.go b/pkg/channels/publisher.go similarity index 95% rename from 
internal/channels/publisher.go rename to pkg/channels/publisher.go index da632c5..3f2c36f 100644 --- a/internal/channels/publisher.go +++ b/pkg/channels/publisher.go @@ -7,7 +7,7 @@ import ( "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/events" + "git.omukk.dev/wrenn/wrenn/pkg/events" ) const streamKey = "wrenn:events" diff --git a/internal/channels/service.go b/pkg/channels/service.go similarity index 97% rename from internal/channels/service.go rename to pkg/channels/service.go index 5f53742..248abfc 100644 --- a/internal/channels/service.go +++ b/pkg/channels/service.go @@ -13,10 +13,10 @@ import ( "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/events" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/validate" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/events" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/validate" ) // Valid providers. diff --git a/internal/channels/shoutrrr.go b/pkg/channels/shoutrrr.go similarity index 100% rename from internal/channels/shoutrrr.go rename to pkg/channels/shoutrrr.go diff --git a/internal/channels/webhook.go b/pkg/channels/webhook.go similarity index 100% rename from internal/channels/webhook.go rename to pkg/channels/webhook.go diff --git a/internal/config/config.go b/pkg/config/config.go similarity index 70% rename from internal/config/config.go rename to pkg/config/config.go index dbc1f1f..a695392 100644 --- a/internal/config/config.go +++ b/pkg/config/config.go @@ -3,6 +3,7 @@ package config import ( "encoding/hex" "os" + "strconv" "github.com/joho/godotenv" ) @@ -13,6 +14,7 @@ type Config struct { RedisURL string ListenAddr string JWTSecret string + WrennDir string // WRENN_DIR — base directory for wrenn data (logs, etc.) // mTLS — CP→Agent channel. 
Both must be set to enable mTLS; omitting either // disables cert issuance and leaves agent connections on plain HTTP (dev mode). @@ -27,6 +29,13 @@ type Config struct { // Channels — encryption for channel secrets (AES-256-GCM). EncryptionKeyHex string // WRENN_ENCRYPTION_KEY raw hex string (for validation) EncryptionKey [32]byte // parsed 32-byte key + + // SMTP — transactional email. All fields optional; omitting SMTPHost disables email. + SMTPHost string // SMTP_HOST + SMTPPort int // SMTP_PORT (default 587) + SMTPUsername string // SMTP_USERNAME + SMTPPassword string // SMTP_PASSWORD + SMTPFromEmail string // SMTP_FROM_EMAIL } // Load reads configuration from a .env file (if present) and environment variables. @@ -40,6 +49,7 @@ func Load() Config { RedisURL: envOrDefault("REDIS_URL", "redis://localhost:6379/0"), ListenAddr: envOrDefault("WRENN_CP_LISTEN_ADDR", ":8080"), JWTSecret: os.Getenv("JWT_SECRET"), + WrennDir: envOrDefault("WRENN_DIR", "/var/lib/wrenn"), CACert: os.Getenv("WRENN_CA_CERT"), CAKey: os.Getenv("WRENN_CA_KEY"), @@ -50,6 +60,12 @@ func Load() Config { CPPublicURL: os.Getenv("CP_PUBLIC_URL"), EncryptionKeyHex: os.Getenv("WRENN_ENCRYPTION_KEY"), + + SMTPHost: os.Getenv("SMTP_HOST"), + SMTPPort: envOrDefaultInt("SMTP_PORT", 587), + SMTPUsername: os.Getenv("SMTP_USERNAME"), + SMTPPassword: os.Getenv("SMTP_PASSWORD"), + SMTPFromEmail: envOrDefault("SMTP_FROM_EMAIL", "noreply@wrenn.dev"), } if cfg.EncryptionKeyHex != "" { @@ -68,3 +84,15 @@ func envOrDefault(key, def string) string { } return def } + +func envOrDefaultInt(key string, def int) int { + v := os.Getenv(key) + if v == "" { + return def + } + n, err := strconv.Atoi(v) + if err != nil { + return def + } + return n +} diff --git a/pkg/cpextension/extension.go b/pkg/cpextension/extension.go new file mode 100644 index 0000000..b2065f2 --- /dev/null +++ b/pkg/cpextension/extension.go @@ -0,0 +1,50 @@ +// Package cpextension defines the types for extending the control plane server. 
+// This package is intentionally minimal in its dependencies (only internal/email from internal/) +// to avoid import cycles between pkg/cpserver and internal/api. +package cpextension + +import ( + "context" + + "github.com/go-chi/chi/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/redis/go-redis/v9" + + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/config" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/scheduler" +) + +// ServerContext exposes the initialized dependencies that extensions can use +// to register routes and start background workers. All fields are read-only +// from the extension's perspective. +type ServerContext struct { + Queries *db.Queries + PgPool *pgxpool.Pool + Redis *redis.Client + HostPool *lifecycle.HostClientPool + Scheduler scheduler.HostScheduler + CA *auth.CA + Audit *audit.AuditLogger + Mailer email.Mailer + JWTSecret []byte + Config config.Config +} + +// Extension allows enterprise (or any external) code to plug additional +// routes and background workers into the control plane without modifying +// the core server. +type Extension interface { + // RegisterRoutes is called after all core routes are registered. + // The chi.Router supports sub-routing, middleware, etc. + RegisterRoutes(r chi.Router, ctx ServerContext) + + // BackgroundWorkers returns functions that will be called once with + // the application context after the server is fully initialized. + // Each function should start its own goroutine(s) and return. 
+ BackgroundWorkers(ctx ServerContext) []func(context.Context) +} diff --git a/pkg/cpserver/extension.go b/pkg/cpserver/extension.go new file mode 100644 index 0000000..26a0dc6 --- /dev/null +++ b/pkg/cpserver/extension.go @@ -0,0 +1,11 @@ +package cpserver + +import "git.omukk.dev/wrenn/wrenn/pkg/cpextension" + +// ServerContext is an alias for cpextension.ServerContext. +// Enterprise code should use this package (pkg/cpserver) as the main entry point. +type ServerContext = cpextension.ServerContext + +// Extension is an alias for cpextension.Extension. +// Enterprise code should use this package (pkg/cpserver) as the main entry point. +type Extension = cpextension.Extension diff --git a/pkg/cpserver/options.go b/pkg/cpserver/options.go new file mode 100644 index 0000000..dc2ce0a --- /dev/null +++ b/pkg/cpserver/options.go @@ -0,0 +1,27 @@ +package cpserver + +// options holds the configuration for Run. +type options struct { + version string + commit string + extensions []Extension +} + +// Option configures the control plane server. +type Option func(*options) + +// WithVersion sets the version and commit strings for logging. +func WithVersion(version, commit string) Option { + return func(o *options) { + o.version = version + o.commit = commit + } +} + +// WithExtensions registers one or more extensions that add routes and +// background workers to the control plane. +func WithExtensions(exts ...Extension) Option { + return func(o *options) { + o.extensions = append(o.extensions, exts...) 
+ } +} diff --git a/pkg/cpserver/run.go b/pkg/cpserver/run.go new file mode 100644 index 0000000..32a819a --- /dev/null +++ b/pkg/cpserver/run.go @@ -0,0 +1,253 @@ +package cpserver + +import ( + "context" + "log/slog" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/redis/go-redis/v9" + + "git.omukk.dev/wrenn/wrenn/internal/api" + "git.omukk.dev/wrenn/wrenn/internal/email" + "git.omukk.dev/wrenn/wrenn/pkg/audit" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/auth/oauth" + "git.omukk.dev/wrenn/wrenn/pkg/channels" + "git.omukk.dev/wrenn/wrenn/pkg/config" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/logging" + "git.omukk.dev/wrenn/wrenn/pkg/scheduler" +) + +// Run initializes and starts the control plane server. It blocks until a +// SIGINT or SIGTERM signal is received, then shuts down gracefully. +// +// Extensions registered via WithExtensions get to add routes and start +// background workers after the core server is fully initialized. +func Run(opts ...Option) { + o := &options{ + version: "dev", + commit: "unknown", + } + for _, opt := range opts { + opt(o) + } + + cfg := config.Load() + cleanupLog := logging.Setup(filepath.Join(cfg.WrennDir, "logs"), "control-plane") + defer cleanupLog() + + if len(cfg.JWTSecret) < 32 { + slog.Error("JWT_SECRET must be at least 32 characters") + os.Exit(1) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Database connection pool. + pool, err := pgxpool.New(ctx, cfg.DatabaseURL) + if err != nil { + slog.Error("failed to connect to database", "error", err) + os.Exit(1) + } + defer pool.Close() + + if err := pool.Ping(ctx); err != nil { + slog.Error("failed to ping database", "error", err) + os.Exit(1) + } + slog.Info("connected to database") + + queries := db.New(pool) + + // Redis client. 
+ redisOpts, err := redis.ParseURL(cfg.RedisURL) + if err != nil { + slog.Error("failed to parse REDIS_URL", "error", err) + os.Exit(1) + } + rdb := redis.NewClient(redisOpts) + defer rdb.Close() + + if err := rdb.Ping(ctx).Err(); err != nil { + slog.Error("failed to ping redis", "error", err) + os.Exit(1) + } + slog.Info("connected to redis") + + // mTLS is mandatory — parse internal CA for CP↔agent communication. + if cfg.CACert == "" || cfg.CAKey == "" { + slog.Error("WRENN_CA_CERT and WRENN_CA_KEY are required — mTLS is mandatory for CP↔agent communication") + os.Exit(1) + } + ca, err := auth.ParseCA(cfg.CACert, cfg.CAKey) + if err != nil { + slog.Error("failed to parse mTLS CA from environment", "error", err) + os.Exit(1) + } + slog.Info("mTLS enabled: CA loaded") + + // Host client pool — manages Connect RPC clients to host agents. + cpCertStore, err := auth.NewCPCertStore(ca) + if err != nil { + slog.Error("failed to issue CP client certificate", "error", err) + os.Exit(1) + } + // Renew the CP client certificate periodically so it never expires + // while the control plane is running (TTL = 24h, renewal = every 12h). + go func() { + ticker := time.NewTicker(auth.CPCertRenewInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := cpCertStore.Refresh(); err != nil { + slog.Error("failed to renew CP client certificate", "error", err) + } else { + slog.Info("CP client certificate renewed") + } + } + } + }() + hostPool := lifecycle.NewHostClientPoolTLS(auth.CPClientTLSConfig(ca, cpCertStore)) + slog.Info("host client pool: mTLS enabled") + + // Scheduler — picks a host for each new sandbox (least-loaded, bottleneck-first). + hostScheduler := scheduler.NewLeastLoadedScheduler(queries) + + // OAuth provider registry. 
+ oauthRegistry := oauth.NewRegistry() + if cfg.OAuthGitHubClientID != "" && cfg.OAuthGitHubClientSecret != "" { + if cfg.CPPublicURL == "" { + slog.Error("CP_PUBLIC_URL must be set when OAuth providers are configured") + os.Exit(1) + } + callbackURL := strings.TrimRight(cfg.CPPublicURL, "/") + "/auth/oauth/github/callback" + ghProvider := oauth.NewGitHubProvider(cfg.OAuthGitHubClientID, cfg.OAuthGitHubClientSecret, callbackURL) + oauthRegistry.Register(ghProvider) + slog.Info("registered OAuth provider", "provider", "github") + } + + // Channels: publisher, service, dispatcher. + if len(cfg.EncryptionKeyHex) != 64 { + slog.Error("WRENN_ENCRYPTION_KEY must be a hex-encoded 32-byte key (64 hex chars)") + os.Exit(1) + } + channelPub := channels.NewPublisher(rdb) + channelSvc := &channels.Service{DB: queries, EncKey: cfg.EncryptionKey} + channelDispatcher := channels.NewDispatcher(rdb, queries, cfg.EncryptionKey) + + // Shared audit logger with event publishing. + al := audit.NewWithPublisher(queries, channelPub) + + // Transactional email (no-op if SMTP_HOST is not set). + mailer := email.New(email.Config{ + Host: cfg.SMTPHost, + Port: cfg.SMTPPort, + Username: cfg.SMTPUsername, + Password: cfg.SMTPPassword, + FromEmail: cfg.SMTPFromEmail, + }) + + // Build the server context that extensions receive. + sctx := ServerContext{ + Queries: queries, + PgPool: pool, + Redis: rdb, + HostPool: hostPool, + Scheduler: hostScheduler, + CA: ca, + Audit: al, + Mailer: mailer, + JWTSecret: []byte(cfg.JWTSecret), + Config: cfg, + } + + // API server. + srv := api.New(queries, hostPool, hostScheduler, pool, rdb, []byte(cfg.JWTSecret), oauthRegistry, cfg.OAuthRedirectURL, ca, al, channelSvc, mailer, o.extensions, sctx, o.version) + + // Start template build workers (2 concurrent). + stopBuildWorkers := srv.BuildSvc.StartWorkers(ctx, 2) + defer stopBuildWorkers() + + // Start channel event dispatcher. 
+ channelDispatcher.Start(ctx) + + // Start host monitor (passive + active reconciliation every 30s). + monitor := api.NewHostMonitor(queries, hostPool, al, 30*time.Second) + monitor.Start(ctx) + + // Hard-delete accounts that have been soft-deleted for more than 15 days (runs every 24h). + go func() { + ticker := time.NewTicker(24 * time.Hour) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := queries.HardDeleteExpiredUsers(ctx); err != nil { + slog.Error("account cleanup: failed to hard-delete expired users", "error", err) + } else { + slog.Info("account cleanup: hard-deleted expired users") + } + } + } + }() + + // Start metrics sampler (records per-team sandbox stats every 10s). + sampler := api.NewMetricsSampler(queries, 10*time.Second) + sampler.Start(ctx) + + // Start extension background workers. + for _, ext := range o.extensions { + for _, worker := range ext.BackgroundWorkers(sctx) { + worker(ctx) + } + } + + // Wrap the API handler with the sandbox proxy so that requests with + // {port}-{sandbox_id}.{domain} Host headers are routed to the sandbox's + // host agent. All other requests pass through to the normal API router. + proxyWrapper := api.NewSandboxProxyWrapper(srv.Handler(), queries, hostPool) + + httpServer := &http.Server{ + Addr: cfg.ListenAddr, + Handler: proxyWrapper, + } + + // Graceful shutdown on signal. 
+ sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + slog.Info("received signal, shutting down", "signal", sig) + cancel() + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownCancel() + + if err := httpServer.Shutdown(shutdownCtx); err != nil { + slog.Error("http server shutdown error", "error", err) + } + }() + + slog.Info("control plane starting", "addr", cfg.ListenAddr, "version", o.version, "commit", o.commit) + if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + slog.Error("http server error", "error", err) + os.Exit(1) + } + + slog.Info("control plane stopped") +} diff --git a/internal/db/api_keys.sql.go b/pkg/db/api_keys.sql.go similarity index 87% rename from internal/db/api_keys.sql.go rename to pkg/db/api_keys.sql.go index 4b8d369..b157931 100644 --- a/internal/db/api_keys.sql.go +++ b/pkg/db/api_keys.sql.go @@ -25,6 +25,24 @@ func (q *Queries) DeleteAPIKey(ctx context.Context, arg DeleteAPIKeyParams) erro return err } +const deleteAPIKeysByCreator = `-- name: DeleteAPIKeysByCreator :exec +DELETE FROM team_api_keys WHERE created_by = $1 +` + +func (q *Queries) DeleteAPIKeysByCreator(ctx context.Context, createdBy pgtype.UUID) error { + _, err := q.db.Exec(ctx, deleteAPIKeysByCreator, createdBy) + return err +} + +const deleteAPIKeysByTeam = `-- name: DeleteAPIKeysByTeam :exec +DELETE FROM team_api_keys WHERE team_id = $1 +` + +func (q *Queries) DeleteAPIKeysByTeam(ctx context.Context, teamID pgtype.UUID) error { + _, err := q.db.Exec(ctx, deleteAPIKeysByTeam, teamID) + return err +} + const getAPIKeyByHash = `-- name: GetAPIKeyByHash :one SELECT id, team_id, name, key_hash, key_prefix, created_by, created_at, last_used FROM team_api_keys WHERE key_hash = $1 ` @@ -120,7 +138,7 @@ const listAPIKeysByTeamWithCreator = `-- name: ListAPIKeysByTeamWithCreator :man SELECT k.id, k.team_id, k.name, 
k.key_hash, k.key_prefix, k.created_by, k.created_at, k.last_used, u.email AS creator_email FROM team_api_keys k -JOIN users u ON u.id = k.created_by +LEFT JOIN users u ON u.id = k.created_by WHERE k.team_id = $1 ORDER BY k.created_at DESC ` @@ -134,7 +152,7 @@ type ListAPIKeysByTeamWithCreatorRow struct { CreatedBy pgtype.UUID `json:"created_by"` CreatedAt pgtype.Timestamptz `json:"created_at"` LastUsed pgtype.Timestamptz `json:"last_used"` - CreatorEmail string `json:"creator_email"` + CreatorEmail pgtype.Text `json:"creator_email"` } func (q *Queries) ListAPIKeysByTeamWithCreator(ctx context.Context, teamID pgtype.UUID) ([]ListAPIKeysByTeamWithCreatorRow, error) { diff --git a/internal/db/audit.sql.go b/pkg/db/audit.sql.go similarity index 100% rename from internal/db/audit.sql.go rename to pkg/db/audit.sql.go diff --git a/internal/db/channels.sql.go b/pkg/db/channels.sql.go similarity index 95% rename from internal/db/channels.sql.go rename to pkg/db/channels.sql.go index 18f9048..d668700 100644 --- a/internal/db/channels.sql.go +++ b/pkg/db/channels.sql.go @@ -11,6 +11,15 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const deleteAllChannelsByTeam = `-- name: DeleteAllChannelsByTeam :exec +DELETE FROM channels WHERE team_id = $1 +` + +func (q *Queries) DeleteAllChannelsByTeam(ctx context.Context, teamID pgtype.UUID) error { + _, err := q.db.Exec(ctx, deleteAllChannelsByTeam, teamID) + return err +} + const deleteChannelByTeam = `-- name: DeleteChannelByTeam :exec DELETE FROM channels WHERE id = $1 AND team_id = $2 ` diff --git a/internal/db/db.go b/pkg/db/db.go similarity index 100% rename from internal/db/db.go rename to pkg/db/db.go diff --git a/internal/db/host_refresh_tokens.sql.go b/pkg/db/host_refresh_tokens.sql.go similarity index 100% rename from internal/db/host_refresh_tokens.sql.go rename to pkg/db/host_refresh_tokens.sql.go diff --git a/internal/db/hosts.sql.go b/pkg/db/hosts.sql.go similarity index 82% rename from internal/db/hosts.sql.go rename 
to pkg/db/hosts.sql.go index 2e3962b..0e8f415 100644 --- a/internal/db/hosts.sql.go +++ b/pkg/db/hosts.sql.go @@ -154,6 +154,112 @@ func (q *Queries) GetHostTokensByHost(ctx context.Context, hostID pgtype.UUID) ( return items, nil } +const getHostsWithLoad = `-- name: GetHostsWithLoad :many +SELECT + h.id, + h.type, + h.team_id, + h.provider, + h.availability_zone, + h.arch, + h.cpu_cores, + h.memory_mb, + h.disk_gb, + h.address, + h.status, + h.last_heartbeat_at, + h.metadata, + h.created_by, + h.created_at, + h.updated_at, + h.cert_fingerprint, + h.cert_expires_at, + COALESCE(SUM(s.vcpus) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_vcpus, + COALESCE(SUM(s.memory_mb) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_memory_mb, + COALESCE(SUM(s.disk_size_mb) FILTER (WHERE s.status IN ('running', 'starting', 'pending')), 0)::int AS running_disk_mb, + COALESCE(SUM(s.memory_mb) FILTER (WHERE s.status = 'paused'), 0)::int AS paused_memory_mb, + COALESCE(SUM(s.disk_size_mb) FILTER (WHERE s.status = 'paused'), 0)::int AS paused_disk_mb +FROM hosts h +LEFT JOIN sandboxes s ON s.host_id = h.id + AND s.status IN ('running', 'paused', 'starting', 'pending') +WHERE h.status = 'online' + AND h.address != '' +GROUP BY h.id +ORDER BY h.created_at +` + +type GetHostsWithLoadRow struct { + ID pgtype.UUID `json:"id"` + Type string `json:"type"` + TeamID pgtype.UUID `json:"team_id"` + Provider string `json:"provider"` + AvailabilityZone string `json:"availability_zone"` + Arch string `json:"arch"` + CpuCores int32 `json:"cpu_cores"` + MemoryMb int32 `json:"memory_mb"` + DiskGb int32 `json:"disk_gb"` + Address string `json:"address"` + Status string `json:"status"` + LastHeartbeatAt pgtype.Timestamptz `json:"last_heartbeat_at"` + Metadata []byte `json:"metadata"` + CreatedBy pgtype.UUID `json:"created_by"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + UpdatedAt pgtype.Timestamptz `json:"updated_at"` + 
CertFingerprint string `json:"cert_fingerprint"` + CertExpiresAt pgtype.Timestamptz `json:"cert_expires_at"` + RunningVcpus int32 `json:"running_vcpus"` + RunningMemoryMb int32 `json:"running_memory_mb"` + RunningDiskMb int32 `json:"running_disk_mb"` + PausedMemoryMb int32 `json:"paused_memory_mb"` + PausedDiskMb int32 `json:"paused_disk_mb"` +} + +// Returns all online hosts with raw per-host sandbox resource consumption. +// Separates running and paused sandbox totals so the caller can apply its own formulas. +func (q *Queries) GetHostsWithLoad(ctx context.Context) ([]GetHostsWithLoadRow, error) { + rows, err := q.db.Query(ctx, getHostsWithLoad) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetHostsWithLoadRow + for rows.Next() { + var i GetHostsWithLoadRow + if err := rows.Scan( + &i.ID, + &i.Type, + &i.TeamID, + &i.Provider, + &i.AvailabilityZone, + &i.Arch, + &i.CpuCores, + &i.MemoryMb, + &i.DiskGb, + &i.Address, + &i.Status, + &i.LastHeartbeatAt, + &i.Metadata, + &i.CreatedBy, + &i.CreatedAt, + &i.UpdatedAt, + &i.CertFingerprint, + &i.CertExpiresAt, + &i.RunningVcpus, + &i.RunningMemoryMb, + &i.RunningDiskMb, + &i.PausedMemoryMb, + &i.PausedDiskMb, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const insertHost = `-- name: InsertHost :one INSERT INTO hosts (id, type, team_id, provider, availability_zone, created_by) VALUES ($1, $2, $3, $4, $5, $6) diff --git a/internal/db/metrics.sql.go b/pkg/db/metrics.sql.go similarity index 92% rename from internal/db/metrics.sql.go rename to pkg/db/metrics.sql.go index f522dc2..886daca 100644 --- a/internal/db/metrics.sql.go +++ b/pkg/db/metrics.sql.go @@ -11,6 +11,25 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const deleteMetricPointsByTeam = `-- name: DeleteMetricPointsByTeam :exec +DELETE FROM sandbox_metric_points +WHERE sandbox_id IN (SELECT id FROM sandboxes WHERE team_id = $1) 
+` + +func (q *Queries) DeleteMetricPointsByTeam(ctx context.Context, teamID pgtype.UUID) error { + _, err := q.db.Exec(ctx, deleteMetricPointsByTeam, teamID) + return err +} + +const deleteMetricsSnapshotsByTeam = `-- name: DeleteMetricsSnapshotsByTeam :exec +DELETE FROM sandbox_metrics_snapshots WHERE team_id = $1 +` + +func (q *Queries) DeleteMetricsSnapshotsByTeam(ctx context.Context, teamID pgtype.UUID) error { + _, err := q.db.Exec(ctx, deleteMetricsSnapshotsByTeam, teamID) + return err +} + const deleteSandboxMetricPoints = `-- name: DeleteSandboxMetricPoints :exec DELETE FROM sandbox_metric_points WHERE sandbox_id = $1 diff --git a/internal/db/models.go b/pkg/db/models.go similarity index 89% rename from internal/db/models.go rename to pkg/db/models.go index 3b9cd9e..3111952 100644 --- a/internal/db/models.go +++ b/pkg/db/models.go @@ -111,6 +111,7 @@ type Sandbox struct { LastUpdated pgtype.Timestamptz `json:"last_updated"` TemplateID pgtype.UUID `json:"template_id"` TemplateTeamID pgtype.UUID `json:"template_team_id"` + Metadata []byte `json:"metadata"` } type SandboxMetricPoint struct { @@ -152,14 +153,17 @@ type TeamApiKey struct { } type Template struct { - Name string `json:"name"` - Type string `json:"type"` - Vcpus int32 `json:"vcpus"` - MemoryMb int32 `json:"memory_mb"` - SizeBytes int64 `json:"size_bytes"` - CreatedAt pgtype.Timestamptz `json:"created_at"` - TeamID pgtype.UUID `json:"team_id"` - ID pgtype.UUID `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Vcpus int32 `json:"vcpus"` + MemoryMb int32 `json:"memory_mb"` + SizeBytes int64 `json:"size_bytes"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + TeamID pgtype.UUID `json:"team_id"` + ID pgtype.UUID `json:"id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` + Metadata []byte `json:"metadata"` } type TemplateBuild struct { @@ -183,6 +187,9 @@ type TemplateBuild struct { TemplateID pgtype.UUID `json:"template_id"` TeamID 
pgtype.UUID `json:"team_id"` SkipPrePost bool `json:"skip_pre_post"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` + Metadata []byte `json:"metadata"` } type User struct { @@ -193,6 +200,8 @@ type User struct { IsAdmin bool `json:"is_admin"` CreatedAt pgtype.Timestamptz `json:"created_at"` UpdatedAt pgtype.Timestamptz `json:"updated_at"` + DeletedAt pgtype.Timestamptz `json:"deleted_at"` + Status string `json:"status"` } type UsersTeam struct { diff --git a/internal/db/oauth.sql.go b/pkg/db/oauth.sql.go similarity index 53% rename from internal/db/oauth.sql.go rename to pkg/db/oauth.sql.go index 0270def..724277e 100644 --- a/internal/db/oauth.sql.go +++ b/pkg/db/oauth.sql.go @@ -11,6 +11,20 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const deleteOAuthProvider = `-- name: DeleteOAuthProvider :exec +DELETE FROM oauth_providers WHERE user_id = $1 AND provider = $2 +` + +type DeleteOAuthProviderParams struct { + UserID pgtype.UUID `json:"user_id"` + Provider string `json:"provider"` +} + +func (q *Queries) DeleteOAuthProvider(ctx context.Context, arg DeleteOAuthProviderParams) error { + _, err := q.db.Exec(ctx, deleteOAuthProvider, arg.UserID, arg.Provider) + return err +} + const getOAuthProvider = `-- name: GetOAuthProvider :one SELECT provider, provider_id, user_id, email, created_at FROM oauth_providers WHERE provider = $1 AND provider_id = $2 @@ -34,6 +48,36 @@ func (q *Queries) GetOAuthProvider(ctx context.Context, arg GetOAuthProviderPara return i, err } +const getOAuthProvidersByUserID = `-- name: GetOAuthProvidersByUserID :many +SELECT provider, provider_id, user_id, email, created_at FROM oauth_providers WHERE user_id = $1 +` + +func (q *Queries) GetOAuthProvidersByUserID(ctx context.Context, userID pgtype.UUID) ([]OauthProvider, error) { + rows, err := q.db.Query(ctx, getOAuthProvidersByUserID, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []OauthProvider + for rows.Next() { + var i 
OauthProvider + if err := rows.Scan( + &i.Provider, + &i.ProviderID, + &i.UserID, + &i.Email, + &i.CreatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const insertOAuthProvider = `-- name: InsertOAuthProvider :exec INSERT INTO oauth_providers (provider, provider_id, user_id, email) VALUES ($1, $2, $3, $4) diff --git a/internal/db/sandboxes.sql.go b/pkg/db/sandboxes.sql.go similarity index 90% rename from internal/db/sandboxes.sql.go rename to pkg/db/sandboxes.sql.go index 3ce1644..c48c9ab 100644 --- a/internal/db/sandboxes.sql.go +++ b/pkg/db/sandboxes.sql.go @@ -43,7 +43,7 @@ func (q *Queries) BulkUpdateStatusByIDs(ctx context.Context, arg BulkUpdateStatu } const getSandbox = `-- name: GetSandbox :one -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes WHERE id = $1 +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes WHERE id = $1 ` func (q *Queries) GetSandbox(ctx context.Context, id pgtype.UUID) (Sandbox, error) { @@ -67,12 +67,13 @@ func (q *Queries) GetSandbox(ctx context.Context, id pgtype.UUID) (Sandbox, erro &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ) return i, err } const getSandboxByTeam = `-- name: GetSandboxByTeam :one -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes WHERE id = $1 AND team_id = $2 +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, 
last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes WHERE id = $1 AND team_id = $2 ` type GetSandboxByTeamParams struct { @@ -101,6 +102,7 @@ func (q *Queries) GetSandboxByTeam(ctx context.Context, arg GetSandboxByTeamPara &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ) return i, err } @@ -109,14 +111,9 @@ const getSandboxProxyTarget = `-- name: GetSandboxProxyTarget :one SELECT s.status, h.address AS host_address FROM sandboxes s JOIN hosts h ON h.id = s.host_id -WHERE s.id = $1 AND s.team_id = $2 +WHERE s.id = $1 ` -type GetSandboxProxyTargetParams struct { - ID pgtype.UUID `json:"id"` - TeamID pgtype.UUID `json:"team_id"` -} - type GetSandboxProxyTargetRow struct { Status string `json:"status"` HostAddress string `json:"host_address"` @@ -124,17 +121,17 @@ type GetSandboxProxyTargetRow struct { // Returns the sandbox status and its host's address in one query. // Used by SandboxProxyWrapper to avoid two round-trips. -func (q *Queries) GetSandboxProxyTarget(ctx context.Context, arg GetSandboxProxyTargetParams) (GetSandboxProxyTargetRow, error) { - row := q.db.QueryRow(ctx, getSandboxProxyTarget, arg.ID, arg.TeamID) +func (q *Queries) GetSandboxProxyTarget(ctx context.Context, id pgtype.UUID) (GetSandboxProxyTargetRow, error) { + row := q.db.QueryRow(ctx, getSandboxProxyTarget, id) var i GetSandboxProxyTargetRow err := row.Scan(&i.Status, &i.HostAddress) return i, err } const insertSandbox = `-- name: InsertSandbox :one -INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, template_id, template_team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) -RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id +INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, 
template_id, template_team_id, metadata) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) +RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata ` type InsertSandboxParams struct { @@ -149,6 +146,7 @@ type InsertSandboxParams struct { DiskSizeMb int32 `json:"disk_size_mb"` TemplateID pgtype.UUID `json:"template_id"` TemplateTeamID pgtype.UUID `json:"template_team_id"` + Metadata []byte `json:"metadata"` } func (q *Queries) InsertSandbox(ctx context.Context, arg InsertSandboxParams) (Sandbox, error) { @@ -164,6 +162,7 @@ func (q *Queries) InsertSandbox(ctx context.Context, arg InsertSandboxParams) (S arg.DiskSizeMb, arg.TemplateID, arg.TemplateTeamID, + arg.Metadata, ) var i Sandbox err := row.Scan( @@ -184,13 +183,14 @@ func (q *Queries) InsertSandbox(ctx context.Context, arg InsertSandboxParams) (S &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ) return i, err } const listActiveSandboxesByTeam = `-- name: ListActiveSandboxesByTeam :many -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes -WHERE team_id = $1 AND status IN ('running', 'paused', 'starting') +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes +WHERE team_id = $1 AND status IN ('running', 'paused', 'starting', 'hibernated') ORDER BY created_at DESC ` @@ -221,6 +221,7 @@ func (q *Queries) ListActiveSandboxesByTeam(ctx context.Context, teamID pgtype.U &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ); err != nil { return nil, err } @@ -233,7 +234,7 @@ func (q *Queries) 
ListActiveSandboxesByTeam(ctx context.Context, teamID pgtype.U } const listSandboxes = `-- name: ListSandboxes :many -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes ORDER BY created_at DESC +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes ORDER BY created_at DESC ` func (q *Queries) ListSandboxes(ctx context.Context) ([]Sandbox, error) { @@ -263,6 +264,7 @@ func (q *Queries) ListSandboxes(ctx context.Context) ([]Sandbox, error) { &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ); err != nil { return nil, err } @@ -275,7 +277,7 @@ func (q *Queries) ListSandboxes(ctx context.Context) ([]Sandbox, error) { } const listSandboxesByHostAndStatus = `-- name: ListSandboxesByHostAndStatus :many -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes WHERE host_id = $1 AND status = ANY($2::text[]) ORDER BY created_at DESC ` @@ -312,6 +314,7 @@ func (q *Queries) ListSandboxesByHostAndStatus(ctx context.Context, arg ListSand &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ); err != nil { return nil, err } @@ -324,7 +327,7 @@ func (q *Queries) ListSandboxesByHostAndStatus(ctx context.Context, arg ListSand } const listSandboxesByTeam = `-- name: ListSandboxesByTeam :many -SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, 
disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id FROM sandboxes +SELECT id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata FROM sandboxes WHERE team_id = $1 AND status NOT IN ('stopped', 'error') ORDER BY created_at DESC ` @@ -356,6 +359,7 @@ func (q *Queries) ListSandboxesByTeam(ctx context.Context, teamID pgtype.UUID) ( &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ); err != nil { return nil, err } @@ -399,6 +403,23 @@ func (q *Queries) UpdateLastActive(ctx context.Context, arg UpdateLastActivePara return err } +const updateSandboxMetadata = `-- name: UpdateSandboxMetadata :exec +UPDATE sandboxes +SET metadata = $2, + last_updated = NOW() +WHERE id = $1 +` + +type UpdateSandboxMetadataParams struct { + ID pgtype.UUID `json:"id"` + Metadata []byte `json:"metadata"` +} + +func (q *Queries) UpdateSandboxMetadata(ctx context.Context, arg UpdateSandboxMetadataParams) error { + _, err := q.db.Exec(ctx, updateSandboxMetadata, arg.ID, arg.Metadata) + return err +} + const updateSandboxRunning = `-- name: UpdateSandboxRunning :one UPDATE sandboxes SET status = 'running', @@ -408,7 +429,7 @@ SET status = 'running', last_active_at = $4, last_updated = NOW() WHERE id = $1 -RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id +RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata ` type UpdateSandboxRunningParams struct { @@ -444,6 +465,7 @@ func (q *Queries) UpdateSandboxRunning(ctx context.Context, arg UpdateSandboxRun &i.LastUpdated, &i.TemplateID, 
&i.TemplateTeamID, + &i.Metadata, ) return i, err } @@ -453,7 +475,7 @@ UPDATE sandboxes SET status = $2, last_updated = NOW() WHERE id = $1 -RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id +RETURNING id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, guest_ip, host_ip, created_at, started_at, last_active_at, last_updated, template_id, template_team_id, metadata ` type UpdateSandboxStatusParams struct { @@ -482,6 +504,7 @@ func (q *Queries) UpdateSandboxStatus(ctx context.Context, arg UpdateSandboxStat &i.LastUpdated, &i.TemplateID, &i.TemplateTeamID, + &i.Metadata, ) return i, err } diff --git a/internal/db/teams.sql.go b/pkg/db/teams.sql.go similarity index 65% rename from internal/db/teams.sql.go rename to pkg/db/teams.sql.go index 334141f..30874b3 100644 --- a/internal/db/teams.sql.go +++ b/pkg/db/teams.sql.go @@ -11,6 +11,19 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const countTeamsAdmin = `-- name: CountTeamsAdmin :one +SELECT COUNT(*)::int AS total +FROM teams +WHERE id != '00000000-0000-0000-0000-000000000000' +` + +func (q *Queries) CountTeamsAdmin(ctx context.Context) (int32, error) { + row := q.db.QueryRow(ctx, countTeamsAdmin) + var total int32 + err := row.Scan(&total) + return total, err +} + const deleteTeamMember = `-- name: DeleteTeamMember :exec DELETE FROM users_teams WHERE team_id = $1 AND user_id = $2 ` @@ -77,6 +90,35 @@ func (q *Queries) GetDefaultTeamForUser(ctx context.Context, userID pgtype.UUID) return i, err } +const getOwnedTeamIDs = `-- name: GetOwnedTeamIDs :many +SELECT t.id FROM teams t +JOIN users_teams ut ON ut.team_id = t.id +WHERE ut.user_id = $1 + AND ut.role = 'owner' + AND t.deleted_at IS NULL +` + +// Returns team IDs where the given user has the 'owner' role. 
+func (q *Queries) GetOwnedTeamIDs(ctx context.Context, userID pgtype.UUID) ([]pgtype.UUID, error) { + rows, err := q.db.Query(ctx, getOwnedTeamIDs, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []pgtype.UUID + for rows.Next() { + var id pgtype.UUID + if err := rows.Scan(&id); err != nil { + return nil, err + } + items = append(items, id) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const getTeam = `-- name: GetTeam :one SELECT id, name, slug, is_byoc, created_at, deleted_at FROM teams WHERE id = $1 ` @@ -271,6 +313,111 @@ func (q *Queries) InsertTeamMember(ctx context.Context, arg InsertTeamMemberPara return err } +const listSoleOwnedTeams = `-- name: ListSoleOwnedTeams :many +SELECT t.id FROM teams t +JOIN users_teams ut ON ut.team_id = t.id +WHERE ut.user_id = $1 + AND ut.role = 'owner' + AND t.deleted_at IS NULL + AND NOT EXISTS ( + SELECT 1 FROM users_teams ut2 + WHERE ut2.team_id = t.id AND ut2.user_id <> $1 + ) +` + +// Returns teams where the user is the owner and no other members exist. 
+func (q *Queries) ListSoleOwnedTeams(ctx context.Context, userID pgtype.UUID) ([]pgtype.UUID, error) { + rows, err := q.db.Query(ctx, listSoleOwnedTeams, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []pgtype.UUID + for rows.Next() { + var id pgtype.UUID + if err := rows.Scan(&id); err != nil { + return nil, err + } + items = append(items, id) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listTeamsAdmin = `-- name: ListTeamsAdmin :many +SELECT + t.id, + t.name, + t.slug, + t.is_byoc, + t.created_at, + t.deleted_at, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.team_id = t.id)::int AS member_count, + COALESCE(owner_u.name, '') AS owner_name, + COALESCE(owner_u.email, '') AS owner_email, + (SELECT COUNT(*) FROM sandboxes s WHERE s.team_id = t.id AND s.status IN ('running', 'paused', 'starting'))::int AS active_sandbox_count, + (SELECT COUNT(*) FROM channels c WHERE c.team_id = t.id)::int AS channel_count +FROM teams t +LEFT JOIN users_teams owner_ut ON owner_ut.team_id = t.id AND owner_ut.role = 'owner' +LEFT JOIN users owner_u ON owner_u.id = owner_ut.user_id +WHERE t.id != '00000000-0000-0000-0000-000000000000' +ORDER BY t.deleted_at ASC NULLS FIRST, t.created_at DESC +LIMIT $1 OFFSET $2 +` + +type ListTeamsAdminParams struct { + Limit int32 `json:"limit"` + Offset int32 `json:"offset"` +} + +type ListTeamsAdminRow struct { + ID pgtype.UUID `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + IsByoc bool `json:"is_byoc"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + DeletedAt pgtype.Timestamptz `json:"deleted_at"` + MemberCount int32 `json:"member_count"` + OwnerName string `json:"owner_name"` + OwnerEmail string `json:"owner_email"` + ActiveSandboxCount int32 `json:"active_sandbox_count"` + ChannelCount int32 `json:"channel_count"` +} + +func (q *Queries) ListTeamsAdmin(ctx context.Context, arg ListTeamsAdminParams) ([]ListTeamsAdminRow, error) { + rows, 
err := q.db.Query(ctx, listTeamsAdmin, arg.Limit, arg.Offset) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListTeamsAdminRow + for rows.Next() { + var i ListTeamsAdminRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Slug, + &i.IsByoc, + &i.CreatedAt, + &i.DeletedAt, + &i.MemberCount, + &i.OwnerName, + &i.OwnerEmail, + &i.ActiveSandboxCount, + &i.ChannelCount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const setTeamBYOC = `-- name: SetTeamBYOC :exec UPDATE teams SET is_byoc = $2 WHERE id = $1 ` diff --git a/internal/db/template_builds.sql.go b/pkg/db/template_builds.sql.go similarity index 84% rename from internal/db/template_builds.sql.go rename to pkg/db/template_builds.sql.go index facfb19..051547d 100644 --- a/internal/db/template_builds.sql.go +++ b/pkg/db/template_builds.sql.go @@ -12,7 +12,7 @@ import ( ) const getTemplateBuild = `-- name: GetTemplateBuild :one -SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post FROM template_builds WHERE id = $1 +SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env, metadata FROM template_builds WHERE id = $1 ` func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (TemplateBuild, error) { @@ -39,6 +39,9 @@ func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (Templat &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } @@ -46,7 +49,7 @@ func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (Templat const insertTemplateBuild = 
`-- name: InsertTemplateBuild :one INSERT INTO template_builds (id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, total_steps, template_id, team_id, skip_pre_post) VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending', $8, $9, $10, $11) -RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post +RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env, metadata ` type InsertTemplateBuildParams struct { @@ -99,12 +102,15 @@ func (q *Queries) InsertTemplateBuild(ctx context.Context, arg InsertTemplateBui &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const listTemplateBuilds = `-- name: ListTemplateBuilds :many -SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post FROM template_builds ORDER BY created_at DESC +SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env, metadata FROM template_builds ORDER BY created_at DESC ` func (q *Queries) ListTemplateBuilds(ctx context.Context) ([]TemplateBuild, error) { @@ -137,6 +143,9 @@ func (q *Queries) ListTemplateBuilds(ctx context.Context) ([]TemplateBuild, erro &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } @@ -148,6 +157,29 @@ func (q *Queries) ListTemplateBuilds(ctx context.Context) 
([]TemplateBuild, erro return items, nil } +const updateBuildDefaults = `-- name: UpdateBuildDefaults :exec +UPDATE template_builds +SET default_user = $2, default_env = $3, metadata = $4 +WHERE id = $1 +` + +type UpdateBuildDefaultsParams struct { + ID pgtype.UUID `json:"id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` + Metadata []byte `json:"metadata"` +} + +func (q *Queries) UpdateBuildDefaults(ctx context.Context, arg UpdateBuildDefaultsParams) error { + _, err := q.db.Exec(ctx, updateBuildDefaults, + arg.ID, + arg.DefaultUser, + arg.DefaultEnv, + arg.Metadata, + ) + return err +} + const updateBuildError = `-- name: UpdateBuildError :exec UPDATE template_builds SET error = $2, status = 'failed', completed_at = NOW() @@ -204,7 +236,7 @@ SET status = $2, started_at = CASE WHEN $2 = 'running' AND started_at IS NULL THEN NOW() ELSE started_at END, completed_at = CASE WHEN $2 IN ('success', 'failed', 'cancelled') THEN NOW() ELSE completed_at END WHERE id = $1 -RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post +RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env, metadata ` type UpdateBuildStatusParams struct { @@ -236,6 +268,9 @@ func (q *Queries) UpdateBuildStatus(ctx context.Context, arg UpdateBuildStatusPa &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } diff --git a/internal/db/templates.sql.go b/pkg/db/templates.sql.go similarity index 77% rename from internal/db/templates.sql.go rename to pkg/db/templates.sql.go index 7d37808..97e528b 100644 --- a/internal/db/templates.sql.go +++ b/pkg/db/templates.sql.go 
@@ -45,7 +45,7 @@ func (q *Queries) DeleteTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) } const getPlatformTemplateByName = `-- name: GetPlatformTemplateByName :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = '00000000-0000-0000-0000-000000000000' AND name = $1 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE team_id = '00000000-0000-0000-0000-000000000000' AND name = $1 ` // Check if a global (platform) template exists with the given name. @@ -61,12 +61,15 @@ func (q *Queries) GetPlatformTemplateByName(ctx context.Context, name string) (T &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const getTemplate = `-- name: GetTemplate :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE id = $1 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE id = $1 ` func (q *Queries) GetTemplate(ctx context.Context, id pgtype.UUID) (Template, error) { @@ -81,12 +84,15 @@ func (q *Queries) GetTemplate(ctx context.Context, id pgtype.UUID) (Template, er &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const getTemplateByName = `-- name: GetTemplateByName :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = $1 AND name = $2 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE team_id = $1 AND name = $2 ` type GetTemplateByNameParams struct { @@ -107,12 +113,15 @@ func (q *Queries) GetTemplateByName(ctx context.Context, arg GetTemplateByNamePa &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const getTemplateByTeam = `-- name: 
GetTemplateByTeam :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE name = $1 AND (team_id = $2 OR team_id = '00000000-0000-0000-0000-000000000000') +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE name = $1 AND (team_id = $2 OR team_id = '00000000-0000-0000-0000-000000000000') ` type GetTemplateByTeamParams struct { @@ -133,24 +142,30 @@ func (q *Queries) GetTemplateByTeam(ctx context.Context, arg GetTemplateByTeamPa &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const insertTemplate = `-- name: InsertTemplate :one -INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7) -RETURNING name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id, default_user, default_env, metadata) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) +RETURNING name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata ` type InsertTemplateParams struct { - ID pgtype.UUID `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - Vcpus int32 `json:"vcpus"` - MemoryMb int32 `json:"memory_mb"` - SizeBytes int64 `json:"size_bytes"` - TeamID pgtype.UUID `json:"team_id"` + ID pgtype.UUID `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Vcpus int32 `json:"vcpus"` + MemoryMb int32 `json:"memory_mb"` + SizeBytes int64 `json:"size_bytes"` + TeamID pgtype.UUID `json:"team_id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` + Metadata []byte `json:"metadata"` } func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) (Template, error) { @@ -162,6 +177,9 @@ func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) 
arg.MemoryMb, arg.SizeBytes, arg.TeamID, + arg.DefaultUser, + arg.DefaultEnv, + arg.Metadata, ) var i Template err := row.Scan( @@ -173,12 +191,15 @@ func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ) return i, err } const listTemplates = `-- name: ListTemplates :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates ORDER BY created_at DESC ` func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { @@ -199,6 +220,9 @@ func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } @@ -211,7 +235,7 @@ func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { } const listTemplatesByTeam = `-- name: ListTemplatesByTeam :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') ORDER BY created_at DESC ` // Platform templates are visible to all teams. 
@@ -233,6 +257,9 @@ func (q *Queries) ListTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) ( &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } @@ -245,7 +272,7 @@ func (q *Queries) ListTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) ( } const listTemplatesByTeamAndType = `-- name: ListTemplatesByTeamAndType :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') AND type = $2 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') AND type = $2 ORDER BY created_at DESC ` type ListTemplatesByTeamAndTypeParams struct { @@ -272,6 +299,9 @@ func (q *Queries) ListTemplatesByTeamAndType(ctx context.Context, arg ListTempla &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } @@ -284,7 +314,7 @@ func (q *Queries) ListTemplatesByTeamAndType(ctx context.Context, arg ListTempla } const listTemplatesByTeamOnly = `-- name: ListTemplatesByTeamOnly :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = $1 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE team_id = $1 ORDER BY created_at DESC ` // List templates owned by a specific team (NOT including platform templates). 
@@ -306,6 +336,9 @@ func (q *Queries) ListTemplatesByTeamOnly(ctx context.Context, teamID pgtype.UUI &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } @@ -318,7 +351,7 @@ func (q *Queries) ListTemplatesByTeamOnly(ctx context.Context, teamID pgtype.UUI } const listTemplatesByType = `-- name: ListTemplatesByType :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE type = $1 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env, metadata FROM templates WHERE type = $1 ORDER BY created_at DESC ` func (q *Queries) ListTemplatesByType(ctx context.Context, type_ string) ([]Template, error) { @@ -339,6 +372,9 @@ func (q *Queries) ListTemplatesByType(ctx context.Context, type_ string) ([]Temp &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, + &i.Metadata, ); err != nil { return nil, err } diff --git a/internal/db/users.sql.go b/pkg/db/users.sql.go similarity index 53% rename from internal/db/users.sql.go rename to pkg/db/users.sql.go index 9de866b..c48d9c9 100644 --- a/internal/db/users.sql.go +++ b/pkg/db/users.sql.go @@ -11,6 +11,59 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const countActiveUsers = `-- name: CountActiveUsers :one +SELECT COUNT(*) FROM users WHERE status = 'active' +` + +func (q *Queries) CountActiveUsers(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, countActiveUsers) + var count int64 + err := row.Scan(&count) + return count, err +} + +const countUserOwnedTeamsWithOtherMembers = `-- name: CountUserOwnedTeamsWithOtherMembers :one +SELECT COUNT(DISTINCT ut.team_id)::int +FROM users_teams ut +WHERE ut.user_id = $1 + AND ut.role = 'owner' + AND EXISTS ( + SELECT 1 FROM users_teams ut2 + WHERE ut2.team_id = ut.team_id AND ut2.user_id <> $1 + ) +` + +func (q *Queries) CountUserOwnedTeamsWithOtherMembers(ctx context.Context, userID 
pgtype.UUID) (int32, error) { + row := q.db.QueryRow(ctx, countUserOwnedTeamsWithOtherMembers, userID) + var column_1 int32 + err := row.Scan(&column_1) + return column_1, err +} + +const countUsers = `-- name: CountUsers :one +SELECT COUNT(*) FROM users +` + +func (q *Queries) CountUsers(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, countUsers) + var count int64 + err := row.Scan(&count) + return count, err +} + +const countUsersAdmin = `-- name: CountUsersAdmin :one +SELECT COUNT(*)::int AS total +FROM users +WHERE status != 'deleted' +` + +func (q *Queries) CountUsersAdmin(ctx context.Context) (int32, error) { + row := q.db.QueryRow(ctx, countUsersAdmin) + var total int32 + err := row.Scan(&total) + return total, err +} + const deleteAdminPermission = `-- name: DeleteAdminPermission :exec DELETE FROM admin_permissions WHERE user_id = $1 AND permission = $2 ` @@ -55,7 +108,7 @@ func (q *Queries) GetAdminPermissions(ctx context.Context, userID pgtype.UUID) ( } const getAdminUsers = `-- name: GetAdminUsers :many -SELECT id, email, password_hash, name, is_admin, created_at, updated_at FROM users WHERE is_admin = TRUE ORDER BY created_at +SELECT id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status FROM users WHERE is_admin = TRUE ORDER BY created_at ` func (q *Queries) GetAdminUsers(ctx context.Context) ([]User, error) { @@ -75,6 +128,8 @@ func (q *Queries) GetAdminUsers(ctx context.Context) ([]User, error) { &i.IsAdmin, &i.CreatedAt, &i.UpdatedAt, + &i.DeletedAt, + &i.Status, ); err != nil { return nil, err } @@ -87,7 +142,7 @@ func (q *Queries) GetAdminUsers(ctx context.Context) ([]User, error) { } const getUserByEmail = `-- name: GetUserByEmail :one -SELECT id, email, password_hash, name, is_admin, created_at, updated_at FROM users WHERE email = $1 +SELECT id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status FROM users WHERE email = $1 AND status != 'deleted' ` func (q *Queries) 
GetUserByEmail(ctx context.Context, email string) (User, error) { @@ -101,12 +156,14 @@ func (q *Queries) GetUserByEmail(ctx context.Context, email string) (User, error &i.IsAdmin, &i.CreatedAt, &i.UpdatedAt, + &i.DeletedAt, + &i.Status, ) return i, err } const getUserByID = `-- name: GetUserByID :one -SELECT id, email, password_hash, name, is_admin, created_at, updated_at FROM users WHERE id = $1 +SELECT id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status FROM users WHERE id = $1 AND status != 'deleted' ` func (q *Queries) GetUserByID(ctx context.Context, id pgtype.UUID) (User, error) { @@ -120,10 +177,30 @@ func (q *Queries) GetUserByID(ctx context.Context, id pgtype.UUID) (User, error) &i.IsAdmin, &i.CreatedAt, &i.UpdatedAt, + &i.DeletedAt, + &i.Status, ) return i, err } +const hardDeleteExpiredUsers = `-- name: HardDeleteExpiredUsers :exec +DELETE FROM users WHERE deleted_at IS NOT NULL AND deleted_at < NOW() - INTERVAL '15 days' +` + +func (q *Queries) HardDeleteExpiredUsers(ctx context.Context) error { + _, err := q.db.Exec(ctx, hardDeleteExpiredUsers) + return err +} + +const hardDeleteUser = `-- name: HardDeleteUser :exec +DELETE FROM users WHERE id = $1 +` + +func (q *Queries) HardDeleteUser(ctx context.Context, id pgtype.UUID) error { + _, err := q.db.Exec(ctx, hardDeleteUser, id) + return err +} + const hasAdminPermission = `-- name: HasAdminPermission :one SELECT EXISTS( SELECT 1 FROM admin_permissions WHERE user_id = $1 AND permission = $2 @@ -161,7 +238,7 @@ func (q *Queries) InsertAdminPermission(ctx context.Context, arg InsertAdminPerm const insertUser = `-- name: InsertUser :one INSERT INTO users (id, email, password_hash, name) VALUES ($1, $2, $3, $4) -RETURNING id, email, password_hash, name, is_admin, created_at, updated_at +RETURNING id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status ` type InsertUserParams struct { @@ -187,6 +264,43 @@ func (q *Queries) InsertUser(ctx 
context.Context, arg InsertUserParams) (User, e &i.IsAdmin, &i.CreatedAt, &i.UpdatedAt, + &i.DeletedAt, + &i.Status, + ) + return i, err +} + +const insertUserInactive = `-- name: InsertUserInactive :one +INSERT INTO users (id, email, password_hash, name, status) +VALUES ($1, $2, $3, $4, 'inactive') +RETURNING id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status +` + +type InsertUserInactiveParams struct { + ID pgtype.UUID `json:"id"` + Email string `json:"email"` + PasswordHash pgtype.Text `json:"password_hash"` + Name string `json:"name"` +} + +func (q *Queries) InsertUserInactive(ctx context.Context, arg InsertUserInactiveParams) (User, error) { + row := q.db.QueryRow(ctx, insertUserInactive, + arg.ID, + arg.Email, + arg.PasswordHash, + arg.Name, + ) + var i User + err := row.Scan( + &i.ID, + &i.Email, + &i.PasswordHash, + &i.Name, + &i.IsAdmin, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + &i.Status, ) return i, err } @@ -194,7 +308,7 @@ func (q *Queries) InsertUser(ctx context.Context, arg InsertUserParams) (User, e const insertUserOAuth = `-- name: InsertUserOAuth :one INSERT INTO users (id, email, name) VALUES ($1, $2, $3) -RETURNING id, email, password_hash, name, is_admin, created_at, updated_at +RETURNING id, email, password_hash, name, is_admin, created_at, updated_at, deleted_at, status ` type InsertUserOAuthParams struct { @@ -214,10 +328,73 @@ func (q *Queries) InsertUserOAuth(ctx context.Context, arg InsertUserOAuthParams &i.IsAdmin, &i.CreatedAt, &i.UpdatedAt, + &i.DeletedAt, + &i.Status, ) return i, err } +const listUsersAdmin = `-- name: ListUsersAdmin :many +SELECT + u.id, + u.email, + u.name, + u.is_admin, + u.status, + u.created_at, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.user_id = u.id)::int AS teams_joined, + (SELECT COUNT(*) FROM users_teams ut WHERE ut.user_id = u.id AND ut.role = 'owner')::int AS teams_owned +FROM users u +WHERE u.status != 'deleted' +ORDER BY u.created_at DESC +LIMIT $1 OFFSET 
$2 +` + +type ListUsersAdminParams struct { + Limit int32 `json:"limit"` + Offset int32 `json:"offset"` +} + +type ListUsersAdminRow struct { + ID pgtype.UUID `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + IsAdmin bool `json:"is_admin"` + Status string `json:"status"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + TeamsJoined int32 `json:"teams_joined"` + TeamsOwned int32 `json:"teams_owned"` +} + +func (q *Queries) ListUsersAdmin(ctx context.Context, arg ListUsersAdminParams) ([]ListUsersAdminRow, error) { + rows, err := q.db.Query(ctx, listUsersAdmin, arg.Limit, arg.Offset) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListUsersAdminRow + for rows.Next() { + var i ListUsersAdminRow + if err := rows.Scan( + &i.ID, + &i.Email, + &i.Name, + &i.IsAdmin, + &i.Status, + &i.CreatedAt, + &i.TeamsJoined, + &i.TeamsOwned, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const searchUsersByEmailPrefix = `-- name: SearchUsersByEmailPrefix :many SELECT id, email FROM users WHERE email LIKE $1 || '%' ORDER BY email LIMIT 10 ` @@ -261,6 +438,29 @@ func (q *Queries) SetUserAdmin(ctx context.Context, arg SetUserAdminParams) erro return err } +const setUserStatus = `-- name: SetUserStatus :exec +UPDATE users SET status = $2, updated_at = NOW() WHERE id = $1 +` + +type SetUserStatusParams struct { + ID pgtype.UUID `json:"id"` + Status string `json:"status"` +} + +func (q *Queries) SetUserStatus(ctx context.Context, arg SetUserStatusParams) error { + _, err := q.db.Exec(ctx, setUserStatus, arg.ID, arg.Status) + return err +} + +const softDeleteUser = `-- name: SoftDeleteUser :exec +UPDATE users SET deleted_at = NOW(), status = 'deleted', updated_at = NOW() WHERE id = $1 +` + +func (q *Queries) SoftDeleteUser(ctx context.Context, id pgtype.UUID) error { + _, err := q.db.Exec(ctx, softDeleteUser, id) + return err +} + 
const updateUserName = `-- name: UpdateUserName :exec UPDATE users SET name = $2, updated_at = NOW() WHERE id = $1 ` @@ -274,3 +474,17 @@ func (q *Queries) UpdateUserName(ctx context.Context, arg UpdateUserNameParams) _, err := q.db.Exec(ctx, updateUserName, arg.ID, arg.Name) return err } + +const updateUserPassword = `-- name: UpdateUserPassword :exec +UPDATE users SET password_hash = $2, updated_at = NOW() WHERE id = $1 +` + +type UpdateUserPasswordParams struct { + ID pgtype.UUID `json:"id"` + PasswordHash pgtype.Text `json:"password_hash"` +} + +func (q *Queries) UpdateUserPassword(ctx context.Context, arg UpdateUserPasswordParams) error { + _, err := q.db.Exec(ctx, updateUserPassword, arg.ID, arg.PasswordHash) + return err +} diff --git a/internal/events/event.go b/pkg/events/event.go similarity index 100% rename from internal/events/event.go rename to pkg/events/event.go diff --git a/internal/id/id.go b/pkg/id/id.go similarity index 98% rename from internal/id/id.go rename to pkg/id/id.go index 2ef5d88..6e1fd9e 100644 --- a/internal/id/id.go +++ b/pkg/id/id.go @@ -167,6 +167,11 @@ func UUIDString(id pgtype.UUID) string { return uuid.UUID(id.Bytes).String() } +// NewPtyTag generates a PTY session tag: 8 random hex characters. 
+func NewPtyTag() string { + return hex8() +} + // --- Helpers --- func hex8() string { diff --git a/internal/id/id_test.go b/pkg/id/id_test.go similarity index 100% rename from internal/id/id_test.go rename to pkg/id/id_test.go diff --git a/internal/lifecycle/hostpool.go b/pkg/lifecycle/hostpool.go similarity index 98% rename from internal/lifecycle/hostpool.go rename to pkg/lifecycle/hostpool.go index ca9e8bb..3931d7b 100644 --- a/internal/lifecycle/hostpool.go +++ b/pkg/lifecycle/hostpool.go @@ -8,8 +8,8 @@ import ( "sync" "time" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen/hostagentv1connect" ) diff --git a/internal/lifecycle/manager.go b/pkg/lifecycle/manager.go similarity index 100% rename from internal/lifecycle/manager.go rename to pkg/lifecycle/manager.go diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go new file mode 100644 index 0000000..6159a9c --- /dev/null +++ b/pkg/logging/logging.go @@ -0,0 +1,135 @@ +package logging + +import ( + "io" + "log/slog" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" +) + +// Setup configures the global slog logger with dual output (stderr + rotating +// log file). logsDir is the directory where log files are written. binaryName +// is used as the log filename (e.g. "control-plane" → "control-plane.log"). +// +// If logsDir is empty or the directory cannot be created, Setup falls back to +// stderr-only logging and returns a no-op cleanup function. +// +// The returned cleanup function closes the log file and must be deferred. +// Setup also installs a SIGHUP handler that reopens the log file, allowing +// external log rotation tools (e.g. logrotate) to rotate files in place. 
+func Setup(logsDir, binaryName string) func() { + level := parseLevel(os.Getenv("LOG_LEVEL")) + + if logsDir == "" { + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: level, + }))) + return func() {} + } + + if err := os.MkdirAll(logsDir, 0750); err != nil { + // Fall back to stderr-only; log the error so operators notice. + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: level, + }))) + slog.Warn("file logging unavailable: failed to create log directory", "dir", logsDir, "error", err) + return func() {} + } + + logPath := filepath.Join(logsDir, binaryName+".log") + rf, err := newReopenableFile(logPath) + if err != nil { + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: level, + }))) + slog.Warn("file logging unavailable: failed to open log file", "path", logPath, "error", err) + return func() {} + } + + mw := io.MultiWriter(os.Stderr, rf) + slog.SetDefault(slog.New(slog.NewTextHandler(mw, &slog.HandlerOptions{ + Level: level, + }))) + + // SIGHUP reopens the log file so logrotate can rotate in place. + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGHUP) + go func() { + for range sigCh { + if err := rf.Reopen(); err != nil { + slog.Error("failed to reopen log file on SIGHUP", "path", logPath, "error", err) + } else { + slog.Info("log file reopened", "path", logPath) + } + } + }() + + return func() { + signal.Stop(sigCh) + close(sigCh) + rf.Close() + } +} + +func parseLevel(s string) slog.Level { + switch strings.ToLower(strings.TrimSpace(s)) { + case "debug": + return slog.LevelDebug + case "warn", "warning": + return slog.LevelWarn + case "error": + return slog.LevelError + default: + return slog.LevelInfo + } +} + +// reopenableFile is an io.Writer backed by an *os.File that can be atomically +// reopened (for log rotation via SIGHUP). All operations are goroutine-safe. 
+type reopenableFile struct { + path string + mu sync.Mutex + f *os.File +} + +func newReopenableFile(path string) (*reopenableFile, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + if err != nil { + return nil, err + } + return &reopenableFile{path: path, f: f}, nil +} + +func (r *reopenableFile) Write(p []byte) (int, error) { + r.mu.Lock() + defer r.mu.Unlock() + return r.f.Write(p) +} + +// Reopen closes the current file and opens a new one at the same path. +// This is the mechanism that makes logrotate's copytruncate-free rotation work: +// logrotate renames the old file, then sends SIGHUP, and the process opens a +// fresh file at the original path. +func (r *reopenableFile) Reopen() error { + r.mu.Lock() + defer r.mu.Unlock() + // Open the new file before closing the old one so a failed open doesn't + // leave the writer in a broken state with a closed fd. + f, err := os.OpenFile(r.path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + if err != nil { + return err + } + r.f.Close() + r.f = f + return nil +} + +func (r *reopenableFile) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + return r.f.Close() +} diff --git a/pkg/scheduler/least_loaded.go b/pkg/scheduler/least_loaded.go new file mode 100644 index 0000000..57c4a18 --- /dev/null +++ b/pkg/scheduler/least_loaded.go @@ -0,0 +1,171 @@ +package scheduler + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v5/pgtype" + + "git.omukk.dev/wrenn/wrenn/pkg/db" +) + +// Resource overhead reserved for the host OS. +const ( + reservedMemoryMB = 8192 + reservedCPU = 4 + reservedDiskMB = 30720 // 30 GB + cpuOvercommit = 1.5 + pausedMemoryFrac = 0.5 + pausedDiskFrac = 2.0 / 3.0 +) + +// LeastLoadedScheduler picks the online host with the most headroom at its +// tightest resource (bottleneck-first strategy). 
+// +// For each eligible host it computes the remaining fraction of each resource: +// +// RAM: usable / total where total = host.memory_mb - 8192 +// CPU: usable / total where total = host.cpu_cores * 1.5 - 4 +// Disk: usable / total where total = host.disk_gb * 1024 - 30720 +// +// The host's score is min(ram_frac, cpu_frac, disk_frac). The host with the +// highest score wins. Admission control rejects when no host can fit the +// requested sandbox on RAM or disk; CPU overcommit is allowed. +type LeastLoadedScheduler struct { + db *db.Queries +} + +// NewLeastLoadedScheduler creates a LeastLoadedScheduler backed by the given DB. +func NewLeastLoadedScheduler(queries *db.Queries) *LeastLoadedScheduler { + return &LeastLoadedScheduler{db: queries} +} + +// hostResources holds the computed resource availability for a single host. +type hostResources struct { + host db.Host + ramTotal float64 + ramUsable float64 + cpuTotal float64 + cpuUsable float64 + diskTotal float64 + diskUsable float64 +} + +// bottleneckScore returns the fraction of the tightest resource remaining. +func (h *hostResources) bottleneckScore() float64 { + ramFrac := safeFrac(h.ramUsable, h.ramTotal) + cpuFrac := safeFrac(h.cpuUsable, h.cpuTotal) + diskFrac := safeFrac(h.diskUsable, h.diskTotal) + return min(ramFrac, cpuFrac, diskFrac) +} + +// safeFrac returns usable/total, or 0 when total <= 0. +func safeFrac(usable, total float64) float64 { + if total <= 0 { + return 0 + } + return usable / total +} + +// SelectHost returns the eligible host with the most resource headroom. +func (s *LeastLoadedScheduler) SelectHost(ctx context.Context, teamID pgtype.UUID, isByoc bool, memoryMb, diskSizeMb int32) (db.Host, error) { + rows, err := s.db.GetHostsWithLoad(ctx) + if err != nil { + return db.Host{}, fmt.Errorf("get hosts with load: %w", err) + } + + // Phase 1: filter eligible hosts and compute resources. 
+ var candidates []hostResources + for i := range rows { + row := &rows[i] + + if isByoc { + if row.Type != "byoc" || !row.TeamID.Valid || row.TeamID != teamID { + continue + } + } else { + if row.Type != "regular" { + continue + } + } + + hr := computeResources(row) + candidates = append(candidates, hr) + } + + if len(candidates) == 0 { + if isByoc { + return db.Host{}, fmt.Errorf("no online BYOC hosts available for team") + } + return db.Host{}, fmt.Errorf("no online platform hosts available") + } + + // Phase 2: admission control + selection — pick the highest-scoring host + // that can actually fit the requested sandbox (RAM and disk). + best := -1 + bestScore := 0.0 + for i := range candidates { + if memoryMb > 0 && candidates[i].ramUsable < float64(memoryMb) { + continue + } + if diskSizeMb > 0 && candidates[i].diskUsable < float64(diskSizeMb) { + continue + } + score := candidates[i].bottleneckScore() + if best == -1 || score > bestScore { + best = i + bestScore = score + } + } + + if best == -1 { + return db.Host{}, fmt.Errorf("no host has sufficient resources: need %d MB memory, %d MB disk", memoryMb, diskSizeMb) + } + + return candidates[best].host, nil +} + +// computeResources converts a raw DB row into computed resource availability. 
+func computeResources(row *db.GetHostsWithLoadRow) hostResources { + ramTotal := float64(row.MemoryMb) - reservedMemoryMB + cpuTotal := float64(row.CpuCores)*cpuOvercommit - reservedCPU + diskTotal := float64(row.DiskGb)*1024 - reservedDiskMB + + usedMemory := float64(row.RunningMemoryMb) + pausedMemoryFrac*float64(row.PausedMemoryMb) + usedCPU := float64(row.RunningVcpus) + usedDisk := float64(row.RunningDiskMb) + pausedDiskFrac*float64(row.PausedDiskMb) + + return hostResources{ + host: hostFromRow(row), + ramTotal: ramTotal, + ramUsable: ramTotal - usedMemory, + cpuTotal: cpuTotal, + cpuUsable: cpuTotal - usedCPU, + diskTotal: diskTotal, + diskUsable: diskTotal - usedDisk, + } +} + +// hostFromRow converts the query row back to a plain db.Host. +func hostFromRow(r *db.GetHostsWithLoadRow) db.Host { + return db.Host{ + ID: r.ID, + Type: r.Type, + TeamID: r.TeamID, + Provider: r.Provider, + AvailabilityZone: r.AvailabilityZone, + Arch: r.Arch, + CpuCores: r.CpuCores, + MemoryMb: r.MemoryMb, + DiskGb: r.DiskGb, + Address: r.Address, + Status: r.Status, + LastHeartbeatAt: r.LastHeartbeatAt, + Metadata: r.Metadata, + CreatedBy: r.CreatedBy, + CreatedAt: r.CreatedAt, + UpdatedAt: r.UpdatedAt, + CertFingerprint: r.CertFingerprint, + CertExpiresAt: r.CertExpiresAt, + } +} diff --git a/internal/scheduler/round_robin.go b/pkg/scheduler/round_robin.go similarity index 80% rename from internal/scheduler/round_robin.go rename to pkg/scheduler/round_robin.go index 7e4962d..693de30 100644 --- a/internal/scheduler/round_robin.go +++ b/pkg/scheduler/round_robin.go @@ -7,7 +7,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" + "git.omukk.dev/wrenn/wrenn/pkg/db" ) // HostScheduler selects a host for a new sandbox. Implementations may use @@ -16,8 +16,11 @@ type HostScheduler interface { // SelectHost returns a host that can accept a new sandbox. // For BYOC teams (isByoc=true), only online BYOC hosts belonging to teamID // are considered. 
For non-BYOC teams, only online regular (platform) hosts - // are considered. Returns an error if no suitable host is available. - SelectHost(ctx context.Context, teamID pgtype.UUID, isByoc bool) (db.Host, error) + // are considered. + // memoryMb and diskSizeMb describe the sandbox's resource requirements so + // the scheduler can perform admission control (reject when no host has + // enough RAM or disk). Pass 0 to skip admission checks. + SelectHost(ctx context.Context, teamID pgtype.UUID, isByoc bool, memoryMb, diskSizeMb int32) (db.Host, error) } // RoundRobinScheduler cycles through eligible online hosts in round-robin order. @@ -34,7 +37,9 @@ func NewRoundRobinScheduler(queries *db.Queries) *RoundRobinScheduler { } // SelectHost returns the next eligible online host in round-robin order. -func (s *RoundRobinScheduler) SelectHost(ctx context.Context, teamID pgtype.UUID, isByoc bool) (db.Host, error) { +// The memoryMb and diskSizeMb parameters are ignored — round-robin performs +// no admission control. 
+func (s *RoundRobinScheduler) SelectHost(ctx context.Context, teamID pgtype.UUID, isByoc bool, _, _ int32) (db.Host, error) { hosts, err := s.db.ListActiveHosts(ctx) if err != nil { return db.Host{}, fmt.Errorf("list hosts: %w", err) diff --git a/internal/scheduler/scheduler.go b/pkg/scheduler/scheduler.go similarity index 100% rename from internal/scheduler/scheduler.go rename to pkg/scheduler/scheduler.go diff --git a/internal/scheduler/single_host.go b/pkg/scheduler/single_host.go similarity index 100% rename from internal/scheduler/single_host.go rename to pkg/scheduler/single_host.go diff --git a/internal/service/apikey.go b/pkg/service/apikey.go similarity index 93% rename from internal/service/apikey.go rename to pkg/service/apikey.go index 90bdfb6..8eea896 100644 --- a/internal/service/apikey.go +++ b/pkg/service/apikey.go @@ -6,9 +6,9 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) // APIKeyService provides API key operations shared between the REST API and the dashboard. 
diff --git a/internal/service/audit.go b/pkg/service/audit.go similarity index 97% rename from internal/service/audit.go rename to pkg/service/audit.go index e028625..cee95d6 100644 --- a/internal/service/audit.go +++ b/pkg/service/audit.go @@ -8,8 +8,8 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" ) const auditMaxLimit = 200 diff --git a/internal/service/build.go b/pkg/service/build.go similarity index 71% rename from internal/service/build.go rename to pkg/service/build.go index b45ba1d..bdb1620 100644 --- a/internal/service/build.go +++ b/pkg/service/build.go @@ -13,11 +13,11 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" "git.omukk.dev/wrenn/wrenn/internal/recipe" - "git.omukk.dev/wrenn/wrenn/internal/scheduler" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/scheduler" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -27,8 +27,11 @@ const ( ) // preBuildCmds run before the user recipe to prepare the build environment. +// apt update runs as root first, then USER switches to wrenn-user for the recipe. var preBuildCmds = []string{ "RUN apt update", + "USER wrenn-user", + "WORKDIR /home/wrenn-user", } // postBuildCmds run after the user recipe to clean up caches and reduce image size. @@ -36,6 +39,7 @@ var postBuildCmds = []string{ "RUN apt clean", "RUN apt autoremove -y", "RUN rm -rf /var/lib/apt/lists/*", + "RUN rm -rf /tmp/build-files /tmp/build-files.*", } // buildAgentClient is the subset of the host agent client used by the build worker. 
@@ -43,6 +47,7 @@ type buildAgentClient interface { CreateSandbox(ctx context.Context, req *connect.Request[pb.CreateSandboxRequest]) (*connect.Response[pb.CreateSandboxResponse], error) DestroySandbox(ctx context.Context, req *connect.Request[pb.DestroySandboxRequest]) (*connect.Response[pb.DestroySandboxResponse], error) Exec(ctx context.Context, req *connect.Request[pb.ExecRequest]) (*connect.Response[pb.ExecResponse], error) + WriteFile(ctx context.Context, req *connect.Request[pb.WriteFileRequest]) (*connect.Response[pb.WriteFileResponse], error) CreateSnapshot(ctx context.Context, req *connect.Request[pb.CreateSnapshotRequest]) (*connect.Response[pb.CreateSnapshotResponse], error) FlattenRootfs(ctx context.Context, req *connect.Request[pb.FlattenRootfsRequest]) (*connect.Response[pb.FlattenRootfsResponse], error) } @@ -56,6 +61,7 @@ type BuildService struct { mu sync.Mutex cancelMap map[string]context.CancelFunc // buildID → per-build cancel func + filesMap map[string][]byte // buildID → uploaded archive bytes } // BuildCreateParams holds the parameters for creating a template build. @@ -67,6 +73,27 @@ type BuildCreateParams struct { VCPUs int32 MemoryMB int32 SkipPrePost bool + Archive []byte // Optional tar/tar.gz/zip archive for COPY commands. + ArchiveName string // Original filename (used to detect format). +} + +// storeArchive stores uploaded archive bytes keyed by build ID for the worker. +func (s *BuildService) storeArchive(buildID string, data []byte) { + s.mu.Lock() + defer s.mu.Unlock() + if s.filesMap == nil { + s.filesMap = make(map[string][]byte) + } + s.filesMap[buildID] = data +} + +// takeArchive retrieves and removes stored archive bytes for a build. +func (s *BuildService) takeArchive(buildID string) []byte { + s.mu.Lock() + defer s.mu.Unlock() + data := s.filesMap[buildID] + delete(s.filesMap, buildID) + return data } // Create inserts a new build record and enqueues it to Redis. 
@@ -112,8 +139,13 @@ func (s *BuildService) Create(ctx context.Context, p BuildCreateParams) (db.Temp return db.TemplateBuild{}, fmt.Errorf("insert build: %w", err) } - // Enqueue build ID (as formatted string) to Redis for workers to pick up. + // Store archive before enqueue so the worker never dequeues without files. + if len(p.Archive) > 0 { + s.storeArchive(buildIDStr, p.Archive) + } + if err := s.Redis.RPush(ctx, buildQueueKey, buildIDStr).Err(); err != nil { + s.takeArchive(buildIDStr) // clean up on enqueue failure return db.TemplateBuild{}, fmt.Errorf("enqueue build: %w", err) } @@ -251,7 +283,7 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { } // Pick a platform host and create a sandbox. - host, err := s.Scheduler.SelectHost(buildCtx, id.PlatformTeamID, false) + host, err := s.Scheduler.SelectHost(buildCtx, id.PlatformTeamID, false, build.MemoryMb, 5120) if err != nil { s.failBuild(buildCtx, buildID, fmt.Sprintf("no host available: %v", err)) return @@ -294,7 +326,8 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { s.failBuild(buildCtx, buildID, fmt.Sprintf("create sandbox failed: %v", err)) return } - _ = resp + // Capture sandbox metadata (envd/kernel/firecracker/agent versions). + sandboxMetadata := resp.Msg.Metadata // Record sandbox/host association. _ = s.DB.UpdateBuildSandbox(buildCtx, db.UpdateBuildSandboxParams{ @@ -303,6 +336,16 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { HostID: host.ID, }) + // Upload and extract build archive if provided. + archive := s.takeArchive(buildIDStr) + if len(archive) > 0 { + if err := s.uploadAndExtractArchive(buildCtx, agent, sandboxIDStr, archive, buildIDStr); err != nil { + s.destroySandbox(buildCtx, agent, sandboxIDStr) + s.failBuild(buildCtx, buildID, fmt.Sprintf("archive upload failed: %v", err)) + return + } + } + // Parse recipe steps. 
preBuildCmds and postBuildCmds are hardcoded and always // valid; panic on error is appropriate here since it would be a programmer mistake. preBuildSteps, err := recipe.ParseRecipe(preBuildCmds) @@ -331,10 +374,18 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { "HOME": "/root", } } - bctx := &recipe.ExecContext{EnvVars: envVars} + bctx := &recipe.ExecContext{EnvVars: envVars, User: "root"} + + // Per-step progress callback for live UI updates. + progressFn := func(currentStep int, allEntries []recipe.BuildLogEntry) { + s.updateLogs(buildCtx, buildID, currentStep, allEntries) + } runPhase := func(phase string, steps []recipe.Step, defaultTimeout time.Duration) bool { - newEntries, nextStep, ok := recipe.Execute(buildCtx, phase, steps, sandboxIDStr, step, defaultTimeout, bctx, agent.Exec) + newEntries, nextStep, ok := recipe.Execute(buildCtx, phase, steps, sandboxIDStr, step, defaultTimeout, bctx, agent.Exec, func(currentStep int, phaseEntries []recipe.BuildLogEntry) { + // Progress callback: combine prior logs with current phase entries. + progressFn(currentStep, append(logs, phaseEntries...)) + }) logs = append(logs, newEntries...) step = nextStep s.updateLogs(buildCtx, buildID, step, logs) @@ -344,24 +395,40 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { if buildCtx.Err() != nil { return false } - last := newEntries[len(newEntries)-1] - reason := last.Stderr - if reason == "" { - reason = fmt.Sprintf("exit code %d", last.Exit) + reason := "unknown error" + if len(newEntries) > 0 { + last := newEntries[len(newEntries)-1] + reason = last.Stderr + if reason == "" { + reason = fmt.Sprintf("exit code %d", last.Exit) + } } s.failBuild(buildCtx, buildID, fmt.Sprintf("%s step %d failed: %s", phase, step, reason)) } return ok } + // Phase 1: Pre-build (as root) — creates wrenn-user, updates apt. 
if !build.SkipPrePost { if !runPhase("pre-build", preBuildSteps, 0) { return } } + + // Phase 2: User recipe — starts as wrenn-user (set by USER in pre-build) + // or root if skip_pre_post. if !runPhase("recipe", userRecipeSteps, buildCommandTimeout) { return } + + // Capture the final user and env vars as template defaults. + // Filter out user-specific and runtime vars that should be resolved at + // sandbox creation time, not baked in from the build environment. + templateDefaultUser := bctx.User + templateDefaultEnv := filterBuildEnv(bctx.EnvVars) + + // Phase 3: Post-build (as root) — cleanup. + bctx.User = "root" if !build.SkipPrePost { if !runPhase("post-build", postBuildSteps, 0) { return @@ -378,7 +445,7 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { return } log.Info("running healthcheck", "cmd", hc.Cmd, "interval", hc.Interval, "timeout", hc.Timeout, "start_period", hc.StartPeriod, "retries", hc.Retries) - if err := s.waitForHealthcheck(buildCtx, agent, sandboxIDStr, hc); err != nil { + if err := s.waitForHealthcheck(buildCtx, agent, sandboxIDStr, hc, templateDefaultUser); err != nil { s.destroySandbox(buildCtx, agent, sandboxIDStr) if buildCtx.Err() != nil { return @@ -430,19 +497,42 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { templateType = "snapshot" } + // Serialize env vars for DB storage. + defaultEnvJSON, err := json.Marshal(templateDefaultEnv) + if err != nil { + defaultEnvJSON = []byte("{}") + } + + // Serialize sandbox metadata for DB storage. 
+ metadataJSON, err := json.Marshal(sandboxMetadata) + if err != nil || len(sandboxMetadata) == 0 { + metadataJSON = []byte("{}") + } + if _, err := s.DB.InsertTemplate(buildCtx, db.InsertTemplateParams{ - ID: build.TemplateID, - Name: build.Name, - Type: templateType, - Vcpus: build.Vcpus, - MemoryMb: build.MemoryMb, - SizeBytes: sizeBytes, - TeamID: id.PlatformTeamID, + ID: build.TemplateID, + Name: build.Name, + Type: templateType, + Vcpus: build.Vcpus, + MemoryMb: build.MemoryMb, + SizeBytes: sizeBytes, + TeamID: id.PlatformTeamID, + DefaultUser: templateDefaultUser, + DefaultEnv: defaultEnvJSON, + Metadata: metadataJSON, }); err != nil { log.Error("failed to insert template record", "error", err) // Build succeeded on disk, just DB record failed — don't mark as failed. } + // Record defaults and metadata on the build record for inspection. + _ = s.DB.UpdateBuildDefaults(buildCtx, db.UpdateBuildDefaultsParams{ + ID: buildID, + DefaultUser: templateDefaultUser, + DefaultEnv: defaultEnvJSON, + Metadata: metadataJSON, + }) + // For CreateSnapshot, the sandbox is already destroyed by the snapshot process. // For FlattenRootfs, the sandbox is already destroyed by the flatten process. // No additional destroy needed. @@ -463,7 +553,14 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { // During the start period, failures are not counted toward the retry budget. // Returns nil on the first successful check, or an error if retries are // exhausted, the deadline passes, or the context is cancelled. 
-func (s *BuildService) waitForHealthcheck(ctx context.Context, agent buildAgentClient, sandboxIDStr string, hc recipe.HealthcheckConfig) error { +func (s *BuildService) waitForHealthcheck(ctx context.Context, agent buildAgentClient, sandboxIDStr string, hc recipe.HealthcheckConfig, user string) error { + // Wrap the healthcheck command with su when a non-root user is set, so that + // ~ expands to the correct home directory and the process runs with the + // right UID (matching the template's default user). + cmd := hc.Cmd + if user != "" && user != "root" { + cmd = "su " + recipe.Shellescape(user) + " -s /bin/sh -c " + recipe.Shellescape(hc.Cmd) + } ticker := time.NewTicker(hc.Interval) defer ticker.Stop() @@ -490,7 +587,7 @@ func (s *BuildService) waitForHealthcheck(ctx context.Context, agent buildAgentC resp, err := agent.Exec(execCtx, connect.NewRequest(&pb.ExecRequest{ SandboxId: sandboxIDStr, Cmd: "/bin/sh", - Args: []string{"-c", hc.Cmd}, + Args: []string{"-c", cmd}, TimeoutSec: int32(hc.Timeout.Seconds()), })) cancel() @@ -603,3 +700,87 @@ func parseSandboxEnv(raw string) map[string]string { return envVars } + +// uploadAndExtractArchive writes the archive to the sandbox and extracts it +// to /tmp/build-files/. Detects format from content (tar.gz, tar, zip). +func (s *BuildService) uploadAndExtractArchive( + ctx context.Context, + agent buildAgentClient, + sandboxID string, + archive []byte, + buildID string, +) error { + // Detect archive type from magic bytes. 
+ var archivePath, extractCmd string + switch { + case len(archive) >= 2 && archive[0] == 0x1f && archive[1] == 0x8b: + // gzip (tar.gz) + archivePath = "/tmp/build-files.tar.gz" + extractCmd = "mkdir -p /tmp/build-files && tar xzf /tmp/build-files.tar.gz -C /tmp/build-files" + case len(archive) >= 4 && string(archive[:4]) == "PK\x03\x04": + // zip + archivePath = "/tmp/build-files.zip" + extractCmd = "mkdir -p /tmp/build-files && unzip -o /tmp/build-files.zip -d /tmp/build-files" + case len(archive) >= 262 && string(archive[257:262]) == "ustar": + // tar (ustar magic at offset 257) + archivePath = "/tmp/build-files.tar" + extractCmd = "mkdir -p /tmp/build-files && tar xf /tmp/build-files.tar -C /tmp/build-files" + default: + // Fallback: try tar.gz + archivePath = "/tmp/build-files.tar.gz" + extractCmd = "mkdir -p /tmp/build-files && tar xzf /tmp/build-files.tar.gz -C /tmp/build-files" + } + + slog.Info("uploading build archive", "build_id", buildID, "path", archivePath, "size", len(archive)) + + // Write archive to VM. + if _, err := agent.WriteFile(ctx, connect.NewRequest(&pb.WriteFileRequest{ + SandboxId: sandboxID, + Path: archivePath, + Content: archive, + })); err != nil { + return fmt.Errorf("write archive: %w", err) + } + + // Extract and ensure files are readable. + fullCmd := extractCmd + " && chmod -R a+rX /tmp/build-files" + + resp, err := agent.Exec(ctx, connect.NewRequest(&pb.ExecRequest{ + SandboxId: sandboxID, + Cmd: "/bin/sh", + Args: []string{"-c", fullCmd}, + TimeoutSec: 120, + })) + if err != nil { + return fmt.Errorf("extract archive: %w", err) + } + if resp.Msg.ExitCode != 0 { + return fmt.Errorf("extract archive: exit code %d: %s", resp.Msg.ExitCode, string(resp.Msg.Stderr)) + } + + return nil +} + +// runtimeEnvVars lists env vars that are user- or session-specific and should +// not be persisted into template defaults. These are resolved at runtime by +// envd based on the actual user and sandbox context. 
+var runtimeEnvVars = map[string]bool{ + "HOME": true, "USER": true, "LOGNAME": true, "SHELL": true, + "PWD": true, "OLDPWD": true, "HOSTNAME": true, "TERM": true, + "SHLVL": true, "_": true, + // Per-sandbox identifiers set by envd at boot via MMDS. + "WRENN_SANDBOX_ID": true, "WRENN_TEMPLATE_ID": true, +} + +// filterBuildEnv returns a copy of envVars with runtime/user-specific +// variables removed so they don't override envd's per-user resolution. +func filterBuildEnv(envVars map[string]string) map[string]string { + filtered := make(map[string]string, len(envVars)) + for k, v := range envVars { + if runtimeEnvVars[k] { + continue + } + filtered[k] = v + } + return filtered +} diff --git a/internal/service/host.go b/pkg/service/host.go similarity index 99% rename from internal/service/host.go rename to pkg/service/host.go index 1ddffca..9f5b5c8 100644 --- a/internal/service/host.go +++ b/pkg/service/host.go @@ -14,10 +14,10 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/redis/go-redis/v9" - "git.omukk.dev/wrenn/wrenn/internal/auth" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/auth" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) diff --git a/internal/service/sandbox.go b/pkg/service/sandbox.go similarity index 83% rename from internal/service/sandbox.go rename to pkg/service/sandbox.go index 68c9bbf..d50520b 100644 --- a/internal/service/sandbox.go +++ b/pkg/service/sandbox.go @@ -2,6 +2,7 @@ package service import ( "context" + "encoding/json" "fmt" "log/slog" "time" @@ -9,11 +10,11 @@ import ( "connectrpc.com/connect" "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" - 
"git.omukk.dev/wrenn/wrenn/internal/scheduler" - "git.omukk.dev/wrenn/wrenn/internal/validate" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/scheduler" + "git.omukk.dev/wrenn/wrenn/pkg/validate" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -85,6 +86,8 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. // Resolve template name → (teamID, templateID). templateTeamID := id.PlatformTeamID templateID := id.MinimalTemplateID + var templateDefaultUser string + var templateDefaultEnv map[string]string if p.Template != "minimal" { tmpl, err := s.DB.GetTemplateByTeam(ctx, db.GetTemplateByTeamParams{Name: p.Template, TeamID: p.TeamID}) if err != nil { @@ -92,6 +95,11 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. } templateTeamID = tmpl.TeamID templateID = tmpl.ID + templateDefaultUser = tmpl.DefaultUser + // Parse default_env JSONB into a map. + if len(tmpl.DefaultEnv) > 0 { + _ = json.Unmarshal(tmpl.DefaultEnv, &templateDefaultEnv) + } // If the template is a snapshot, use its baked-in vcpus/memory. if tmpl.Type == "snapshot" { p.VCPUs = tmpl.Vcpus @@ -110,7 +118,7 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. } // Pick a host for this sandbox. - host, err := s.Scheduler.SelectHost(ctx, p.TeamID, team.IsByoc) + host, err := s.Scheduler.SelectHost(ctx, p.TeamID, team.IsByoc, p.MemoryMB, p.DiskSizeMB) if err != nil { return db.Sandbox{}, fmt.Errorf("select host: %w", err) } @@ -135,19 +143,22 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. 
DiskSizeMb: p.DiskSizeMB, TemplateID: templateID, TemplateTeamID: templateTeamID, + Metadata: []byte("{}"), }); err != nil { return db.Sandbox{}, fmt.Errorf("insert sandbox: %w", err) } resp, err := agent.CreateSandbox(ctx, connect.NewRequest(&pb.CreateSandboxRequest{ - SandboxId: sandboxIDStr, - Template: p.Template, - TeamId: id.UUIDString(templateTeamID), - TemplateId: id.UUIDString(templateID), - Vcpus: p.VCPUs, - MemoryMb: p.MemoryMB, - TimeoutSec: p.TimeoutSec, - DiskSizeMb: p.DiskSizeMB, + SandboxId: sandboxIDStr, + Template: p.Template, + TeamId: id.UUIDString(templateTeamID), + TemplateId: id.UUIDString(templateID), + Vcpus: p.VCPUs, + MemoryMb: p.MemoryMB, + TimeoutSec: p.TimeoutSec, + DiskSizeMb: p.DiskSizeMB, + DefaultUser: templateDefaultUser, + DefaultEnv: templateDefaultEnv, })) if err != nil { if _, dbErr := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{ @@ -172,6 +183,18 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. return db.Sandbox{}, fmt.Errorf("update sandbox running: %w", err) } + // Store runtime metadata from the agent (envd/kernel/firecracker/agent versions). + if meta := resp.Msg.Metadata; len(meta) > 0 { + metaJSON, _ := json.Marshal(meta) + if err := s.DB.UpdateSandboxMetadata(ctx, db.UpdateSandboxMetadataParams{ + ID: sandboxID, + Metadata: metaJSON, + }); err != nil { + slog.Warn("failed to store sandbox metadata", "id", sandboxIDStr, "error", err) + } + sb.Metadata = metaJSON + } + return sb, nil } @@ -249,9 +272,34 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU sandboxIDStr := id.FormatSandboxID(sandboxID) + // Look up template defaults for resume. 
+ var resumeDefaultUser string + var resumeDefaultEnv map[string]string + if sb.TemplateID.Valid { + tmpl, err := s.DB.GetTemplate(ctx, sb.TemplateID) + if err == nil { + resumeDefaultUser = tmpl.DefaultUser + if len(tmpl.DefaultEnv) > 0 { + _ = json.Unmarshal(tmpl.DefaultEnv, &resumeDefaultEnv) + } + } + } + + // Extract kernel version hint from existing sandbox metadata. + var kernelVersion string + if len(sb.Metadata) > 0 { + var meta map[string]string + if err := json.Unmarshal(sb.Metadata, &meta); err == nil { + kernelVersion = meta["kernel_version"] + } + } + resp, err := agent.ResumeSandbox(ctx, connect.NewRequest(&pb.ResumeSandboxRequest{ - SandboxId: sandboxIDStr, - TimeoutSec: sb.TimeoutSec, + SandboxId: sandboxIDStr, + TimeoutSec: sb.TimeoutSec, + DefaultUser: resumeDefaultUser, + DefaultEnv: resumeDefaultEnv, + KernelVersion: kernelVersion, })) if err != nil { return db.Sandbox{}, fmt.Errorf("agent resume: %w", err) @@ -270,6 +318,19 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU if err != nil { return db.Sandbox{}, fmt.Errorf("update status: %w", err) } + + // Update metadata with actual versions used after resume. 
+ if meta := resp.Msg.Metadata; len(meta) > 0 { + metaJSON, _ := json.Marshal(meta) + if err := s.DB.UpdateSandboxMetadata(ctx, db.UpdateSandboxMetadataParams{ + ID: sandboxID, + Metadata: metaJSON, + }); err != nil { + slog.Warn("failed to update sandbox metadata after resume", "id", sandboxIDStr, "error", err) + } + sb.Metadata = metaJSON + } + return sb, nil } diff --git a/internal/service/stats.go b/pkg/service/stats.go similarity index 99% rename from internal/service/stats.go rename to pkg/service/stats.go index 88abde2..d756a74 100644 --- a/internal/service/stats.go +++ b/pkg/service/stats.go @@ -10,7 +10,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" - "git.omukk.dev/wrenn/wrenn/internal/db" + "git.omukk.dev/wrenn/wrenn/pkg/db" ) // TimeRange identifies a chart time window. diff --git a/internal/service/team.go b/pkg/service/team.go similarity index 78% rename from internal/service/team.go rename to pkg/service/team.go index f386338..0376406 100644 --- a/internal/service/team.go +++ b/pkg/service/team.go @@ -12,9 +12,9 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" - "git.omukk.dev/wrenn/wrenn/internal/db" - "git.omukk.dev/wrenn/wrenn/internal/id" - "git.omukk.dev/wrenn/wrenn/internal/lifecycle" + "git.omukk.dev/wrenn/wrenn/pkg/db" + "git.omukk.dev/wrenn/wrenn/pkg/id" + "git.omukk.dev/wrenn/wrenn/pkg/lifecycle" pb "git.omukk.dev/wrenn/wrenn/proto/hostagent/gen" ) @@ -169,6 +169,12 @@ func (s *TeamService) DeleteTeam(ctx context.Context, teamID, callerUserID pgtyp return fmt.Errorf("forbidden: only the owner can delete a team") } + return s.deleteTeamCore(ctx, teamID) +} + +// deleteTeamCore contains the shared team deletion logic: +// destroy active sandboxes, clean up templates, soft-delete the team. +func (s *TeamService) deleteTeamCore(ctx context.Context, teamID pgtype.UUID) error { // Collect active sandboxes and stop them. 
sandboxes, err := s.DB.ListActiveSandboxesByTeam(ctx, teamID) if err != nil { @@ -202,6 +208,24 @@ func (s *TeamService) DeleteTeam(ctx context.Context, teamID, callerUserID pgtyp } } + // Delete sandbox metrics for this team. + if err := s.DB.DeleteMetricPointsByTeam(ctx, teamID); err != nil { + slog.Warn("team delete: failed to delete metric points", "team_id", id.FormatTeamID(teamID), "error", err) + } + if err := s.DB.DeleteMetricsSnapshotsByTeam(ctx, teamID); err != nil { + slog.Warn("team delete: failed to delete metrics snapshots", "team_id", id.FormatTeamID(teamID), "error", err) + } + + // Delete all API keys for this team. + if err := s.DB.DeleteAPIKeysByTeam(ctx, teamID); err != nil { + slog.Warn("team delete: failed to delete API keys", "team_id", id.FormatTeamID(teamID), "error", err) + } + + // Delete all channels for this team. + if err := s.DB.DeleteAllChannelsByTeam(ctx, teamID); err != nil { + slog.Warn("team delete: failed to delete channels", "team_id", id.FormatTeamID(teamID), "error", err) + } + // Clean up team-owned templates from all hosts in the background. go s.cleanupTeamTemplates(context.Background(), teamID) @@ -441,3 +465,80 @@ func (s *TeamService) SetBYOC(ctx context.Context, teamID pgtype.UUID, enabled b } return nil } + +// AdminTeamRow is the shape returned by AdminListTeams. +type AdminTeamRow struct { + ID pgtype.UUID + Name string + Slug string + IsByoc bool + CreatedAt time.Time + DeletedAt *time.Time + MemberCount int32 + OwnerName string + OwnerEmail string + ActiveSandboxCount int32 + ChannelCount int32 +} + +// AdminListTeams returns a paginated list of all teams (excluding the platform +// team) with member counts, owner info, and active sandbox counts. +// Admin-only — caller must verify admin status. 
+func (s *TeamService) AdminListTeams(ctx context.Context, limit, offset int32) ([]AdminTeamRow, int32, error) { + teams, err := s.DB.ListTeamsAdmin(ctx, db.ListTeamsAdminParams{ + Limit: limit, + Offset: offset, + }) + if err != nil { + return nil, 0, fmt.Errorf("list teams: %w", err) + } + + total, err := s.DB.CountTeamsAdmin(ctx) + if err != nil { + return nil, 0, fmt.Errorf("count teams: %w", err) + } + + rows := make([]AdminTeamRow, len(teams)) + for i, t := range teams { + row := AdminTeamRow{ + ID: t.ID, + Name: t.Name, + Slug: t.Slug, + IsByoc: t.IsByoc, + CreatedAt: t.CreatedAt.Time, + MemberCount: t.MemberCount, + OwnerName: t.OwnerName, + OwnerEmail: t.OwnerEmail, + ActiveSandboxCount: t.ActiveSandboxCount, + ChannelCount: t.ChannelCount, + } + if t.DeletedAt.Valid { + deletedAt := t.DeletedAt.Time + row.DeletedAt = &deletedAt + } + rows[i] = row + } + return rows, total, nil +} + +// DeleteTeamInternal soft-deletes a team and destroys all its active sandboxes. +// Used for system-initiated deletions (e.g. cascading from user account deletion) +// where no caller role check is needed. +func (s *TeamService) DeleteTeamInternal(ctx context.Context, teamID pgtype.UUID) error { + return s.deleteTeamCore(ctx, teamID) +} + +// AdminDeleteTeam soft-deletes a team and destroys all its active sandboxes. +// Unlike DeleteTeam, this does not require the caller to be the team owner — +// it is admin-only (caller must verify admin status). 
+func (s *TeamService) AdminDeleteTeam(ctx context.Context, teamID pgtype.UUID) error { + team, err := s.DB.GetTeam(ctx, teamID) + if err != nil { + return fmt.Errorf("team not found: %w", err) + } + if team.DeletedAt.Valid { + return fmt.Errorf("team not found") + } + + return s.deleteTeamCore(ctx, teamID) +} diff --git a/internal/service/template.go b/pkg/service/template.go similarity index 94% rename from internal/service/template.go rename to pkg/service/template.go index af18076..269af10 100644 --- a/internal/service/template.go +++ b/pkg/service/template.go @@ -5,7 +5,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" - "git.omukk.dev/wrenn/wrenn/internal/db" + "git.omukk.dev/wrenn/wrenn/pkg/db" ) // TemplateService provides template/snapshot operations shared between the diff --git a/pkg/service/user.go b/pkg/service/user.go new file mode 100644 index 0000000..f07b3f4 --- /dev/null +++ b/pkg/service/user.go @@ -0,0 +1,107 @@ +package service + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5/pgtype" + + "git.omukk.dev/wrenn/wrenn/pkg/db" +) + +// UserService provides user management operations. +type UserService struct { + DB *db.Queries + SandboxSvc *SandboxService +} + +// AdminUserRow is the shape returned by AdminListUsers. +type AdminUserRow struct { + ID pgtype.UUID + Email string + Name string + IsAdmin bool + Status string + CreatedAt time.Time + TeamsJoined int32 + TeamsOwned int32 +} + +// AdminListUsers returns a paginated list of all non-deleted users with team counts. 
+func (s *UserService) AdminListUsers(ctx context.Context, limit, offset int32) ([]AdminUserRow, int32, error) { + users, err := s.DB.ListUsersAdmin(ctx, db.ListUsersAdminParams{ + Limit: limit, + Offset: offset, + }) + if err != nil { + return nil, 0, fmt.Errorf("list users: %w", err) + } + + total, err := s.DB.CountUsersAdmin(ctx) + if err != nil { + return nil, 0, fmt.Errorf("count users: %w", err) + } + + rows := make([]AdminUserRow, len(users)) + for i, u := range users { + rows[i] = AdminUserRow{ + ID: u.ID, + Email: u.Email, + Name: u.Name, + IsAdmin: u.IsAdmin, + Status: u.Status, + CreatedAt: u.CreatedAt.Time, + TeamsJoined: u.TeamsJoined, + TeamsOwned: u.TeamsOwned, + } + } + return rows, total, nil +} + +// SetUserStatus sets the status of a user account. +func (s *UserService) SetUserStatus(ctx context.Context, userID pgtype.UUID, status string) error { + if err := s.DB.SetUserStatus(ctx, db.SetUserStatusParams{ + ID: userID, + Status: status, + }); err != nil { + return fmt.Errorf("set user status: %w", err) + } + if status == "disabled" || status == "deleted" { + if err := s.DB.DeleteAPIKeysByCreator(ctx, userID); err != nil { + slog.Warn("failed to delete API keys for deactivated user", "user_id", userID, "error", err) + } + s.destroySandboxesForOwnedTeams(ctx, userID) + } + return nil +} + +// destroySandboxesForOwnedTeams destroys all active sandboxes (running, paused, +// hibernated, starting) for every team the user owns. Best-effort: errors are +// logged but do not prevent the user from being disabled. 
+func (s *UserService) destroySandboxesForOwnedTeams(ctx context.Context, userID pgtype.UUID) { + if s.SandboxSvc == nil { + return + } + + teamIDs, err := s.DB.GetOwnedTeamIDs(ctx, userID) + if err != nil { + slog.Warn("failed to list owned teams for sandbox cleanup", "user_id", userID, "error", err) + return + } + + for _, teamID := range teamIDs { + sandboxes, err := s.DB.ListActiveSandboxesByTeam(ctx, teamID) + if err != nil { + slog.Warn("failed to list active sandboxes for team", "team_id", teamID, "user_id", userID, "error", err) + continue + } + for _, sb := range sandboxes { + if err := s.SandboxSvc.Destroy(ctx, sb.ID, teamID); err != nil { + slog.Warn("failed to destroy sandbox during user disable", + "sandbox_id", sb.ID, "team_id", teamID, "user_id", userID, "error", err) + } + } + } +} diff --git a/internal/validate/name.go b/pkg/validate/name.go similarity index 100% rename from internal/validate/name.go rename to pkg/validate/name.go diff --git a/internal/validate/name_test.go b/pkg/validate/name_test.go similarity index 100% rename from internal/validate/name_test.go rename to pkg/validate/name_test.go diff --git a/proto/hostagent/gen/hostagent.pb.go b/proto/hostagent/gen/hostagent.pb.go index b4ab783..96a0512 100644 --- a/proto/hostagent/gen/hostagent.pb.go +++ b/proto/hostagent/gen/hostagent.pb.go @@ -40,7 +40,11 @@ type CreateSandboxRequest struct { // Team UUID that owns the template (hex string). All-zeros = platform. TeamId string `protobuf:"bytes,7,opt,name=team_id,json=teamId,proto3" json:"team_id,omitempty"` // Template UUID (hex string). Both zeros + team zeros = "minimal" sentinel. - TemplateId string `protobuf:"bytes,8,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` + TemplateId string `protobuf:"bytes,8,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` + // Default unix user for the sandbox (set in envd via PostInit). 
+ DefaultUser string `protobuf:"bytes,9,opt,name=default_user,json=defaultUser,proto3" json:"default_user,omitempty"` + // Default environment variables (set in envd via PostInit). + DefaultEnv map[string]string `protobuf:"bytes,10,rep,name=default_env,json=defaultEnv,proto3" json:"default_env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -131,11 +135,28 @@ func (x *CreateSandboxRequest) GetTemplateId() string { return "" } +func (x *CreateSandboxRequest) GetDefaultUser() string { + if x != nil { + return x.DefaultUser + } + return "" +} + +func (x *CreateSandboxRequest) GetDefaultEnv() map[string]string { + if x != nil { + return x.DefaultEnv + } + return nil +} + type CreateSandboxResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - HostIp string `protobuf:"bytes,3,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + HostIp string `protobuf:"bytes,3,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + // Runtime metadata collected during sandbox creation (e.g. envd_version, + // kernel_version, firecracker_version, agent_version). 
+ Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -191,6 +212,13 @@ func (x *CreateSandboxResponse) GetHostIp() string { return "" } +func (x *CreateSandboxResponse) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + type DestroySandboxRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -356,7 +384,14 @@ type ResumeSandboxRequest struct { SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` // TTL in seconds restored from the DB so the reaper can auto-pause // the sandbox again after inactivity. 0 means no auto-pause. - TimeoutSec int32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` + TimeoutSec int32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` + // Default unix user for the sandbox (set in envd via PostInit on resume). + DefaultUser string `protobuf:"bytes,3,opt,name=default_user,json=defaultUser,proto3" json:"default_user,omitempty"` + // Default environment variables (set in envd via PostInit on resume). + DefaultEnv map[string]string `protobuf:"bytes,4,rep,name=default_env,json=defaultEnv,proto3" json:"default_env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Kernel version hint from the DB — the agent tries to use the exact version, + // falling back to latest if not found on disk. 
+ KernelVersion string `protobuf:"bytes,5,opt,name=kernel_version,json=kernelVersion,proto3" json:"kernel_version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -405,11 +440,35 @@ func (x *ResumeSandboxRequest) GetTimeoutSec() int32 { return 0 } +func (x *ResumeSandboxRequest) GetDefaultUser() string { + if x != nil { + return x.DefaultUser + } + return "" +} + +func (x *ResumeSandboxRequest) GetDefaultEnv() map[string]string { + if x != nil { + return x.DefaultEnv + } + return nil +} + +func (x *ResumeSandboxRequest) GetKernelVersion() string { + if x != nil { + return x.KernelVersion + } + return "" +} + type ResumeSandboxResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - HostIp string `protobuf:"bytes,3,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + HostIp string `protobuf:"bytes,3,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + // Actual runtime metadata after resume (versions may differ from hint if + // the exact kernel was not available). 
+ Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -465,6 +524,13 @@ func (x *ResumeSandboxResponse) GetHostIp() string { return "" } +func (x *ResumeSandboxResponse) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + type CreateSnapshotRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -920,8 +986,10 @@ type SandboxInfo struct { TimeoutSec int32 `protobuf:"varint,9,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` TeamId string `protobuf:"bytes,10,opt,name=team_id,json=teamId,proto3" json:"team_id,omitempty"` TemplateId string `protobuf:"bytes,11,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Runtime metadata (envd_version, kernel_version, etc.). 
+ Metadata map[string]string `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SandboxInfo) Reset() { @@ -1031,6 +1099,13 @@ func (x *SandboxInfo) GetTemplateId() string { return "" } +func (x *SandboxInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + type WriteFileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -1833,6 +1908,413 @@ func (x *ReadFileStreamResponse) GetChunk() []byte { return nil } +type ListDirRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Depth uint32 `protobuf:"varint,3,opt,name=depth,proto3" json:"depth,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListDirRequest) Reset() { + *x = ListDirRequest{} + mi := &file_hostagent_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDirRequest) ProtoMessage() {} + +func (x *ListDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDirRequest.ProtoReflect.Descriptor instead. 
+func (*ListDirRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{31} +} + +func (x *ListDirRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *ListDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ListDirRequest) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} + +type ListDirResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entries []*FileEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListDirResponse) Reset() { + *x = ListDirResponse{} + mi := &file_hostagent_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDirResponse) ProtoMessage() {} + +func (x *ListDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDirResponse.ProtoReflect.Descriptor instead. +func (*ListDirResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{32} +} + +func (x *ListDirResponse) GetEntries() []*FileEntry { + if x != nil { + return x.Entries + } + return nil +} + +type FileEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // "file", "directory", or "symlink". 
+ Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + // Human-readable permissions string, e.g. "-rwxr-xr-x". + Permissions string `protobuf:"bytes,6,opt,name=permissions,proto3" json:"permissions,omitempty"` + Owner string `protobuf:"bytes,7,opt,name=owner,proto3" json:"owner,omitempty"` + Group string `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"` + // Last modification time as Unix timestamp (seconds). + ModifiedAt int64 `protobuf:"varint,9,opt,name=modified_at,json=modifiedAt,proto3" json:"modified_at,omitempty"` + SymlinkTarget *string `protobuf:"bytes,10,opt,name=symlink_target,json=symlinkTarget,proto3,oneof" json:"symlink_target,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FileEntry) Reset() { + *x = FileEntry{} + mi := &file_hostagent_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FileEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileEntry) ProtoMessage() {} + +func (x *FileEntry) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileEntry.ProtoReflect.Descriptor instead. 
+func (*FileEntry) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{33} +} + +func (x *FileEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FileEntry) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *FileEntry) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *FileEntry) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *FileEntry) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +func (x *FileEntry) GetPermissions() string { + if x != nil { + return x.Permissions + } + return "" +} + +func (x *FileEntry) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *FileEntry) GetGroup() string { + if x != nil { + return x.Group + } + return "" +} + +func (x *FileEntry) GetModifiedAt() int64 { + if x != nil { + return x.ModifiedAt + } + return 0 +} + +func (x *FileEntry) GetSymlinkTarget() string { + if x != nil && x.SymlinkTarget != nil { + return *x.SymlinkTarget + } + return "" +} + +type MakeDirRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MakeDirRequest) Reset() { + *x = MakeDirRequest{} + mi := &file_hostagent_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MakeDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MakeDirRequest) ProtoMessage() {} + +func (x *MakeDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MakeDirRequest.ProtoReflect.Descriptor instead. +func (*MakeDirRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{34} +} + +func (x *MakeDirRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *MakeDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type MakeDirResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entry *FileEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MakeDirResponse) Reset() { + *x = MakeDirResponse{} + mi := &file_hostagent_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MakeDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MakeDirResponse) ProtoMessage() {} + +func (x *MakeDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MakeDirResponse.ProtoReflect.Descriptor instead. 
+func (*MakeDirResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{35} +} + +func (x *MakeDirResponse) GetEntry() *FileEntry { + if x != nil { + return x.Entry + } + return nil +} + +type RemovePathRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemovePathRequest) Reset() { + *x = RemovePathRequest{} + mi := &file_hostagent_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemovePathRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemovePathRequest) ProtoMessage() {} + +func (x *RemovePathRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemovePathRequest.ProtoReflect.Descriptor instead. 
+func (*RemovePathRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{36} +} + +func (x *RemovePathRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *RemovePathRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type RemovePathResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemovePathResponse) Reset() { + *x = RemovePathResponse{} + mi := &file_hostagent_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemovePathResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemovePathResponse) ProtoMessage() {} + +func (x *RemovePathResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemovePathResponse.ProtoReflect.Descriptor instead. 
+func (*RemovePathResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{37} +} + type PingSandboxRequest struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -1842,7 +2324,7 @@ type PingSandboxRequest struct { func (x *PingSandboxRequest) Reset() { *x = PingSandboxRequest{} - mi := &file_hostagent_proto_msgTypes[31] + mi := &file_hostagent_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1854,7 +2336,7 @@ func (x *PingSandboxRequest) String() string { func (*PingSandboxRequest) ProtoMessage() {} func (x *PingSandboxRequest) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[31] + mi := &file_hostagent_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1867,7 +2349,7 @@ func (x *PingSandboxRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingSandboxRequest.ProtoReflect.Descriptor instead. 
func (*PingSandboxRequest) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{31} + return file_hostagent_proto_rawDescGZIP(), []int{38} } func (x *PingSandboxRequest) GetSandboxId() string { @@ -1885,7 +2367,7 @@ type PingSandboxResponse struct { func (x *PingSandboxResponse) Reset() { *x = PingSandboxResponse{} - mi := &file_hostagent_proto_msgTypes[32] + mi := &file_hostagent_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1897,7 +2379,7 @@ func (x *PingSandboxResponse) String() string { func (*PingSandboxResponse) ProtoMessage() {} func (x *PingSandboxResponse) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[32] + mi := &file_hostagent_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1910,7 +2392,7 @@ func (x *PingSandboxResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingSandboxResponse.ProtoReflect.Descriptor instead. 
func (*PingSandboxResponse) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{32} + return file_hostagent_proto_rawDescGZIP(), []int{39} } type TerminateRequest struct { @@ -1921,7 +2403,7 @@ type TerminateRequest struct { func (x *TerminateRequest) Reset() { *x = TerminateRequest{} - mi := &file_hostagent_proto_msgTypes[33] + mi := &file_hostagent_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1933,7 +2415,7 @@ func (x *TerminateRequest) String() string { func (*TerminateRequest) ProtoMessage() {} func (x *TerminateRequest) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[33] + mi := &file_hostagent_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1946,7 +2428,7 @@ func (x *TerminateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TerminateRequest.ProtoReflect.Descriptor instead. 
func (*TerminateRequest) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{33} + return file_hostagent_proto_rawDescGZIP(), []int{40} } type TerminateResponse struct { @@ -1957,7 +2439,7 @@ type TerminateResponse struct { func (x *TerminateResponse) Reset() { *x = TerminateResponse{} - mi := &file_hostagent_proto_msgTypes[34] + mi := &file_hostagent_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1969,7 +2451,7 @@ func (x *TerminateResponse) String() string { func (*TerminateResponse) ProtoMessage() {} func (x *TerminateResponse) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[34] + mi := &file_hostagent_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1982,7 +2464,7 @@ func (x *TerminateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TerminateResponse.ProtoReflect.Descriptor instead. func (*TerminateResponse) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{34} + return file_hostagent_proto_rawDescGZIP(), []int{41} } type MetricPoint struct { @@ -1997,7 +2479,7 @@ type MetricPoint struct { func (x *MetricPoint) Reset() { *x = MetricPoint{} - mi := &file_hostagent_proto_msgTypes[35] + mi := &file_hostagent_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2009,7 +2491,7 @@ func (x *MetricPoint) String() string { func (*MetricPoint) ProtoMessage() {} func (x *MetricPoint) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[35] + mi := &file_hostagent_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2022,7 +2504,7 @@ func (x *MetricPoint) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricPoint.ProtoReflect.Descriptor instead. 
func (*MetricPoint) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{35} + return file_hostagent_proto_rawDescGZIP(), []int{42} } func (x *MetricPoint) GetTimestampUnix() int64 { @@ -2064,7 +2546,7 @@ type GetSandboxMetricsRequest struct { func (x *GetSandboxMetricsRequest) Reset() { *x = GetSandboxMetricsRequest{} - mi := &file_hostagent_proto_msgTypes[36] + mi := &file_hostagent_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2076,7 +2558,7 @@ func (x *GetSandboxMetricsRequest) String() string { func (*GetSandboxMetricsRequest) ProtoMessage() {} func (x *GetSandboxMetricsRequest) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[36] + mi := &file_hostagent_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2089,7 +2571,7 @@ func (x *GetSandboxMetricsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSandboxMetricsRequest.ProtoReflect.Descriptor instead. 
func (*GetSandboxMetricsRequest) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{36} + return file_hostagent_proto_rawDescGZIP(), []int{43} } func (x *GetSandboxMetricsRequest) GetSandboxId() string { @@ -2115,7 +2597,7 @@ type GetSandboxMetricsResponse struct { func (x *GetSandboxMetricsResponse) Reset() { *x = GetSandboxMetricsResponse{} - mi := &file_hostagent_proto_msgTypes[37] + mi := &file_hostagent_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2127,7 +2609,7 @@ func (x *GetSandboxMetricsResponse) String() string { func (*GetSandboxMetricsResponse) ProtoMessage() {} func (x *GetSandboxMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[37] + mi := &file_hostagent_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2140,7 +2622,7 @@ func (x *GetSandboxMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSandboxMetricsResponse.ProtoReflect.Descriptor instead. 
func (*GetSandboxMetricsResponse) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{37} + return file_hostagent_proto_rawDescGZIP(), []int{44} } func (x *GetSandboxMetricsResponse) GetPoints() []*MetricPoint { @@ -2159,7 +2641,7 @@ type FlushSandboxMetricsRequest struct { func (x *FlushSandboxMetricsRequest) Reset() { *x = FlushSandboxMetricsRequest{} - mi := &file_hostagent_proto_msgTypes[38] + mi := &file_hostagent_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2171,7 +2653,7 @@ func (x *FlushSandboxMetricsRequest) String() string { func (*FlushSandboxMetricsRequest) ProtoMessage() {} func (x *FlushSandboxMetricsRequest) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[38] + mi := &file_hostagent_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2184,7 +2666,7 @@ func (x *FlushSandboxMetricsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FlushSandboxMetricsRequest.ProtoReflect.Descriptor instead. 
func (*FlushSandboxMetricsRequest) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{38} + return file_hostagent_proto_rawDescGZIP(), []int{45} } func (x *FlushSandboxMetricsRequest) GetSandboxId() string { @@ -2205,7 +2687,7 @@ type FlushSandboxMetricsResponse struct { func (x *FlushSandboxMetricsResponse) Reset() { *x = FlushSandboxMetricsResponse{} - mi := &file_hostagent_proto_msgTypes[39] + mi := &file_hostagent_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2217,7 +2699,7 @@ func (x *FlushSandboxMetricsResponse) String() string { func (*FlushSandboxMetricsResponse) ProtoMessage() {} func (x *FlushSandboxMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[39] + mi := &file_hostagent_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2230,7 +2712,7 @@ func (x *FlushSandboxMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FlushSandboxMetricsResponse.ProtoReflect.Descriptor instead. 
func (*FlushSandboxMetricsResponse) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{39} + return file_hostagent_proto_rawDescGZIP(), []int{46} } func (x *FlushSandboxMetricsResponse) GetPoints_10M() []*MetricPoint { @@ -2269,7 +2751,7 @@ type FlattenRootfsRequest struct { func (x *FlattenRootfsRequest) Reset() { *x = FlattenRootfsRequest{} - mi := &file_hostagent_proto_msgTypes[40] + mi := &file_hostagent_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2281,7 +2763,7 @@ func (x *FlattenRootfsRequest) String() string { func (*FlattenRootfsRequest) ProtoMessage() {} func (x *FlattenRootfsRequest) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[40] + mi := &file_hostagent_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2294,7 +2776,7 @@ func (x *FlattenRootfsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FlattenRootfsRequest.ProtoReflect.Descriptor instead. 
func (*FlattenRootfsRequest) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{40} + return file_hostagent_proto_rawDescGZIP(), []int{47} } func (x *FlattenRootfsRequest) GetSandboxId() string { @@ -2334,7 +2816,7 @@ type FlattenRootfsResponse struct { func (x *FlattenRootfsResponse) Reset() { *x = FlattenRootfsResponse{} - mi := &file_hostagent_proto_msgTypes[41] + mi := &file_hostagent_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2346,7 +2828,7 @@ func (x *FlattenRootfsResponse) String() string { func (*FlattenRootfsResponse) ProtoMessage() {} func (x *FlattenRootfsResponse) ProtoReflect() protoreflect.Message { - mi := &file_hostagent_proto_msgTypes[41] + mi := &file_hostagent_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2359,7 +2841,7 @@ func (x *FlattenRootfsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FlattenRootfsResponse.ProtoReflect.Descriptor instead. func (*FlattenRootfsResponse) Descriptor() ([]byte, []int) { - return file_hostagent_proto_rawDescGZIP(), []int{41} + return file_hostagent_proto_rawDescGZIP(), []int{48} } func (x *FlattenRootfsResponse) GetSizeBytes() int64 { @@ -2369,11 +2851,1277 @@ func (x *FlattenRootfsResponse) GetSizeBytes() int64 { return 0 } +type PtyAttachRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // Tag is the stable identifier for this PTY session (e.g. "pty-abc123de"). + // Chosen by the caller and used to reconnect later. + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + // If cmd is non-empty, a new process is started. If empty, reconnects to + // the existing process identified by tag. 
+ Cmd string `protobuf:"bytes,3,opt,name=cmd,proto3" json:"cmd,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args,omitempty"` + Cols uint32 `protobuf:"varint,5,opt,name=cols,proto3" json:"cols,omitempty"` + Rows uint32 `protobuf:"varint,6,opt,name=rows,proto3" json:"rows,omitempty"` + // Environment variables for the process. + Envs map[string]string `protobuf:"bytes,7,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Working directory. Empty means default. + Cwd string `protobuf:"bytes,8,opt,name=cwd,proto3" json:"cwd,omitempty"` + // User to run as. Empty means default (root). + User string `protobuf:"bytes,9,opt,name=user,proto3" json:"user,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyAttachRequest) Reset() { + *x = PtyAttachRequest{} + mi := &file_hostagent_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyAttachRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyAttachRequest) ProtoMessage() {} + +func (x *PtyAttachRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[49] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyAttachRequest.ProtoReflect.Descriptor instead. 
+func (*PtyAttachRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{49} +} + +func (x *PtyAttachRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *PtyAttachRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *PtyAttachRequest) GetCmd() string { + if x != nil { + return x.Cmd + } + return "" +} + +func (x *PtyAttachRequest) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +func (x *PtyAttachRequest) GetCols() uint32 { + if x != nil { + return x.Cols + } + return 0 +} + +func (x *PtyAttachRequest) GetRows() uint32 { + if x != nil { + return x.Rows + } + return 0 +} + +func (x *PtyAttachRequest) GetEnvs() map[string]string { + if x != nil { + return x.Envs + } + return nil +} + +func (x *PtyAttachRequest) GetCwd() string { + if x != nil { + return x.Cwd + } + return "" +} + +func (x *PtyAttachRequest) GetUser() string { + if x != nil { + return x.User + } + return "" +} + +type PtyAttachResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Event: + // + // *PtyAttachResponse_Started + // *PtyAttachResponse_Output + // *PtyAttachResponse_Exited + Event isPtyAttachResponse_Event `protobuf_oneof:"event"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyAttachResponse) Reset() { + *x = PtyAttachResponse{} + mi := &file_hostagent_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyAttachResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyAttachResponse) ProtoMessage() {} + +func (x *PtyAttachResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[50] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return 
ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyAttachResponse.ProtoReflect.Descriptor instead. +func (*PtyAttachResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{50} +} + +func (x *PtyAttachResponse) GetEvent() isPtyAttachResponse_Event { + if x != nil { + return x.Event + } + return nil +} + +func (x *PtyAttachResponse) GetStarted() *PtyStarted { + if x != nil { + if x, ok := x.Event.(*PtyAttachResponse_Started); ok { + return x.Started + } + } + return nil +} + +func (x *PtyAttachResponse) GetOutput() *PtyOutput { + if x != nil { + if x, ok := x.Event.(*PtyAttachResponse_Output); ok { + return x.Output + } + } + return nil +} + +func (x *PtyAttachResponse) GetExited() *PtyExited { + if x != nil { + if x, ok := x.Event.(*PtyAttachResponse_Exited); ok { + return x.Exited + } + } + return nil +} + +type isPtyAttachResponse_Event interface { + isPtyAttachResponse_Event() +} + +type PtyAttachResponse_Started struct { + Started *PtyStarted `protobuf:"bytes,1,opt,name=started,proto3,oneof"` +} + +type PtyAttachResponse_Output struct { + Output *PtyOutput `protobuf:"bytes,2,opt,name=output,proto3,oneof"` +} + +type PtyAttachResponse_Exited struct { + Exited *PtyExited `protobuf:"bytes,3,opt,name=exited,proto3,oneof"` +} + +func (*PtyAttachResponse_Started) isPtyAttachResponse_Event() {} + +func (*PtyAttachResponse_Output) isPtyAttachResponse_Event() {} + +func (*PtyAttachResponse_Exited) isPtyAttachResponse_Event() {} + +type PtyStarted struct { + state protoimpl.MessageState `protogen:"open.v1"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyStarted) Reset() { + *x = PtyStarted{} + mi := &file_hostagent_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*PtyStarted) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyStarted) ProtoMessage() {} + +func (x *PtyStarted) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[51] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyStarted.ProtoReflect.Descriptor instead. +func (*PtyStarted) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{51} +} + +func (x *PtyStarted) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *PtyStarted) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +type PtyOutput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyOutput) Reset() { + *x = PtyOutput{} + mi := &file_hostagent_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyOutput) ProtoMessage() {} + +func (x *PtyOutput) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[52] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyOutput.ProtoReflect.Descriptor instead. 
+func (*PtyOutput) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{52} +} + +func (x *PtyOutput) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type PtyExited struct { + state protoimpl.MessageState `protogen:"open.v1"` + ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyExited) Reset() { + *x = PtyExited{} + mi := &file_hostagent_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyExited) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyExited) ProtoMessage() {} + +func (x *PtyExited) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[53] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyExited.ProtoReflect.Descriptor instead. 
+func (*PtyExited) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{53} +} + +func (x *PtyExited) GetExitCode() int32 { + if x != nil { + return x.ExitCode + } + return 0 +} + +func (x *PtyExited) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type PtySendInputRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtySendInputRequest) Reset() { + *x = PtySendInputRequest{} + mi := &file_hostagent_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtySendInputRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtySendInputRequest) ProtoMessage() {} + +func (x *PtySendInputRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[54] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtySendInputRequest.ProtoReflect.Descriptor instead. 
+func (*PtySendInputRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{54} +} + +func (x *PtySendInputRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *PtySendInputRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *PtySendInputRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type PtySendInputResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtySendInputResponse) Reset() { + *x = PtySendInputResponse{} + mi := &file_hostagent_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtySendInputResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtySendInputResponse) ProtoMessage() {} + +func (x *PtySendInputResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[55] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtySendInputResponse.ProtoReflect.Descriptor instead. 
+func (*PtySendInputResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{55} +} + +type PtyResizeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + Cols uint32 `protobuf:"varint,3,opt,name=cols,proto3" json:"cols,omitempty"` + Rows uint32 `protobuf:"varint,4,opt,name=rows,proto3" json:"rows,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyResizeRequest) Reset() { + *x = PtyResizeRequest{} + mi := &file_hostagent_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyResizeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyResizeRequest) ProtoMessage() {} + +func (x *PtyResizeRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[56] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyResizeRequest.ProtoReflect.Descriptor instead. 
+func (*PtyResizeRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{56} +} + +func (x *PtyResizeRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *PtyResizeRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *PtyResizeRequest) GetCols() uint32 { + if x != nil { + return x.Cols + } + return 0 +} + +func (x *PtyResizeRequest) GetRows() uint32 { + if x != nil { + return x.Rows + } + return 0 +} + +type PtyResizeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyResizeResponse) Reset() { + *x = PtyResizeResponse{} + mi := &file_hostagent_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyResizeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyResizeResponse) ProtoMessage() {} + +func (x *PtyResizeResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[57] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyResizeResponse.ProtoReflect.Descriptor instead. 
+func (*PtyResizeResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{57} +} + +type PtyKillRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyKillRequest) Reset() { + *x = PtyKillRequest{} + mi := &file_hostagent_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyKillRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyKillRequest) ProtoMessage() {} + +func (x *PtyKillRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[58] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyKillRequest.ProtoReflect.Descriptor instead. 
+func (*PtyKillRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{58} +} + +func (x *PtyKillRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *PtyKillRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +type PtyKillResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PtyKillResponse) Reset() { + *x = PtyKillResponse{} + mi := &file_hostagent_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PtyKillResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PtyKillResponse) ProtoMessage() {} + +func (x *PtyKillResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[59] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PtyKillResponse.ProtoReflect.Descriptor instead. +func (*PtyKillResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{59} +} + +type StartBackgroundRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Cmd string `protobuf:"bytes,2,opt,name=cmd,proto3" json:"cmd,omitempty"` + Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` + // User-chosen tag for the process. If empty, the host agent generates one. 
+ Tag string `protobuf:"bytes,4,opt,name=tag,proto3" json:"tag,omitempty"` + Envs map[string]string `protobuf:"bytes,5,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Cwd string `protobuf:"bytes,6,opt,name=cwd,proto3" json:"cwd,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartBackgroundRequest) Reset() { + *x = StartBackgroundRequest{} + mi := &file_hostagent_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartBackgroundRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBackgroundRequest) ProtoMessage() {} + +func (x *StartBackgroundRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[60] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBackgroundRequest.ProtoReflect.Descriptor instead. 
+func (*StartBackgroundRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{60} +} + +func (x *StartBackgroundRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *StartBackgroundRequest) GetCmd() string { + if x != nil { + return x.Cmd + } + return "" +} + +func (x *StartBackgroundRequest) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +func (x *StartBackgroundRequest) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *StartBackgroundRequest) GetEnvs() map[string]string { + if x != nil { + return x.Envs + } + return nil +} + +func (x *StartBackgroundRequest) GetCwd() string { + if x != nil { + return x.Cwd + } + return "" +} + +type StartBackgroundResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartBackgroundResponse) Reset() { + *x = StartBackgroundResponse{} + mi := &file_hostagent_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartBackgroundResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartBackgroundResponse) ProtoMessage() {} + +func (x *StartBackgroundResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[61] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartBackgroundResponse.ProtoReflect.Descriptor instead. 
+func (*StartBackgroundResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{61} +} + +func (x *StartBackgroundResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StartBackgroundResponse) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +type ListProcessesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListProcessesRequest) Reset() { + *x = ListProcessesRequest{} + mi := &file_hostagent_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListProcessesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListProcessesRequest) ProtoMessage() {} + +func (x *ListProcessesRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[62] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListProcessesRequest.ProtoReflect.Descriptor instead. 
+func (*ListProcessesRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{62} +} + +func (x *ListProcessesRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +type ProcessEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` + Cmd string `protobuf:"bytes,3,opt,name=cmd,proto3" json:"cmd,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ProcessEntry) Reset() { + *x = ProcessEntry{} + mi := &file_hostagent_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProcessEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEntry) ProtoMessage() {} + +func (x *ProcessEntry) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[63] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEntry.ProtoReflect.Descriptor instead. 
+func (*ProcessEntry) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{63} +} + +func (x *ProcessEntry) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ProcessEntry) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ProcessEntry) GetCmd() string { + if x != nil { + return x.Cmd + } + return "" +} + +func (x *ProcessEntry) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +type ListProcessesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Processes []*ProcessEntry `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListProcessesResponse) Reset() { + *x = ListProcessesResponse{} + mi := &file_hostagent_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListProcessesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListProcessesResponse) ProtoMessage() {} + +func (x *ListProcessesResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[64] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListProcessesResponse.ProtoReflect.Descriptor instead. 
+func (*ListProcessesResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{64} +} + +func (x *ListProcessesResponse) GetProcesses() []*ProcessEntry { + if x != nil { + return x.Processes + } + return nil +} + +type KillProcessRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // Types that are valid to be assigned to Selector: + // + // *KillProcessRequest_Pid + // *KillProcessRequest_Tag + Selector isKillProcessRequest_Selector `protobuf_oneof:"selector"` + // Signal to send: "SIGTERM" or "SIGKILL" (default: "SIGKILL"). + Signal string `protobuf:"bytes,4,opt,name=signal,proto3" json:"signal,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KillProcessRequest) Reset() { + *x = KillProcessRequest{} + mi := &file_hostagent_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KillProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillProcessRequest) ProtoMessage() {} + +func (x *KillProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[65] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillProcessRequest.ProtoReflect.Descriptor instead. 
+func (*KillProcessRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{65} +} + +func (x *KillProcessRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *KillProcessRequest) GetSelector() isKillProcessRequest_Selector { + if x != nil { + return x.Selector + } + return nil +} + +func (x *KillProcessRequest) GetPid() uint32 { + if x != nil { + if x, ok := x.Selector.(*KillProcessRequest_Pid); ok { + return x.Pid + } + } + return 0 +} + +func (x *KillProcessRequest) GetTag() string { + if x != nil { + if x, ok := x.Selector.(*KillProcessRequest_Tag); ok { + return x.Tag + } + } + return "" +} + +func (x *KillProcessRequest) GetSignal() string { + if x != nil { + return x.Signal + } + return "" +} + +type isKillProcessRequest_Selector interface { + isKillProcessRequest_Selector() +} + +type KillProcessRequest_Pid struct { + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3,oneof"` +} + +type KillProcessRequest_Tag struct { + Tag string `protobuf:"bytes,3,opt,name=tag,proto3,oneof"` +} + +func (*KillProcessRequest_Pid) isKillProcessRequest_Selector() {} + +func (*KillProcessRequest_Tag) isKillProcessRequest_Selector() {} + +type KillProcessResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KillProcessResponse) Reset() { + *x = KillProcessResponse{} + mi := &file_hostagent_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KillProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillProcessResponse) ProtoMessage() {} + +func (x *KillProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[66] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use KillProcessResponse.ProtoReflect.Descriptor instead. +func (*KillProcessResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{66} +} + +type ConnectProcessRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // Types that are valid to be assigned to Selector: + // + // *ConnectProcessRequest_Pid + // *ConnectProcessRequest_Tag + Selector isConnectProcessRequest_Selector `protobuf_oneof:"selector"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectProcessRequest) Reset() { + *x = ConnectProcessRequest{} + mi := &file_hostagent_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectProcessRequest) ProtoMessage() {} + +func (x *ConnectProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[67] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectProcessRequest.ProtoReflect.Descriptor instead. 
+func (*ConnectProcessRequest) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{67} +} + +func (x *ConnectProcessRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *ConnectProcessRequest) GetSelector() isConnectProcessRequest_Selector { + if x != nil { + return x.Selector + } + return nil +} + +func (x *ConnectProcessRequest) GetPid() uint32 { + if x != nil { + if x, ok := x.Selector.(*ConnectProcessRequest_Pid); ok { + return x.Pid + } + } + return 0 +} + +func (x *ConnectProcessRequest) GetTag() string { + if x != nil { + if x, ok := x.Selector.(*ConnectProcessRequest_Tag); ok { + return x.Tag + } + } + return "" +} + +type isConnectProcessRequest_Selector interface { + isConnectProcessRequest_Selector() +} + +type ConnectProcessRequest_Pid struct { + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3,oneof"` +} + +type ConnectProcessRequest_Tag struct { + Tag string `protobuf:"bytes,3,opt,name=tag,proto3,oneof"` +} + +func (*ConnectProcessRequest_Pid) isConnectProcessRequest_Selector() {} + +func (*ConnectProcessRequest_Tag) isConnectProcessRequest_Selector() {} + +// Reuses ExecStream event types for symmetry. 
+type ConnectProcessResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Event: + // + // *ConnectProcessResponse_Start + // *ConnectProcessResponse_Data + // *ConnectProcessResponse_End + Event isConnectProcessResponse_Event `protobuf_oneof:"event"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectProcessResponse) Reset() { + *x = ConnectProcessResponse{} + mi := &file_hostagent_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectProcessResponse) ProtoMessage() {} + +func (x *ConnectProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_hostagent_proto_msgTypes[68] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectProcessResponse.ProtoReflect.Descriptor instead. 
+func (*ConnectProcessResponse) Descriptor() ([]byte, []int) { + return file_hostagent_proto_rawDescGZIP(), []int{68} +} + +func (x *ConnectProcessResponse) GetEvent() isConnectProcessResponse_Event { + if x != nil { + return x.Event + } + return nil +} + +func (x *ConnectProcessResponse) GetStart() *ExecStreamStart { + if x != nil { + if x, ok := x.Event.(*ConnectProcessResponse_Start); ok { + return x.Start + } + } + return nil +} + +func (x *ConnectProcessResponse) GetData() *ExecStreamData { + if x != nil { + if x, ok := x.Event.(*ConnectProcessResponse_Data); ok { + return x.Data + } + } + return nil +} + +func (x *ConnectProcessResponse) GetEnd() *ExecStreamEnd { + if x != nil { + if x, ok := x.Event.(*ConnectProcessResponse_End); ok { + return x.End + } + } + return nil +} + +type isConnectProcessResponse_Event interface { + isConnectProcessResponse_Event() +} + +type ConnectProcessResponse_Start struct { + Start *ExecStreamStart `protobuf:"bytes,1,opt,name=start,proto3,oneof"` +} + +type ConnectProcessResponse_Data struct { + Data *ExecStreamData `protobuf:"bytes,2,opt,name=data,proto3,oneof"` +} + +type ConnectProcessResponse_End struct { + End *ExecStreamEnd `protobuf:"bytes,3,opt,name=end,proto3,oneof"` +} + +func (*ConnectProcessResponse_Start) isConnectProcessResponse_Event() {} + +func (*ConnectProcessResponse_Data) isConnectProcessResponse_Event() {} + +func (*ConnectProcessResponse_End) isConnectProcessResponse_Event() {} + var File_hostagent_proto protoreflect.FileDescriptor const file_hostagent_proto_rawDesc = "" + "\n" + - "\x0fhostagent.proto\x12\fhostagent.v1\"\x81\x02\n" + + "\x0fhostagent.proto\x12\fhostagent.v1\"\xb8\x03\n" + "\x14CreateSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x05 \x01(\tR\tsandboxId\x12\x1a\n" + @@ -2386,12 +4134,23 @@ const file_hostagent_proto_rawDesc = "" + "diskSizeMb\x12\x17\n" + "\ateam_id\x18\a \x01(\tR\x06teamId\x12\x1f\n" + "\vtemplate_id\x18\b \x01(\tR\n" + - "templateId\"g\n" + + "templateId\x12!\n" + + 
"\fdefault_user\x18\t \x01(\tR\vdefaultUser\x12S\n" + + "\vdefault_env\x18\n" + + " \x03(\v22.hostagent.v1.CreateSandboxRequest.DefaultEnvEntryR\n" + + "defaultEnv\x1a=\n" + + "\x0fDefaultEnvEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf3\x01\n" + "\x15CreateSandboxResponse\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + "\x06status\x18\x02 \x01(\tR\x06status\x12\x17\n" + - "\ahost_ip\x18\x03 \x01(\tR\x06hostIp\"6\n" + + "\ahost_ip\x18\x03 \x01(\tR\x06hostIp\x12M\n" + + "\bmetadata\x18\x04 \x03(\v21.hostagent.v1.CreateSandboxResponse.MetadataEntryR\bmetadata\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"6\n" + "\x15DestroySandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"\x18\n" + @@ -2399,17 +4158,28 @@ const file_hostagent_proto_rawDesc = "" + "\x13PauseSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"\x16\n" + - "\x14PauseSandboxResponse\"V\n" + + "\x14PauseSandboxResponse\"\xb4\x02\n" + "\x14ResumeSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x1f\n" + "\vtimeout_sec\x18\x02 \x01(\x05R\n" + - "timeoutSec\"g\n" + + "timeoutSec\x12!\n" + + "\fdefault_user\x18\x03 \x01(\tR\vdefaultUser\x12S\n" + + "\vdefault_env\x18\x04 \x03(\v22.hostagent.v1.ResumeSandboxRequest.DefaultEnvEntryR\n" + + "defaultEnv\x12%\n" + + "\x0ekernel_version\x18\x05 \x01(\tR\rkernelVersion\x1a=\n" + + "\x0fDefaultEnvEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf3\x01\n" + "\x15ResumeSandboxResponse\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + "\x06status\x18\x02 \x01(\tR\x06status\x12\x17\n" + - "\ahost_ip\x18\x03 \x01(\tR\x06hostIp\"\x84\x01\n" + + "\ahost_ip\x18\x03 \x01(\tR\x06hostIp\x12M\n" + + "\bmetadata\x18\x04 
\x03(\v21.hostagent.v1.ResumeSandboxResponse.MetadataEntryR\bmetadata\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x84\x01\n" + "\x15CreateSnapshotRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + @@ -2441,7 +4211,7 @@ const file_hostagent_proto_rawDesc = "" + "\x14ListSandboxesRequest\"\x87\x01\n" + "\x15ListSandboxesResponse\x127\n" + "\tsandboxes\x18\x01 \x03(\v2\x19.hostagent.v1.SandboxInfoR\tsandboxes\x125\n" + - "\x17auto_paused_sandbox_ids\x18\x02 \x03(\tR\x14autoPausedSandboxIds\"\xde\x02\n" + + "\x17auto_paused_sandbox_ids\x18\x02 \x03(\tR\x14autoPausedSandboxIds\"\xe0\x03\n" + "\vSandboxInfo\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + @@ -2457,7 +4227,11 @@ const file_hostagent_proto_rawDesc = "" + "\ateam_id\x18\n" + " \x01(\tR\x06teamId\x12\x1f\n" + "\vtemplate_id\x18\v \x01(\tR\n" + - "templateId\"_\n" + + "templateId\x12C\n" + + "\bmetadata\x18\f \x03(\v2'.hostagent.v1.SandboxInfo.MetadataEntryR\bmetadata\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"_\n" + "\x10WriteFileRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + @@ -2505,7 +4279,39 @@ const file_hostagent_proto_rawDesc = "" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + "\x04path\x18\x02 \x01(\tR\x04path\".\n" + "\x16ReadFileStreamResponse\x12\x14\n" + - "\x05chunk\x18\x01 \x01(\fR\x05chunk\"3\n" + + "\x05chunk\x18\x01 \x01(\fR\x05chunk\"Y\n" + + "\x0eListDirRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\x12\x14\n" + + "\x05depth\x18\x03 \x01(\rR\x05depth\"D\n" + + "\x0fListDirResponse\x121\n" + + "\aentries\x18\x01 \x03(\v2\x17.hostagent.v1.FileEntryR\aentries\"\x9d\x02\n" + + "\tFileEntry\x12\x12\n" + + "\x04name\x18\x01 
\x01(\tR\x04name\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x12\n" + + "\x04size\x18\x04 \x01(\x03R\x04size\x12\x12\n" + + "\x04mode\x18\x05 \x01(\rR\x04mode\x12 \n" + + "\vpermissions\x18\x06 \x01(\tR\vpermissions\x12\x14\n" + + "\x05owner\x18\a \x01(\tR\x05owner\x12\x14\n" + + "\x05group\x18\b \x01(\tR\x05group\x12\x1f\n" + + "\vmodified_at\x18\t \x01(\x03R\n" + + "modifiedAt\x12*\n" + + "\x0esymlink_target\x18\n" + + " \x01(\tH\x00R\rsymlinkTarget\x88\x01\x01B\x11\n" + + "\x0f_symlink_target\"C\n" + + "\x0eMakeDirRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\"@\n" + + "\x0fMakeDirResponse\x12-\n" + + "\x05entry\x18\x01 \x01(\v2\x17.hostagent.v1.FileEntryR\x05entry\"F\n" + + "\x11RemovePathRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\"\x14\n" + + "\x12RemovePathResponse\"3\n" + "\x12PingSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"\x15\n" + @@ -2542,7 +4348,98 @@ const file_hostagent_proto_rawDesc = "" + "templateId\"6\n" + "\x15FlattenRootfsResponse\x12\x1d\n" + "\n" + - "size_bytes\x18\x01 \x01(\x03R\tsizeBytes2\xc8\f\n" + + "size_bytes\x18\x01 \x01(\x03R\tsizeBytes\"\xae\x02\n" + + "\x10PtyAttachRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\x12\x10\n" + + "\x03cmd\x18\x03 \x01(\tR\x03cmd\x12\x12\n" + + "\x04args\x18\x04 \x03(\tR\x04args\x12\x12\n" + + "\x04cols\x18\x05 \x01(\rR\x04cols\x12\x12\n" + + "\x04rows\x18\x06 \x01(\rR\x04rows\x12<\n" + + "\x04envs\x18\a \x03(\v2(.hostagent.v1.PtyAttachRequest.EnvsEntryR\x04envs\x12\x10\n" + + "\x03cwd\x18\b \x01(\tR\x03cwd\x12\x12\n" + + "\x04user\x18\t \x01(\tR\x04user\x1a7\n" + + "\tEnvsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 
\x01(\tR\x05value:\x028\x01\"\xb8\x01\n" + + "\x11PtyAttachResponse\x124\n" + + "\astarted\x18\x01 \x01(\v2\x18.hostagent.v1.PtyStartedH\x00R\astarted\x121\n" + + "\x06output\x18\x02 \x01(\v2\x17.hostagent.v1.PtyOutputH\x00R\x06output\x121\n" + + "\x06exited\x18\x03 \x01(\v2\x17.hostagent.v1.PtyExitedH\x00R\x06exitedB\a\n" + + "\x05event\"0\n" + + "\n" + + "PtyStarted\x12\x10\n" + + "\x03pid\x18\x01 \x01(\rR\x03pid\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\"\x1f\n" + + "\tPtyOutput\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\">\n" + + "\tPtyExited\x12\x1b\n" + + "\texit_code\x18\x01 \x01(\x05R\bexitCode\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"Z\n" + + "\x13PtySendInputRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\x12\x12\n" + + "\x04data\x18\x03 \x01(\fR\x04data\"\x16\n" + + "\x14PtySendInputResponse\"k\n" + + "\x10PtyResizeRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\x12\x12\n" + + "\x04cols\x18\x03 \x01(\rR\x04cols\x12\x12\n" + + "\x04rows\x18\x04 \x01(\rR\x04rows\"\x13\n" + + "\x11PtyResizeResponse\"A\n" + + "\x0ePtyKillRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\"\x11\n" + + "\x0fPtyKillResponse\"\xfe\x01\n" + + "\x16StartBackgroundRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x10\n" + + "\x03cmd\x18\x02 \x01(\tR\x03cmd\x12\x12\n" + + "\x04args\x18\x03 \x03(\tR\x04args\x12\x10\n" + + "\x03tag\x18\x04 \x01(\tR\x03tag\x12B\n" + + "\x04envs\x18\x05 \x03(\v2..hostagent.v1.StartBackgroundRequest.EnvsEntryR\x04envs\x12\x10\n" + + "\x03cwd\x18\x06 \x01(\tR\x03cwd\x1a7\n" + + "\tEnvsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"=\n" + + "\x17StartBackgroundResponse\x12\x10\n" + + "\x03pid\x18\x01 \x01(\rR\x03pid\x12\x10\n" + 
+ "\x03tag\x18\x02 \x01(\tR\x03tag\"5\n" + + "\x14ListProcessesRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"X\n" + + "\fProcessEntry\x12\x10\n" + + "\x03pid\x18\x01 \x01(\rR\x03pid\x12\x10\n" + + "\x03tag\x18\x02 \x01(\tR\x03tag\x12\x10\n" + + "\x03cmd\x18\x03 \x01(\tR\x03cmd\x12\x12\n" + + "\x04args\x18\x04 \x03(\tR\x04args\"Q\n" + + "\x15ListProcessesResponse\x128\n" + + "\tprocesses\x18\x01 \x03(\v2\x1a.hostagent.v1.ProcessEntryR\tprocesses\"\x7f\n" + + "\x12KillProcessRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x03pid\x18\x02 \x01(\rH\x00R\x03pid\x12\x12\n" + + "\x03tag\x18\x03 \x01(\tH\x00R\x03tag\x12\x16\n" + + "\x06signal\x18\x04 \x01(\tR\x06signalB\n" + + "\n" + + "\bselector\"\x15\n" + + "\x13KillProcessResponse\"j\n" + + "\x15ConnectProcessRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x03pid\x18\x02 \x01(\rH\x00R\x03pid\x12\x12\n" + + "\x03tag\x18\x03 \x01(\tH\x00R\x03tagB\n" + + "\n" + + "\bselector\"\xbd\x01\n" + + "\x16ConnectProcessResponse\x125\n" + + "\x05start\x18\x01 \x01(\v2\x1d.hostagent.v1.ExecStreamStartH\x00R\x05start\x122\n" + + "\x04data\x18\x02 \x01(\v2\x1c.hostagent.v1.ExecStreamDataH\x00R\x04data\x12/\n" + + "\x03end\x18\x03 \x01(\v2\x1b.hostagent.v1.ExecStreamEndH\x00R\x03endB\a\n" + + "\x05event2\xd3\x13\n" + "\x10HostAgentService\x12X\n" + "\rCreateSandbox\x12\".hostagent.v1.CreateSandboxRequest\x1a#.hostagent.v1.CreateSandboxResponse\x12[\n" + "\x0eDestroySandbox\x12#.hostagent.v1.DestroySandboxRequest\x1a$.hostagent.v1.DestroySandboxResponse\x12U\n" + @@ -2551,7 +4448,11 @@ const file_hostagent_proto_rawDesc = "" + "\x04Exec\x12\x19.hostagent.v1.ExecRequest\x1a\x1a.hostagent.v1.ExecResponse\x12X\n" + "\rListSandboxes\x12\".hostagent.v1.ListSandboxesRequest\x1a#.hostagent.v1.ListSandboxesResponse\x12L\n" + "\tWriteFile\x12\x1e.hostagent.v1.WriteFileRequest\x1a\x1f.hostagent.v1.WriteFileResponse\x12I\n" + - 
"\bReadFile\x12\x1d.hostagent.v1.ReadFileRequest\x1a\x1e.hostagent.v1.ReadFileResponse\x12[\n" + + "\bReadFile\x12\x1d.hostagent.v1.ReadFileRequest\x1a\x1e.hostagent.v1.ReadFileResponse\x12F\n" + + "\aListDir\x12\x1c.hostagent.v1.ListDirRequest\x1a\x1d.hostagent.v1.ListDirResponse\x12F\n" + + "\aMakeDir\x12\x1c.hostagent.v1.MakeDirRequest\x1a\x1d.hostagent.v1.MakeDirResponse\x12O\n" + + "\n" + + "RemovePath\x12\x1f.hostagent.v1.RemovePathRequest\x1a .hostagent.v1.RemovePathResponse\x12[\n" + "\x0eCreateSnapshot\x12#.hostagent.v1.CreateSnapshotRequest\x1a$.hostagent.v1.CreateSnapshotResponse\x12[\n" + "\x0eDeleteSnapshot\x12#.hostagent.v1.DeleteSnapshotRequest\x1a$.hostagent.v1.DeleteSnapshotResponse\x12Q\n" + "\n" + @@ -2562,7 +4463,15 @@ const file_hostagent_proto_rawDesc = "" + "\tTerminate\x12\x1e.hostagent.v1.TerminateRequest\x1a\x1f.hostagent.v1.TerminateResponse\x12d\n" + "\x11GetSandboxMetrics\x12&.hostagent.v1.GetSandboxMetricsRequest\x1a'.hostagent.v1.GetSandboxMetricsResponse\x12j\n" + "\x13FlushSandboxMetrics\x12(.hostagent.v1.FlushSandboxMetricsRequest\x1a).hostagent.v1.FlushSandboxMetricsResponse\x12X\n" + - "\rFlattenRootfs\x12\".hostagent.v1.FlattenRootfsRequest\x1a#.hostagent.v1.FlattenRootfsResponseB\xae\x01\n" + + "\rFlattenRootfs\x12\".hostagent.v1.FlattenRootfsRequest\x1a#.hostagent.v1.FlattenRootfsResponse\x12N\n" + + "\tPtyAttach\x12\x1e.hostagent.v1.PtyAttachRequest\x1a\x1f.hostagent.v1.PtyAttachResponse0\x01\x12U\n" + + "\fPtySendInput\x12!.hostagent.v1.PtySendInputRequest\x1a\".hostagent.v1.PtySendInputResponse\x12L\n" + + "\tPtyResize\x12\x1e.hostagent.v1.PtyResizeRequest\x1a\x1f.hostagent.v1.PtyResizeResponse\x12F\n" + + "\aPtyKill\x12\x1c.hostagent.v1.PtyKillRequest\x1a\x1d.hostagent.v1.PtyKillResponse\x12^\n" + + "\x0fStartBackground\x12$.hostagent.v1.StartBackgroundRequest\x1a%.hostagent.v1.StartBackgroundResponse\x12X\n" + + "\rListProcesses\x12\".hostagent.v1.ListProcessesRequest\x1a#.hostagent.v1.ListProcessesResponse\x12R\n" + + 
"\vKillProcess\x12 .hostagent.v1.KillProcessRequest\x1a!.hostagent.v1.KillProcessResponse\x12]\n" + + "\x0eConnectProcess\x12#.hostagent.v1.ConnectProcessRequest\x1a$.hostagent.v1.ConnectProcessResponse0\x01B\xae\x01\n" + "\x10com.hostagent.v1B\x0eHostagentProtoP\x01Z9git.omukk.dev/wrenn/wrenn/proto/hostagent/gen;hostagentv1\xa2\x02\x03HXX\xaa\x02\fHostagent.V1\xca\x02\fHostagent\\V1\xe2\x02\x18Hostagent\\V1\\GPBMetadata\xea\x02\rHostagent::V1b\x06proto3" var ( @@ -2577,7 +4486,7 @@ func file_hostagent_proto_rawDescGZIP() []byte { return file_hostagent_proto_rawDescData } -var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 76) var file_hostagent_proto_goTypes = []any{ (*CreateSandboxRequest)(nil), // 0: hostagent.v1.CreateSandboxRequest (*CreateSandboxResponse)(nil), // 1: hostagent.v1.CreateSandboxResponse @@ -2610,69 +4519,141 @@ var file_hostagent_proto_goTypes = []any{ (*WriteFileStreamResponse)(nil), // 28: hostagent.v1.WriteFileStreamResponse (*ReadFileStreamRequest)(nil), // 29: hostagent.v1.ReadFileStreamRequest (*ReadFileStreamResponse)(nil), // 30: hostagent.v1.ReadFileStreamResponse - (*PingSandboxRequest)(nil), // 31: hostagent.v1.PingSandboxRequest - (*PingSandboxResponse)(nil), // 32: hostagent.v1.PingSandboxResponse - (*TerminateRequest)(nil), // 33: hostagent.v1.TerminateRequest - (*TerminateResponse)(nil), // 34: hostagent.v1.TerminateResponse - (*MetricPoint)(nil), // 35: hostagent.v1.MetricPoint - (*GetSandboxMetricsRequest)(nil), // 36: hostagent.v1.GetSandboxMetricsRequest - (*GetSandboxMetricsResponse)(nil), // 37: hostagent.v1.GetSandboxMetricsResponse - (*FlushSandboxMetricsRequest)(nil), // 38: hostagent.v1.FlushSandboxMetricsRequest - (*FlushSandboxMetricsResponse)(nil), // 39: hostagent.v1.FlushSandboxMetricsResponse - (*FlattenRootfsRequest)(nil), // 40: hostagent.v1.FlattenRootfsRequest - (*FlattenRootfsResponse)(nil), // 41: 
hostagent.v1.FlattenRootfsResponse + (*ListDirRequest)(nil), // 31: hostagent.v1.ListDirRequest + (*ListDirResponse)(nil), // 32: hostagent.v1.ListDirResponse + (*FileEntry)(nil), // 33: hostagent.v1.FileEntry + (*MakeDirRequest)(nil), // 34: hostagent.v1.MakeDirRequest + (*MakeDirResponse)(nil), // 35: hostagent.v1.MakeDirResponse + (*RemovePathRequest)(nil), // 36: hostagent.v1.RemovePathRequest + (*RemovePathResponse)(nil), // 37: hostagent.v1.RemovePathResponse + (*PingSandboxRequest)(nil), // 38: hostagent.v1.PingSandboxRequest + (*PingSandboxResponse)(nil), // 39: hostagent.v1.PingSandboxResponse + (*TerminateRequest)(nil), // 40: hostagent.v1.TerminateRequest + (*TerminateResponse)(nil), // 41: hostagent.v1.TerminateResponse + (*MetricPoint)(nil), // 42: hostagent.v1.MetricPoint + (*GetSandboxMetricsRequest)(nil), // 43: hostagent.v1.GetSandboxMetricsRequest + (*GetSandboxMetricsResponse)(nil), // 44: hostagent.v1.GetSandboxMetricsResponse + (*FlushSandboxMetricsRequest)(nil), // 45: hostagent.v1.FlushSandboxMetricsRequest + (*FlushSandboxMetricsResponse)(nil), // 46: hostagent.v1.FlushSandboxMetricsResponse + (*FlattenRootfsRequest)(nil), // 47: hostagent.v1.FlattenRootfsRequest + (*FlattenRootfsResponse)(nil), // 48: hostagent.v1.FlattenRootfsResponse + (*PtyAttachRequest)(nil), // 49: hostagent.v1.PtyAttachRequest + (*PtyAttachResponse)(nil), // 50: hostagent.v1.PtyAttachResponse + (*PtyStarted)(nil), // 51: hostagent.v1.PtyStarted + (*PtyOutput)(nil), // 52: hostagent.v1.PtyOutput + (*PtyExited)(nil), // 53: hostagent.v1.PtyExited + (*PtySendInputRequest)(nil), // 54: hostagent.v1.PtySendInputRequest + (*PtySendInputResponse)(nil), // 55: hostagent.v1.PtySendInputResponse + (*PtyResizeRequest)(nil), // 56: hostagent.v1.PtyResizeRequest + (*PtyResizeResponse)(nil), // 57: hostagent.v1.PtyResizeResponse + (*PtyKillRequest)(nil), // 58: hostagent.v1.PtyKillRequest + (*PtyKillResponse)(nil), // 59: hostagent.v1.PtyKillResponse + 
(*StartBackgroundRequest)(nil), // 60: hostagent.v1.StartBackgroundRequest + (*StartBackgroundResponse)(nil), // 61: hostagent.v1.StartBackgroundResponse + (*ListProcessesRequest)(nil), // 62: hostagent.v1.ListProcessesRequest + (*ProcessEntry)(nil), // 63: hostagent.v1.ProcessEntry + (*ListProcessesResponse)(nil), // 64: hostagent.v1.ListProcessesResponse + (*KillProcessRequest)(nil), // 65: hostagent.v1.KillProcessRequest + (*KillProcessResponse)(nil), // 66: hostagent.v1.KillProcessResponse + (*ConnectProcessRequest)(nil), // 67: hostagent.v1.ConnectProcessRequest + (*ConnectProcessResponse)(nil), // 68: hostagent.v1.ConnectProcessResponse + nil, // 69: hostagent.v1.CreateSandboxRequest.DefaultEnvEntry + nil, // 70: hostagent.v1.CreateSandboxResponse.MetadataEntry + nil, // 71: hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry + nil, // 72: hostagent.v1.ResumeSandboxResponse.MetadataEntry + nil, // 73: hostagent.v1.SandboxInfo.MetadataEntry + nil, // 74: hostagent.v1.PtyAttachRequest.EnvsEntry + nil, // 75: hostagent.v1.StartBackgroundRequest.EnvsEntry } var file_hostagent_proto_depIdxs = []int32{ - 16, // 0: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo - 23, // 1: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart - 24, // 2: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData - 25, // 3: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd - 27, // 4: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta - 35, // 5: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint - 35, // 6: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint - 35, // 7: hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint - 35, // 8: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint - 0, // 9: 
hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest - 2, // 10: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest - 4, // 11: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest - 6, // 12: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest - 12, // 13: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest - 14, // 14: hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest - 17, // 15: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest - 19, // 16: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest - 8, // 17: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest - 10, // 18: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest - 21, // 19: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest - 26, // 20: hostagent.v1.HostAgentService.WriteFileStream:input_type -> hostagent.v1.WriteFileStreamRequest - 29, // 21: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest - 31, // 22: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest - 33, // 23: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest - 36, // 24: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest - 38, // 25: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest - 40, // 26: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest - 1, // 27: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse - 3, // 28: 
hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse - 5, // 29: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse - 7, // 30: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse - 13, // 31: hostagent.v1.HostAgentService.Exec:output_type -> hostagent.v1.ExecResponse - 15, // 32: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse - 18, // 33: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse - 20, // 34: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse - 9, // 35: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse - 11, // 36: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse - 22, // 37: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse - 28, // 38: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse - 30, // 39: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse - 32, // 40: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse - 34, // 41: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse - 37, // 42: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse - 39, // 43: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse - 41, // 44: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse - 27, // [27:45] is the sub-list for method output_type - 9, // [9:27] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, 
// [0:9] is the sub-list for field type_name + 69, // 0: hostagent.v1.CreateSandboxRequest.default_env:type_name -> hostagent.v1.CreateSandboxRequest.DefaultEnvEntry + 70, // 1: hostagent.v1.CreateSandboxResponse.metadata:type_name -> hostagent.v1.CreateSandboxResponse.MetadataEntry + 71, // 2: hostagent.v1.ResumeSandboxRequest.default_env:type_name -> hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry + 72, // 3: hostagent.v1.ResumeSandboxResponse.metadata:type_name -> hostagent.v1.ResumeSandboxResponse.MetadataEntry + 16, // 4: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo + 73, // 5: hostagent.v1.SandboxInfo.metadata:type_name -> hostagent.v1.SandboxInfo.MetadataEntry + 23, // 6: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart + 24, // 7: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData + 25, // 8: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd + 27, // 9: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta + 33, // 10: hostagent.v1.ListDirResponse.entries:type_name -> hostagent.v1.FileEntry + 33, // 11: hostagent.v1.MakeDirResponse.entry:type_name -> hostagent.v1.FileEntry + 42, // 12: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint + 42, // 13: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint + 42, // 14: hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint + 42, // 15: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint + 74, // 16: hostagent.v1.PtyAttachRequest.envs:type_name -> hostagent.v1.PtyAttachRequest.EnvsEntry + 51, // 17: hostagent.v1.PtyAttachResponse.started:type_name -> hostagent.v1.PtyStarted + 52, // 18: hostagent.v1.PtyAttachResponse.output:type_name -> hostagent.v1.PtyOutput + 53, // 19: hostagent.v1.PtyAttachResponse.exited:type_name 
-> hostagent.v1.PtyExited + 75, // 20: hostagent.v1.StartBackgroundRequest.envs:type_name -> hostagent.v1.StartBackgroundRequest.EnvsEntry + 63, // 21: hostagent.v1.ListProcessesResponse.processes:type_name -> hostagent.v1.ProcessEntry + 23, // 22: hostagent.v1.ConnectProcessResponse.start:type_name -> hostagent.v1.ExecStreamStart + 24, // 23: hostagent.v1.ConnectProcessResponse.data:type_name -> hostagent.v1.ExecStreamData + 25, // 24: hostagent.v1.ConnectProcessResponse.end:type_name -> hostagent.v1.ExecStreamEnd + 0, // 25: hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest + 2, // 26: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest + 4, // 27: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest + 6, // 28: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest + 12, // 29: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest + 14, // 30: hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest + 17, // 31: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest + 19, // 32: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest + 31, // 33: hostagent.v1.HostAgentService.ListDir:input_type -> hostagent.v1.ListDirRequest + 34, // 34: hostagent.v1.HostAgentService.MakeDir:input_type -> hostagent.v1.MakeDirRequest + 36, // 35: hostagent.v1.HostAgentService.RemovePath:input_type -> hostagent.v1.RemovePathRequest + 8, // 36: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest + 10, // 37: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest + 21, // 38: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest + 26, // 39: hostagent.v1.HostAgentService.WriteFileStream:input_type -> 
hostagent.v1.WriteFileStreamRequest + 29, // 40: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest + 38, // 41: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest + 40, // 42: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest + 43, // 43: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest + 45, // 44: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest + 47, // 45: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest + 49, // 46: hostagent.v1.HostAgentService.PtyAttach:input_type -> hostagent.v1.PtyAttachRequest + 54, // 47: hostagent.v1.HostAgentService.PtySendInput:input_type -> hostagent.v1.PtySendInputRequest + 56, // 48: hostagent.v1.HostAgentService.PtyResize:input_type -> hostagent.v1.PtyResizeRequest + 58, // 49: hostagent.v1.HostAgentService.PtyKill:input_type -> hostagent.v1.PtyKillRequest + 60, // 50: hostagent.v1.HostAgentService.StartBackground:input_type -> hostagent.v1.StartBackgroundRequest + 62, // 51: hostagent.v1.HostAgentService.ListProcesses:input_type -> hostagent.v1.ListProcessesRequest + 65, // 52: hostagent.v1.HostAgentService.KillProcess:input_type -> hostagent.v1.KillProcessRequest + 67, // 53: hostagent.v1.HostAgentService.ConnectProcess:input_type -> hostagent.v1.ConnectProcessRequest + 1, // 54: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse + 3, // 55: hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse + 5, // 56: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse + 7, // 57: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse + 13, // 58: hostagent.v1.HostAgentService.Exec:output_type -> 
hostagent.v1.ExecResponse + 15, // 59: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse + 18, // 60: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse + 20, // 61: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse + 32, // 62: hostagent.v1.HostAgentService.ListDir:output_type -> hostagent.v1.ListDirResponse + 35, // 63: hostagent.v1.HostAgentService.MakeDir:output_type -> hostagent.v1.MakeDirResponse + 37, // 64: hostagent.v1.HostAgentService.RemovePath:output_type -> hostagent.v1.RemovePathResponse + 9, // 65: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse + 11, // 66: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse + 22, // 67: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse + 28, // 68: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse + 30, // 69: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse + 39, // 70: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse + 41, // 71: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse + 44, // 72: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse + 46, // 73: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse + 48, // 74: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse + 50, // 75: hostagent.v1.HostAgentService.PtyAttach:output_type -> hostagent.v1.PtyAttachResponse + 55, // 76: hostagent.v1.HostAgentService.PtySendInput:output_type -> hostagent.v1.PtySendInputResponse + 57, // 77: hostagent.v1.HostAgentService.PtyResize:output_type -> 
hostagent.v1.PtyResizeResponse + 59, // 78: hostagent.v1.HostAgentService.PtyKill:output_type -> hostagent.v1.PtyKillResponse + 61, // 79: hostagent.v1.HostAgentService.StartBackground:output_type -> hostagent.v1.StartBackgroundResponse + 64, // 80: hostagent.v1.HostAgentService.ListProcesses:output_type -> hostagent.v1.ListProcessesResponse + 66, // 81: hostagent.v1.HostAgentService.KillProcess:output_type -> hostagent.v1.KillProcessResponse + 68, // 82: hostagent.v1.HostAgentService.ConnectProcess:output_type -> hostagent.v1.ConnectProcessResponse + 54, // [54:83] is the sub-list for method output_type + 25, // [25:54] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_hostagent_proto_init() } @@ -2693,13 +4674,32 @@ func file_hostagent_proto_init() { (*WriteFileStreamRequest_Meta)(nil), (*WriteFileStreamRequest_Chunk)(nil), } + file_hostagent_proto_msgTypes[33].OneofWrappers = []any{} + file_hostagent_proto_msgTypes[50].OneofWrappers = []any{ + (*PtyAttachResponse_Started)(nil), + (*PtyAttachResponse_Output)(nil), + (*PtyAttachResponse_Exited)(nil), + } + file_hostagent_proto_msgTypes[65].OneofWrappers = []any{ + (*KillProcessRequest_Pid)(nil), + (*KillProcessRequest_Tag)(nil), + } + file_hostagent_proto_msgTypes[67].OneofWrappers = []any{ + (*ConnectProcessRequest_Pid)(nil), + (*ConnectProcessRequest_Tag)(nil), + } + file_hostagent_proto_msgTypes[68].OneofWrappers = []any{ + (*ConnectProcessResponse_Start)(nil), + (*ConnectProcessResponse_Data)(nil), + (*ConnectProcessResponse_End)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_hostagent_proto_rawDesc), len(file_hostagent_proto_rawDesc)), NumEnums: 0, - NumMessages: 42, + NumMessages: 76, NumExtensions: 0, 
NumServices: 1, }, diff --git a/proto/hostagent/gen/hostagentv1connect/hostagent.connect.go b/proto/hostagent/gen/hostagentv1connect/hostagent.connect.go index 924f4b3..16f30ad 100644 --- a/proto/hostagent/gen/hostagentv1connect/hostagent.connect.go +++ b/proto/hostagent/gen/hostagentv1connect/hostagent.connect.go @@ -56,6 +56,15 @@ const ( // HostAgentServiceReadFileProcedure is the fully-qualified name of the HostAgentService's ReadFile // RPC. HostAgentServiceReadFileProcedure = "/hostagent.v1.HostAgentService/ReadFile" + // HostAgentServiceListDirProcedure is the fully-qualified name of the HostAgentService's ListDir + // RPC. + HostAgentServiceListDirProcedure = "/hostagent.v1.HostAgentService/ListDir" + // HostAgentServiceMakeDirProcedure is the fully-qualified name of the HostAgentService's MakeDir + // RPC. + HostAgentServiceMakeDirProcedure = "/hostagent.v1.HostAgentService/MakeDir" + // HostAgentServiceRemovePathProcedure is the fully-qualified name of the HostAgentService's + // RemovePath RPC. + HostAgentServiceRemovePathProcedure = "/hostagent.v1.HostAgentService/RemovePath" // HostAgentServiceCreateSnapshotProcedure is the fully-qualified name of the HostAgentService's // CreateSnapshot RPC. HostAgentServiceCreateSnapshotProcedure = "/hostagent.v1.HostAgentService/CreateSnapshot" @@ -86,6 +95,30 @@ const ( // HostAgentServiceFlattenRootfsProcedure is the fully-qualified name of the HostAgentService's // FlattenRootfs RPC. HostAgentServiceFlattenRootfsProcedure = "/hostagent.v1.HostAgentService/FlattenRootfs" + // HostAgentServicePtyAttachProcedure is the fully-qualified name of the HostAgentService's + // PtyAttach RPC. + HostAgentServicePtyAttachProcedure = "/hostagent.v1.HostAgentService/PtyAttach" + // HostAgentServicePtySendInputProcedure is the fully-qualified name of the HostAgentService's + // PtySendInput RPC. 
+ HostAgentServicePtySendInputProcedure = "/hostagent.v1.HostAgentService/PtySendInput" + // HostAgentServicePtyResizeProcedure is the fully-qualified name of the HostAgentService's + // PtyResize RPC. + HostAgentServicePtyResizeProcedure = "/hostagent.v1.HostAgentService/PtyResize" + // HostAgentServicePtyKillProcedure is the fully-qualified name of the HostAgentService's PtyKill + // RPC. + HostAgentServicePtyKillProcedure = "/hostagent.v1.HostAgentService/PtyKill" + // HostAgentServiceStartBackgroundProcedure is the fully-qualified name of the HostAgentService's + // StartBackground RPC. + HostAgentServiceStartBackgroundProcedure = "/hostagent.v1.HostAgentService/StartBackground" + // HostAgentServiceListProcessesProcedure is the fully-qualified name of the HostAgentService's + // ListProcesses RPC. + HostAgentServiceListProcessesProcedure = "/hostagent.v1.HostAgentService/ListProcesses" + // HostAgentServiceKillProcessProcedure is the fully-qualified name of the HostAgentService's + // KillProcess RPC. + HostAgentServiceKillProcessProcedure = "/hostagent.v1.HostAgentService/KillProcess" + // HostAgentServiceConnectProcessProcedure is the fully-qualified name of the HostAgentService's + // ConnectProcess RPC. + HostAgentServiceConnectProcessProcedure = "/hostagent.v1.HostAgentService/ConnectProcess" ) // HostAgentServiceClient is a client for the hostagent.v1.HostAgentService service. @@ -106,6 +139,12 @@ type HostAgentServiceClient interface { WriteFile(context.Context, *connect.Request[gen.WriteFileRequest]) (*connect.Response[gen.WriteFileResponse], error) // ReadFile reads a file from inside a sandbox. ReadFile(context.Context, *connect.Request[gen.ReadFileRequest]) (*connect.Response[gen.ReadFileResponse], error) + // ListDir lists directory contents inside a sandbox. + ListDir(context.Context, *connect.Request[gen.ListDirRequest]) (*connect.Response[gen.ListDirResponse], error) + // MakeDir creates a directory inside a sandbox. 
+ MakeDir(context.Context, *connect.Request[gen.MakeDirRequest]) (*connect.Response[gen.MakeDirResponse], error) + // RemovePath removes a file or directory inside a sandbox. + RemovePath(context.Context, *connect.Request[gen.RemovePathRequest]) (*connect.Response[gen.RemovePathResponse], error) // CreateSnapshot pauses a sandbox, takes a snapshot, stores it as a reusable // template, and destroys the sandbox. CreateSnapshot(context.Context, *connect.Request[gen.CreateSnapshotRequest]) (*connect.Response[gen.CreateSnapshotResponse], error) @@ -134,6 +173,26 @@ type HostAgentServiceClient interface { // cleans up all sandbox resources. Used by the template build system to // produce image-only templates (no memory/CPU state). FlattenRootfs(context.Context, *connect.Request[gen.FlattenRootfsRequest]) (*connect.Response[gen.FlattenRootfsResponse], error) + // PtyAttach starts a new PTY process or reconnects to an existing one. + // If cmd is non-empty, starts a new process with the given PTY dimensions. + // If tag is set and cmd is empty, reconnects to the existing process with that tag. + // Returns a stream of output events (started, output data, exit). + PtyAttach(context.Context, *connect.Request[gen.PtyAttachRequest]) (*connect.ServerStreamForClient[gen.PtyAttachResponse], error) + // PtySendInput sends raw bytes to a PTY process identified by tag. + PtySendInput(context.Context, *connect.Request[gen.PtySendInputRequest]) (*connect.Response[gen.PtySendInputResponse], error) + // PtyResize updates the terminal dimensions for a PTY process. + PtyResize(context.Context, *connect.Request[gen.PtyResizeRequest]) (*connect.Response[gen.PtyResizeResponse], error) + // PtyKill sends a signal to a PTY process. + PtyKill(context.Context, *connect.Request[gen.PtyKillRequest]) (*connect.Response[gen.PtyKillResponse], error) + // StartBackground starts a process in the background and returns immediately + // with the PID and tag. The process survives RPC disconnection. 
+ StartBackground(context.Context, *connect.Request[gen.StartBackgroundRequest]) (*connect.Response[gen.StartBackgroundResponse], error) + // ListProcesses returns all running processes inside a sandbox. + ListProcesses(context.Context, *connect.Request[gen.ListProcessesRequest]) (*connect.Response[gen.ListProcessesResponse], error) + // KillProcess sends a signal to a process identified by PID or tag. + KillProcess(context.Context, *connect.Request[gen.KillProcessRequest]) (*connect.Response[gen.KillProcessResponse], error) + // ConnectProcess re-attaches to a running process and streams its output. + ConnectProcess(context.Context, *connect.Request[gen.ConnectProcessRequest]) (*connect.ServerStreamForClient[gen.ConnectProcessResponse], error) } // NewHostAgentServiceClient constructs a client for the hostagent.v1.HostAgentService service. By @@ -195,6 +254,24 @@ func NewHostAgentServiceClient(httpClient connect.HTTPClient, baseURL string, op connect.WithSchema(hostAgentServiceMethods.ByName("ReadFile")), connect.WithClientOptions(opts...), ), + listDir: connect.NewClient[gen.ListDirRequest, gen.ListDirResponse]( + httpClient, + baseURL+HostAgentServiceListDirProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("ListDir")), + connect.WithClientOptions(opts...), + ), + makeDir: connect.NewClient[gen.MakeDirRequest, gen.MakeDirResponse]( + httpClient, + baseURL+HostAgentServiceMakeDirProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("MakeDir")), + connect.WithClientOptions(opts...), + ), + removePath: connect.NewClient[gen.RemovePathRequest, gen.RemovePathResponse]( + httpClient, + baseURL+HostAgentServiceRemovePathProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("RemovePath")), + connect.WithClientOptions(opts...), + ), createSnapshot: connect.NewClient[gen.CreateSnapshotRequest, gen.CreateSnapshotResponse]( httpClient, baseURL+HostAgentServiceCreateSnapshotProcedure, @@ -255,6 +332,54 @@ func 
NewHostAgentServiceClient(httpClient connect.HTTPClient, baseURL string, op connect.WithSchema(hostAgentServiceMethods.ByName("FlattenRootfs")), connect.WithClientOptions(opts...), ), + ptyAttach: connect.NewClient[gen.PtyAttachRequest, gen.PtyAttachResponse]( + httpClient, + baseURL+HostAgentServicePtyAttachProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyAttach")), + connect.WithClientOptions(opts...), + ), + ptySendInput: connect.NewClient[gen.PtySendInputRequest, gen.PtySendInputResponse]( + httpClient, + baseURL+HostAgentServicePtySendInputProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("PtySendInput")), + connect.WithClientOptions(opts...), + ), + ptyResize: connect.NewClient[gen.PtyResizeRequest, gen.PtyResizeResponse]( + httpClient, + baseURL+HostAgentServicePtyResizeProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyResize")), + connect.WithClientOptions(opts...), + ), + ptyKill: connect.NewClient[gen.PtyKillRequest, gen.PtyKillResponse]( + httpClient, + baseURL+HostAgentServicePtyKillProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyKill")), + connect.WithClientOptions(opts...), + ), + startBackground: connect.NewClient[gen.StartBackgroundRequest, gen.StartBackgroundResponse]( + httpClient, + baseURL+HostAgentServiceStartBackgroundProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("StartBackground")), + connect.WithClientOptions(opts...), + ), + listProcesses: connect.NewClient[gen.ListProcessesRequest, gen.ListProcessesResponse]( + httpClient, + baseURL+HostAgentServiceListProcessesProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("ListProcesses")), + connect.WithClientOptions(opts...), + ), + killProcess: connect.NewClient[gen.KillProcessRequest, gen.KillProcessResponse]( + httpClient, + baseURL+HostAgentServiceKillProcessProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("KillProcess")), + connect.WithClientOptions(opts...), + ), + connectProcess: 
connect.NewClient[gen.ConnectProcessRequest, gen.ConnectProcessResponse]( + httpClient, + baseURL+HostAgentServiceConnectProcessProcedure, + connect.WithSchema(hostAgentServiceMethods.ByName("ConnectProcess")), + connect.WithClientOptions(opts...), + ), } } @@ -268,6 +393,9 @@ type hostAgentServiceClient struct { listSandboxes *connect.Client[gen.ListSandboxesRequest, gen.ListSandboxesResponse] writeFile *connect.Client[gen.WriteFileRequest, gen.WriteFileResponse] readFile *connect.Client[gen.ReadFileRequest, gen.ReadFileResponse] + listDir *connect.Client[gen.ListDirRequest, gen.ListDirResponse] + makeDir *connect.Client[gen.MakeDirRequest, gen.MakeDirResponse] + removePath *connect.Client[gen.RemovePathRequest, gen.RemovePathResponse] createSnapshot *connect.Client[gen.CreateSnapshotRequest, gen.CreateSnapshotResponse] deleteSnapshot *connect.Client[gen.DeleteSnapshotRequest, gen.DeleteSnapshotResponse] execStream *connect.Client[gen.ExecStreamRequest, gen.ExecStreamResponse] @@ -278,6 +406,14 @@ type hostAgentServiceClient struct { getSandboxMetrics *connect.Client[gen.GetSandboxMetricsRequest, gen.GetSandboxMetricsResponse] flushSandboxMetrics *connect.Client[gen.FlushSandboxMetricsRequest, gen.FlushSandboxMetricsResponse] flattenRootfs *connect.Client[gen.FlattenRootfsRequest, gen.FlattenRootfsResponse] + ptyAttach *connect.Client[gen.PtyAttachRequest, gen.PtyAttachResponse] + ptySendInput *connect.Client[gen.PtySendInputRequest, gen.PtySendInputResponse] + ptyResize *connect.Client[gen.PtyResizeRequest, gen.PtyResizeResponse] + ptyKill *connect.Client[gen.PtyKillRequest, gen.PtyKillResponse] + startBackground *connect.Client[gen.StartBackgroundRequest, gen.StartBackgroundResponse] + listProcesses *connect.Client[gen.ListProcessesRequest, gen.ListProcessesResponse] + killProcess *connect.Client[gen.KillProcessRequest, gen.KillProcessResponse] + connectProcess *connect.Client[gen.ConnectProcessRequest, gen.ConnectProcessResponse] } // CreateSandbox calls 
hostagent.v1.HostAgentService.CreateSandbox. @@ -320,6 +456,21 @@ func (c *hostAgentServiceClient) ReadFile(ctx context.Context, req *connect.Requ return c.readFile.CallUnary(ctx, req) } +// ListDir calls hostagent.v1.HostAgentService.ListDir. +func (c *hostAgentServiceClient) ListDir(ctx context.Context, req *connect.Request[gen.ListDirRequest]) (*connect.Response[gen.ListDirResponse], error) { + return c.listDir.CallUnary(ctx, req) +} + +// MakeDir calls hostagent.v1.HostAgentService.MakeDir. +func (c *hostAgentServiceClient) MakeDir(ctx context.Context, req *connect.Request[gen.MakeDirRequest]) (*connect.Response[gen.MakeDirResponse], error) { + return c.makeDir.CallUnary(ctx, req) +} + +// RemovePath calls hostagent.v1.HostAgentService.RemovePath. +func (c *hostAgentServiceClient) RemovePath(ctx context.Context, req *connect.Request[gen.RemovePathRequest]) (*connect.Response[gen.RemovePathResponse], error) { + return c.removePath.CallUnary(ctx, req) +} + // CreateSnapshot calls hostagent.v1.HostAgentService.CreateSnapshot. func (c *hostAgentServiceClient) CreateSnapshot(ctx context.Context, req *connect.Request[gen.CreateSnapshotRequest]) (*connect.Response[gen.CreateSnapshotResponse], error) { return c.createSnapshot.CallUnary(ctx, req) @@ -370,6 +521,46 @@ func (c *hostAgentServiceClient) FlattenRootfs(ctx context.Context, req *connect return c.flattenRootfs.CallUnary(ctx, req) } +// PtyAttach calls hostagent.v1.HostAgentService.PtyAttach. +func (c *hostAgentServiceClient) PtyAttach(ctx context.Context, req *connect.Request[gen.PtyAttachRequest]) (*connect.ServerStreamForClient[gen.PtyAttachResponse], error) { + return c.ptyAttach.CallServerStream(ctx, req) +} + +// PtySendInput calls hostagent.v1.HostAgentService.PtySendInput. 
+func (c *hostAgentServiceClient) PtySendInput(ctx context.Context, req *connect.Request[gen.PtySendInputRequest]) (*connect.Response[gen.PtySendInputResponse], error) { + return c.ptySendInput.CallUnary(ctx, req) +} + +// PtyResize calls hostagent.v1.HostAgentService.PtyResize. +func (c *hostAgentServiceClient) PtyResize(ctx context.Context, req *connect.Request[gen.PtyResizeRequest]) (*connect.Response[gen.PtyResizeResponse], error) { + return c.ptyResize.CallUnary(ctx, req) +} + +// PtyKill calls hostagent.v1.HostAgentService.PtyKill. +func (c *hostAgentServiceClient) PtyKill(ctx context.Context, req *connect.Request[gen.PtyKillRequest]) (*connect.Response[gen.PtyKillResponse], error) { + return c.ptyKill.CallUnary(ctx, req) +} + +// StartBackground calls hostagent.v1.HostAgentService.StartBackground. +func (c *hostAgentServiceClient) StartBackground(ctx context.Context, req *connect.Request[gen.StartBackgroundRequest]) (*connect.Response[gen.StartBackgroundResponse], error) { + return c.startBackground.CallUnary(ctx, req) +} + +// ListProcesses calls hostagent.v1.HostAgentService.ListProcesses. +func (c *hostAgentServiceClient) ListProcesses(ctx context.Context, req *connect.Request[gen.ListProcessesRequest]) (*connect.Response[gen.ListProcessesResponse], error) { + return c.listProcesses.CallUnary(ctx, req) +} + +// KillProcess calls hostagent.v1.HostAgentService.KillProcess. +func (c *hostAgentServiceClient) KillProcess(ctx context.Context, req *connect.Request[gen.KillProcessRequest]) (*connect.Response[gen.KillProcessResponse], error) { + return c.killProcess.CallUnary(ctx, req) +} + +// ConnectProcess calls hostagent.v1.HostAgentService.ConnectProcess. 
+func (c *hostAgentServiceClient) ConnectProcess(ctx context.Context, req *connect.Request[gen.ConnectProcessRequest]) (*connect.ServerStreamForClient[gen.ConnectProcessResponse], error) { + return c.connectProcess.CallServerStream(ctx, req) +} + // HostAgentServiceHandler is an implementation of the hostagent.v1.HostAgentService service. type HostAgentServiceHandler interface { // CreateSandbox boots a new microVM with the given configuration. @@ -388,6 +579,12 @@ type HostAgentServiceHandler interface { WriteFile(context.Context, *connect.Request[gen.WriteFileRequest]) (*connect.Response[gen.WriteFileResponse], error) // ReadFile reads a file from inside a sandbox. ReadFile(context.Context, *connect.Request[gen.ReadFileRequest]) (*connect.Response[gen.ReadFileResponse], error) + // ListDir lists directory contents inside a sandbox. + ListDir(context.Context, *connect.Request[gen.ListDirRequest]) (*connect.Response[gen.ListDirResponse], error) + // MakeDir creates a directory inside a sandbox. + MakeDir(context.Context, *connect.Request[gen.MakeDirRequest]) (*connect.Response[gen.MakeDirResponse], error) + // RemovePath removes a file or directory inside a sandbox. + RemovePath(context.Context, *connect.Request[gen.RemovePathRequest]) (*connect.Response[gen.RemovePathResponse], error) // CreateSnapshot pauses a sandbox, takes a snapshot, stores it as a reusable // template, and destroys the sandbox. CreateSnapshot(context.Context, *connect.Request[gen.CreateSnapshotRequest]) (*connect.Response[gen.CreateSnapshotResponse], error) @@ -416,6 +613,26 @@ type HostAgentServiceHandler interface { // cleans up all sandbox resources. Used by the template build system to // produce image-only templates (no memory/CPU state). FlattenRootfs(context.Context, *connect.Request[gen.FlattenRootfsRequest]) (*connect.Response[gen.FlattenRootfsResponse], error) + // PtyAttach starts a new PTY process or reconnects to an existing one. 
+ // If cmd is non-empty, starts a new process with the given PTY dimensions. + // If tag is set and cmd is empty, reconnects to the existing process with that tag. + // Returns a stream of output events (started, output data, exit). + PtyAttach(context.Context, *connect.Request[gen.PtyAttachRequest], *connect.ServerStream[gen.PtyAttachResponse]) error + // PtySendInput sends raw bytes to a PTY process identified by tag. + PtySendInput(context.Context, *connect.Request[gen.PtySendInputRequest]) (*connect.Response[gen.PtySendInputResponse], error) + // PtyResize updates the terminal dimensions for a PTY process. + PtyResize(context.Context, *connect.Request[gen.PtyResizeRequest]) (*connect.Response[gen.PtyResizeResponse], error) + // PtyKill sends a signal to a PTY process. + PtyKill(context.Context, *connect.Request[gen.PtyKillRequest]) (*connect.Response[gen.PtyKillResponse], error) + // StartBackground starts a process in the background and returns immediately + // with the PID and tag. The process survives RPC disconnection. + StartBackground(context.Context, *connect.Request[gen.StartBackgroundRequest]) (*connect.Response[gen.StartBackgroundResponse], error) + // ListProcesses returns all running processes inside a sandbox. + ListProcesses(context.Context, *connect.Request[gen.ListProcessesRequest]) (*connect.Response[gen.ListProcessesResponse], error) + // KillProcess sends a signal to a process identified by PID or tag. + KillProcess(context.Context, *connect.Request[gen.KillProcessRequest]) (*connect.Response[gen.KillProcessResponse], error) + // ConnectProcess re-attaches to a running process and streams its output. + ConnectProcess(context.Context, *connect.Request[gen.ConnectProcessRequest], *connect.ServerStream[gen.ConnectProcessResponse]) error } // NewHostAgentServiceHandler builds an HTTP handler from the service implementation. 
It returns the @@ -473,6 +690,24 @@ func NewHostAgentServiceHandler(svc HostAgentServiceHandler, opts ...connect.Han connect.WithSchema(hostAgentServiceMethods.ByName("ReadFile")), connect.WithHandlerOptions(opts...), ) + hostAgentServiceListDirHandler := connect.NewUnaryHandler( + HostAgentServiceListDirProcedure, + svc.ListDir, + connect.WithSchema(hostAgentServiceMethods.ByName("ListDir")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceMakeDirHandler := connect.NewUnaryHandler( + HostAgentServiceMakeDirProcedure, + svc.MakeDir, + connect.WithSchema(hostAgentServiceMethods.ByName("MakeDir")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceRemovePathHandler := connect.NewUnaryHandler( + HostAgentServiceRemovePathProcedure, + svc.RemovePath, + connect.WithSchema(hostAgentServiceMethods.ByName("RemovePath")), + connect.WithHandlerOptions(opts...), + ) hostAgentServiceCreateSnapshotHandler := connect.NewUnaryHandler( HostAgentServiceCreateSnapshotProcedure, svc.CreateSnapshot, @@ -533,6 +768,54 @@ func NewHostAgentServiceHandler(svc HostAgentServiceHandler, opts ...connect.Han connect.WithSchema(hostAgentServiceMethods.ByName("FlattenRootfs")), connect.WithHandlerOptions(opts...), ) + hostAgentServicePtyAttachHandler := connect.NewServerStreamHandler( + HostAgentServicePtyAttachProcedure, + svc.PtyAttach, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyAttach")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServicePtySendInputHandler := connect.NewUnaryHandler( + HostAgentServicePtySendInputProcedure, + svc.PtySendInput, + connect.WithSchema(hostAgentServiceMethods.ByName("PtySendInput")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServicePtyResizeHandler := connect.NewUnaryHandler( + HostAgentServicePtyResizeProcedure, + svc.PtyResize, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyResize")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServicePtyKillHandler := connect.NewUnaryHandler( + 
HostAgentServicePtyKillProcedure, + svc.PtyKill, + connect.WithSchema(hostAgentServiceMethods.ByName("PtyKill")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceStartBackgroundHandler := connect.NewUnaryHandler( + HostAgentServiceStartBackgroundProcedure, + svc.StartBackground, + connect.WithSchema(hostAgentServiceMethods.ByName("StartBackground")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceListProcessesHandler := connect.NewUnaryHandler( + HostAgentServiceListProcessesProcedure, + svc.ListProcesses, + connect.WithSchema(hostAgentServiceMethods.ByName("ListProcesses")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceKillProcessHandler := connect.NewUnaryHandler( + HostAgentServiceKillProcessProcedure, + svc.KillProcess, + connect.WithSchema(hostAgentServiceMethods.ByName("KillProcess")), + connect.WithHandlerOptions(opts...), + ) + hostAgentServiceConnectProcessHandler := connect.NewServerStreamHandler( + HostAgentServiceConnectProcessProcedure, + svc.ConnectProcess, + connect.WithSchema(hostAgentServiceMethods.ByName("ConnectProcess")), + connect.WithHandlerOptions(opts...), + ) return "/hostagent.v1.HostAgentService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case HostAgentServiceCreateSandboxProcedure: @@ -551,6 +834,12 @@ func NewHostAgentServiceHandler(svc HostAgentServiceHandler, opts ...connect.Han hostAgentServiceWriteFileHandler.ServeHTTP(w, r) case HostAgentServiceReadFileProcedure: hostAgentServiceReadFileHandler.ServeHTTP(w, r) + case HostAgentServiceListDirProcedure: + hostAgentServiceListDirHandler.ServeHTTP(w, r) + case HostAgentServiceMakeDirProcedure: + hostAgentServiceMakeDirHandler.ServeHTTP(w, r) + case HostAgentServiceRemovePathProcedure: + hostAgentServiceRemovePathHandler.ServeHTTP(w, r) case HostAgentServiceCreateSnapshotProcedure: hostAgentServiceCreateSnapshotHandler.ServeHTTP(w, r) case HostAgentServiceDeleteSnapshotProcedure: @@ -571,6 +860,22 @@ 
func NewHostAgentServiceHandler(svc HostAgentServiceHandler, opts ...connect.Han hostAgentServiceFlushSandboxMetricsHandler.ServeHTTP(w, r) case HostAgentServiceFlattenRootfsProcedure: hostAgentServiceFlattenRootfsHandler.ServeHTTP(w, r) + case HostAgentServicePtyAttachProcedure: + hostAgentServicePtyAttachHandler.ServeHTTP(w, r) + case HostAgentServicePtySendInputProcedure: + hostAgentServicePtySendInputHandler.ServeHTTP(w, r) + case HostAgentServicePtyResizeProcedure: + hostAgentServicePtyResizeHandler.ServeHTTP(w, r) + case HostAgentServicePtyKillProcedure: + hostAgentServicePtyKillHandler.ServeHTTP(w, r) + case HostAgentServiceStartBackgroundProcedure: + hostAgentServiceStartBackgroundHandler.ServeHTTP(w, r) + case HostAgentServiceListProcessesProcedure: + hostAgentServiceListProcessesHandler.ServeHTTP(w, r) + case HostAgentServiceKillProcessProcedure: + hostAgentServiceKillProcessHandler.ServeHTTP(w, r) + case HostAgentServiceConnectProcessProcedure: + hostAgentServiceConnectProcessHandler.ServeHTTP(w, r) default: http.NotFound(w, r) } @@ -612,6 +917,18 @@ func (UnimplementedHostAgentServiceHandler) ReadFile(context.Context, *connect.R return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.ReadFile is not implemented")) } +func (UnimplementedHostAgentServiceHandler) ListDir(context.Context, *connect.Request[gen.ListDirRequest]) (*connect.Response[gen.ListDirResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.ListDir is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) MakeDir(context.Context, *connect.Request[gen.MakeDirRequest]) (*connect.Response[gen.MakeDirResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.MakeDir is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) RemovePath(context.Context, *connect.Request[gen.RemovePathRequest]) 
(*connect.Response[gen.RemovePathResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.RemovePath is not implemented")) +} + func (UnimplementedHostAgentServiceHandler) CreateSnapshot(context.Context, *connect.Request[gen.CreateSnapshotRequest]) (*connect.Response[gen.CreateSnapshotResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.CreateSnapshot is not implemented")) } @@ -651,3 +968,35 @@ func (UnimplementedHostAgentServiceHandler) FlushSandboxMetrics(context.Context, func (UnimplementedHostAgentServiceHandler) FlattenRootfs(context.Context, *connect.Request[gen.FlattenRootfsRequest]) (*connect.Response[gen.FlattenRootfsResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.FlattenRootfs is not implemented")) } + +func (UnimplementedHostAgentServiceHandler) PtyAttach(context.Context, *connect.Request[gen.PtyAttachRequest], *connect.ServerStream[gen.PtyAttachResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.PtyAttach is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) PtySendInput(context.Context, *connect.Request[gen.PtySendInputRequest]) (*connect.Response[gen.PtySendInputResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.PtySendInput is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) PtyResize(context.Context, *connect.Request[gen.PtyResizeRequest]) (*connect.Response[gen.PtyResizeResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.PtyResize is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) PtyKill(context.Context, *connect.Request[gen.PtyKillRequest]) (*connect.Response[gen.PtyKillResponse], error) { + return 
nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.PtyKill is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) StartBackground(context.Context, *connect.Request[gen.StartBackgroundRequest]) (*connect.Response[gen.StartBackgroundResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.StartBackground is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) ListProcesses(context.Context, *connect.Request[gen.ListProcessesRequest]) (*connect.Response[gen.ListProcessesResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.ListProcesses is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) KillProcess(context.Context, *connect.Request[gen.KillProcessRequest]) (*connect.Response[gen.KillProcessResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.KillProcess is not implemented")) +} + +func (UnimplementedHostAgentServiceHandler) ConnectProcess(context.Context, *connect.Request[gen.ConnectProcessRequest], *connect.ServerStream[gen.ConnectProcessResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("hostagent.v1.HostAgentService.ConnectProcess is not implemented")) +} diff --git a/proto/hostagent/hostagent.proto b/proto/hostagent/hostagent.proto index 817d535..5dbf222 100644 --- a/proto/hostagent/hostagent.proto +++ b/proto/hostagent/hostagent.proto @@ -29,6 +29,15 @@ service HostAgentService { // ReadFile reads a file from inside a sandbox. rpc ReadFile(ReadFileRequest) returns (ReadFileResponse); + // ListDir lists directory contents inside a sandbox. + rpc ListDir(ListDirRequest) returns (ListDirResponse); + + // MakeDir creates a directory inside a sandbox. 
+ rpc MakeDir(MakeDirRequest) returns (MakeDirResponse); + + // RemovePath removes a file or directory inside a sandbox. + rpc RemovePath(RemovePathRequest) returns (RemovePathResponse); + // CreateSnapshot pauses a sandbox, takes a snapshot, stores it as a reusable // template, and destroys the sandbox. rpc CreateSnapshot(CreateSnapshotRequest) returns (CreateSnapshotResponse); @@ -67,6 +76,34 @@ service HostAgentService { // produce image-only templates (no memory/CPU state). rpc FlattenRootfs(FlattenRootfsRequest) returns (FlattenRootfsResponse); + // PtyAttach starts a new PTY process or reconnects to an existing one. + // If cmd is non-empty, starts a new process with the given PTY dimensions. + // If tag is set and cmd is empty, reconnects to the existing process with that tag. + // Returns a stream of output events (started, output data, exit). + rpc PtyAttach(PtyAttachRequest) returns (stream PtyAttachResponse); + + // PtySendInput sends raw bytes to a PTY process identified by tag. + rpc PtySendInput(PtySendInputRequest) returns (PtySendInputResponse); + + // PtyResize updates the terminal dimensions for a PTY process. + rpc PtyResize(PtyResizeRequest) returns (PtyResizeResponse); + + // PtyKill sends a signal to a PTY process. + rpc PtyKill(PtyKillRequest) returns (PtyKillResponse); + + // StartBackground starts a process in the background and returns immediately + // with the PID and tag. The process survives RPC disconnection. + rpc StartBackground(StartBackgroundRequest) returns (StartBackgroundResponse); + + // ListProcesses returns all running processes inside a sandbox. + rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse); + + // KillProcess sends a signal to a process identified by PID or tag. + rpc KillProcess(KillProcessRequest) returns (KillProcessResponse); + + // ConnectProcess re-attaches to a running process and streams its output. 
+ rpc ConnectProcess(ConnectProcessRequest) returns (stream ConnectProcessResponse); + } message CreateSandboxRequest { @@ -95,12 +132,22 @@ message CreateSandboxRequest { // Template UUID (hex string). Both zeros + team zeros = "minimal" sentinel. string template_id = 8; + + // Default unix user for the sandbox (set in envd via PostInit). + string default_user = 9; + + // Default environment variables (set in envd via PostInit). + map<string, string> default_env = 10; } message CreateSandboxResponse { string sandbox_id = 1; string status = 2; string host_ip = 3; + + // Runtime metadata collected during sandbox creation (e.g. envd_version, + // kernel_version, firecracker_version, agent_version). + map<string, string> metadata = 4; } message DestroySandboxRequest { @@ -121,12 +168,26 @@ message ResumeSandboxRequest { // TTL in seconds restored from the DB so the reaper can auto-pause // the sandbox again after inactivity. 0 means no auto-pause. int32 timeout_sec = 2; + + // Default unix user for the sandbox (set in envd via PostInit on resume). + string default_user = 3; + + // Default environment variables (set in envd via PostInit on resume). + map<string, string> default_env = 4; + + // Kernel version hint from the DB — the agent tries to use the exact version, + // falling back to latest if not found on disk. + string kernel_version = 5; } message ResumeSandboxResponse { string sandbox_id = 1; string status = 2; string host_ip = 3; + + // Actual runtime metadata after resume (versions may differ from hint if + // the exact kernel was not available). + map<string, string> metadata = 4; } message CreateSnapshotRequest { @@ -192,6 +253,9 @@ message SandboxInfo { int32 timeout_sec = 9; string team_id = 10; string template_id = 11; + + // Runtime metadata (envd_version, kernel_version, etc.). 
+ map<string, string> metadata = 12; } message WriteFileRequest { @@ -269,6 +333,50 @@ message ReadFileStreamResponse { bytes chunk = 1; } +// ── Filesystem Operations ────────────────────────────────────────── + +message ListDirRequest { + string sandbox_id = 1; + string path = 2; + uint32 depth = 3; +} + +message ListDirResponse { + repeated FileEntry entries = 1; +} + +message FileEntry { + string name = 1; + string path = 2; + // "file", "directory", or "symlink". + string type = 3; + int64 size = 4; + uint32 mode = 5; + // Human-readable permissions string, e.g. "-rwxr-xr-x". + string permissions = 6; + string owner = 7; + string group = 8; + // Last modification time as Unix timestamp (seconds). + int64 modified_at = 9; + optional string symlink_target = 10; +} + +message MakeDirRequest { + string sandbox_id = 1; + string path = 2; +} + +message MakeDirResponse { + FileEntry entry = 1; +} + +message RemovePathRequest { + string sandbox_id = 1; + string path = 2; +} + +message RemovePathResponse {} + // ── Ping ──────────────────────────────────────────────────────────── message PingSandboxRequest { @@ -329,3 +437,131 @@ message FlattenRootfsRequest { message FlattenRootfsResponse { int64 size_bytes = 1; } + +// ── PTY ───────────────────────────────────────────────────────────── + +message PtyAttachRequest { + string sandbox_id = 1; + // Tag is the stable identifier for this PTY session (e.g. "pty-abc123de"). + // Chosen by the caller and used to reconnect later. + string tag = 2; + // If cmd is non-empty, a new process is started. If empty, reconnects to + // the existing process identified by tag. + string cmd = 3; + repeated string args = 4; + uint32 cols = 5; + uint32 rows = 6; + // Environment variables for the process. + map<string, string> envs = 7; + // Working directory. Empty means default. + string cwd = 8; + // User to run as. Empty means default (root). 
+ string user = 9; +} + +message PtyAttachResponse { + oneof event { + PtyStarted started = 1; + PtyOutput output = 2; + PtyExited exited = 3; + } +} + +message PtyStarted { + uint32 pid = 1; + string tag = 2; +} + +message PtyOutput { + bytes data = 1; +} + +message PtyExited { + int32 exit_code = 1; + string error = 2; +} + +message PtySendInputRequest { + string sandbox_id = 1; + string tag = 2; + bytes data = 3; +} + +message PtySendInputResponse {} + +message PtyResizeRequest { + string sandbox_id = 1; + string tag = 2; + uint32 cols = 3; + uint32 rows = 4; +} + +message PtyResizeResponse {} + +message PtyKillRequest { + string sandbox_id = 1; + string tag = 2; +} + +message PtyKillResponse {} + +// ── Background Processes ─────────────────────────────────────────── + +message StartBackgroundRequest { + string sandbox_id = 1; + string cmd = 2; + repeated string args = 3; + // User-chosen tag for the process. If empty, the host agent generates one. + string tag = 4; + map<string, string> envs = 5; + string cwd = 6; +} + +message StartBackgroundResponse { + uint32 pid = 1; + string tag = 2; +} + +message ListProcessesRequest { + string sandbox_id = 1; +} + +message ProcessEntry { + uint32 pid = 1; + string tag = 2; + string cmd = 3; + repeated string args = 4; +} + +message ListProcessesResponse { + repeated ProcessEntry processes = 1; +} + +message KillProcessRequest { + string sandbox_id = 1; + oneof selector { + uint32 pid = 2; + string tag = 3; + } + // Signal to send: "SIGTERM" or "SIGKILL" (default: "SIGKILL"). + string signal = 4; +} + +message KillProcessResponse {} + +message ConnectProcessRequest { + string sandbox_id = 1; + oneof selector { + uint32 pid = 2; + string tag = 3; + } +} + +// Reuses ExecStream event types for symmetry. 
+message ConnectProcessResponse { + oneof event { + ExecStreamStart start = 1; + ExecStreamData data = 2; + ExecStreamEnd end = 3; + } +} diff --git a/recipes/code-runner-beta.healthcheck b/recipes/code-runner-beta.healthcheck new file mode 100644 index 0000000..186da39 --- /dev/null +++ b/recipes/code-runner-beta.healthcheck @@ -0,0 +1 @@ +--interval=5s --timeout=5s --start-period=60s --retries=5 curl -sf http://127.0.0.1:8888/api/status diff --git a/recipes/code-runner-beta.recipefile b/recipes/code-runner-beta.recipefile new file mode 100644 index 0000000..dc96779 --- /dev/null +++ b/recipes/code-runner-beta.recipefile @@ -0,0 +1,9 @@ +RUN --timeout=5m sudo apt install -y python3 python3-pip python3-venv +ENV PYTHONUNBUFFERED=1 + +RUN python3 -m venv ~/jupyter-env +RUN --timeout=5m ~/jupyter-env/bin/pip install --upgrade pip +RUN --timeout=5m ~/jupyter-env/bin/pip install jupyter-server ipykernel +RUN --timeout=5m ~/jupyter-env/bin/python -m ipykernel install --sys-prefix + +START ~/jupyter-env/bin/jupyter server --ServerApp.ip=0.0.0.0 --ServerApp.port=8888 --ServerApp.token='' --ServerApp.password='' --ServerApp.allow_origin='*' --ServerApp.disable_check_xsrf=True --no-browser --log-level=INFO diff --git a/recipes/python-interpreter-v0-beta.healthcheck b/recipes/python-interpreter-v0-beta.healthcheck deleted file mode 100644 index ca2555c..0000000 --- a/recipes/python-interpreter-v0-beta.healthcheck +++ /dev/null @@ -1 +0,0 @@ ---interval=5s --timeout=3s --start-period=3s --retries=3 python3 -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8888/api/status', timeout=3)" diff --git a/recipes/python-interpreter-v0-beta.recipefile b/recipes/python-interpreter-v0-beta.recipefile deleted file mode 100644 index e83f5da..0000000 --- a/recipes/python-interpreter-v0-beta.recipefile +++ /dev/null @@ -1,7 +0,0 @@ -RUN apt-get install -y --no-install-recommends python3 python3-pip python3-venv -RUN python3 -m venv /opt/venv -ENV PATH=/opt/venv/bin:$PATH - 
-RUN --timeout=5m pip install --no-cache-dir jupyter-server ipykernel - -START jupyter server --ServerApp.ip=0.0.0.0 --ServerApp.port=8888 --ServerApp.token='' --ServerApp.allow_origin='*' --ServerApp.disable_check_xsrf=True --no-browser --allow-root diff --git a/scripts/dev.sh b/scripts/dev.sh deleted file mode 100644 index e69de29..0000000 diff --git a/scripts/generate-proto.sh b/scripts/generate-proto.sh deleted file mode 100644 index e69de29..0000000 diff --git a/scripts/prepare-wrenn-user.sh b/scripts/prepare-wrenn-user.sh new file mode 100755 index 0000000..c48eef0 --- /dev/null +++ b/scripts/prepare-wrenn-user.sh @@ -0,0 +1,385 @@ +#!/usr/bin/env bash +# +# prepare-wrenn-user.sh — Create the wrenn system user and configure minimal privileges. +# +# Creates a locked-down 'wrenn' system user that can run wrenn-agent and wrenn-cp +# with only the privileges they need. The agent binary gets Linux capabilities +# via setcap — no sudo is configured for the wrenn user at all. If an attacker +# compromises the wrenn user, they cannot escalate via sudo. +# +# What this script does: +# 1. Creates the 'wrenn' system user (bash shell for debugging, no home dir) +# 2. Creates required directories with correct ownership +# 3. Sets Linux capabilities on wrenn-agent and all child binaries +# 4. Installs an apt hook to restore capabilities after package updates +# 5. Installs a sudoers drop-in (comment-only, no grants — absence is the cage) +# 6. Ensures required kernel modules are loaded +# 7. 
Writes systemd unit files for both wrenn-agent and wrenn-cp +# +# Usage: +# sudo bash scripts/prepare-wrenn-user.sh +# +# Prerequisites: +# - wrenn-agent binary at /usr/local/bin/wrenn-agent +# - wrenn-cp binary at /usr/local/bin/wrenn-cp +# - firecracker binary at /usr/local/bin/firecracker +# - libcap2-bin installed (for setcap) + +set -euo pipefail + +# ── Guard ──────────────────────────────────────────────────────────────────── + +if [[ $EUID -ne 0 ]]; then + echo "ERROR: This script must be run as root." + exit 1 +fi + +# ── Configuration ──────────────────────────────────────────────────────────── + +WRENN_USER="wrenn" +WRENN_GROUP="wrenn" +WRENN_DIR="/var/lib/wrenn" +AGENT_BIN="/usr/local/bin/wrenn-agent" +CP_BIN="/usr/local/bin/wrenn-cp" +FC_BIN="/usr/local/bin/firecracker" +RESTORE_CAPS_SCRIPT="/etc/wrenn/restore-caps.sh" + +# ── 1. Create system user ─────────────────────────────────────────────────── + +if id "${WRENN_USER}" &>/dev/null; then + echo "==> User '${WRENN_USER}' already exists, skipping creation." +else + echo "==> Creating system user '${WRENN_USER}'..." + useradd \ + --system \ + --no-create-home \ + --home-dir "${WRENN_DIR}" \ + --shell /bin/bash \ + "${WRENN_USER}" +fi + +# Add wrenn to kvm group for /dev/kvm access. +if getent group kvm &>/dev/null; then + usermod -aG kvm "${WRENN_USER}" + echo "==> Added '${WRENN_USER}' to 'kvm' group." +fi + +# ── 2. Create directories with correct ownership ──────────────────────────── + +echo "==> Setting up directories..." + +directories=( + "${WRENN_DIR}" + "${WRENN_DIR}/images" + "${WRENN_DIR}/kernels" + "${WRENN_DIR}/sandboxes" + "${WRENN_DIR}/snapshots" + "${WRENN_DIR}/logs" + "/run/netns" +) + +for dir in "${directories[@]}"; do + mkdir -p "${dir}" +done + +# Only chown wrenn-owned dirs (not /run/netns which is system-managed). 
+for dir in "${WRENN_DIR}" "${WRENN_DIR}/images" "${WRENN_DIR}/kernels" \ + "${WRENN_DIR}/sandboxes" "${WRENN_DIR}/snapshots" "${WRENN_DIR}/logs"; do + chown "${WRENN_USER}:${WRENN_GROUP}" "${dir}" + chmod 750 "${dir}" +done + +# ── 3. Set capabilities on binaries ───────────────────────────────────────── +# +# These capabilities replace full root access. The wrenn-agent binary gets +# exactly the capabilities it needs for: +# +# CAP_SYS_ADMIN — network namespaces (netns create/enter), mount namespaces +# (unshare -m), losetup, dmsetup, mount/umount +# CAP_NET_ADMIN — veth/TAP creation (netlink), iptables rules, IP forwarding, +# routing table manipulation +# CAP_NET_RAW — raw socket access (needed by iptables internally) +# CAP_SYS_PTRACE — reading /proc/self/ns/net (netns.Get) +# CAP_KILL — sending SIGTERM/SIGKILL to Firecracker processes +# CAP_DAC_OVERRIDE — accessing /dev/loop*, /dev/mapper/*, /dev/net/tun, +# /proc/sys/net/ipv4/ip_forward +# CAP_MKNOD — creating device nodes (dm-snapshot) +# +# The 'ep' suffix means Effective + Permitted (granted at exec time). + +echo "==> Setting capabilities on wrenn-agent..." + +if [[ ! -f "${AGENT_BIN}" ]]; then + echo "WARNING: ${AGENT_BIN} not found, skipping setcap. Install the binary first." +else + setcap \ + cap_sys_admin,cap_net_admin,cap_net_raw,cap_sys_ptrace,cap_kill,cap_dac_override,cap_mknod+ep \ + "${AGENT_BIN}" + + echo " Capabilities set on ${AGENT_BIN}:" + getcap "${AGENT_BIN}" +fi + +# Firecracker also needs capabilities when spawned by a non-root parent. +# CAP_NET_ADMIN is required for network device access inside the netns. 
+if [[ -f "${FC_BIN}" ]]; then + setcap cap_net_admin,cap_sys_admin,cap_dac_override+ep "${FC_BIN}" + echo " Capabilities set on ${FC_BIN}:" + getcap "${FC_BIN}" +fi + +# ── Helper: resolve binary path and apply setcap ──────────────────────────── +# +# Uses `command -v` to find the binary in PATH (handles /usr/bin vs /usr/sbin +# differences across distros), then `readlink -f` to resolve symlinks so that +# setcap hits the real inode (important for iptables-nft/alternatives). + +setcap_binary() { + local name="$1" caps="$2" + local bin + bin=$(command -v "$name" 2>/dev/null) || { + echo " WARNING: ${name} not found in PATH, skipping." + return 0 + } + bin=$(readlink -f "$bin") + setcap "$caps" "$bin" + echo " $(getcap "$bin")" +} + +# The child binaries invoked by wrenn-agent (iptables, losetup, dmsetup, etc.) +# also need capabilities since they'll be exec'd by a non-root user. +echo "==> Setting capabilities on child binaries..." + +setcap_binary iptables "cap_net_admin,cap_net_raw+ep" +setcap_binary iptables-save "cap_net_admin,cap_net_raw+ep" +setcap_binary ip "cap_sys_admin,cap_net_admin+ep" +setcap_binary sysctl "cap_net_admin+ep" +setcap_binary losetup "cap_sys_admin,cap_dac_override+ep" +setcap_binary blockdev "cap_sys_admin,cap_dac_override+ep" +setcap_binary dmsetup "cap_sys_admin,cap_dac_override,cap_mknod+ep" +setcap_binary e2fsck "cap_sys_admin,cap_dac_override+ep" +setcap_binary resize2fs "cap_sys_admin,cap_dac_override+ep" +setcap_binary dd "cap_dac_override+ep" +setcap_binary unshare "cap_sys_admin+ep" +setcap_binary mount "cap_sys_admin,cap_dac_override+ep" + +# ── 4. Persist capabilities across package updates ────────────────────────── +# +# apt/dpkg overwrites binaries on package updates, which strips the xattr-based +# capabilities set by setcap. 
This installs: +# - /etc/wrenn/restore-caps.sh: re-applies setcap to all child binaries +# - /etc/apt/apt.conf.d/99-wrenn-setcap: apt post-invoke hook that calls it + +echo "==> Installing capability restore hook..." + +mkdir -p /etc/wrenn + +cat > "${RESTORE_CAPS_SCRIPT}" << 'RESTORE' +#!/usr/bin/env bash +# +# restore-caps.sh — Re-apply Linux capabilities to wrenn child binaries. +# Called automatically by apt after package updates (see /etc/apt/apt.conf.d/99-wrenn-setcap). +# Can also be run manually: sudo /etc/wrenn/restore-caps.sh + +set -euo pipefail + +setcap_binary() { + local name="$1" caps="$2" + local bin + bin=$(command -v "$name" 2>/dev/null) || return 0 + bin=$(readlink -f "$bin") + setcap "$caps" "$bin" 2>/dev/null || true +} + +# wrenn-agent and firecracker (only if present — they aren't package-managed). +[[ -f /usr/local/bin/wrenn-agent ]] && \ + setcap cap_sys_admin,cap_net_admin,cap_net_raw,cap_sys_ptrace,cap_kill,cap_dac_override,cap_mknod+ep \ + /usr/local/bin/wrenn-agent 2>/dev/null || true +[[ -f /usr/local/bin/firecracker ]] && \ + setcap cap_net_admin,cap_sys_admin,cap_dac_override+ep \ + /usr/local/bin/firecracker 2>/dev/null || true + +# Child binaries (these are the ones wiped by apt). 
+setcap_binary iptables "cap_net_admin,cap_net_raw+ep" +setcap_binary iptables-save "cap_net_admin,cap_net_raw+ep" +setcap_binary ip "cap_sys_admin,cap_net_admin+ep" +setcap_binary sysctl "cap_net_admin+ep" +setcap_binary losetup "cap_sys_admin,cap_dac_override+ep" +setcap_binary blockdev "cap_sys_admin,cap_dac_override+ep" +setcap_binary dmsetup "cap_sys_admin,cap_dac_override,cap_mknod+ep" +setcap_binary e2fsck "cap_sys_admin,cap_dac_override+ep" +setcap_binary resize2fs "cap_sys_admin,cap_dac_override+ep" +setcap_binary dd "cap_dac_override+ep" +setcap_binary unshare "cap_sys_admin+ep" +setcap_binary mount "cap_sys_admin,cap_dac_override+ep" +RESTORE + +chmod 755 "${RESTORE_CAPS_SCRIPT}" + +cat > /etc/apt/apt.conf.d/99-wrenn-setcap << 'APT' +// Re-apply Linux capabilities to wrenn child binaries after any package update. +// Capabilities (xattr) are stripped when dpkg overwrites a binary. +DPkg::Post-Invoke { "/etc/wrenn/restore-caps.sh"; }; +APT + +echo " Installed ${RESTORE_CAPS_SCRIPT} and apt post-invoke hook." + +# ── 5. Device access ──────────────────────────────────────────────────────── +# +# /dev/kvm — handled by kvm group membership above +# /dev/net/tun — needs to be accessible by wrenn user + +echo "==> Configuring device access..." + +# Ensure /dev/net/tun is accessible (udev rule for persistence across reboots). +cat > /etc/udev/rules.d/99-wrenn.rules << 'UDEV' +# Allow wrenn user access to TUN device for TAP networking. +SUBSYSTEM=="misc", KERNEL=="tun", GROUP="wrenn", MODE="0660" +UDEV + +udevadm control --reload-rules 2>/dev/null || true +echo " Installed udev rule for /dev/net/tun." + +# ── 6. Kernel modules ─────────────────────────────────────────────────────── + +echo "==> Ensuring kernel modules are loaded..." + +modules=(dm_snapshot dm_mod loop tun) +for mod in "${modules[@]}"; do + if ! 
lsmod | grep -q "^${mod}"; then + modprobe "${mod}" 2>/dev/null && echo " Loaded ${mod}" || echo " WARNING: Could not load ${mod}" + else + echo " ${mod} already loaded." + fi +done + +# Persist across reboots. +for mod in "${modules[@]}"; do + grep -qxF "${mod}" /etc/modules-load.d/wrenn.conf 2>/dev/null || echo "${mod}" >> /etc/modules-load.d/wrenn.conf +done +echo " Module persistence written to /etc/modules-load.d/wrenn.conf." + +# ── 7. Sudoers ────────────────────────────────────────────────────────────── +# +# The wrenn user has no sudo grants. The absence of a grant is the cage — an +# explicit "!ALL" deny is weaker due to known bypasses (CVE-2019-14287). +# This file exists purely as documentation for operators running `sudo -l`. + +echo "==> Writing sudoers drop-in..." + +cat > /etc/sudoers.d/wrenn << 'SUDOERS' +# Wrenn system user — no sudo access permitted. +# All privilege is granted via Linux capabilities on specific binaries (setcap). +# This file contains no active rules. The absence of any grant is intentional +# and is the strongest way to deny escalation. +# +# Do not add rules here. If the wrenn user needs new privileges, use setcap +# on the specific binary instead. +SUDOERS + +chmod 440 /etc/sudoers.d/wrenn +visudo -c -f /etc/sudoers.d/wrenn +echo " /etc/sudoers.d/wrenn installed and validated." + +# ── 8. Systemd units ──────────────────────────────────────────────────────── + +echo "==> Writing systemd service files..." + +cat > /etc/systemd/system/wrenn-agent.service << 'UNIT' +[Unit] +Description=Wrenn Host Agent +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=wrenn +Group=wrenn +EnvironmentFile=-/etc/wrenn/agent.env + +# The binary has capabilities set via setcap. These systemd directives ensure +# the capabilities are inherited into the process at exec time. 
+AmbientCapabilities=CAP_SYS_ADMIN CAP_NET_ADMIN CAP_NET_RAW CAP_SYS_PTRACE CAP_KILL CAP_DAC_OVERRIDE CAP_MKNOD +CapabilityBoundingSet=CAP_SYS_ADMIN CAP_NET_ADMIN CAP_NET_RAW CAP_SYS_PTRACE CAP_KILL CAP_DAC_OVERRIDE CAP_MKNOD + +# IMPORTANT: must be false — child binaries (iptables, losetup, dmsetup, etc.) +# have their own file capabilities via setcap which must be honored at exec time. +NoNewPrivileges=false + +# Enable IP forwarding before the agent starts. The "+" prefix runs this +# directive as root (bypassing User=wrenn) so it can write to procfs. +ExecStartPre=+/bin/sh -c 'sysctl -w net.ipv4.ip_forward=1' + +ExecStart=/usr/local/bin/wrenn-agent --address ${WRENN_ADVERTISE_ADDR} + +Restart=on-failure +RestartSec=5 + +# File descriptor limits (Firecracker + loop devices + sockets). +LimitNOFILE=65536 +LimitNPROC=4096 + +# Protect host filesystem — only allow access to what's needed. +ProtectHome=true +ReadWritePaths=/var/lib/wrenn /tmp /run/netns /dev/mapper +ReadOnlyPaths=/usr/local/bin/firecracker + +[Install] +WantedBy=multi-user.target +UNIT + +cat > /etc/systemd/system/wrenn-cp.service << 'UNIT' +[Unit] +Description=Wrenn Control Plane +After=network-online.target postgresql.service +Wants=network-online.target + +[Service] +Type=simple +User=wrenn +Group=wrenn +EnvironmentFile=-/etc/wrenn/cp.env + +# Control plane is fully unprivileged — no capabilities needed. +NoNewPrivileges=true +CapabilityBoundingSet= + +ExecStart=/usr/local/bin/wrenn-cp + +Restart=on-failure +RestartSec=5 + +ProtectHome=true +ProtectSystem=strict +ReadWritePaths=/tmp + +[Install] +WantedBy=multi-user.target +UNIT + +mkdir -p /etc/wrenn +touch /etc/wrenn/agent.env /etc/wrenn/cp.env +chmod 640 /etc/wrenn/agent.env /etc/wrenn/cp.env +chown root:${WRENN_GROUP} /etc/wrenn/agent.env /etc/wrenn/cp.env + +systemctl daemon-reload +echo " wrenn-agent.service and wrenn-cp.service installed." 
+ +# ── Done ───────────────────────────────────────────────────────────────────── + +echo "" +echo "=== Setup complete ===" +echo "" +echo "Next steps:" +echo " 1. Copy wrenn-agent and wrenn-cp binaries to /usr/local/bin/" +echo " 2. Edit /etc/wrenn/agent.env with WRENN_CP_URL and WRENN_ADVERTISE_ADDR" +echo " 3. Edit /etc/wrenn/cp.env with DATABASE_URL and other control plane config" +echo " 4. systemctl enable --now wrenn-agent" +echo " 5. systemctl enable --now wrenn-cp" +echo "" +echo "Security summary:" +echo " - wrenn user: bash shell (for debugging), no home, no sudo (no grants in sudoers)" +echo " - wrenn-agent: runs as wrenn with 7 capabilities via setcap (not root)" +echo " - wrenn-cp: runs as wrenn with zero capabilities" +echo " - Capabilities auto-restored after apt upgrades via /etc/wrenn/restore-caps.sh" +echo "" diff --git a/scripts/rootfs-from-container.sh b/scripts/rootfs-from-container.sh index 2f96a3a..74e309b 100755 --- a/scripts/rootfs-from-container.sh +++ b/scripts/rootfs-from-container.sh @@ -86,7 +86,7 @@ sudo mkfs.ext4 -F "${OUTPUT_FILE}" # Step 4: Mount and populate. echo "==> Mounting image at ${MOUNT_DIR}..." mkdir -p "${MOUNT_DIR}" -sudo mount -o loop "${OUTPUT_FILE}" "${MOUNT_DIR}" +sudo mount -o loop,rw "${OUTPUT_FILE}" "${MOUNT_DIR}" echo "==> Extracting container filesystem..." sudo tar xf "${TAR_FILE}" -C "${MOUNT_DIR}" diff --git a/scripts/setup-host.sh b/scripts/setup-host.sh deleted file mode 100644 index e69de29..0000000 diff --git a/scripts/test-host.sh b/scripts/test-host.sh deleted file mode 100755 index 1cd61e7..0000000 --- a/scripts/test-host.sh +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/env bash -# -# test-host.sh — Integration test for the Wrenn host agent. 
-# -# Prerequisites: -# - Host agent running: sudo ./builds/wrenn-agent -# - Firecracker installed at /usr/local/bin/firecracker -# - Kernel at /var/lib/wrenn/kernels/vmlinux -# - Base rootfs at /var/lib/wrenn/images/minimal.ext4 (with envd + wrenn-init baked in) -# -# Usage: -# ./scripts/test-host.sh [agent_url] -# -# The agent URL defaults to http://localhost:50051. - -set -euo pipefail - -AGENT="${1:-http://localhost:50051}" -BASE="/hostagent.v1.HostAgentService" -SANDBOX_ID="" - -# Colors for output. -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -NC='\033[0m' - -pass() { echo -e "${GREEN}PASS${NC}: $1"; } -fail() { echo -e "${RED}FAIL${NC}: $1"; exit 1; } -info() { echo -e "${YELLOW}----${NC}: $1"; } - -rpc() { - local method="$1" - local body="$2" - curl -s -X POST \ - -H "Content-Type: application/json" \ - "${AGENT}${BASE}/${method}" \ - -d "${body}" -} - -# ────────────────────────────────────────────────── -# Test 1: List sandboxes (should be empty) -# ────────────────────────────────────────────────── -info "Test 1: List sandboxes (expect empty)" - -RESULT=$(rpc "ListSandboxes" '{}') -echo " Response: ${RESULT}" - -echo "${RESULT}" | grep -q '"sandboxes"' || echo "${RESULT}" | grep -q '{}' && \ - pass "ListSandboxes returned" || \ - fail "ListSandboxes failed" - -# ────────────────────────────────────────────────── -# Test 2: Create a sandbox -# ────────────────────────────────────────────────── -info "Test 2: Create a sandbox" - -RESULT=$(rpc "CreateSandbox" '{ - "template": "minimal", - "vcpus": 1, - "memoryMb": 512, - "timeoutSec": 300 -}') -echo " Response: ${RESULT}" - -SANDBOX_ID=$(echo "${RESULT}" | python3 -c "import sys,json; print(json.load(sys.stdin)['sandboxId'])" 2>/dev/null) || \ - fail "CreateSandbox did not return sandboxId" - -echo " Sandbox ID: ${SANDBOX_ID}" -pass "Sandbox created: ${SANDBOX_ID}" - -# ────────────────────────────────────────────────── -# Test 3: List sandboxes (should have one) -# 
────────────────────────────────────────────────── -info "Test 3: List sandboxes (expect one)" - -RESULT=$(rpc "ListSandboxes" '{}') -echo " Response: ${RESULT}" - -echo "${RESULT}" | grep -q "${SANDBOX_ID}" && \ - pass "Sandbox ${SANDBOX_ID} found in list" || \ - fail "Sandbox not found in list" - -# ────────────────────────────────────────────────── -# Test 4: Execute a command -# ────────────────────────────────────────────────── -info "Test 4: Execute 'echo hello world'" - -RESULT=$(rpc "Exec" "{ - \"sandboxId\": \"${SANDBOX_ID}\", - \"cmd\": \"/bin/sh\", - \"args\": [\"-c\", \"echo hello world\"], - \"timeoutSec\": 10 -}") -echo " Response: ${RESULT}" - -# stdout is base64-encoded in Connect RPC JSON. -STDOUT=$(echo "${RESULT}" | python3 -c " -import sys, json, base64 -r = json.load(sys.stdin) -print(base64.b64decode(r['stdout']).decode().strip()) -" 2>/dev/null) || fail "Exec did not return stdout" - -[ "${STDOUT}" = "hello world" ] && \ - pass "Exec returned correct output: '${STDOUT}'" || \ - fail "Expected 'hello world', got '${STDOUT}'" - -# ────────────────────────────────────────────────── -# Test 5: Execute a multi-line command -# ────────────────────────────────────────────────── -info "Test 5: Execute multi-line command" - -RESULT=$(rpc "Exec" "{ - \"sandboxId\": \"${SANDBOX_ID}\", - \"cmd\": \"/bin/sh\", - \"args\": [\"-c\", \"echo line1; echo line2; echo line3\"], - \"timeoutSec\": 10 -}") -echo " Response: ${RESULT}" - -LINE_COUNT=$(echo "${RESULT}" | python3 -c " -import sys, json, base64 -r = json.load(sys.stdin) -lines = base64.b64decode(r['stdout']).decode().strip().split('\n') -print(len(lines)) -" 2>/dev/null) - -[ "${LINE_COUNT}" = "3" ] && \ - pass "Multi-line output: ${LINE_COUNT} lines" || \ - fail "Expected 3 lines, got ${LINE_COUNT}" - -# ────────────────────────────────────────────────── -# Test 6: Pause the sandbox -# ────────────────────────────────────────────────── -info "Test 6: Pause sandbox" - -RESULT=$(rpc "PauseSandbox" 
"{\"sandboxId\": \"${SANDBOX_ID}\"}") -echo " Response: ${RESULT}" - -# Verify status is paused. -LIST=$(rpc "ListSandboxes" '{}') -echo "${LIST}" | grep -q '"paused"' && \ - pass "Sandbox paused" || \ - fail "Sandbox not in paused state" - -# ────────────────────────────────────────────────── -# Test 7: Exec should fail while paused -# ────────────────────────────────────────────────── -info "Test 7: Exec while paused (expect error)" - -RESULT=$(rpc "Exec" "{ - \"sandboxId\": \"${SANDBOX_ID}\", - \"cmd\": \"/bin/echo\", - \"args\": [\"should fail\"] -}") -echo " Response: ${RESULT}" - -echo "${RESULT}" | grep -qi "not running\|error\|code" && \ - pass "Exec correctly rejected while paused" || \ - fail "Exec should have failed while paused" - -# ────────────────────────────────────────────────── -# Test 8: Resume the sandbox -# ────────────────────────────────────────────────── -info "Test 8: Resume sandbox" - -RESULT=$(rpc "ResumeSandbox" "{\"sandboxId\": \"${SANDBOX_ID}\"}") -echo " Response: ${RESULT}" - -# Verify status is running. 
-LIST=$(rpc "ListSandboxes" '{}') -echo "${LIST}" | grep -q '"running"' && \ - pass "Sandbox resumed" || \ - fail "Sandbox not in running state" - -# ────────────────────────────────────────────────── -# Test 9: Exec after resume -# ────────────────────────────────────────────────── -info "Test 9: Exec after resume" - -RESULT=$(rpc "Exec" "{ - \"sandboxId\": \"${SANDBOX_ID}\", - \"cmd\": \"/bin/sh\", - \"args\": [\"-c\", \"echo resumed ok\"], - \"timeoutSec\": 10 -}") -echo " Response: ${RESULT}" - -STDOUT=$(echo "${RESULT}" | python3 -c " -import sys, json, base64 -r = json.load(sys.stdin) -print(base64.b64decode(r['stdout']).decode().strip()) -" 2>/dev/null) || fail "Exec after resume failed" - -[ "${STDOUT}" = "resumed ok" ] && \ - pass "Exec after resume works: '${STDOUT}'" || \ - fail "Expected 'resumed ok', got '${STDOUT}'" - -# ────────────────────────────────────────────────── -# Test 10: Destroy the sandbox -# ────────────────────────────────────────────────── -info "Test 10: Destroy sandbox" - -RESULT=$(rpc "DestroySandbox" "{\"sandboxId\": \"${SANDBOX_ID}\"}") -echo " Response: ${RESULT}" -pass "Sandbox destroyed" - -# ────────────────────────────────────────────────── -# Test 11: List sandboxes (should be empty again) -# ────────────────────────────────────────────────── -info "Test 11: List sandboxes (expect empty)" - -RESULT=$(rpc "ListSandboxes" '{}') -echo " Response: ${RESULT}" - -echo "${RESULT}" | grep -q "${SANDBOX_ID}" && \ - fail "Destroyed sandbox still in list" || \ - pass "Sandbox list is clean" - -# ────────────────────────────────────────────────── -# Test 12: Destroy non-existent sandbox (expect error) -# ────────────────────────────────────────────────── -info "Test 12: Destroy non-existent sandbox (expect error)" - -RESULT=$(rpc "DestroySandbox" '{"sandboxId": "sb-nonexist"}') -echo " Response: ${RESULT}" - -echo "${RESULT}" | grep -qi "not found\|error\|code" && \ - pass "Correctly rejected non-existent sandbox" || \ - fail "Should 
have returned error for non-existent sandbox" - -echo "" -echo -e "${GREEN}All tests passed!${NC}" diff --git a/scripts/update-debug-rootfs.sh b/scripts/update-minimal-rootfs.sh similarity index 98% rename from scripts/update-debug-rootfs.sh rename to scripts/update-minimal-rootfs.sh index bdedded..71a9f47 100755 --- a/scripts/update-debug-rootfs.sh +++ b/scripts/update-minimal-rootfs.sh @@ -45,7 +45,7 @@ fi # Step 2: Mount the rootfs. echo "==> Mounting rootfs at ${MOUNT_DIR}..." mkdir -p "${MOUNT_DIR}" -sudo mount -o loop "${ROOTFS}" "${MOUNT_DIR}" +sudo mount -o loop,rw "${ROOTFS}" "${MOUNT_DIR}" cleanup() { echo "==> Unmounting rootfs..." diff --git a/sqlc.yaml b/sqlc.yaml index eb9298f..1840f4e 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -6,6 +6,6 @@ sql: gen: go: package: "db" - out: "internal/db" + out: "pkg/db" sql_package: "pgx/v5" emit_json_tags: true