diff --git a/.env.example b/.env.example index f9318cc..7e16a7a 100644 --- a/.env.example +++ b/.env.example @@ -12,6 +12,7 @@ WRENN_HOST_LISTEN_ADDR=:50051 WRENN_DIR=/var/lib/wrenn WRENN_HOST_INTERFACE=eth0 WRENN_CP_URL=http://localhost:8080 +WRENN_DEFAULT_ROOTFS_SIZE=5Gi # Lago (billing — external service) LAGO_API_URL=http://localhost:3000 diff --git a/cmd/host-agent/main.go b/cmd/host-agent/main.go index 4e5d8ed..287122b 100644 --- a/cmd/host-agent/main.go +++ b/cmd/host-agent/main.go @@ -63,15 +63,28 @@ func main() { os.Exit(1) } - // Expand base images to the standard disk size (sparse, no extra physical + // Parse default rootfs size from env (e.g. "5G", "2Gi", "1000M"). + defaultRootfsSizeMB := sandbox.DefaultDiskSizeMB + if sizeStr := os.Getenv("WRENN_DEFAULT_ROOTFS_SIZE"); sizeStr != "" { + parsed, err := sandbox.ParseSizeToMB(sizeStr) + if err != nil { + slog.Error("invalid WRENN_DEFAULT_ROOTFS_SIZE", "value", sizeStr, "error", err) + os.Exit(1) + } + defaultRootfsSizeMB = parsed + slog.Info("using custom rootfs size", "size_mb", defaultRootfsSizeMB) + } + + // Expand base images to the configured disk size (sparse, no extra physical // disk). This ensures dm-snapshot sandboxes see the full size from boot. 
- if err := sandbox.EnsureImageSizes(rootDir, sandbox.DefaultDiskSizeMB); err != nil { + if err := sandbox.EnsureImageSizes(rootDir, defaultRootfsSizeMB); err != nil { slog.Error("failed to expand base images", "error", err) os.Exit(1) } cfg := sandbox.Config{ - WrennDir: rootDir, + WrennDir: rootDir, + DefaultRootfsSizeMB: defaultRootfsSizeMB, } mgr := sandbox.New(cfg) diff --git a/db/migrations/20260411182550_template_defaults.sql b/db/migrations/20260411182550_template_defaults.sql new file mode 100644 index 0000000..3378453 --- /dev/null +++ b/db/migrations/20260411182550_template_defaults.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE templates + ADD COLUMN default_user TEXT NOT NULL DEFAULT 'root', + ADD COLUMN default_env JSONB NOT NULL DEFAULT '{}'; + +ALTER TABLE template_builds + ADD COLUMN default_user TEXT NOT NULL DEFAULT 'root', + ADD COLUMN default_env JSONB NOT NULL DEFAULT '{}'; + +-- +goose Down +ALTER TABLE template_builds + DROP COLUMN default_env, + DROP COLUMN default_user; + +ALTER TABLE templates + DROP COLUMN default_env, + DROP COLUMN default_user; diff --git a/db/queries/template_builds.sql b/db/queries/template_builds.sql index 1fb07be..1a0e3b0 100644 --- a/db/queries/template_builds.sql +++ b/db/queries/template_builds.sql @@ -31,3 +31,8 @@ WHERE id = $1; UPDATE template_builds SET error = $2, status = 'failed', completed_at = NOW() WHERE id = $1; + +-- name: UpdateBuildDefaults :exec +UPDATE template_builds +SET default_user = $2, default_env = $3 +WHERE id = $1; diff --git a/db/queries/templates.sql b/db/queries/templates.sql index de4d6f2..fbea228 100644 --- a/db/queries/templates.sql +++ b/db/queries/templates.sql @@ -1,6 +1,6 @@ -- name: InsertTemplate :one -INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7) +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id, default_user, default_env) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING 
*; -- name: GetTemplate :one diff --git a/frontend/src/lib/api/builds.ts b/frontend/src/lib/api/builds.ts index 1de23b8..fe9fa1f 100644 --- a/frontend/src/lib/api/builds.ts +++ b/frontend/src/lib/api/builds.ts @@ -1,4 +1,4 @@ -import { apiFetch, type ApiResult } from '$lib/api/client'; +import { apiFetch, apiFetchMultipart, type ApiResult } from '$lib/api/client'; export type BuildLogEntry = { step: number; @@ -26,6 +26,8 @@ export type Build = { error?: string; sandbox_id?: string; host_id?: string; + default_user: string; + default_env: Record; created_at: string; started_at?: string; completed_at?: string; @@ -39,9 +41,18 @@ export type CreateBuildParams = { vcpus?: number; memory_mb?: number; skip_pre_post?: boolean; + archive?: File; }; export async function createBuild(params: CreateBuildParams): Promise> { + if (params.archive) { + // Use multipart when an archive file is provided. + const { archive, ...config } = params; + const formData = new FormData(); + formData.append('config', JSON.stringify(config)); + formData.append('archive', archive); + return apiFetchMultipart('POST', '/api/v1/admin/builds', formData); + } return apiFetch('POST', '/api/v1/admin/builds', params); } diff --git a/frontend/src/lib/api/client.ts b/frontend/src/lib/api/client.ts index 00fa381..d6e6459 100644 --- a/frontend/src/lib/api/client.ts +++ b/frontend/src/lib/api/client.ts @@ -22,3 +22,24 @@ export async function apiFetch(method: string, path: string, body?: unknown): return { ok: false, error: 'Unable to connect to the server' }; } } + +export async function apiFetchMultipart(method: string, path: string, formData: FormData): Promise> { + try { + const headers: Record = {}; + if (auth.token) headers['Authorization'] = `Bearer ${auth.token}`; + + const res = await fetch(path, { + method, + headers, + body: formData + }); + + if (res.status === 204) return { ok: true, data: undefined as T }; + + const data = await res.json(); + if (!res.ok) return { ok: false, error: 
data?.error?.message ?? 'Something went wrong' }; + return { ok: true, data: data as T }; + } catch { + return { ok: false, error: 'Unable to connect to the server' }; + } +} diff --git a/frontend/src/lib/components/AdminSidebar.svelte b/frontend/src/lib/components/AdminSidebar.svelte index ebf4b64..d01e857 100644 --- a/frontend/src/lib/components/AdminSidebar.svelte +++ b/frontend/src/lib/components/AdminSidebar.svelte @@ -22,8 +22,8 @@ }; const managementItems: NavItem[] = [ - { label: 'Hosts', icon: IconServer, href: '/admin/hosts' }, - { label: 'Templates', icon: IconTemplate, href: '/admin/templates' } + { label: 'Templates', icon: IconTemplate, href: '/admin/templates' }, + { label: 'Hosts', icon: IconServer, href: '/admin/hosts' } ]; function isActive(href: string): boolean { diff --git a/frontend/src/lib/components/Sidebar.svelte b/frontend/src/lib/components/Sidebar.svelte index 4111dd8..f7849ee 100644 --- a/frontend/src/lib/components/Sidebar.svelte +++ b/frontend/src/lib/components/Sidebar.svelte @@ -49,7 +49,7 @@ const platformItems: NavItem[] = [ { label: 'Capsules', icon: IconMonitor, href: '/dashboard/capsules' }, - { label: 'Templates', icon: IconBox, href: '/dashboard/snapshots' }, + { label: 'Templates', icon: IconBox, href: '/dashboard/templates' }, { label: 'Metrics', icon: IconMetrics, href: '/dashboard/metrics' } ]; diff --git a/frontend/src/routes/admin/+page.svelte b/frontend/src/routes/admin/+page.svelte index b5a56c1..c6c45dc 100644 --- a/frontend/src/routes/admin/+page.svelte +++ b/frontend/src/routes/admin/+page.svelte @@ -1,5 +1,5 @@ diff --git a/frontend/src/routes/admin/hosts/+page.svelte b/frontend/src/routes/admin/hosts/+page.svelte index 16c7476..362b9ff 100644 --- a/frontend/src/routes/admin/hosts/+page.svelte +++ b/frontend/src/routes/admin/hosts/+page.svelte @@ -168,45 +168,48 @@
-
-
+
+ +
+ +
-

+

Hosts

-

+

Platform and BYOC compute across all teams.

{#if activeTab === 'platform'} {/if}
- + {#if !loading && !error} -
-
- {totalCount} +
+
+ {totalCount} total
-
- - - +
+ + + - {onlineCount} + {onlineCount} online
{#if pendingCount > 0} -
- {pendingCount} +
+ {pendingCount} pending
{/if} @@ -214,30 +217,32 @@ {/if}
- -
+ +
{#each [['platform', 'Platform', platformHosts.length], ['byoc', 'BYOC', byocHosts.length]] as [id, label, count] (id)} {/each}
-
+
{#if loading} {@render skeletonRows()} {:else if error} @@ -251,16 +256,16 @@ {#if byocHosts.length === 0} {@render emptyState('byoc')} {:else} -
+
{#each byocGroups as group (group.teamId ?? '__none__')} {@const groupPageHosts = byocPageHosts.filter(h => h.team_id === group.teamId || (group.teamId === null && !h.team_id))} {#if groupPageHosts.length > 0}
-
+
{group.teamName} - + {group.hosts.length}
@@ -271,22 +276,22 @@ {#if byocPageCount > 1} -
+
- Page {byocPage + 1} of {byocPageCount} · {byocHosts.length} hosts + Page {byocPage + 1} of {byocPageCount} · {byocHosts.length} hosts
@@ -301,34 +306,34 @@
{#snippet skeletonRows()} -
+
- - - - - - + + + + + + {#each Array(5) as _, i} - - + - - - - @@ -342,26 +347,28 @@ {#if hosts.length === 0} {@render emptyState('platform')} {:else} -
+
HostStatus
HostStatus
+
+
+
- - - - - - + + + + + + - {#each hosts as host (host.id)} + {#each hosts as host, i (host.id)} - - - - -
HostStatus
HostStatus
-
{host.id}
+
+
{host.id}
{#if host.address}
{host.address}
{/if} @@ -371,31 +378,31 @@ {/if}
- + + {#if host.status === 'online'} - - - + + + {:else} - + {/if} {host.status} + @@ -409,18 +416,31 @@ {/snippet} {#snippet emptyState(type: 'platform' | 'byoc')} -
-
- +
+ +
+
+
+ +
-

- {type === 'platform' ? 'No platform hosts yet.' : 'No BYOC hosts across any team.'} +

+ {type === 'platform' ? 'No platform hosts yet' : 'No BYOC hosts across any team'}

-

+

{type === 'platform' ? 'Add a host to start scheduling capsules onto your own compute.' : 'Teams that register their own compute will appear here.'}

+ {#if type === 'platform'} + + {/if}
{/snippet} @@ -435,10 +455,14 @@ onkeydown={(e) => { if (e.key === 'Escape' && !creating) showCreate = false; }} >
-

+ +
+ +
+

Add Platform Host

@@ -491,7 +515,7 @@

+

{/if} @@ -510,11 +535,15 @@
+ +
+ +
-
+
-

+

Host registered

@@ -558,11 +587,12 @@

+
{/if} @@ -578,10 +608,14 @@ onkeydown={(e) => { if (e.key === 'Escape' && !deleting) deleteTarget = null; }} >
-

+ +
+ +
+

Delete Host

@@ -621,7 +655,7 @@

+

{/if} @@ -676,4 +711,54 @@ .checkmark-drawn { stroke-dashoffset: 0; } + + /* Stat pill — shared base */ + .stat-pill { + display: flex; + align-items: baseline; + gap: 6px; + border-radius: var(--radius-button); + border-width: 1px; + padding: 6px 12px; + transition: transform 0.15s ease, box-shadow 0.15s ease; + } + .stat-pill:hover { + transform: translateY(-1px); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); + } + + /* Table header */ + .table-header { + padding: 10px 20px; + text-align: left; + font-size: var(--text-label); + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.06em; + color: var(--color-text-tertiary); + } + + /* Staggered row entrance */ + .table-row-animate { + animation: fadeUp 0.25s ease both; + } + + /* Tab button */ + .tab-button { + position: relative; + padding: 14px 20px 14px 0; + font-size: var(--text-ui); + transition: color 0.15s ease; + cursor: pointer; + } + + /* Online host row — subtle left accent */ + .host-row-online { + box-shadow: inset 3px 0 0 var(--color-accent); + } + + /* Empty state icon float */ + .empty-icon-float { + animation: iconFloat 3s ease-in-out infinite; + } diff --git a/frontend/src/routes/admin/templates/+page.svelte b/frontend/src/routes/admin/templates/+page.svelte index 39b62da..a309658 100644 --- a/frontend/src/routes/admin/templates/+page.svelte +++ b/frontend/src/routes/admin/templates/+page.svelte @@ -56,7 +56,8 @@ memory_mb: 512, recipe: '', healthcheck: '', - skip_pre_post: false + skip_pre_post: false, + archive: null as File | null }); let creating = $state(false); let createError = $state(null); @@ -131,12 +132,13 @@ healthcheck: createForm.healthcheck.trim() || undefined, vcpus: createForm.vcpus, memory_mb: createForm.memory_mb, - skip_pre_post: createForm.skip_pre_post + skip_pre_post: createForm.skip_pre_post, + archive: createForm.archive || undefined }); if (result.ok) { showCreate = false; - createForm = { name: '', base_template: 'minimal', vcpus: 1, memory_mb: 512, recipe: 
'', healthcheck: '', skip_pre_post: false }; + createForm = { name: '', base_template: 'minimal', vcpus: 1, memory_mb: 512, recipe: '', healthcheck: '', skip_pre_post: false, archive: null }; builds = [result.data, ...builds]; activeTab = 'builds'; expandedBuildId = result.data.id; @@ -235,6 +237,8 @@ case 'RUN': return 'var(--color-blue)'; case 'START': return 'var(--color-accent-bright)'; case 'ENV': return 'var(--color-amber)'; + case 'USER': return 'var(--color-accent)'; + case 'COPY': return 'var(--color-text-bright)'; case 'WORKDIR': return 'var(--color-text-tertiary)'; default: return 'var(--color-text-muted)'; } @@ -266,47 +270,50 @@
-
-
+
+ +
+ +
-

+

Templates

-

+

Build and manage global templates available to all teams.

- + {#if !templatesLoading && !templatesError} -
-
- {templateCount} +
+
+ {templateCount} templates
-
- {baseCount} +
+ {baseCount} base
-
- {snapshotCount} +
+ {snapshotCount} snapshots
{#if runningBuilds > 0} -
- - - +
+ + + - {runningBuilds} + {runningBuilds} building
{/if} @@ -314,30 +321,32 @@ {/if}
- -
+ +
{#each [['templates', 'Templates', templateCount], ['builds', 'Builds', builds.length]] as [id, label, count] (id)} {/each}
-
+
{#if activeTab === 'templates'} {#if templatesLoading} {@render skeletonRows(5, ['Name', 'Type', 'Specs', 'Size', 'Created', ''])} @@ -370,20 +379,20 @@ {#snippet skeletonRows(count: number, headers: string[])} -
+
- + {#each headers as h} - + {/each} {#each Array(count) as _, i} - + {#each headers as _h, j} - {/each} @@ -395,81 +404,99 @@ {/snippet} {#snippet emptyState(type: 'templates' | 'builds')} -
-
- {#if type === 'templates'} - - {:else} - - {/if} +
+ +
+
+
+ {#if type === 'templates'} + + {:else} + + {/if} +
-

- {type === 'templates' ? 'No templates yet.' : 'No builds yet.'} +

+ {type === 'templates' ? 'No templates yet' : 'No builds yet'}

-

+

{type === 'templates' ? 'Create a template to provide pre-configured environments for all teams.' : 'Start a template build to see progress and logs here.'}

+ {#if type === 'templates'} + + {/if}
{/snippet} {#snippet templatesTable()} -
+
{h}{h}
+
- - - - - - - + + + + + + + - {#each templates as tmpl (tmpl.name)} - - + - - - - -
NameType
NameType
-
- {tmpl.name} + {#each templates as tmpl, i (tmpl.name)} +
+
+ {tmpl.name}
+ {#if tmpl.type === 'snapshot'} - + + snapshot {:else} - + + base {/if} + @@ -482,28 +509,30 @@ {/snippet} {#snippet buildsTable()} -
+
- - - - - - - - + + + + + + + + - {#each builds as build (build.id)} + {#each builds as build, i (build.id)} toggleBuildExpand(build.id)} > - - - - - - - @@ -565,7 +596,7 @@ {#if expandedBuildId === build.id}
BuildNameStatus
BuildNameStatus
-
+
+
{build.id}
- {build.name} + + {build.name} - + + {#if build.status === 'running'} - - - + + + {:else if build.status === 'success'} - + {:else if build.status === 'failed'} - + {:else} - + {/if} {build.status}
-
+
{#if build.status === 'pending' || build.status === 'running'}
+ + {:else} + tar, tar.gz, or zip + {/if} +
+
+
+
{/if} @@ -855,10 +929,14 @@ onkeydown={(e) => { if (e.key === 'Escape' && !deleting) deleteTarget = null; }} >
-

+ +
+ +
+

Delete Template

@@ -882,7 +960,7 @@

+

{/if} @@ -917,4 +996,59 @@ background-size: 200% 100%; animation: shimmer 1.4s ease infinite; } + + /* Stat pill — shared base */ + .stat-pill { + display: flex; + align-items: baseline; + gap: 6px; + border-radius: var(--radius-button); + border-width: 1px; + padding: 6px 12px; + transition: transform 0.15s ease, box-shadow 0.15s ease; + } + .stat-pill:hover { + transform: translateY(-1px); + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); + } + + /* Table header */ + .table-header { + padding: 10px 20px; + text-align: left; + font-size: var(--text-label); + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.06em; + color: var(--color-text-tertiary); + } + + /* Staggered row entrance */ + .table-row-animate { + animation: fadeUp 0.25s ease both; + } + + /* Tab button */ + .tab-button { + position: relative; + padding: 14px 20px 14px 0; + font-size: var(--text-ui); + transition: color 0.15s ease; + cursor: pointer; + } + + /* Active build row — subtle left accent */ + .build-row-active { + box-shadow: inset 3px 0 0 var(--color-blue); + } + + /* Progress bar glow for running builds */ + .progress-bar-glow { + box-shadow: 0 0 8px rgba(90, 159, 212, 0.4); + } + + /* Empty state icon float */ + .empty-icon-float { + animation: iconFloat 3s ease-in-out infinite; + } diff --git a/frontend/src/routes/dashboard/capsules/[id]/+page.svelte b/frontend/src/routes/dashboard/capsules/[id]/+page.svelte index 8b1aa67..a90d334 100644 --- a/frontend/src/routes/dashboard/capsules/[id]/+page.svelte +++ b/frontend/src/routes/dashboard/capsules/[id]/+page.svelte @@ -478,60 +478,8 @@ {:else if capsule}
- -
- {#if capsule.status === 'running'} - - {:else if capsule.status === 'paused'} - - - {/if} - - {#if capsule.status === 'running' || capsule.status === 'paused'} - - {/if} -
- - -
+ +
+ + +
+ {#if capsule.status === 'running'} + + {:else if capsule.status === 'paused'} + + + {/if} + + {#if capsule.status === 'running' || capsule.status === 'paused'} + + {/if} +
diff --git a/frontend/src/routes/dashboard/snapshots/+page.svelte b/frontend/src/routes/dashboard/templates/+page.svelte similarity index 84% rename from frontend/src/routes/dashboard/snapshots/+page.svelte rename to frontend/src/routes/dashboard/templates/+page.svelte index 2fe2b37..b852841 100644 --- a/frontend/src/routes/dashboard/snapshots/+page.svelte +++ b/frontend/src/routes/dashboard/templates/+page.svelte @@ -162,10 +162,10 @@ {/if} @@ -591,11 +591,15 @@ >
-

Launch Capsule

-

+ +

+ +
+

Launch Capsule

+

Configure resources and launch a new capsule from this snapshot.

@@ -612,12 +616,9 @@
{#if launchTarget.type === 'snapshot'} - + {:else} - + {/if} {launchTarget.name} @@ -694,7 +695,7 @@
+
{/if} @@ -715,17 +717,17 @@ .skeleton { background: linear-gradient( 90deg, - var(--color-bg-4) 0%, - var(--color-bg-5) 50%, - var(--color-bg-4) 100% + var(--color-bg-3) 25%, + var(--color-bg-4) 50%, + var(--color-bg-3) 75% ); background-size: 200% 100%; - animation: shimmer 1.6s ease-in-out infinite; + animation: shimmer 1.4s ease infinite; } @keyframes shimmer { - 0% { background-position: 200% center; } - 100% { background-position: -200% center; } + 0% { background-position: -200% 0; } + 100% { background-position: 200% 0; } } /* Left accent stripe — slides in on hover, color-keyed to snapshot type */ @@ -745,4 +747,9 @@ .snapshot-row.type-image:hover { background: rgba(90, 159, 212, 0.04); } + + /* Empty state icon float — matches admin pattern */ + .empty-icon-float { + animation: iconFloat 3s ease-in-out infinite; + } diff --git a/images/wrenn-init.sh b/images/wrenn-init.sh index d83be1c..8a9e22e 100644 --- a/images/wrenn-init.sh +++ b/images/wrenn-init.sh @@ -17,8 +17,9 @@ mkdir -p /sys/fs/cgroup mount -t cgroup2 cgroup2 /sys/fs/cgroup 2>/dev/null || true echo "+cpu +memory +io" > /sys/fs/cgroup/cgroup.subtree_control 2>/dev/null || true -# Set hostname +# Set hostname and make it resolvable (sudo requires this). hostname sandbox +echo "127.0.0.1 sandbox" >> /etc/hosts # Configure networking if the kernel ip= boot arg did not already set it up. if ! 
ip addr show eth0 2>/dev/null | grep -q "169.254.0.21"; then diff --git a/internal/api/handlers_builds.go b/internal/api/handlers_builds.go index bd3260e..b0b0e40 100644 --- a/internal/api/handlers_builds.go +++ b/internal/api/handlers_builds.go @@ -3,8 +3,10 @@ package api import ( "encoding/json" "fmt" + "io" "log/slog" "net/http" + "strings" "time" "connectrpc.com/connect" @@ -54,6 +56,8 @@ type buildResponse struct { Error *string `json:"error,omitempty"` SandboxID *string `json:"sandbox_id,omitempty"` HostID *string `json:"host_id,omitempty"` + DefaultUser string `json:"default_user"` + DefaultEnv json.RawMessage `json:"default_env"` CreatedAt string `json:"created_at"` StartedAt *string `json:"started_at,omitempty"` CompletedAt *string `json:"completed_at,omitempty"` @@ -71,6 +75,8 @@ func buildToResponse(b db.TemplateBuild) buildResponse { CurrentStep: b.CurrentStep, TotalSteps: b.TotalSteps, Logs: b.Logs, + DefaultUser: b.DefaultUser, + DefaultEnv: b.DefaultEnv, } if b.Healthcheck != "" { resp.Healthcheck = &b.Healthcheck @@ -101,11 +107,54 @@ func buildToResponse(b db.TemplateBuild) buildResponse { } // Create handles POST /v1/admin/builds. +// Accepts either JSON body or multipart/form-data with a "config" JSON part +// and an optional "archive" file part (tar/tar.gz/zip for COPY commands). func (h *buildHandler) Create(w http.ResponseWriter, r *http.Request) { var req createBuildRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") - return + var archive []byte + var archiveName string + + ct := r.Header.Get("Content-Type") + if strings.HasPrefix(ct, "multipart/") { + // 100 MB max for multipart (archive + JSON config). + if err := r.ParseMultipartForm(100 << 20); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "failed to parse multipart form") + return + } + + // Parse JSON config from "config" field. 
+ configStr := r.FormValue("config") + if configStr == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "multipart form requires a 'config' JSON field") + return + } + if err := json.Unmarshal([]byte(configStr), &req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid config JSON in multipart form") + return + } + + // Read optional archive file (max 100 MB). + file, header, err := r.FormFile("archive") + if err == nil { + defer file.Close() + const maxArchiveSize = 100 << 20 // 100 MB + lr := io.LimitReader(file, maxArchiveSize+1) + archive, err = io.ReadAll(lr) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "failed to read archive file") + return + } + if int64(len(archive)) > maxArchiveSize { + writeError(w, http.StatusRequestEntityTooLarge, "invalid_request", "archive exceeds 100 MB limit") + return + } + archiveName = header.Filename + } + } else { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid_request", "invalid JSON body") + return + } } if req.Name == "" { @@ -129,6 +178,8 @@ func (h *buildHandler) Create(w http.ResponseWriter, r *http.Request) { VCPUs: req.VCPUs, MemoryMB: req.MemoryMB, SkipPrePost: req.SkipPrePost, + Archive: archive, + ArchiveName: archiveName, }) if err != nil { slog.Error("failed to create build", "error", err) diff --git a/internal/api/handlers_snapshots.go b/internal/api/handlers_snapshots.go index 8855c29..df3c6ef 100644 --- a/internal/api/handlers_snapshots.go +++ b/internal/api/handlers_snapshots.go @@ -210,13 +210,15 @@ func (h *snapshotHandler) Create(w http.ResponseWriter, r *http.Request) { } tmpl, err := h.db.InsertTemplate(snapCtx, db.InsertTemplateParams{ - ID: newTemplateID, - Name: req.Name, - Type: "snapshot", - Vcpus: sb.Vcpus, - MemoryMb: sb.MemoryMb, - SizeBytes: resp.Msg.SizeBytes, - TeamID: ac.TeamID, + ID: newTemplateID, + Name: req.Name, + Type: "snapshot", + Vcpus: sb.Vcpus, + 
MemoryMb: sb.MemoryMb, + SizeBytes: resp.Msg.SizeBytes, + TeamID: ac.TeamID, + DefaultUser: "root", + DefaultEnv: []byte("{}"), }) if err != nil { slog.Error("failed to insert template record", "name", req.Name, "error", err) diff --git a/internal/db/models.go b/internal/db/models.go index 3b9cd9e..45c00da 100644 --- a/internal/db/models.go +++ b/internal/db/models.go @@ -152,14 +152,16 @@ type TeamApiKey struct { } type Template struct { - Name string `json:"name"` - Type string `json:"type"` - Vcpus int32 `json:"vcpus"` - MemoryMb int32 `json:"memory_mb"` - SizeBytes int64 `json:"size_bytes"` - CreatedAt pgtype.Timestamptz `json:"created_at"` - TeamID pgtype.UUID `json:"team_id"` - ID pgtype.UUID `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Vcpus int32 `json:"vcpus"` + MemoryMb int32 `json:"memory_mb"` + SizeBytes int64 `json:"size_bytes"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + TeamID pgtype.UUID `json:"team_id"` + ID pgtype.UUID `json:"id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` } type TemplateBuild struct { @@ -183,6 +185,8 @@ type TemplateBuild struct { TemplateID pgtype.UUID `json:"template_id"` TeamID pgtype.UUID `json:"team_id"` SkipPrePost bool `json:"skip_pre_post"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` } type User struct { diff --git a/internal/db/template_builds.sql.go b/internal/db/template_builds.sql.go index facfb19..ff15634 100644 --- a/internal/db/template_builds.sql.go +++ b/internal/db/template_builds.sql.go @@ -12,7 +12,7 @@ import ( ) const getTemplateBuild = `-- name: GetTemplateBuild :one -SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post FROM template_builds WHERE id = $1 +SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, 
current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env FROM template_builds WHERE id = $1 ` func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (TemplateBuild, error) { @@ -39,6 +39,8 @@ func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (Templat &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } @@ -46,7 +48,7 @@ func (q *Queries) GetTemplateBuild(ctx context.Context, id pgtype.UUID) (Templat const insertTemplateBuild = `-- name: InsertTemplateBuild :one INSERT INTO template_builds (id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, total_steps, template_id, team_id, skip_pre_post) VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending', $8, $9, $10, $11) -RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post +RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env ` type InsertTemplateBuildParams struct { @@ -99,12 +101,14 @@ func (q *Queries) InsertTemplateBuild(ctx context.Context, arg InsertTemplateBui &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const listTemplateBuilds = `-- name: ListTemplateBuilds :many -SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post FROM template_builds ORDER BY created_at DESC +SELECT id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, 
logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env FROM template_builds ORDER BY created_at DESC ` func (q *Queries) ListTemplateBuilds(ctx context.Context) ([]TemplateBuild, error) { @@ -137,6 +141,8 @@ func (q *Queries) ListTemplateBuilds(ctx context.Context) ([]TemplateBuild, erro &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } @@ -148,6 +154,23 @@ func (q *Queries) ListTemplateBuilds(ctx context.Context) ([]TemplateBuild, erro return items, nil } +const updateBuildDefaults = `-- name: UpdateBuildDefaults :exec +UPDATE template_builds +SET default_user = $2, default_env = $3 +WHERE id = $1 +` + +type UpdateBuildDefaultsParams struct { + ID pgtype.UUID `json:"id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte `json:"default_env"` +} + +func (q *Queries) UpdateBuildDefaults(ctx context.Context, arg UpdateBuildDefaultsParams) error { + _, err := q.db.Exec(ctx, updateBuildDefaults, arg.ID, arg.DefaultUser, arg.DefaultEnv) + return err +} + const updateBuildError = `-- name: UpdateBuildError :exec UPDATE template_builds SET error = $2, status = 'failed', completed_at = NOW() @@ -204,7 +227,7 @@ SET status = $2, started_at = CASE WHEN $2 = 'running' AND started_at IS NULL THEN NOW() ELSE started_at END, completed_at = CASE WHEN $2 IN ('success', 'failed', 'cancelled') THEN NOW() ELSE completed_at END WHERE id = $1 -RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post +RETURNING id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, current_step, total_steps, logs, error, sandbox_id, host_id, created_at, started_at, completed_at, template_id, team_id, skip_pre_post, default_user, default_env ` type UpdateBuildStatusParams struct { 
@@ -236,6 +259,8 @@ func (q *Queries) UpdateBuildStatus(ctx context.Context, arg UpdateBuildStatusPa &i.TemplateID, &i.TeamID, &i.SkipPrePost, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } diff --git a/internal/db/templates.sql.go b/internal/db/templates.sql.go index 7d37808..1606d6f 100644 --- a/internal/db/templates.sql.go +++ b/internal/db/templates.sql.go @@ -45,7 +45,7 @@ func (q *Queries) DeleteTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) } const getPlatformTemplateByName = `-- name: GetPlatformTemplateByName :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = '00000000-0000-0000-0000-000000000000' AND name = $1 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE team_id = '00000000-0000-0000-0000-000000000000' AND name = $1 ` // Check if a global (platform) template exists with the given name. @@ -61,12 +61,14 @@ func (q *Queries) GetPlatformTemplateByName(ctx context.Context, name string) (T &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const getTemplate = `-- name: GetTemplate :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE id = $1 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE id = $1 ` func (q *Queries) GetTemplate(ctx context.Context, id pgtype.UUID) (Template, error) { @@ -81,12 +83,14 @@ func (q *Queries) GetTemplate(ctx context.Context, id pgtype.UUID) (Template, er &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const getTemplateByName = `-- name: GetTemplateByName :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = $1 AND name = $2 +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates 
WHERE team_id = $1 AND name = $2 ` type GetTemplateByNameParams struct { @@ -107,12 +111,14 @@ func (q *Queries) GetTemplateByName(ctx context.Context, arg GetTemplateByNamePa &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const getTemplateByTeam = `-- name: GetTemplateByTeam :one -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE name = $1 AND (team_id = $2 OR team_id = '00000000-0000-0000-0000-000000000000') +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE name = $1 AND (team_id = $2 OR team_id = '00000000-0000-0000-0000-000000000000') ` type GetTemplateByTeamParams struct { @@ -133,24 +139,28 @@ func (q *Queries) GetTemplateByTeam(ctx context.Context, arg GetTemplateByTeamPa &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const insertTemplate = `-- name: InsertTemplate :one -INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) -VALUES ($1, $2, $3, $4, $5, $6, $7) -RETURNING name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id, default_user, default_env) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) +RETURNING name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env ` type InsertTemplateParams struct { - ID pgtype.UUID `json:"id"` - Name string `json:"name"` - Type string `json:"type"` - Vcpus int32 `json:"vcpus"` - MemoryMb int32 `json:"memory_mb"` - SizeBytes int64 `json:"size_bytes"` - TeamID pgtype.UUID `json:"team_id"` + ID pgtype.UUID `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Vcpus int32 `json:"vcpus"` + MemoryMb int32 `json:"memory_mb"` + SizeBytes int64 `json:"size_bytes"` + TeamID pgtype.UUID `json:"team_id"` + DefaultUser string `json:"default_user"` + DefaultEnv []byte 
`json:"default_env"` } func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) (Template, error) { @@ -162,6 +172,8 @@ func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) arg.MemoryMb, arg.SizeBytes, arg.TeamID, + arg.DefaultUser, + arg.DefaultEnv, ) var i Template err := row.Scan( @@ -173,12 +185,14 @@ func (q *Queries) InsertTemplate(ctx context.Context, arg InsertTemplateParams) &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ) return i, err } const listTemplates = `-- name: ListTemplates :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates ORDER BY created_at DESC ` func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { @@ -199,6 +213,8 @@ func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } @@ -211,7 +227,7 @@ func (q *Queries) ListTemplates(ctx context.Context) ([]Template, error) { } const listTemplatesByTeam = `-- name: ListTemplatesByTeam :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') ORDER BY created_at DESC ` // Platform templates are visible to all teams. 
@@ -233,6 +249,8 @@ func (q *Queries) ListTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) ( &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } @@ -245,7 +263,7 @@ func (q *Queries) ListTemplatesByTeam(ctx context.Context, teamID pgtype.UUID) ( } const listTemplatesByTeamAndType = `-- name: ListTemplatesByTeamAndType :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') AND type = $2 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') AND type = $2 ORDER BY created_at DESC ` type ListTemplatesByTeamAndTypeParams struct { @@ -272,6 +290,8 @@ func (q *Queries) ListTemplatesByTeamAndType(ctx context.Context, arg ListTempla &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } @@ -284,7 +304,7 @@ func (q *Queries) ListTemplatesByTeamAndType(ctx context.Context, arg ListTempla } const listTemplatesByTeamOnly = `-- name: ListTemplatesByTeamOnly :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE team_id = $1 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE team_id = $1 ORDER BY created_at DESC ` // List templates owned by a specific team (NOT including platform templates). 
@@ -306,6 +326,8 @@ func (q *Queries) ListTemplatesByTeamOnly(ctx context.Context, teamID pgtype.UUI &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } @@ -318,7 +340,7 @@ func (q *Queries) ListTemplatesByTeamOnly(ctx context.Context, teamID pgtype.UUI } const listTemplatesByType = `-- name: ListTemplatesByType :many -SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id FROM templates WHERE type = $1 ORDER BY created_at DESC +SELECT name, type, vcpus, memory_mb, size_bytes, created_at, team_id, id, default_user, default_env FROM templates WHERE type = $1 ORDER BY created_at DESC ` func (q *Queries) ListTemplatesByType(ctx context.Context, type_ string) ([]Template, error) { @@ -339,6 +361,8 @@ func (q *Queries) ListTemplatesByType(ctx context.Context, type_ string) ([]Temp &i.CreatedAt, &i.TeamID, &i.ID, + &i.DefaultUser, + &i.DefaultEnv, ); err != nil { return nil, err } diff --git a/internal/envdclient/client.go b/internal/envdclient/client.go index 0567cf1..278050e 100644 --- a/internal/envdclient/client.go +++ b/internal/envdclient/client.go @@ -3,6 +3,7 @@ package envdclient import ( "bytes" "context" + "encoding/json" "fmt" "io" "log/slog" @@ -273,10 +274,36 @@ func (c *Client) ReadFile(ctx context.Context, path string) ([]byte, error) { // env vars and the corresponding files under /run/wrenn/ inside the guest. // Must be called after snapshot restore so envd picks up the new sandbox's metadata. func (c *Client) PostInit(ctx context.Context) error { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.base+"/init", nil) + return c.PostInitWithDefaults(ctx, "", nil) +} + +// PostInitWithDefaults calls envd's POST /init endpoint with optional default +// user and environment variables. These are applied to envd's defaults so all +// subsequent process executions use them. 
+func (c *Client) PostInitWithDefaults(ctx context.Context, defaultUser string, envVars map[string]string) error { + var body io.Reader + if defaultUser != "" || len(envVars) > 0 { + payload := make(map[string]any) + if defaultUser != "" { + payload["defaultUser"] = defaultUser + } + if len(envVars) > 0 { + payload["envVars"] = envVars + } + data, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("marshal init body: %w", err) + } + body = bytes.NewReader(data) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.base+"/init", body) if err != nil { return fmt.Errorf("create request: %w", err) } + if body != nil { + req.Header.Set("Content-Type", "application/json") + } resp, err := c.httpClient.Do(req) if err != nil { @@ -285,8 +312,8 @@ func (c *Client) PostInit(ctx context.Context) error { defer resp.Body.Close() if resp.StatusCode != http.StatusNoContent { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("post init: status %d: %s", resp.StatusCode, string(body)) + respBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("post init: status %d: %s", resp.StatusCode, string(respBody)) } return nil diff --git a/internal/hostagent/server.go b/internal/hostagent/server.go index fad0d40..7c1315e 100644 --- a/internal/hostagent/server.go +++ b/internal/hostagent/server.go @@ -69,6 +69,13 @@ func (s *Server) CreateSandbox( return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("create sandbox: %w", err)) } + // Apply template defaults (user, env vars) if provided. 
+ if msg.DefaultUser != "" || len(msg.DefaultEnv) > 0 { + if err := s.mgr.SetDefaults(ctx, sb.ID, msg.DefaultUser, msg.DefaultEnv); err != nil { + slog.Warn("failed to set sandbox defaults", "sandbox", sb.ID, "error", err) + } + } + return connect.NewResponse(&pb.CreateSandboxResponse{ SandboxId: sb.ID, Status: string(sb.Status), @@ -100,10 +107,19 @@ func (s *Server) ResumeSandbox( ctx context.Context, req *connect.Request[pb.ResumeSandboxRequest], ) (*connect.Response[pb.ResumeSandboxResponse], error) { - sb, err := s.mgr.Resume(ctx, req.Msg.SandboxId, int(req.Msg.TimeoutSec)) + msg := req.Msg + sb, err := s.mgr.Resume(ctx, msg.SandboxId, int(msg.TimeoutSec)) if err != nil { return nil, connect.NewError(connect.CodeInternal, err) } + + // Apply template defaults (user, env vars) if provided. + if msg.DefaultUser != "" || len(msg.DefaultEnv) > 0 { + if err := s.mgr.SetDefaults(ctx, sb.ID, msg.DefaultUser, msg.DefaultEnv); err != nil { + slog.Warn("failed to set sandbox defaults on resume", "sandbox", sb.ID, "error", err) + } + } + return connect.NewResponse(&pb.ResumeSandboxResponse{ SandboxId: sb.ID, Status: string(sb.Status), diff --git a/internal/recipe/context.go b/internal/recipe/context.go index 71cc0bc..820e717 100644 --- a/internal/recipe/context.go +++ b/internal/recipe/context.go @@ -7,10 +7,11 @@ import ( ) // ExecContext holds mutable state that persists across recipe steps. -// It is initialized empty and updated by ENV and WORKDIR steps. +// It is initialized empty and updated by ENV, WORKDIR, and USER steps. type ExecContext struct { WorkDir string EnvVars map[string]string + User string // Current unix user for command execution. 
} // This regex matches: @@ -25,7 +26,20 @@ var envRegex = regexp.MustCompile(`\$\$|\$\{([a-zA-Z0-9_]*)\}|\$([a-zA-Z0-9_]+)` // If WORKDIR and/or ENV are set, they are prepended as a shell preamble: // // cd '/the/dir' && KEY='val' /bin/sh -c 'original command' +// +// If USER is set to a non-root user, the entire command is wrapped with su: +// +// su <user> -s /bin/sh -c '<inner command>' func (c *ExecContext) WrappedCommand(cmd string) string { + inner := c.innerCommand(cmd) + if c.User != "" && c.User != "root" { + return "su " + shellescape(c.User) + " -s /bin/sh -c " + shellescape(inner) + } + return inner +} + +// innerCommand builds the command with workdir/env preamble but without user wrapping. +func (c *ExecContext) innerCommand(cmd string) string { prefix := c.shellPrefix() if prefix == "" { return cmd @@ -42,7 +56,11 @@ func (c *ExecContext) WrappedCommand(cmd string) string { // simultaneously before a healthcheck is evaluated. func (c *ExecContext) StartCommand(cmd string) string { prefix := c.shellPrefix() - return prefix + "nohup /bin/sh -c " + shellescape(cmd) + " >/dev/null 2>&1 &" + inner := prefix + "nohup /bin/sh -c " + shellescape(cmd) + " >/dev/null 2>&1 &" + if c.User != "" && c.User != "root" { + return "su " + shellescape(c.User) + " -s /bin/sh -c " + shellescape(inner) + } + return inner + } // shellPrefix builds the "cd ... && KEY=val " preamble for a shell command. diff --git a/internal/recipe/executor.go b/internal/recipe/executor.go index 53aaeeb..38a8b12 100644 --- a/internal/recipe/executor.go +++ b/internal/recipe/executor.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log/slog" + "path" "strings" "time" @@ -16,6 +17,10 @@ import ( // explicit --timeout flag. const DefaultStepTimeout = 30 * time.Second +// BuildFilesDir is the directory inside the sandbox where uploaded build +// archives are extracted. COPY instructions reference paths relative to this. 
+const BuildFilesDir = "/tmp/build-files" + // BuildLogEntry is the per-step record stored in template_builds.logs (JSONB). type BuildLogEntry struct { Step int `json:"step"` @@ -32,13 +37,18 @@ type BuildLogEntry struct { // the method on the hostagent Connect RPC client. type ExecFunc func(ctx context.Context, req *connect.Request[pb.ExecRequest]) (*connect.Response[pb.ExecResponse], error) +// ProgressFunc is called after each step with the current step counter and +// accumulated log entries. Used for per-step DB progress updates. +type ProgressFunc func(step int, entries []BuildLogEntry) + // Execute runs steps sequentially against sandboxID using execFn. // // - phase labels the log entries (e.g., "pre-build", "recipe", "post-build"). // - startStep is the 1-based offset so entries are globally numbered across phases. // - defaultTimeout applies to RUN steps with no per-step --timeout; 0 → 10 minutes. -// - bctx is mutated in place as ENV/WORKDIR steps execute, and carries forward +// - bctx is mutated in place as ENV/WORKDIR/USER steps execute, and carries forward // into subsequent phases when the caller passes the same pointer. +// - onProgress is called after each step for live progress updates (may be nil). // // Returns all log entries appended during this call, the next step counter // value, and whether all steps succeeded. On false the last entry contains @@ -53,6 +63,7 @@ func Execute( defaultTimeout time.Duration, bctx *ExecContext, execFn ExecFunc, + onProgress ProgressFunc, ) (entries []BuildLogEntry, nextStep int, ok bool) { if defaultTimeout <= 0 { defaultTimeout = 10 * time.Minute @@ -72,19 +83,30 @@ func Execute( entries = append(entries, BuildLogEntry{Step: step, Phase: phase, Cmd: st.Raw, Ok: true}) case KindWORKDIR: + // Create the directory if it doesn't exist. 
+ mkdirEntry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 10*time.Second, execFn, + "mkdir -p "+shellescape(st.Path)) + if !mkdirEntry.Ok { + entries = append(entries, mkdirEntry) + return entries, step, false + } bctx.WorkDir = st.Path - entries = append(entries, BuildLogEntry{Step: step, Phase: phase, Cmd: st.Raw, Ok: true}) + mkdirEntry.Ok = true + entries = append(entries, mkdirEntry) - case KindUSER, KindCOPY: - verb := strings.ToUpper(strings.Fields(st.Raw)[0]) - entries = append(entries, BuildLogEntry{ - Step: step, - Phase: phase, - Cmd: st.Raw, - Stderr: verb + " is not yet supported", - Ok: false, - }) - return entries, step, false + case KindUSER: + entry, succeeded := execUser(ctx, st, sandboxID, phase, step, bctx, execFn) + entries = append(entries, entry) + if !succeeded { + return entries, step, false + } + + case KindCOPY: + entry, succeeded := execCopy(ctx, st, sandboxID, phase, step, bctx, execFn) + entries = append(entries, entry) + if !succeeded { + return entries, step, false + } case KindSTART: entry, succeeded := execStart(ctx, st, sandboxID, phase, step, bctx, execFn) @@ -104,6 +126,10 @@ func Execute( return entries, step, false } } + + if onProgress != nil { + onProgress(step, entries) + } } return entries, step, true } @@ -145,6 +171,114 @@ func execRun( return entry, entry.Ok } +// execUser creates a unix user (if not exists), grants passwordless sudo, +// and updates bctx.User for subsequent steps. +func execUser( + ctx context.Context, + st Step, + sandboxID, phase string, + step int, + bctx *ExecContext, + execFn ExecFunc, +) (BuildLogEntry, bool) { + username := st.Key + // Create user if not exists, with home directory and bash shell. + // Grant passwordless sudo access (E2B convention). + // Uses printf %s to avoid shell injection in the sudoers line. 
+ script := fmt.Sprintf( + "id %s >/dev/null 2>&1 || (adduser --disabled-password --gecos '' --shell /bin/bash %s && printf '%%s ALL=(ALL) NOPASSWD:ALL\\n' %s >> /etc/sudoers)", + shellescape(username), shellescape(username), shellescape(username), + ) + + entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 30*time.Second, execFn, script) + if entry.Ok { + bctx.User = username + } + return entry, entry.Ok +} + +// execCopy copies a file or directory from the build archive (extracted at +// BuildFilesDir) to the destination path inside the sandbox. Ownership is +// set to the current user from bctx. +func execCopy( + ctx context.Context, + st Step, + sandboxID, phase string, + step int, + bctx *ExecContext, + execFn ExecFunc, +) (BuildLogEntry, bool) { + // Validate all source paths: must be relative and not escape the archive directory. + var srcPaths []string + for _, s := range st.Srcs { + cleaned := path.Clean(s) + if strings.HasPrefix(cleaned, "..") || strings.HasPrefix(cleaned, "/") { + return BuildLogEntry{ + Step: step, + Phase: phase, + Cmd: st.Raw, + Stderr: fmt.Sprintf("COPY source must be a relative path within the archive: %q", s), + }, false + } + srcPaths = append(srcPaths, shellescape(BuildFilesDir+"/"+cleaned)) + } + + dst := st.Dst + // Resolve relative destination against the current WORKDIR. + if dst != "" && dst[0] != '/' && bctx.WorkDir != "" { + dst = bctx.WorkDir + "/" + dst + } + owner := "root" + if bctx.User != "" { + owner = bctx.User + } + script := fmt.Sprintf( + "cp -r %s %s && chown -R %s:%s %s", + strings.Join(srcPaths, " "), shellescape(dst), shellescape(owner), shellescape(owner), shellescape(dst), + ) + + entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 60*time.Second, execFn, script) + return entry, entry.Ok +} + +// execRawShell runs a shell command directly (as root) without ExecContext +// wrapping. Used for internal operations like user creation and file copy. 
+func execRawShell( + ctx context.Context, + raw, sandboxID, phase string, + step int, + timeout time.Duration, + execFn ExecFunc, + shellCmd string, +) BuildLogEntry { + execCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + start := time.Now() + resp, err := execFn(execCtx, connect.NewRequest(&pb.ExecRequest{ + SandboxId: sandboxID, + Cmd: "/bin/sh", + Args: []string{"-c", shellCmd}, + TimeoutSec: int32(timeout.Seconds()), + })) + + entry := BuildLogEntry{ + Step: step, + Phase: phase, + Cmd: raw, + Elapsed: time.Since(start).Milliseconds(), + } + if err != nil { + entry.Stderr = fmt.Sprintf("exec error: %v", err) + return entry + } + entry.Stdout = string(resp.Msg.Stdout) + entry.Stderr = string(resp.Msg.Stderr) + entry.Exit = resp.Msg.ExitCode + entry.Ok = resp.Msg.ExitCode == 0 + return entry +} + func execStart( ctx context.Context, st Step, diff --git a/internal/recipe/step.go b/internal/recipe/step.go index 7d51036..07e167e 100644 --- a/internal/recipe/step.go +++ b/internal/recipe/step.go @@ -24,9 +24,11 @@ type Step struct { Raw string // original string, preserved for logging Shell string // KindRUN, KindSTART: the shell command text Timeout time.Duration // KindRUN: 0 means use caller's default - Key string // KindENV: variable name + Key string // KindENV: variable name; KindUSER: username Value string // KindENV: variable value Path string // KindWORKDIR: directory path + Srcs []string // KindCOPY: source paths (relative to build archive) + Dst string // KindCOPY: destination path inside sandbox } // ParseStep parses a single recipe instruction string into a Step. 
@@ -61,9 +63,9 @@ func ParseStep(s string) (Step, error) { case "WORKDIR": return parseWORKDIR(s, rest) case "USER": - return Step{Kind: KindUSER, Raw: s}, nil + return parseUSER(s, rest) case "COPY": - return Step{Kind: KindCOPY, Raw: s}, nil + return parseCOPY(s, rest) default: return Step{}, fmt.Errorf("unknown instruction %q (expected RUN, START, ENV, WORKDIR, USER, or COPY)", keyword) } @@ -127,3 +129,33 @@ func parseWORKDIR(raw, path string) (Step, error) { } return Step{Kind: KindWORKDIR, Raw: raw, Path: path}, nil } + +func parseUSER(raw, username string) (Step, error) { + if username == "" { + return Step{}, fmt.Errorf("USER requires a username: %q", raw) + } + // Validate: alphanumeric, hyphens, underscores only; must start with a letter or underscore. + for i, c := range username { + if i == 0 && !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_') { + return Step{}, fmt.Errorf("USER username must start with a letter or underscore: %q", raw) + } + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-') { + return Step{}, fmt.Errorf("USER username contains invalid character %q: %q", string(c), raw) + } + } + return Step{Kind: KindUSER, Raw: raw, Key: username}, nil +} + +func parseCOPY(raw, rest string) (Step, error) { + if rest == "" { + return Step{}, fmt.Errorf("COPY requires <src>... <dst>: %q", raw) + } + parts := strings.Fields(rest) + if len(parts) < 2 { + return Step{}, fmt.Errorf("COPY requires <src>... <dst>: %q", raw) + } + // Last argument is the destination, everything before is sources. 
+ dst := parts[len(parts)-1] + srcs := parts[:len(parts)-1] + return Step{Kind: KindCOPY, Raw: raw, Srcs: srcs, Dst: dst}, nil +} diff --git a/internal/recipe/step_test.go b/internal/recipe/step_test.go index 2370bb2..2d0c9e2 100644 --- a/internal/recipe/step_test.go +++ b/internal/recipe/step_test.go @@ -1,6 +1,7 @@ package recipe import ( + "reflect" "testing" "time" ) @@ -111,16 +112,42 @@ func TestParseStep(t *testing.T) { input: "WORKDIR", wantErr: true, }, - // USER and COPY stubs + // USER { - name: "USER stub", + name: "USER basic", input: "USER www-data", - want: Step{Kind: KindUSER, Raw: "USER www-data"}, + want: Step{Kind: KindUSER, Raw: "USER www-data", Key: "www-data"}, }, { - name: "COPY stub", + name: "USER empty", + input: "USER", + wantErr: true, + }, + { + name: "USER invalid chars", + input: "USER bad user", + wantErr: true, + }, + // COPY + { + name: "COPY basic", input: "COPY config.yaml /etc/app/config.yaml", - want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml"}, + want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml", Srcs: []string{"config.yaml"}, Dst: "/etc/app/config.yaml"}, + }, + { + name: "COPY multiple sources", + input: "COPY a.txt b.txt /dest/", + want: Step{Kind: KindCOPY, Raw: "COPY a.txt b.txt /dest/", Srcs: []string{"a.txt", "b.txt"}, Dst: "/dest/"}, + }, + { + name: "COPY missing dst", + input: "COPY config.yaml", + wantErr: true, + }, + { + name: "COPY empty", + input: "COPY", + wantErr: true, }, // Unknown keyword { @@ -148,7 +175,7 @@ func TestParseStep(t *testing.T) { if err != nil { t.Fatalf("ParseStep(%q) unexpected error: %v", tc.input, err) } - if got != tc.want { + if !reflect.DeepEqual(got, tc.want) { t.Errorf("ParseStep(%q)\n got %+v\n want %+v", tc.input, got, tc.want) } }) diff --git a/internal/sandbox/images.go b/internal/sandbox/images.go index ecee469..eabb2e3 100644 --- a/internal/sandbox/images.go +++ b/internal/sandbox/images.go @@ -6,6 +6,8 @@ import ( "os" "os/exec" 
"path/filepath" + "strconv" + "strings" "git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/layout" @@ -66,6 +68,42 @@ func EnsureImageSizes(wrennDir string, targetMB int) error { return nil } +// ParseSizeToMB parses a human-readable size string into megabytes. +// Supported suffixes: G, Gi (gibibytes), M, Mi (mebibytes). +// Examples: "5G" → 5120, "2Gi" → 2048, "1000M" → 1000, "512Mi" → 512. +func ParseSizeToMB(s string) (int, error) { + s = strings.TrimSpace(s) + if s == "" { + return 0, fmt.Errorf("empty size string") + } + + // Find where the numeric part ends. + i := 0 + for i < len(s) && (s[i] == '.' || (s[i] >= '0' && s[i] <= '9')) { + i++ + } + if i == 0 { + return 0, fmt.Errorf("invalid size %q: no numeric value", s) + } + + numStr := s[:i] + suffix := strings.TrimSpace(s[i:]) + + num, err := strconv.ParseFloat(numStr, 64) + if err != nil { + return 0, fmt.Errorf("invalid size %q: %w", s, err) + } + + switch suffix { + case "G", "Gi": + return int(num * 1024), nil + case "M", "Mi", "": + return int(num), nil + default: + return 0, fmt.Errorf("invalid size %q: unknown suffix %q (use G, Gi, M, or Mi)", s, suffix) + } +} + // expandImage expands a single rootfs image if it is smaller than targetBytes. func expandImage(rootfs string, targetBytes int64, targetMB int) error { info, err := os.Stat(rootfs) diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index 7647730..b792406 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -28,8 +28,9 @@ import ( // Config holds the paths and defaults for the sandbox manager. type Config struct { - WrennDir string // root directory (e.g. /var/lib/wrenn); all sub-paths derived via layout package - EnvdTimeout time.Duration + WrennDir string // root directory (e.g. 
/var/lib/wrenn); all sub-paths derived via layout package + EnvdTimeout time.Duration + DefaultRootfsSizeMB int // target size for template rootfs images; 0 → DefaultDiskSizeMB } // Manager orchestrates sandbox lifecycle: VM, network, filesystem, envd. @@ -924,8 +925,8 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t // Clean up dm device and loop device now that flatten is complete. m.cleanupDM(sb) - // Shrink the flattened image to its minimum size so stored templates are - // compact. EnsureImageSizes will re-expand them on the next agent startup. + // Shrink the flattened image to its minimum size, then re-expand to the + // configured default rootfs size so sandboxes see the full disk from boot. if out, err := exec.Command("e2fsck", "-fy", outputPath).CombinedOutput(); err != nil { if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() > 1 { slog.Warn("e2fsck before shrink failed (non-fatal)", "output", string(out), "error", err) @@ -935,6 +936,15 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t slog.Warn("resize2fs -M failed (non-fatal)", "output", string(out), "error", err) } + // Re-expand to default rootfs size. + targetMB := m.cfg.DefaultRootfsSizeMB + if targetMB <= 0 { + targetMB = DefaultDiskSizeMB + } + if err := expandImage(outputPath, int64(targetMB)*1024*1024, targetMB); err != nil { + slog.Warn("failed to expand template to default size (non-fatal)", "error", err) + } + sizeBytes, err := snapshot.DirSize(flattenDstDir, "") if err != nil { slog.Warn("failed to calculate template size", "error", err) @@ -1223,6 +1233,23 @@ func (m *Manager) GetClient(sandboxID string) (*envdclient.Client, error) { return sb.client, nil } +// SetDefaults calls envd's PostInit to configure the default user and +// environment variables for a running sandbox. This is called by the host +// agent after sandbox creation or resume when the template specifies defaults. 
+func (m *Manager) SetDefaults(ctx context.Context, sandboxID, defaultUser string, defaultEnv map[string]string) error { + if defaultUser == "" && len(defaultEnv) == 0 { + return nil + } + sb, err := m.get(sandboxID) + if err != nil { + return err + } + if sb.Status != models.StatusRunning { + return fmt.Errorf("sandbox %s is not running (status: %s)", sandboxID, sb.Status) + } + return sb.client.PostInitWithDefaults(ctx, defaultUser, defaultEnv) +} + // PtyAttach starts a new PTY process or reconnects to an existing one. // If cmd is non-empty, starts a new process. If empty, reconnects using tag. func (m *Manager) PtyAttach(ctx context.Context, sandboxID, tag, cmd string, args []string, cols, rows uint32, envs map[string]string, cwd string) (<-chan envdclient.PtyEvent, error) { diff --git a/internal/service/build.go b/internal/service/build.go index b45ba1d..97a7523 100644 --- a/internal/service/build.go +++ b/internal/service/build.go @@ -27,8 +27,11 @@ const ( ) // preBuildCmds run before the user recipe to prepare the build environment. +// apt update runs as root first, then USER switches to wrenn-user for the recipe. var preBuildCmds = []string{ "RUN apt update", + "USER wrenn-user", + "WORKDIR /home/wrenn-user", } // postBuildCmds run after the user recipe to clean up caches and reduce image size. @@ -36,6 +39,7 @@ var postBuildCmds = []string{ "RUN apt clean", "RUN apt autoremove -y", "RUN rm -rf /var/lib/apt/lists/*", + "RUN rm -rf /tmp/build-files /tmp/build-files.*", } // buildAgentClient is the subset of the host agent client used by the build worker. 
@@ -43,6 +47,7 @@ type buildAgentClient interface { CreateSandbox(ctx context.Context, req *connect.Request[pb.CreateSandboxRequest]) (*connect.Response[pb.CreateSandboxResponse], error) DestroySandbox(ctx context.Context, req *connect.Request[pb.DestroySandboxRequest]) (*connect.Response[pb.DestroySandboxResponse], error) Exec(ctx context.Context, req *connect.Request[pb.ExecRequest]) (*connect.Response[pb.ExecResponse], error) + WriteFile(ctx context.Context, req *connect.Request[pb.WriteFileRequest]) (*connect.Response[pb.WriteFileResponse], error) CreateSnapshot(ctx context.Context, req *connect.Request[pb.CreateSnapshotRequest]) (*connect.Response[pb.CreateSnapshotResponse], error) FlattenRootfs(ctx context.Context, req *connect.Request[pb.FlattenRootfsRequest]) (*connect.Response[pb.FlattenRootfsResponse], error) } @@ -56,6 +61,7 @@ type BuildService struct { mu sync.Mutex cancelMap map[string]context.CancelFunc // buildID → per-build cancel func + filesMap map[string][]byte // buildID → uploaded archive bytes } // BuildCreateParams holds the parameters for creating a template build. @@ -67,6 +73,27 @@ type BuildCreateParams struct { VCPUs int32 MemoryMB int32 SkipPrePost bool + Archive []byte // Optional tar/tar.gz/zip archive for COPY commands. + ArchiveName string // Original filename (used to detect format). +} + +// storeArchive stores uploaded archive bytes keyed by build ID for the worker. +func (s *BuildService) storeArchive(buildID string, data []byte) { + s.mu.Lock() + defer s.mu.Unlock() + if s.filesMap == nil { + s.filesMap = make(map[string][]byte) + } + s.filesMap[buildID] = data +} + +// takeArchive retrieves and removes stored archive bytes for a build. +func (s *BuildService) takeArchive(buildID string) []byte { + s.mu.Lock() + defer s.mu.Unlock() + data := s.filesMap[buildID] + delete(s.filesMap, buildID) + return data } // Create inserts a new build record and enqueues it to Redis. 
@@ -117,6 +144,11 @@ func (s *BuildService) Create(ctx context.Context, p BuildCreateParams) (db.Temp return db.TemplateBuild{}, fmt.Errorf("enqueue build: %w", err) } + // Store archive for the worker if provided. + if len(p.Archive) > 0 { + s.storeArchive(buildIDStr, p.Archive) + } + return build, nil } @@ -303,6 +335,16 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { HostID: host.ID, }) + // Upload and extract build archive if provided. + archive := s.takeArchive(buildIDStr) + if len(archive) > 0 { + if err := s.uploadAndExtractArchive(buildCtx, agent, sandboxIDStr, archive, buildIDStr); err != nil { + s.destroySandbox(buildCtx, agent, sandboxIDStr) + s.failBuild(buildCtx, buildID, fmt.Sprintf("archive upload failed: %v", err)) + return + } + } + // Parse recipe steps. preBuildCmds and postBuildCmds are hardcoded and always // valid; panic on error is appropriate here since it would be a programmer mistake. preBuildSteps, err := recipe.ParseRecipe(preBuildCmds) @@ -331,10 +373,18 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { "HOME": "/root", } } - bctx := &recipe.ExecContext{EnvVars: envVars} + bctx := &recipe.ExecContext{EnvVars: envVars, User: "root"} + + // Per-step progress callback for live UI updates. + progressFn := func(currentStep int, allEntries []recipe.BuildLogEntry) { + s.updateLogs(buildCtx, buildID, currentStep, allEntries) + } runPhase := func(phase string, steps []recipe.Step, defaultTimeout time.Duration) bool { - newEntries, nextStep, ok := recipe.Execute(buildCtx, phase, steps, sandboxIDStr, step, defaultTimeout, bctx, agent.Exec) + newEntries, nextStep, ok := recipe.Execute(buildCtx, phase, steps, sandboxIDStr, step, defaultTimeout, bctx, agent.Exec, func(currentStep int, phaseEntries []recipe.BuildLogEntry) { + // Progress callback: combine prior logs with current phase entries. + progressFn(currentStep, append(logs, phaseEntries...)) + }) logs = append(logs, newEntries...) 
step = nextStep s.updateLogs(buildCtx, buildID, step, logs) @@ -344,24 +394,40 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { if buildCtx.Err() != nil { return false } - last := newEntries[len(newEntries)-1] - reason := last.Stderr - if reason == "" { - reason = fmt.Sprintf("exit code %d", last.Exit) + reason := "unknown error" + if len(newEntries) > 0 { + last := newEntries[len(newEntries)-1] + reason = last.Stderr + if reason == "" { + reason = fmt.Sprintf("exit code %d", last.Exit) + } } s.failBuild(buildCtx, buildID, fmt.Sprintf("%s step %d failed: %s", phase, step, reason)) } return ok } + // Phase 1: Pre-build (as root) — creates wrenn-user, updates apt. if !build.SkipPrePost { if !runPhase("pre-build", preBuildSteps, 0) { return } } + + // Phase 2: User recipe — starts as wrenn-user (set by USER in pre-build) + // or root if skip_pre_post. if !runPhase("recipe", userRecipeSteps, buildCommandTimeout) { return } + + // Capture the final user and env vars as template defaults. + // Filter out user-specific and runtime vars that should be resolved at + // sandbox creation time, not baked in from the build environment. + templateDefaultUser := bctx.User + templateDefaultEnv := filterBuildEnv(bctx.EnvVars) + + // Phase 3: Post-build (as root) — cleanup. + bctx.User = "root" if !build.SkipPrePost { if !runPhase("post-build", postBuildSteps, 0) { return @@ -430,19 +496,34 @@ func (s *BuildService) executeBuild(ctx context.Context, buildIDStr string) { templateType = "snapshot" } + // Serialize env vars for DB storage. 
+ defaultEnvJSON, err := json.Marshal(templateDefaultEnv) + if err != nil { + defaultEnvJSON = []byte("{}") + } + if _, err := s.DB.InsertTemplate(buildCtx, db.InsertTemplateParams{ - ID: build.TemplateID, - Name: build.Name, - Type: templateType, - Vcpus: build.Vcpus, - MemoryMb: build.MemoryMb, - SizeBytes: sizeBytes, - TeamID: id.PlatformTeamID, + ID: build.TemplateID, + Name: build.Name, + Type: templateType, + Vcpus: build.Vcpus, + MemoryMb: build.MemoryMb, + SizeBytes: sizeBytes, + TeamID: id.PlatformTeamID, + DefaultUser: templateDefaultUser, + DefaultEnv: defaultEnvJSON, }); err != nil { log.Error("failed to insert template record", "error", err) // Build succeeded on disk, just DB record failed — don't mark as failed. } + // Record defaults on the build record for inspection. + _ = s.DB.UpdateBuildDefaults(buildCtx, db.UpdateBuildDefaultsParams{ + ID: buildID, + DefaultUser: templateDefaultUser, + DefaultEnv: defaultEnvJSON, + }) + // For CreateSnapshot, the sandbox is already destroyed by the snapshot process. // For FlattenRootfs, the sandbox is already destroyed by the flatten process. // No additional destroy needed. @@ -603,3 +684,87 @@ func parseSandboxEnv(raw string) map[string]string { return envVars } + +// uploadAndExtractArchive writes the archive to the sandbox and extracts it +// to /tmp/build-files/. Detects format from content (tar.gz, tar, zip). +func (s *BuildService) uploadAndExtractArchive( + ctx context.Context, + agent buildAgentClient, + sandboxID string, + archive []byte, + buildID string, +) error { + // Detect archive type from magic bytes. 
+ var archivePath, extractCmd string + switch { + case len(archive) >= 2 && archive[0] == 0x1f && archive[1] == 0x8b: + // gzip (tar.gz) + archivePath = "/tmp/build-files.tar.gz" + extractCmd = "mkdir -p /tmp/build-files && tar xzf /tmp/build-files.tar.gz -C /tmp/build-files" + case len(archive) >= 4 && string(archive[:4]) == "PK\x03\x04": + // zip + archivePath = "/tmp/build-files.zip" + extractCmd = "mkdir -p /tmp/build-files && unzip -o /tmp/build-files.zip -d /tmp/build-files" + case len(archive) >= 262 && string(archive[257:262]) == "ustar": + // tar (ustar magic at offset 257) + archivePath = "/tmp/build-files.tar" + extractCmd = "mkdir -p /tmp/build-files && tar xf /tmp/build-files.tar -C /tmp/build-files" + default: + // Fallback: try tar.gz + archivePath = "/tmp/build-files.tar.gz" + extractCmd = "mkdir -p /tmp/build-files && tar xzf /tmp/build-files.tar.gz -C /tmp/build-files" + } + + slog.Info("uploading build archive", "build_id", buildID, "path", archivePath, "size", len(archive)) + + // Write archive to VM. + if _, err := agent.WriteFile(ctx, connect.NewRequest(&pb.WriteFileRequest{ + SandboxId: sandboxID, + Path: archivePath, + Content: archive, + })); err != nil { + return fmt.Errorf("write archive: %w", err) + } + + // Extract and ensure files are readable. + fullCmd := extractCmd + " && chmod -R a+rX /tmp/build-files" + + resp, err := agent.Exec(ctx, connect.NewRequest(&pb.ExecRequest{ + SandboxId: sandboxID, + Cmd: "/bin/sh", + Args: []string{"-c", fullCmd}, + TimeoutSec: 120, + })) + if err != nil { + return fmt.Errorf("extract archive: %w", err) + } + if resp.Msg.ExitCode != 0 { + return fmt.Errorf("extract archive: exit code %d: %s", resp.Msg.ExitCode, string(resp.Msg.Stderr)) + } + + return nil +} + +// runtimeEnvVars lists env vars that are user- or session-specific and should +// not be persisted into template defaults. These are resolved at runtime by +// envd based on the actual user and sandbox context. 
+var runtimeEnvVars = map[string]bool{ + "HOME": true, "USER": true, "LOGNAME": true, "SHELL": true, + "PWD": true, "OLDPWD": true, "HOSTNAME": true, "TERM": true, + "SHLVL": true, "_": true, + // Per-sandbox identifiers set by envd at boot via MMDS. + "WRENN_SANDBOX_ID": true, "WRENN_TEMPLATE_ID": true, +} + +// filterBuildEnv returns a copy of envVars with runtime/user-specific +// variables removed so they don't override envd's per-user resolution. +func filterBuildEnv(envVars map[string]string) map[string]string { + filtered := make(map[string]string, len(envVars)) + for k, v := range envVars { + if runtimeEnvVars[k] { + continue + } + filtered[k] = v + } + return filtered +} diff --git a/internal/service/sandbox.go b/internal/service/sandbox.go index 68c9bbf..5ebd4d2 100644 --- a/internal/service/sandbox.go +++ b/internal/service/sandbox.go @@ -2,6 +2,7 @@ package service import ( "context" + "encoding/json" "fmt" "log/slog" "time" @@ -85,6 +86,8 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. // Resolve template name → (teamID, templateID). templateTeamID := id.PlatformTeamID templateID := id.MinimalTemplateID + var templateDefaultUser string + var templateDefaultEnv map[string]string if p.Template != "minimal" { tmpl, err := s.DB.GetTemplateByTeam(ctx, db.GetTemplateByTeamParams{Name: p.Template, TeamID: p.TeamID}) if err != nil { @@ -92,6 +95,11 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. } templateTeamID = tmpl.TeamID templateID = tmpl.ID + templateDefaultUser = tmpl.DefaultUser + // Parse default_env JSONB into a map. + if len(tmpl.DefaultEnv) > 0 { + _ = json.Unmarshal(tmpl.DefaultEnv, &templateDefaultEnv) + } // If the template is a snapshot, use its baked-in vcpus/memory. if tmpl.Type == "snapshot" { p.VCPUs = tmpl.Vcpus @@ -140,14 +148,16 @@ func (s *SandboxService) Create(ctx context.Context, p SandboxCreateParams) (db. 
} resp, err := agent.CreateSandbox(ctx, connect.NewRequest(&pb.CreateSandboxRequest{ - SandboxId: sandboxIDStr, - Template: p.Template, - TeamId: id.UUIDString(templateTeamID), - TemplateId: id.UUIDString(templateID), - Vcpus: p.VCPUs, - MemoryMb: p.MemoryMB, - TimeoutSec: p.TimeoutSec, - DiskSizeMb: p.DiskSizeMB, + SandboxId: sandboxIDStr, + Template: p.Template, + TeamId: id.UUIDString(templateTeamID), + TemplateId: id.UUIDString(templateID), + Vcpus: p.VCPUs, + MemoryMb: p.MemoryMB, + TimeoutSec: p.TimeoutSec, + DiskSizeMb: p.DiskSizeMB, + DefaultUser: templateDefaultUser, + DefaultEnv: templateDefaultEnv, })) if err != nil { if _, dbErr := s.DB.UpdateSandboxStatus(ctx, db.UpdateSandboxStatusParams{ @@ -249,9 +259,24 @@ func (s *SandboxService) Resume(ctx context.Context, sandboxID, teamID pgtype.UU sandboxIDStr := id.FormatSandboxID(sandboxID) + // Look up template defaults for resume. + var resumeDefaultUser string + var resumeDefaultEnv map[string]string + if sb.TemplateID.Valid { + tmpl, err := s.DB.GetTemplate(ctx, sb.TemplateID) + if err == nil { + resumeDefaultUser = tmpl.DefaultUser + if len(tmpl.DefaultEnv) > 0 { + _ = json.Unmarshal(tmpl.DefaultEnv, &resumeDefaultEnv) + } + } + } + resp, err := agent.ResumeSandbox(ctx, connect.NewRequest(&pb.ResumeSandboxRequest{ - SandboxId: sandboxIDStr, - TimeoutSec: sb.TimeoutSec, + SandboxId: sandboxIDStr, + TimeoutSec: sb.TimeoutSec, + DefaultUser: resumeDefaultUser, + DefaultEnv: resumeDefaultEnv, })) if err != nil { return db.Sandbox{}, fmt.Errorf("agent resume: %w", err) diff --git a/proto/hostagent/gen/hostagent.pb.go b/proto/hostagent/gen/hostagent.pb.go index c6800b9..ac864ff 100644 --- a/proto/hostagent/gen/hostagent.pb.go +++ b/proto/hostagent/gen/hostagent.pb.go @@ -40,7 +40,11 @@ type CreateSandboxRequest struct { // Team UUID that owns the template (hex string). All-zeros = platform. 
TeamId string `protobuf:"bytes,7,opt,name=team_id,json=teamId,proto3" json:"team_id,omitempty"` // Template UUID (hex string). Both zeros + team zeros = "minimal" sentinel. - TemplateId string `protobuf:"bytes,8,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` + TemplateId string `protobuf:"bytes,8,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` + // Default unix user for the sandbox (set in envd via PostInit). + DefaultUser string `protobuf:"bytes,9,opt,name=default_user,json=defaultUser,proto3" json:"default_user,omitempty"` + // Default environment variables (set in envd via PostInit). + DefaultEnv map[string]string `protobuf:"bytes,10,rep,name=default_env,json=defaultEnv,proto3" json:"default_env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -131,6 +135,20 @@ func (x *CreateSandboxRequest) GetTemplateId() string { return "" } +func (x *CreateSandboxRequest) GetDefaultUser() string { + if x != nil { + return x.DefaultUser + } + return "" +} + +func (x *CreateSandboxRequest) GetDefaultEnv() map[string]string { + if x != nil { + return x.DefaultEnv + } + return nil +} + type CreateSandboxResponse struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -356,7 +374,11 @@ type ResumeSandboxRequest struct { SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` // TTL in seconds restored from the DB so the reaper can auto-pause // the sandbox again after inactivity. 0 means no auto-pause. 
- TimeoutSec int32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` + TimeoutSec int32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` + // Default unix user for the sandbox (set in envd via PostInit on resume). + DefaultUser string `protobuf:"bytes,3,opt,name=default_user,json=defaultUser,proto3" json:"default_user,omitempty"` + // Default environment variables (set in envd via PostInit on resume). + DefaultEnv map[string]string `protobuf:"bytes,4,rep,name=default_env,json=defaultEnv,proto3" json:"default_env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -405,6 +427,20 @@ func (x *ResumeSandboxRequest) GetTimeoutSec() int32 { return 0 } +func (x *ResumeSandboxRequest) GetDefaultUser() string { + if x != nil { + return x.DefaultUser + } + return "" +} + +func (x *ResumeSandboxRequest) GetDefaultEnv() map[string]string { + if x != nil { + return x.DefaultEnv + } + return nil +} + type ResumeSandboxResponse struct { state protoimpl.MessageState `protogen:"open.v1"` SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` @@ -3429,7 +3465,7 @@ var File_hostagent_proto protoreflect.FileDescriptor const file_hostagent_proto_rawDesc = "" + "\n" + - "\x0fhostagent.proto\x12\fhostagent.v1\"\x81\x02\n" + + "\x0fhostagent.proto\x12\fhostagent.v1\"\xb8\x03\n" + "\x14CreateSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x05 \x01(\tR\tsandboxId\x12\x1a\n" + @@ -3442,7 +3478,14 @@ const file_hostagent_proto_rawDesc = "" + "diskSizeMb\x12\x17\n" + "\ateam_id\x18\a \x01(\tR\x06teamId\x12\x1f\n" + "\vtemplate_id\x18\b \x01(\tR\n" + - "templateId\"g\n" + + "templateId\x12!\n" + + "\fdefault_user\x18\t \x01(\tR\vdefaultUser\x12S\n" + + "\vdefault_env\x18\n" + + " \x03(\v22.hostagent.v1.CreateSandboxRequest.DefaultEnvEntryR\n" + 
+ "defaultEnv\x1a=\n" + + "\x0fDefaultEnvEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"g\n" + "\x15CreateSandboxResponse\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + @@ -3455,12 +3498,18 @@ const file_hostagent_proto_rawDesc = "" + "\x13PauseSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"\x16\n" + - "\x14PauseSandboxResponse\"V\n" + + "\x14PauseSandboxResponse\"\x8d\x02\n" + "\x14ResumeSandboxRequest\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x1f\n" + "\vtimeout_sec\x18\x02 \x01(\x05R\n" + - "timeoutSec\"g\n" + + "timeoutSec\x12!\n" + + "\fdefault_user\x18\x03 \x01(\tR\vdefaultUser\x12S\n" + + "\vdefault_env\x18\x04 \x03(\v22.hostagent.v1.ResumeSandboxRequest.DefaultEnvEntryR\n" + + "defaultEnv\x1a=\n" + + "\x0fDefaultEnvEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"g\n" + "\x15ResumeSandboxResponse\x12\x1d\n" + "\n" + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + @@ -3719,7 +3768,7 @@ func file_hostagent_proto_rawDescGZIP() []byte { return file_hostagent_proto_rawDescData } -var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 61) +var file_hostagent_proto_msgTypes = make([]protoimpl.MessageInfo, 63) var file_hostagent_proto_goTypes = []any{ (*CreateSandboxRequest)(nil), // 0: hostagent.v1.CreateSandboxRequest (*CreateSandboxResponse)(nil), // 1: hostagent.v1.CreateSandboxResponse @@ -3781,79 +3830,83 @@ var file_hostagent_proto_goTypes = []any{ (*PtyResizeResponse)(nil), // 57: hostagent.v1.PtyResizeResponse (*PtyKillRequest)(nil), // 58: hostagent.v1.PtyKillRequest (*PtyKillResponse)(nil), // 59: hostagent.v1.PtyKillResponse - nil, // 60: hostagent.v1.PtyAttachRequest.EnvsEntry + nil, // 60: hostagent.v1.CreateSandboxRequest.DefaultEnvEntry + nil, // 61: hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry + nil, // 62: 
hostagent.v1.PtyAttachRequest.EnvsEntry } var file_hostagent_proto_depIdxs = []int32{ - 16, // 0: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo - 23, // 1: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart - 24, // 2: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData - 25, // 3: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd - 27, // 4: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta - 33, // 5: hostagent.v1.ListDirResponse.entries:type_name -> hostagent.v1.FileEntry - 33, // 6: hostagent.v1.MakeDirResponse.entry:type_name -> hostagent.v1.FileEntry - 42, // 7: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint - 42, // 8: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint - 42, // 9: hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint - 42, // 10: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint - 60, // 11: hostagent.v1.PtyAttachRequest.envs:type_name -> hostagent.v1.PtyAttachRequest.EnvsEntry - 51, // 12: hostagent.v1.PtyAttachResponse.started:type_name -> hostagent.v1.PtyStarted - 52, // 13: hostagent.v1.PtyAttachResponse.output:type_name -> hostagent.v1.PtyOutput - 53, // 14: hostagent.v1.PtyAttachResponse.exited:type_name -> hostagent.v1.PtyExited - 0, // 15: hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest - 2, // 16: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest - 4, // 17: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest - 6, // 18: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest - 12, // 19: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest - 14, // 20: 
hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest - 17, // 21: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest - 19, // 22: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest - 31, // 23: hostagent.v1.HostAgentService.ListDir:input_type -> hostagent.v1.ListDirRequest - 34, // 24: hostagent.v1.HostAgentService.MakeDir:input_type -> hostagent.v1.MakeDirRequest - 36, // 25: hostagent.v1.HostAgentService.RemovePath:input_type -> hostagent.v1.RemovePathRequest - 8, // 26: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest - 10, // 27: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest - 21, // 28: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest - 26, // 29: hostagent.v1.HostAgentService.WriteFileStream:input_type -> hostagent.v1.WriteFileStreamRequest - 29, // 30: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest - 38, // 31: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest - 40, // 32: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest - 43, // 33: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest - 45, // 34: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest - 47, // 35: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest - 49, // 36: hostagent.v1.HostAgentService.PtyAttach:input_type -> hostagent.v1.PtyAttachRequest - 54, // 37: hostagent.v1.HostAgentService.PtySendInput:input_type -> hostagent.v1.PtySendInputRequest - 56, // 38: hostagent.v1.HostAgentService.PtyResize:input_type -> hostagent.v1.PtyResizeRequest - 58, // 39: hostagent.v1.HostAgentService.PtyKill:input_type -> 
hostagent.v1.PtyKillRequest - 1, // 40: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse - 3, // 41: hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse - 5, // 42: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse - 7, // 43: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse - 13, // 44: hostagent.v1.HostAgentService.Exec:output_type -> hostagent.v1.ExecResponse - 15, // 45: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse - 18, // 46: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse - 20, // 47: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse - 32, // 48: hostagent.v1.HostAgentService.ListDir:output_type -> hostagent.v1.ListDirResponse - 35, // 49: hostagent.v1.HostAgentService.MakeDir:output_type -> hostagent.v1.MakeDirResponse - 37, // 50: hostagent.v1.HostAgentService.RemovePath:output_type -> hostagent.v1.RemovePathResponse - 9, // 51: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse - 11, // 52: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse - 22, // 53: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse - 28, // 54: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse - 30, // 55: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse - 39, // 56: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse - 41, // 57: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse - 44, // 58: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse - 46, // 
59: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse - 48, // 60: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse - 50, // 61: hostagent.v1.HostAgentService.PtyAttach:output_type -> hostagent.v1.PtyAttachResponse - 55, // 62: hostagent.v1.HostAgentService.PtySendInput:output_type -> hostagent.v1.PtySendInputResponse - 57, // 63: hostagent.v1.HostAgentService.PtyResize:output_type -> hostagent.v1.PtyResizeResponse - 59, // 64: hostagent.v1.HostAgentService.PtyKill:output_type -> hostagent.v1.PtyKillResponse - 40, // [40:65] is the sub-list for method output_type - 15, // [15:40] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 60, // 0: hostagent.v1.CreateSandboxRequest.default_env:type_name -> hostagent.v1.CreateSandboxRequest.DefaultEnvEntry + 61, // 1: hostagent.v1.ResumeSandboxRequest.default_env:type_name -> hostagent.v1.ResumeSandboxRequest.DefaultEnvEntry + 16, // 2: hostagent.v1.ListSandboxesResponse.sandboxes:type_name -> hostagent.v1.SandboxInfo + 23, // 3: hostagent.v1.ExecStreamResponse.start:type_name -> hostagent.v1.ExecStreamStart + 24, // 4: hostagent.v1.ExecStreamResponse.data:type_name -> hostagent.v1.ExecStreamData + 25, // 5: hostagent.v1.ExecStreamResponse.end:type_name -> hostagent.v1.ExecStreamEnd + 27, // 6: hostagent.v1.WriteFileStreamRequest.meta:type_name -> hostagent.v1.WriteFileStreamMeta + 33, // 7: hostagent.v1.ListDirResponse.entries:type_name -> hostagent.v1.FileEntry + 33, // 8: hostagent.v1.MakeDirResponse.entry:type_name -> hostagent.v1.FileEntry + 42, // 9: hostagent.v1.GetSandboxMetricsResponse.points:type_name -> hostagent.v1.MetricPoint + 42, // 10: hostagent.v1.FlushSandboxMetricsResponse.points_10m:type_name -> hostagent.v1.MetricPoint + 42, // 11: 
hostagent.v1.FlushSandboxMetricsResponse.points_2h:type_name -> hostagent.v1.MetricPoint + 42, // 12: hostagent.v1.FlushSandboxMetricsResponse.points_24h:type_name -> hostagent.v1.MetricPoint + 62, // 13: hostagent.v1.PtyAttachRequest.envs:type_name -> hostagent.v1.PtyAttachRequest.EnvsEntry + 51, // 14: hostagent.v1.PtyAttachResponse.started:type_name -> hostagent.v1.PtyStarted + 52, // 15: hostagent.v1.PtyAttachResponse.output:type_name -> hostagent.v1.PtyOutput + 53, // 16: hostagent.v1.PtyAttachResponse.exited:type_name -> hostagent.v1.PtyExited + 0, // 17: hostagent.v1.HostAgentService.CreateSandbox:input_type -> hostagent.v1.CreateSandboxRequest + 2, // 18: hostagent.v1.HostAgentService.DestroySandbox:input_type -> hostagent.v1.DestroySandboxRequest + 4, // 19: hostagent.v1.HostAgentService.PauseSandbox:input_type -> hostagent.v1.PauseSandboxRequest + 6, // 20: hostagent.v1.HostAgentService.ResumeSandbox:input_type -> hostagent.v1.ResumeSandboxRequest + 12, // 21: hostagent.v1.HostAgentService.Exec:input_type -> hostagent.v1.ExecRequest + 14, // 22: hostagent.v1.HostAgentService.ListSandboxes:input_type -> hostagent.v1.ListSandboxesRequest + 17, // 23: hostagent.v1.HostAgentService.WriteFile:input_type -> hostagent.v1.WriteFileRequest + 19, // 24: hostagent.v1.HostAgentService.ReadFile:input_type -> hostagent.v1.ReadFileRequest + 31, // 25: hostagent.v1.HostAgentService.ListDir:input_type -> hostagent.v1.ListDirRequest + 34, // 26: hostagent.v1.HostAgentService.MakeDir:input_type -> hostagent.v1.MakeDirRequest + 36, // 27: hostagent.v1.HostAgentService.RemovePath:input_type -> hostagent.v1.RemovePathRequest + 8, // 28: hostagent.v1.HostAgentService.CreateSnapshot:input_type -> hostagent.v1.CreateSnapshotRequest + 10, // 29: hostagent.v1.HostAgentService.DeleteSnapshot:input_type -> hostagent.v1.DeleteSnapshotRequest + 21, // 30: hostagent.v1.HostAgentService.ExecStream:input_type -> hostagent.v1.ExecStreamRequest + 26, // 31: 
hostagent.v1.HostAgentService.WriteFileStream:input_type -> hostagent.v1.WriteFileStreamRequest + 29, // 32: hostagent.v1.HostAgentService.ReadFileStream:input_type -> hostagent.v1.ReadFileStreamRequest + 38, // 33: hostagent.v1.HostAgentService.PingSandbox:input_type -> hostagent.v1.PingSandboxRequest + 40, // 34: hostagent.v1.HostAgentService.Terminate:input_type -> hostagent.v1.TerminateRequest + 43, // 35: hostagent.v1.HostAgentService.GetSandboxMetrics:input_type -> hostagent.v1.GetSandboxMetricsRequest + 45, // 36: hostagent.v1.HostAgentService.FlushSandboxMetrics:input_type -> hostagent.v1.FlushSandboxMetricsRequest + 47, // 37: hostagent.v1.HostAgentService.FlattenRootfs:input_type -> hostagent.v1.FlattenRootfsRequest + 49, // 38: hostagent.v1.HostAgentService.PtyAttach:input_type -> hostagent.v1.PtyAttachRequest + 54, // 39: hostagent.v1.HostAgentService.PtySendInput:input_type -> hostagent.v1.PtySendInputRequest + 56, // 40: hostagent.v1.HostAgentService.PtyResize:input_type -> hostagent.v1.PtyResizeRequest + 58, // 41: hostagent.v1.HostAgentService.PtyKill:input_type -> hostagent.v1.PtyKillRequest + 1, // 42: hostagent.v1.HostAgentService.CreateSandbox:output_type -> hostagent.v1.CreateSandboxResponse + 3, // 43: hostagent.v1.HostAgentService.DestroySandbox:output_type -> hostagent.v1.DestroySandboxResponse + 5, // 44: hostagent.v1.HostAgentService.PauseSandbox:output_type -> hostagent.v1.PauseSandboxResponse + 7, // 45: hostagent.v1.HostAgentService.ResumeSandbox:output_type -> hostagent.v1.ResumeSandboxResponse + 13, // 46: hostagent.v1.HostAgentService.Exec:output_type -> hostagent.v1.ExecResponse + 15, // 47: hostagent.v1.HostAgentService.ListSandboxes:output_type -> hostagent.v1.ListSandboxesResponse + 18, // 48: hostagent.v1.HostAgentService.WriteFile:output_type -> hostagent.v1.WriteFileResponse + 20, // 49: hostagent.v1.HostAgentService.ReadFile:output_type -> hostagent.v1.ReadFileResponse + 32, // 50: 
hostagent.v1.HostAgentService.ListDir:output_type -> hostagent.v1.ListDirResponse + 35, // 51: hostagent.v1.HostAgentService.MakeDir:output_type -> hostagent.v1.MakeDirResponse + 37, // 52: hostagent.v1.HostAgentService.RemovePath:output_type -> hostagent.v1.RemovePathResponse + 9, // 53: hostagent.v1.HostAgentService.CreateSnapshot:output_type -> hostagent.v1.CreateSnapshotResponse + 11, // 54: hostagent.v1.HostAgentService.DeleteSnapshot:output_type -> hostagent.v1.DeleteSnapshotResponse + 22, // 55: hostagent.v1.HostAgentService.ExecStream:output_type -> hostagent.v1.ExecStreamResponse + 28, // 56: hostagent.v1.HostAgentService.WriteFileStream:output_type -> hostagent.v1.WriteFileStreamResponse + 30, // 57: hostagent.v1.HostAgentService.ReadFileStream:output_type -> hostagent.v1.ReadFileStreamResponse + 39, // 58: hostagent.v1.HostAgentService.PingSandbox:output_type -> hostagent.v1.PingSandboxResponse + 41, // 59: hostagent.v1.HostAgentService.Terminate:output_type -> hostagent.v1.TerminateResponse + 44, // 60: hostagent.v1.HostAgentService.GetSandboxMetrics:output_type -> hostagent.v1.GetSandboxMetricsResponse + 46, // 61: hostagent.v1.HostAgentService.FlushSandboxMetrics:output_type -> hostagent.v1.FlushSandboxMetricsResponse + 48, // 62: hostagent.v1.HostAgentService.FlattenRootfs:output_type -> hostagent.v1.FlattenRootfsResponse + 50, // 63: hostagent.v1.HostAgentService.PtyAttach:output_type -> hostagent.v1.PtyAttachResponse + 55, // 64: hostagent.v1.HostAgentService.PtySendInput:output_type -> hostagent.v1.PtySendInputResponse + 57, // 65: hostagent.v1.HostAgentService.PtyResize:output_type -> hostagent.v1.PtyResizeResponse + 59, // 66: hostagent.v1.HostAgentService.PtyKill:output_type -> hostagent.v1.PtyKillResponse + 42, // [42:67] is the sub-list for method output_type + 17, // [17:42] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is 
the sub-list for field type_name } func init() { file_hostagent_proto_init() } @@ -3886,7 +3939,7 @@ func file_hostagent_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_hostagent_proto_rawDesc), len(file_hostagent_proto_rawDesc)), NumEnums: 0, - NumMessages: 61, + NumMessages: 63, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/hostagent/hostagent.proto b/proto/hostagent/hostagent.proto index ea6c4eb..6b6306e 100644 --- a/proto/hostagent/hostagent.proto +++ b/proto/hostagent/hostagent.proto @@ -119,6 +119,12 @@ message CreateSandboxRequest { // Template UUID (hex string). Both zeros + team zeros = "minimal" sentinel. string template_id = 8; + + // Default unix user for the sandbox (set in envd via PostInit). + string default_user = 9; + + // Default environment variables (set in envd via PostInit). + map<string, string> default_env = 10; } message CreateSandboxResponse { @@ -145,6 +151,12 @@ message ResumeSandboxRequest { // TTL in seconds restored from the DB so the reaper can auto-pause // the sandbox again after inactivity. 0 means no auto-pause. int32 timeout_sec = 2; + + // Default unix user for the sandbox (set in envd via PostInit on resume). + string default_user = 3; + + // Default environment variables (set in envd via PostInit on resume). + map<string, string> default_env = 4; } message ResumeSandboxResponse {