diff --git a/.env.example b/.env.example index f128de7..f9318cc 100644 --- a/.env.example +++ b/.env.example @@ -5,14 +5,13 @@ DATABASE_URL=postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable REDIS_URL=redis://localhost:6379/0 # Control Plane -CP_LISTEN_ADDR=:8000 -CP_HOST_AGENT_ADDR=localhost:50051 +WRENN_CP_LISTEN_ADDR=:8080 # Host Agent -AGENT_LISTEN_ADDR=:50051 -AGENT_FILES_ROOTDIR=/var/lib/wrenn -AGENT_HOST_INTERFACE=eth0 -AGENT_CP_URL=http://localhost:8000 +WRENN_HOST_LISTEN_ADDR=:50051 +WRENN_DIR=/var/lib/wrenn +WRENN_HOST_INTERFACE=eth0 +WRENN_CP_URL=http://localhost:8080 # Lago (billing — external service) LAGO_API_URL=http://localhost:3000 @@ -28,6 +27,18 @@ AWS_SECRET_ACCESS_KEY= # Auth JWT_SECRET= +# mTLS — CP→Agent channel +# Generate a self-signed CA with: +# openssl ecparam -genkey -name P-256 -noout -out ca.key +# openssl req -new -x509 -key ca.key -days 3650 -out ca.crt -subj "/CN=wrenn-internal-ca" +# Then set these to the file contents (newlines replaced with \n or use multiline env). +WRENN_CA_CERT= +WRENN_CA_KEY= + +# Channels (notification destinations) +# AES-256-GCM key for encrypting channel secrets. Generate with: openssl rand -hex 32 +WRENN_ENCRYPTION_KEY= + # OAuth OAUTH_GITHUB_CLIENT_ID= OAUTH_GITHUB_CLIENT_SECRET= diff --git a/.gitignore b/.gitignore index c7fff43..96b55a4 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,8 @@ go.work.sum ## AI .claude/ e2b/ +.impeccable.md +.gstack ## Builds builds/ diff --git a/CLAUDE.md b/CLAUDE.md index 34a6bbb..3366b67 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -230,186 +230,68 @@ The main module (`go.mod`) and envd (`envd/go.mod`) are fully independent. `make - Sandbox clones: `/var/lib/wrenn/sandboxes/` - Firecracker: `/usr/local/bin/firecracker` (e2b's fork of firecracker) -## Web UI Styling +## Design Context -### Identity - -Warm, confident developer tool with industrial precision and crafted organic character. 
The feel is sharp and data-forward — not cold or sterile, but not soft either. Think: an engineer's favorite tool, built with care. - ---- - -### Color Palette (Dark Mode) - -**Background scale (6 steps, near-black-green):** -`#0a0c0b` (bg-0, page base) → `#0f1211` (bg-1, sidebar/topbar) → `#141817` (bg-2, cards/surfaces) → `#1a1e1c` (bg-3, hover states/elevated) → `#212624` (bg-4, inputs/avatars) → `#2a302d` (bg-5, active controls) - -**Text hierarchy (5 levels):** -- Bright `#eae7e2` — page titles, metric values, active states -- Primary `#d0cdc6` — body text, nav labels, readable content -- Secondary `#9b9790` — supporting text, inactive nav, descriptions -- Tertiary `#6b6862` — labels, section headers, timestamps -- Muted `#454340` — ghost text, disabled states, grid labels - -**Sage green brand accent (3 tiers + 2 glows):** -- Solid `#5e8c58` — primary accent, buttons, borders, active indicators -- Mid `#89a785` — badges, chart lines, secondary accent -- Bright `#a4c89f` — active nav text, live counts, chart dots -- Glow `rgba(94,140,88,0.07)` — active nav backgrounds, subtle highlights -- Glow Mid `rgba(94,140,88,0.14)` — live badges, status badge backgrounds - -**Borders (2 levels):** -- Default `#1f2321` — card edges, dividers, sidebar borders -- Mid `#2a2f2c` — hover states, interactive borders, stronger separation - -**Semantic status colors:** -- Amber `#d4a73c` — warning, building, countdown timers -- Red `#cf8172` — error, failed, destructive actions -- Blue `#5a9fd4` — info, stopped (use sparingly) - -**Light mode:** (TBD — follow same warm-tinted approach. Background scale from `#f8f6f1` → `#dedbd5`. Text hierarchy inverts. Accent stays `#5e8c58` for solid.) 
- ---- - -### Typography - -Four fonts, each with a clear role: - -| Font | Role | Weights | Where | -|------|------|---------|-------| -| **Manrope** (variable) | Body, UI | 400–700 | All body text, nav labels, buttons, descriptions, section headers | -| **Instrument Serif** | Display, metrics | 400 | Page titles (h1), large metric values, empty-state headings only | -| **JetBrains Mono** | Code, data | 400–600 | Status bar, time range buttons, search inputs, IDs, commit SHAs, countdown timers, log viewer, URL paths, code blocks | -| **Alice** | Brand wordmark | 400 | Sidebar wordmark only — never used elsewhere | - -**Sizing:** -- Base body: `14px` -- Page title (h1): `24px` serif, `letter-spacing: -0.02em` -- Card metric values: `36px` serif, `letter-spacing: -0.04em` -- Chart inline metric: `30px` serif, `letter-spacing: -0.04em` -- Nav items: `13px` body, weight 500 -- Section/group labels: `11px` body, uppercase, `letter-spacing: 0.06em`, weight 600 -- Chart section labels: `12px` body, uppercase, `letter-spacing: 0.05em`, weight 600 -- Stat cell labels: `11px` body, uppercase, `letter-spacing: 0.05em`, weight 600 -- Badge text: `10px`, uppercase, `letter-spacing: 0.04em`, weight 600 -- Status bar / footer links: `11–12px` mono -- Table headers: `11px` body, uppercase, `letter-spacing: 0.05em`, weight 600, color muted -- Table body cells: `13px` - -**Key rule:** Instrument Serif is reserved exclusively for page-level titles and large numeric values. It provides warmth and character without softness. Everything else uses Manrope (UI) or JetBrains Mono (data/code). - ---- - -### Spacing - -4px base unit (Tailwind scale). Moderate density — functional and confident, never cramped. 
- -- Page content padding: `24–28px` -- Card/surface internal padding: `18–20px` -- Sidebar width: `230px` -- Sidebar nav item padding: `8px 10px` -- Sidebar brand area: `18px 16px 16px` -- Tab bar items: `10px 16px` -- Topbar: `16px 28px` -- Metric strip cell: `18px 20px` -- Chart header: `18px 20px` -- Chart canvas: `14px 20px 12px` -- Table header cells: `11px 16px` -- Table body cells: `12px 16px` -- Status bar: `6px 28px` -- Between sections (cards): `20–24px` margin-bottom - ---- - -### Borders & Depth - -**Flat aesthetic — no drop shadows.** Depth comes from background color stepping (bg-0 → bg-1 → bg-2 → bg-3), not shadows. `--shadow-sm: 0 0 #0000`. - -- All borders: `1px solid` in warm muted tones -- Corner radii: cards/surfaces `8px`, inputs/buttons `5px`, logo mark `6px`, avatars `5px`, dots `50%` -- Connected metric cells use shared border container with `border-left: 1px solid` between cells (no gap/grid trick) — creates the industrial panel look -- Tables wrapped in `border-radius: 8px` container with overflow hidden - ---- - -### Components - -**Sidebar navigation:** -- Active items use `3px left-border` in sage solid (`#5e8c58`) with accent glow background (`rgba(94,140,88,0.07)`) -- Active text color: accent-bright (`#a4c89f`) -- Icons at `16px`, opacity 0.5 default, 1.0 on active -- Group labels: `11px` uppercase with `0.06em` tracking, muted color - -**Status chip (live indicator):** -- Rounded `8px` border, `bg-2` background, `border-mid` border -- Pulsing dot: `7px`, accent-solid fill, `box-shadow: 0 0 8px rgba(94,140,88,0.5)` with glow animation -- Count in mono at `14px` accent-bright, label in secondary text - -**Live badges (inline):** -- `10px` text, uppercase, `3px` border-radius -- Background: accent-glow-mid (`rgba(94,140,88,0.14)`), text: accent mid -- Includes `5px` pulsing dot with box-shadow - -**Metric strip:** -- 3-column grid, connected cells (single outer border, inner dividers) -- Hover: background steps from bg-2 to bg-3 -- 
Value: `36px` serif, bright text -- Label: `11px` uppercase, tertiary -- Sub-metadata row with `1px` divider between items - -**Chart cards:** -- `8px` border-radius, bg-2 background, default border -- Header: section label (12px uppercase) + large serif metric + live badge -- Range group: segmented buttons with `1px` borders, mono text, active state uses bg-5 -- Chart area: SVG with `0.5px` grid lines in border color, `10px` mono axis labels in muted -- Data line: `1.5px` accent-solid stroke, `stroke-linejoin: round` -- Area fill: gradient from `rgba(94,140,88,0.28)` → transparent -- Data dot: accent-bright fill, `2.5px` bg-2 stroke, `4px` radius - -**Buttons hierarchy:** -1. Ghost (icon-btn): transparent bg, default border, tertiary color → border-mid + secondary on hover -2. Outline: no bg, border-mid border → accent-solid border + primary text on hover -3. Tool: bg-2 background, default border → border-mid + primary on hover -4. Filled/CTA: accent-solid background, white text → lighter green on hover, subtle `translateY(-1px)` lift - -**Tables:** -- Container: `8px` border-radius, border, overflow hidden -- Header: bg-3 background, `11px` uppercase muted text -- Body: default bg, `1px` border-bottom between rows -- Row hover: bg-3 - -**Empty states:** -- Centered, `72px` vertical padding -- Icon container: `56px` square, bg-3, border-mid border, `8px` radius -- Heading: `20px` serif, bright text -- Description: `13px` body, tertiary text -- CTA button below - -**Inputs:** -- bg-2 background, default border, `5px` radius -- Mono font for search/filter inputs -- Focus: `border-color: accent-solid` (clean single ring, no double-ring) -- Placeholder: muted color - -**Focus rings:** Single accent-solid border-color change on focus. Clean and minimal — no double-ring outlines. 
- ---- - -### Animation - -- **All interactive transitions:** `150ms ease` -- **Page load / section entrance:** `fadeUp` — `opacity: 0, translateY(6px)` → visible, `0.35s ease`, staggered with `60–80ms` delays between elements -- **Chart data animation:** SVG `` on path `d`, polyline `points`, and circle `cy` — `0.5–0.6s` duration, `0.2–0.35s` begin delay, `fill: freeze` -- **Live status dot:** `glow` keyframe — `2.5s ease infinite` box-shadow bloom from `0 0 6px rgba(94,140,88,0.5)` → `0 0 14px rgba(94,140,88,0.2)` -- **CTA buttons:** subtle `translateY(-1px)` on hover for lift feel - ---- - -### Dark Mode - -Primary and default mode. Very dark near-black-green backgrounds (`#0a0c0b` base) with warm off-white text and desaturated sage accent. Completely flat — no card shadows anywhere. System preference detection + localStorage persistence. - ---- - -### Overall Feel - -Sharp, warm, industrial-confident. Avoids cold grays entirely — palette leans slightly warm/brown-tinted throughout. The serif display type provides organic character and warmth on titles and metrics, while Manrope handles readable UI text and JetBrains Mono anchors the data-forward, developer-tool identity. Connected metric panels, tight chart cards, and uppercase section labels create engineering density without sacrificing readability. The result is a tool that feels crafted and precise — designed by someone who uses developer tools daily. \ No newline at end of file +### Users +Developers across the full spectrum — solo engineers building side projects, startup teams integrating sandboxed execution into products, and platform/infra engineers at larger organizations. The interface must feel at home for all three: approachable enough not to intimidate a hacker, precise enough to earn the trust of a production ops team. Never condescend, never oversimplify. Trust the user to understand what they're looking at. + +### Brand Personality +**Precise. Warm. 
Uncompromising.** + +Wrenn is an engineer's favorite tool — built with visible care, not assembled from defaults. It runs real infrastructure (Firecracker microVMs), so the UI should reflect that seriousness without becoming cold or corporate. The warmth comes from the typography and color palette; the precision comes from hierarchy, density, and data fidelity. + +Emotional goal: **in control.** Users leave a session with full confidence in what's running, what happened, and what comes next. Nothing is hidden, nothing is ambiguous. + +### Aesthetic Direction +**Dark-first, industrial-warm, data-forward.** + +The near-black-green background palette (`#0a0c0b` through `#2a302d`) reads as "black with intention" — not pitch black (cold) and not charcoal (dated). The sage green accent (`#5e8c58`) is muted and organic, a meaningful departure from the startup-green neon that saturates the developer tool space. + +**Anti-references:** +- **Supabase**: avoid the friendly, approachable startup-green energy — too generic, too eager to please +- **AWS / GCP consoles**: avoid utility-first density without craft — functional but joyless, visually dated + +**References that capture the right spirit:** +- The precision of a well-calibrated instrument +- Editorial typography from technical publications +- The quiet confidence of tools that don't need to explain themselves + +### Type System +Four fonts with strict roles — this is the design system's strongest personality trait and must be respected: + +| Font | Role | When to use | +|------|------|-------------| +| **Manrope** (variable, sans) | UI workhorse | All body copy, nav, labels, buttons, form text | +| **Instrument Serif** | Display / editorial | Page titles (h1), dialog headings, metric values, hero moments | +| **JetBrains Mono** (variable) | Data / code | IDs, timestamps, key prefixes, file paths, terminal output, metrics | +| **Alice** | Brand wordmark | "Wrenn" in sidebar and login only — nowhere else | + +Instrument 
Serif at scale creates the signature editorial moments. Mono provides the precision signal for technical data. Never swap these roles. + +### Color System +``` +Backgrounds: bg-0 (#0a0c0b) through bg-5 (#2a302d) — 6 steps +Text: bright > primary > secondary > tertiary > muted — 5 levels +Accent: accent (#5e8c58) / accent-mid / accent-bright / glow / glow-mid +Status: amber (#d4a73c) / red (#cf8172) / blue (#5a9fd4) +``` + +Use accent sparingly. It should feel earned — reserved for live/active state indicators, primary CTAs, focus rings, and active nav. When accent appears, it should register. + +### Upcoming Surfaces (design must accommodate) +- **Terminal / shell output**: streaming exec output, TTY sessions. Needs strong mono treatment, high contrast for long sessions. +- **File browser**: filesystem tree inside capsule. Density matters — breadcrumbs, file icons, permission bits. +- **SDK / docs embedding**: code samples, quickstart flows inline in dashboard. Code blocks must feel premium, not afterthought. +- **Billing / usage charts**: pool consumption, cost curves, usage over time. Instrument Serif at large scale for metrics; chart containers should feel like instruments, not dashboards. + +### Design Principles + +1. **Precision over friendliness.** Every element earns its place. Wrenn doesn't need to tell you it's developer-friendly — that should be self-evident from the quality of the information architecture. + +2. **Density with breathing room.** Data-forward doesn't mean cramped. Strategic whitespace creates calm hierarchy within dense contexts. Sections breathe; rows don't waste space. + +3. **Industrial warmth.** The serif + mono + warm-black combination prevents sterility. This is a forge, not a gallery. The warmth is in the details, not the primary colors. + +4. **Legible at speed.** Users scan dashboards in seconds. 
Strong typographic contrast (serif h1, mono IDs, sans body), consistent patterns, and predictable placement let users orientate instantly without reading everything. + +5. **Craft signals trust.** For infrastructure that runs production code, the quality of the UI is a proxy for the quality of the product. Pixel-level decisions matter. Polish is not decoration — it's a trust signal. diff --git a/LICENSE b/LICENSE index ca87729..edfdc22 100644 --- a/LICENSE +++ b/LICENSE @@ -1,97 +1,201 @@ -Wrenn Sandbox License + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Business Source License 1.1 + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -Copyright (c) 2026 M/S Omukk, Bangladesh + 1. Definitions. ---- + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -Licensor + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -M/S Omukk, Bangladesh + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -Contact: [contact@omukk.dev](mailto:contact@omukk.dev) + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. ---- + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
-Licensed Work + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -The Licensed Work is the software project known as "Wrenn Sandbox", including all source code and associated files in this repository, except the directory `envd/`, which is licensed separately under the Apache License Version 2.0. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -Initial development of the Licensed Work began in March 2026. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. ---- + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -Change Date + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -January 1, 2030 + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. ---- + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -Change License + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -On the Change Date, the Licensed Work will automatically become available under the terms of the GNU General Public License, Version 3 (GPL-3.0). + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and ---- + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -Additional Use Grant (SaaS Restriction) + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -The Licensor grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work, provided that you comply with the limitations of this License. 
+ (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -You may: + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -* Use the software for personal use -* Use the software internally within your organization -* Modify the source code -* Experiment, test, and evaluate the software -* Distribute unmodified copies of the source code for evaluation + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. 
+ Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -You may not: + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -Provide the Licensed Work to third parties as a managed service, hosted service, software-as-a-service (SaaS), platform service, or any similar commercial offering where the primary value of the service derives from the Licensed Work. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -You may not sell the Licensed Work or offer paid services primarily based on the Licensed Work without a commercial license from M/S Omukk. + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -Commercial licenses may be obtained by contacting: + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -[contact@omukk.dev](mailto:contact@omukk.dev) + END OF TERMS AND CONDITIONS ---- + APPENDIX: How to apply the Apache License to your work. -Contributions + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -Unless otherwise stated, any Contribution intentionally submitted for inclusion in the Licensed Work shall be licensed under the terms of this Business Source License 1.1. + Copyright (c) 2026 M/S Omukk, Bangladesh ---- + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -Business Source License Terms + http://www.apache.org/licenses/LICENSE-2.0 -Use of the Licensed Work is governed by the Business Source License included in this file. - -The Business Source License is not an Open Source license. However, the Licensed Work will automatically become available under the Change License on the Change Date. - -Licensor grants you a non-exclusive, worldwide, royalty-free license to use, copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work, provided that you comply with the limitations stated in this License. - -All copies of the Licensed Work must include this License file. - -Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License. - ---- - -Disclaimer of Warranty - -THE LICENSED WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. - ---- - -Limitation of Liability - -IN NO EVENT SHALL THE LICENSOR OR CONTRIBUTORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY ARISING FROM THE USE OF THE LICENSED WORK. - ---- - -Third-Party Components - -Portions of this project include software licensed under separate open-source licenses. - -See the NOTICE file and THIRD_PARTY_LICENSES directory for details. 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile index 4a2e0b6..2dbcc76 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ dev: dev-infra migrate-up dev-cp dev-infra: docker compose -f deploy/docker-compose.dev.yml up -d @echo "Waiting for PostgreSQL..." - @until pg_isready -h localhost -p 5432 -q; do sleep 0.5; done + @until docker compose -f deploy/docker-compose.dev.yml exec -T postgres pg_isready -q 2>/dev/null; do sleep 0.5; done @echo "Dev infrastructure ready." dev-down: @@ -53,7 +53,7 @@ dev-agent: sudo go run ./cmd/host-agent dev-frontend: - cd frontend && pnpm dev --port 5173 + cd frontend && pnpm dev --port 5173 --host 0.0.0.0 dev-envd: cd $(ENVD_DIR) && go run . --debug --listen-tcp :3002 @@ -137,7 +137,7 @@ image-minimal: sudo bash images/templates/minimal/build.sh image-python: - sudo bash images/templates/python311/build.sh + sudo bash images/templates/python312/build.sh image-node: sudo bash images/templates/node20/build.sh diff --git a/NOTICE b/NOTICE index c6c4e38..ecf2c96 100644 --- a/NOTICE +++ b/NOTICE @@ -6,7 +6,7 @@ This project includes software derived from the following project: Project: e2b infra Repository: https://github.com/e2b-dev/infra -The following files and directories in this repository contain code derived from the above project and are licensed under the Apache License Version 2.0: +The following files and directories in this repository contain code derived from the above project: - envd/ - proto/envd/*.proto diff --git a/README.md b/README.md index dff1932..e2b290f 100644 --- a/README.md +++ b/README.md @@ -51,12 +51,12 @@ Copy `.env.example` to `.env` and edit: 
DATABASE_URL=postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable # Control plane -CP_LISTEN_ADDR=:8000 +WRENN_CP_LISTEN_ADDR=:8080 CP_HOST_AGENT_ADDR=http://localhost:50051 # Host agent -AGENT_LISTEN_ADDR=:50051 -AGENT_FILES_ROOTDIR=/var/lib/wrenn +WRENN_HOST_LISTEN_ADDR=:50051 +WRENN_DIR=/var/lib/wrenn ``` ### Run @@ -69,7 +69,7 @@ make migrate-up ./builds/wrenn-cp ``` -Control plane listens on `CP_LISTEN_ADDR` (default `:8000`). +Control plane listens on `WRENN_CP_LISTEN_ADDR` (default `:8080`). ### Host registration @@ -87,16 +87,16 @@ Hosts must be registered with the control plane before they can serve sandboxes. 2. **Start the host agent** with the registration token and its externally-reachable address: ```bash - sudo AGENT_CP_URL=http://cp-host:8000 \ + sudo WRENN_CP_URL=http://cp-host:8080 \ ./builds/wrenn-agent \ --register \ --address 10.0.1.5:50051 ``` - On first startup the agent sends its specs (arch, CPU, memory, disk) to the control plane, receives a long-lived host JWT, and saves it to `$AGENT_FILES_ROOTDIR/host-token`. + On first startup the agent sends its specs (arch, CPU, memory, disk) to the control plane, receives a long-lived host JWT, and saves it to `$WRENN_DIR/host-token`. 3. **Subsequent startups** don't need `--register` — the agent loads the saved JWT automatically: ```bash - sudo AGENT_CP_URL=http://cp-host:8000 \ + sudo WRENN_CP_URL=http://cp-host:8080 \ ./builds/wrenn-agent --address 10.0.1.5:50051 ``` @@ -107,7 +107,7 @@ Hosts must be registered with the control plane before they can serve sandboxes. ``` Then restart the agent with the new token. -The agent sends heartbeats to the control plane every 30 seconds. Host agent listens on `AGENT_LISTEN_ADDR` (default `:50051`). +The agent sends heartbeats to the control plane every 30 seconds. Host agent listens on `WRENN_HOST_LISTEN_ADDR` (default `:50051`). 
### Rootfs images diff --git a/cmd/control-plane/main.go b/cmd/control-plane/main.go index 3f52b41..942469c 100644 --- a/cmd/control-plane/main.go +++ b/cmd/control-plane/main.go @@ -14,10 +14,14 @@ import ( "github.com/redis/go-redis/v9" "git.omukk.dev/wrenn/sandbox/internal/api" + "git.omukk.dev/wrenn/sandbox/internal/audit" + "git.omukk.dev/wrenn/sandbox/internal/auth" "git.omukk.dev/wrenn/sandbox/internal/auth/oauth" + "git.omukk.dev/wrenn/sandbox/internal/channels" "git.omukk.dev/wrenn/sandbox/internal/config" "git.omukk.dev/wrenn/sandbox/internal/db" - "git.omukk.dev/wrenn/sandbox/proto/hostagent/gen/hostagentv1connect" + "git.omukk.dev/wrenn/sandbox/internal/lifecycle" + "git.omukk.dev/wrenn/sandbox/internal/scheduler" ) func main() { @@ -66,12 +70,47 @@ func main() { } slog.Info("connected to redis") - // Connect RPC client for the host agent. - agentHTTP := &http.Client{Timeout: 10 * time.Minute} - agentClient := hostagentv1connect.NewHostAgentServiceClient( - agentHTTP, - cfg.HostAgentAddr, - ) + // mTLS is mandatory — parse internal CA for CP↔agent communication. + if cfg.CACert == "" || cfg.CAKey == "" { + slog.Error("WRENN_CA_CERT and WRENN_CA_KEY are required — mTLS is mandatory for CP↔agent communication") + os.Exit(1) + } + ca, err := auth.ParseCA(cfg.CACert, cfg.CAKey) + if err != nil { + slog.Error("failed to parse mTLS CA from environment", "error", err) + os.Exit(1) + } + slog.Info("mTLS enabled: CA loaded") + + // Host client pool — manages Connect RPC clients to host agents. + cpCertStore, err := auth.NewCPCertStore(ca) + if err != nil { + slog.Error("failed to issue CP client certificate", "error", err) + os.Exit(1) + } + // Renew the CP client certificate periodically so it never expires + // while the control plane is running (TTL = 24h, renewal = every 12h). 
+ go func() { + ticker := time.NewTicker(auth.CPCertRenewInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := cpCertStore.Refresh(); err != nil { + slog.Error("failed to renew CP client certificate", "error", err) + } else { + slog.Info("CP client certificate renewed") + } + } + } + }() + hostPool := lifecycle.NewHostClientPoolTLS(auth.CPClientTLSConfig(ca, cpCertStore)) + slog.Info("host client pool: mTLS enabled") + + // Scheduler — picks a host for each new sandbox (round-robin for now). + hostScheduler := scheduler.NewRoundRobinScheduler(queries) // OAuth provider registry. oauthRegistry := oauth.NewRegistry() @@ -86,16 +125,44 @@ func main() { slog.Info("registered OAuth provider", "provider", "github") } - // API server. - srv := api.New(queries, agentClient, pool, rdb, []byte(cfg.JWTSecret), oauthRegistry, cfg.OAuthRedirectURL) + // Channels: publisher, service, dispatcher. + if len(cfg.EncryptionKeyHex) != 64 { + slog.Error("WRENN_ENCRYPTION_KEY must be a hex-encoded 32-byte key (64 hex chars)") + os.Exit(1) + } + channelPub := channels.NewPublisher(rdb) + channelSvc := &channels.Service{DB: queries, EncKey: cfg.EncryptionKey} + channelDispatcher := channels.NewDispatcher(rdb, queries, cfg.EncryptionKey) - // Start reconciler. - reconciler := api.NewReconciler(queries, agentClient, "default", 5*time.Second) - reconciler.Start(ctx) + // Shared audit logger with event publishing. + al := audit.NewWithPublisher(queries, channelPub) + + // API server. + srv := api.New(queries, hostPool, hostScheduler, pool, rdb, []byte(cfg.JWTSecret), oauthRegistry, cfg.OAuthRedirectURL, ca, al, channelSvc) + + // Start template build workers (2 concurrent). + stopBuildWorkers := srv.BuildSvc.StartWorkers(ctx, 2) + defer stopBuildWorkers() + + // Start channel event dispatcher. + channelDispatcher.Start(ctx) + + // Start host monitor (passive + active reconciliation every 30s). 
+ monitor := api.NewHostMonitor(queries, hostPool, al, 30*time.Second) + monitor.Start(ctx) + + // Start metrics sampler (records per-team sandbox stats every 10s). + sampler := api.NewMetricsSampler(queries, 10*time.Second) + sampler.Start(ctx) + + // Wrap the API handler with the sandbox proxy so that requests with + // {port}-{sandbox_id}.{domain} Host headers are routed to the sandbox's + // host agent. All other requests pass through to the normal API router. + proxyWrapper := api.NewSandboxProxyWrapper(srv.Handler(), queries, hostPool) httpServer := &http.Server{ Addr: cfg.ListenAddr, - Handler: srv.Handler(), + Handler: proxyWrapper, } // Graceful shutdown on signal. @@ -114,7 +181,7 @@ func main() { } }() - slog.Info("control plane starting", "addr", cfg.ListenAddr, "agent", cfg.HostAgentAddr) + slog.Info("control plane starting", "addr", cfg.ListenAddr) if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { slog.Error("http server error", "error", err) os.Exit(1) diff --git a/cmd/host-agent/main.go b/cmd/host-agent/main.go index c426a81..df8de3e 100644 --- a/cmd/host-agent/main.go +++ b/cmd/host-agent/main.go @@ -2,23 +2,32 @@ package main import ( "context" + "crypto/tls" "flag" "log/slog" "net/http" "os" "os/signal" "path/filepath" + "sync" "syscall" "time" + "github.com/joho/godotenv" + + "git.omukk.dev/wrenn/sandbox/internal/auth" "git.omukk.dev/wrenn/sandbox/internal/devicemapper" "git.omukk.dev/wrenn/sandbox/internal/hostagent" + "git.omukk.dev/wrenn/sandbox/internal/network" "git.omukk.dev/wrenn/sandbox/internal/sandbox" "git.omukk.dev/wrenn/sandbox/proto/hostagent/gen/hostagentv1connect" ) func main() { - registrationToken := flag.String("register", "", "One-time registration token from the control plane") + // Best-effort load — missing .env file is fine. 
+ _ = godotenv.Load() + + registrationToken := flag.String("register", "", "One-time registration token from the control plane (required on first run)") advertiseAddr := flag.String("address", "", "Externally-reachable address (ip:port) for this host agent") flag.Parse() @@ -36,19 +45,33 @@ func main() { slog.Warn("failed to enable ip_forward", "error", err) } - // Clean up any stale dm-snapshot devices from a previous crash. + // Clean up stale resources from a previous crash. devicemapper.CleanupStaleDevices() + network.CleanupStaleNamespaces() - listenAddr := envOrDefault("AGENT_LISTEN_ADDR", ":50051") - rootDir := envOrDefault("AGENT_FILES_ROOTDIR", "/var/lib/wrenn") - cpURL := os.Getenv("AGENT_CP_URL") - tokenFile := filepath.Join(rootDir, "host-token") + listenAddr := envOrDefault("WRENN_HOST_LISTEN_ADDR", ":50051") + rootDir := envOrDefault("WRENN_DIR", "/var/lib/wrenn") + cpURL := os.Getenv("WRENN_CP_URL") + credsFile := filepath.Join(rootDir, "host-credentials.json") + + if cpURL == "" { + slog.Error("WRENN_CP_URL environment variable is required") + os.Exit(1) + } + if *advertiseAddr == "" { + slog.Error("--address flag is required (externally-reachable ip:port)") + os.Exit(1) + } + + // Expand base images to the standard disk size (sparse, no extra physical + // disk). This ensures dm-snapshot sandboxes see the full size from boot. 
+ if err := sandbox.EnsureImageSizes(rootDir, sandbox.DefaultDiskSizeMB); err != nil { + slog.Error("failed to expand base images", "error", err) + os.Exit(1) + } cfg := sandbox.Config{ - KernelPath: filepath.Join(rootDir, "kernels", "vmlinux"), - ImagesDir: filepath.Join(rootDir, "images"), - SandboxesDir: filepath.Join(rootDir, "sandboxes"), - SnapshotsDir: filepath.Join(rootDir, "snapshots"), + WrennDir: rootDir, } mgr := sandbox.New(cfg) @@ -58,66 +81,116 @@ func main() { mgr.StartTTLReaper(ctx) - if *advertiseAddr == "" { - slog.Error("--address flag is required (externally-reachable ip:port)") + // Register with the control plane and start heartbeating. + creds, err := hostagent.Register(ctx, hostagent.RegistrationConfig{ + CPURL: cpURL, + RegistrationToken: *registrationToken, + TokenFile: credsFile, + Address: *advertiseAddr, + }) + if err != nil { + slog.Error("host registration failed", "error", err) os.Exit(1) } - // Register with the control plane (if configured). - if cpURL != "" { - hostToken, err := hostagent.Register(ctx, hostagent.RegistrationConfig{ - CPURL: cpURL, - RegistrationToken: *registrationToken, - TokenFile: tokenFile, - Address: *advertiseAddr, + slog.Info("host registered", "host_id", creds.HostID) + + // httpServer is declared here so the shutdown func can reference it. + httpServer := &http.Server{Addr: listenAddr} + + // mTLS is mandatory — refuse to start without a valid certificate. 
+ var certStore hostagent.CertStore + if creds.CertPEM == "" || creds.KeyPEM == "" || creds.CACertPEM == "" { + slog.Error("mTLS certificate not received from CP — ensure WRENN_CA_CERT and WRENN_CA_KEY are configured on the control plane") + os.Exit(1) + } + if err := certStore.ParseAndStore(creds.CertPEM, creds.KeyPEM); err != nil { + slog.Error("failed to load host TLS certificate", "error", err) + os.Exit(1) + } + tlsCfg := auth.AgentTLSConfigFromPEM(creds.CACertPEM, certStore.GetCert) + if tlsCfg == nil { + slog.Error("failed to build agent TLS config: invalid CA certificate PEM") + os.Exit(1) + } + httpServer.TLSConfig = tlsCfg + slog.Info("mTLS enabled on agent server") + + // doShutdown is the single shutdown path. sync.Once ensures mgr.Shutdown + // and httpServer.Shutdown are each called exactly once regardless of + // whether shutdown is triggered by a signal, a heartbeat 404, or the + // Terminate RPC. + var shutdownOnce sync.Once + doShutdown := func(reason string) { + shutdownOnce.Do(func() { + slog.Info("shutting down", "reason", reason) + cancel() + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownCancel() + mgr.Shutdown(shutdownCtx) + if err := httpServer.Shutdown(shutdownCtx); err != nil { + slog.Error("http server shutdown error", "error", err) + } }) - if err != nil { - slog.Error("host registration failed", "error", err) - os.Exit(1) - } - - hostID, err := hostagent.HostIDFromToken(hostToken) - if err != nil { - slog.Error("failed to extract host ID from token", "error", err) - os.Exit(1) - } - - slog.Info("host registered", "host_id", hostID) - hostagent.StartHeartbeat(ctx, cpURL, hostID, hostToken, 30*time.Second) } - srv := hostagent.NewServer(mgr) + srv := hostagent.NewServer(mgr, func() { + doShutdown("Terminate RPC received") + }) path, handler := hostagentv1connect.NewHostAgentServiceHandler(srv) + proxyHandler := hostagent.NewProxyHandler(mgr) + mux := http.NewServeMux() 
mux.Handle(path, handler) + mux.Handle("/proxy/", proxyHandler) + httpServer.Handler = mux - httpServer := &http.Server{ - Addr: listenAddr, - Handler: mux, - } + // Start heartbeat loop. Handler must be set before this because the + // immediate beat can trigger doShutdown → httpServer.Shutdown synchronously. + hostagent.StartHeartbeat(ctx, cpURL, credsFile, creds.HostID, 30*time.Second, + // pauseAll: called on 3 consecutive network failures. + func() { + pauseCtx, pauseCancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer pauseCancel() + mgr.PauseAll(pauseCtx) + }, + // onDeleted: called when CP returns 404 (host was deleted). + func() { + doShutdown("host deleted from CP") + }, + // onCredsRefreshed: hot-swap the TLS certificate after a JWT refresh. + func(tf *hostagent.TokenFile) { + if tf.CertPEM == "" || tf.KeyPEM == "" { + return + } + if err := certStore.ParseAndStore(tf.CertPEM, tf.KeyPEM); err != nil { + slog.Error("failed to hot-swap TLS cert after credentials refresh", "error", err) + } else { + slog.Info("TLS cert hot-swapped after credentials refresh") + } + }, + ) - // Graceful shutdown on signal. + // Graceful shutdown on SIGINT/SIGTERM. 
sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-sigCh - slog.Info("received signal, shutting down", "signal", sig) - cancel() - - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer shutdownCancel() - - mgr.Shutdown(shutdownCtx) - - if err := httpServer.Shutdown(shutdownCtx); err != nil { - slog.Error("http server shutdown error", "error", err) - } + doShutdown("signal: " + sig.String()) }() - slog.Info("host agent starting", "addr", listenAddr) - if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - slog.Error("http server error", "error", err) + slog.Info("host agent starting", "addr", listenAddr, "host_id", creds.HostID) + // TLSConfig is always set (mTLS is mandatory). Create the TLS listener + // manually because ListenAndServeTLS requires on-disk cert/key paths + // but we use GetCertificate callback for hot-swap support. + ln, err := tls.Listen("tcp", listenAddr, httpServer.TLSConfig) + if err != nil { + slog.Error("failed to start TLS listener", "error", err) + os.Exit(1) + } + if err := httpServer.Serve(ln); err != nil && err != http.ErrServerClosed { + slog.Error("https server error", "error", err) os.Exit(1) } diff --git a/db/migrations/20260310094104_initial.sql b/db/migrations/20260310094104_initial.sql index c291815..6c8afc4 100644 --- a/db/migrations/20260310094104_initial.sql +++ b/db/migrations/20260310094104_initial.sql @@ -1,25 +1,237 @@ -- +goose Up -CREATE TABLE sandboxes ( - id TEXT PRIMARY KEY, - owner_id TEXT NOT NULL DEFAULT '', - host_id TEXT NOT NULL DEFAULT 'default', - template TEXT NOT NULL DEFAULT 'minimal', - status TEXT NOT NULL DEFAULT 'pending', - vcpus INTEGER NOT NULL DEFAULT 1, - memory_mb INTEGER NOT NULL DEFAULT 512, - timeout_sec INTEGER NOT NULL DEFAULT 0, - guest_ip TEXT NOT NULL DEFAULT '', - host_ip TEXT NOT NULL DEFAULT '', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - started_at 
TIMESTAMPTZ, - last_active_at TIMESTAMPTZ, - last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW() +-- teams +CREATE TABLE teams ( + id UUID PRIMARY KEY, + name TEXT NOT NULL, + slug TEXT NOT NULL UNIQUE, + is_byoc BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ +); +CREATE INDEX idx_teams_slug ON teams(slug); + +-- users +CREATE TABLE users ( + id UUID PRIMARY KEY, + email TEXT NOT NULL UNIQUE, + password_hash TEXT, + name TEXT NOT NULL DEFAULT '', + is_admin BOOLEAN NOT NULL DEFAULT FALSE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); +-- users_teams (junction) +CREATE TABLE users_teams ( + user_id UUID NOT NULL REFERENCES users(id), + team_id UUID NOT NULL REFERENCES teams(id), + is_default BOOLEAN NOT NULL DEFAULT FALSE, + role TEXT NOT NULL DEFAULT 'member', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (team_id, user_id) +); +CREATE INDEX idx_users_teams_user ON users_teams(user_id); + +-- team_api_keys +CREATE TABLE team_api_keys ( + id UUID PRIMARY KEY, + team_id UUID NOT NULL REFERENCES teams(id), + name TEXT NOT NULL, + key_hash TEXT NOT NULL UNIQUE, + key_prefix TEXT NOT NULL, + created_by UUID NOT NULL REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + last_used TIMESTAMPTZ +); +CREATE INDEX idx_team_api_keys_team ON team_api_keys(team_id); + +-- oauth_providers +CREATE TABLE oauth_providers ( + provider TEXT NOT NULL, + provider_id TEXT NOT NULL, + user_id UUID NOT NULL REFERENCES users(id), + email TEXT NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (provider, provider_id) +); +CREATE INDEX idx_oauth_providers_user ON oauth_providers(user_id); + +-- admin_permissions +CREATE TABLE admin_permissions ( + id UUID PRIMARY KEY, + user_id UUID NOT NULL REFERENCES users(id), + permission TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE (user_id, 
permission) +); +CREATE INDEX idx_admin_permissions_user ON admin_permissions(user_id); + +-- hosts +CREATE TABLE hosts ( + id UUID PRIMARY KEY, + type TEXT NOT NULL DEFAULT 'regular', + team_id UUID REFERENCES teams(id), + provider TEXT NOT NULL DEFAULT '', + availability_zone TEXT NOT NULL DEFAULT '', + arch TEXT NOT NULL DEFAULT '', + cpu_cores INTEGER NOT NULL DEFAULT 0, + memory_mb INTEGER NOT NULL DEFAULT 0, + disk_gb INTEGER NOT NULL DEFAULT 0, + address TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + last_heartbeat_at TIMESTAMPTZ, + metadata JSONB NOT NULL DEFAULT '{}', + created_by UUID NOT NULL REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + cert_fingerprint TEXT NOT NULL DEFAULT '', + mtls_enabled BOOLEAN NOT NULL DEFAULT FALSE +); +CREATE INDEX idx_hosts_type ON hosts(type); +CREATE INDEX idx_hosts_team ON hosts(team_id); +CREATE INDEX idx_hosts_status ON hosts(status); + +-- host_tokens +CREATE TABLE host_tokens ( + id UUID PRIMARY KEY, + host_id UUID NOT NULL REFERENCES hosts(id) ON DELETE CASCADE, + created_by UUID NOT NULL REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + expires_at TIMESTAMPTZ NOT NULL, + used_at TIMESTAMPTZ +); +CREATE INDEX idx_host_tokens_host ON host_tokens(host_id); + +-- host_tags +CREATE TABLE host_tags ( + host_id UUID NOT NULL REFERENCES hosts(id) ON DELETE CASCADE, + tag TEXT NOT NULL, + PRIMARY KEY (host_id, tag) +); +CREATE INDEX idx_host_tags_tag ON host_tags(tag); + +-- host_refresh_tokens +CREATE TABLE host_refresh_tokens ( + id UUID PRIMARY KEY, + host_id UUID NOT NULL REFERENCES hosts(id) ON DELETE CASCADE, + token_hash TEXT NOT NULL UNIQUE, + expires_at TIMESTAMPTZ NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + revoked_at TIMESTAMPTZ +); +CREATE INDEX idx_host_refresh_tokens_host ON host_refresh_tokens(host_id); + +-- templates (TEXT primary key — not UUID) +CREATE TABLE templates ( 
+ name TEXT PRIMARY KEY, + type TEXT NOT NULL DEFAULT 'base', + vcpus INTEGER NOT NULL DEFAULT 1, + memory_mb INTEGER NOT NULL DEFAULT 512, + size_bytes BIGINT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + team_id UUID NOT NULL +); +CREATE INDEX idx_templates_team ON templates(team_id); + +-- sandboxes +CREATE TABLE sandboxes ( + id UUID PRIMARY KEY, + team_id UUID NOT NULL REFERENCES teams(id), + host_id UUID NOT NULL, + template TEXT NOT NULL DEFAULT 'minimal', + status TEXT NOT NULL DEFAULT 'pending', + vcpus INTEGER NOT NULL DEFAULT 1, + memory_mb INTEGER NOT NULL DEFAULT 512, + timeout_sec INTEGER NOT NULL DEFAULT 300, + disk_size_mb INTEGER NOT NULL DEFAULT 5120, + guest_ip TEXT NOT NULL DEFAULT '', + host_ip TEXT NOT NULL DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + started_at TIMESTAMPTZ, + last_active_at TIMESTAMPTZ, + last_updated TIMESTAMPTZ NOT NULL DEFAULT NOW() +); CREATE INDEX idx_sandboxes_status ON sandboxes(status); CREATE INDEX idx_sandboxes_host_status ON sandboxes(host_id, status); +CREATE INDEX idx_sandboxes_team ON sandboxes(team_id); + +-- audit_logs (id and team_id are UUID; actor_id and resource_id are TEXT for polymorphism) +CREATE TABLE audit_logs ( + id UUID PRIMARY KEY, + team_id UUID NOT NULL, + actor_type TEXT NOT NULL, + actor_id TEXT, + actor_name TEXT NOT NULL DEFAULT '', + resource_type TEXT NOT NULL, + resource_id TEXT, + action TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'team', + status TEXT NOT NULL DEFAULT 'success', + metadata JSONB NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +CREATE INDEX idx_audit_logs_team_time ON audit_logs(team_id, created_at DESC); +CREATE INDEX idx_audit_logs_team_resource ON audit_logs(team_id, resource_type, created_at DESC); + +-- sandbox_metrics_snapshots +CREATE TABLE sandbox_metrics_snapshots ( + id BIGSERIAL PRIMARY KEY, + team_id UUID NOT NULL, + sampled_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + running_count INTEGER 
NOT NULL DEFAULT 0, + vcpus_reserved INTEGER NOT NULL DEFAULT 0, + memory_mb_reserved INTEGER NOT NULL DEFAULT 0 +); +CREATE INDEX idx_metrics_snapshots_team_time ON sandbox_metrics_snapshots(team_id, sampled_at DESC); + +-- sandbox_metric_points +CREATE TABLE sandbox_metric_points ( + sandbox_id UUID NOT NULL, + tier TEXT NOT NULL CHECK (tier IN ('10m', '2h', '24h')), + ts BIGINT NOT NULL, + cpu_pct FLOAT8 NOT NULL DEFAULT 0, + mem_bytes BIGINT NOT NULL DEFAULT 0, + disk_bytes BIGINT NOT NULL DEFAULT 0, + PRIMARY KEY (sandbox_id, tier, ts) +); +CREATE INDEX idx_sandbox_metric_points_sandbox_tier ON sandbox_metric_points(sandbox_id, tier); + +-- template_builds +CREATE TABLE template_builds ( + id UUID PRIMARY KEY, + name TEXT NOT NULL, + base_template TEXT NOT NULL, + recipe JSONB NOT NULL DEFAULT '[]', + healthcheck TEXT NOT NULL DEFAULT '', + vcpus INTEGER NOT NULL DEFAULT 1, + memory_mb INTEGER NOT NULL DEFAULT 512, + status TEXT NOT NULL DEFAULT 'pending', + current_step INTEGER NOT NULL DEFAULT 0, + total_steps INTEGER NOT NULL DEFAULT 0, + logs JSONB NOT NULL DEFAULT '[]', + error TEXT NOT NULL DEFAULT '', + sandbox_id UUID, + host_id UUID, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ +); -- +goose Down - -DROP TABLE sandboxes; +DROP TABLE IF EXISTS template_builds; +DROP TABLE IF EXISTS sandbox_metric_points; +DROP TABLE IF EXISTS sandbox_metrics_snapshots; +DROP TABLE IF EXISTS audit_logs; +DROP TABLE IF EXISTS sandboxes; +DROP TABLE IF EXISTS templates; +DROP TABLE IF EXISTS host_refresh_tokens; +DROP TABLE IF EXISTS host_tags; +DROP TABLE IF EXISTS host_tokens; +DROP TABLE IF EXISTS hosts; +DROP TABLE IF EXISTS admin_permissions; +DROP TABLE IF EXISTS oauth_providers; +DROP TABLE IF EXISTS team_api_keys; +DROP TABLE IF EXISTS users_teams; +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS teams; diff --git a/db/migrations/20260311224925_snapshots.sql b/db/migrations/20260311224925_snapshots.sql 
deleted file mode 100644 index 8a0427c..0000000 --- a/db/migrations/20260311224925_snapshots.sql +++ /dev/null @@ -1,14 +0,0 @@ --- +goose Up - -CREATE TABLE templates ( - name TEXT PRIMARY KEY, - type TEXT NOT NULL DEFAULT 'base', -- 'base' or 'snapshot' - vcpus INTEGER, - memory_mb INTEGER, - size_bytes BIGINT NOT NULL DEFAULT 0, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - --- +goose Down - -DROP TABLE templates; diff --git a/db/migrations/20260313210608_auth.sql b/db/migrations/20260313210608_auth.sql deleted file mode 100644 index 03970a8..0000000 --- a/db/migrations/20260313210608_auth.sql +++ /dev/null @@ -1,46 +0,0 @@ --- +goose Up - -CREATE TABLE users ( - id TEXT PRIMARY KEY, - email TEXT NOT NULL UNIQUE, - password_hash TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -CREATE TABLE teams ( - id TEXT PRIMARY KEY, - name TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -CREATE TABLE users_teams ( - user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, - team_id TEXT NOT NULL REFERENCES teams(id) ON DELETE CASCADE, - is_default BOOLEAN NOT NULL DEFAULT TRUE, - role TEXT NOT NULL DEFAULT 'owner', - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - PRIMARY KEY (team_id, user_id) -); - -CREATE INDEX idx_users_teams_user ON users_teams(user_id); - -CREATE TABLE team_api_keys ( - id TEXT PRIMARY KEY, - team_id TEXT NOT NULL REFERENCES teams(id) ON DELETE CASCADE, - name TEXT NOT NULL DEFAULT '', - key_hash TEXT NOT NULL UNIQUE, - key_prefix TEXT NOT NULL DEFAULT '', - created_by TEXT NOT NULL REFERENCES users(id), - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - last_used TIMESTAMPTZ -); - -CREATE INDEX idx_team_api_keys_team ON team_api_keys(team_id); - --- +goose Down - -DROP TABLE team_api_keys; -DROP TABLE users_teams; -DROP TABLE teams; -DROP TABLE users; diff --git a/db/migrations/20260313210611_team_ownership.sql 
b/db/migrations/20260313210611_team_ownership.sql deleted file mode 100644 index 849e781..0000000 --- a/db/migrations/20260313210611_team_ownership.sql +++ /dev/null @@ -1,31 +0,0 @@ --- +goose Up - -ALTER TABLE sandboxes - ADD COLUMN team_id TEXT NOT NULL DEFAULT ''; - -UPDATE sandboxes SET team_id = owner_id WHERE owner_id != ''; - -ALTER TABLE sandboxes - DROP COLUMN owner_id; - -ALTER TABLE templates - ADD COLUMN team_id TEXT NOT NULL DEFAULT ''; - -CREATE INDEX idx_sandboxes_team ON sandboxes(team_id); -CREATE INDEX idx_templates_team ON templates(team_id); - --- +goose Down - -ALTER TABLE sandboxes - ADD COLUMN owner_id TEXT NOT NULL DEFAULT ''; - -UPDATE sandboxes SET owner_id = team_id WHERE team_id != ''; - -ALTER TABLE sandboxes - DROP COLUMN team_id; - -ALTER TABLE templates - DROP COLUMN team_id; - -DROP INDEX IF EXISTS idx_sandboxes_team; -DROP INDEX IF EXISTS idx_templates_team; diff --git a/db/migrations/20260315001514_oauth.sql b/db/migrations/20260315001514_oauth.sql deleted file mode 100644 index c3c33e9..0000000 --- a/db/migrations/20260315001514_oauth.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose Up - -ALTER TABLE users - ALTER COLUMN password_hash DROP NOT NULL; - -CREATE TABLE oauth_providers ( - provider TEXT NOT NULL, - provider_id TEXT NOT NULL, - user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, - email TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - PRIMARY KEY (provider, provider_id) -); - -CREATE INDEX idx_oauth_providers_user ON oauth_providers(user_id); - --- +goose Down - -DROP TABLE oauth_providers; - -UPDATE users SET password_hash = '' WHERE password_hash IS NULL; -ALTER TABLE users ALTER COLUMN password_hash SET NOT NULL; diff --git a/db/migrations/20260316203135_admin_users.sql b/db/migrations/20260316203135_admin_users.sql deleted file mode 100644 index eff669b..0000000 --- a/db/migrations/20260316203135_admin_users.sql +++ /dev/null @@ -1,21 +0,0 @@ --- +goose Up - -ALTER TABLE users - ADD COLUMN 
is_admin BOOLEAN NOT NULL DEFAULT FALSE; - -CREATE TABLE admin_permissions ( - id TEXT PRIMARY KEY, - user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE, - permission TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - UNIQUE (user_id, permission) -); - -CREATE INDEX idx_admin_permissions_user ON admin_permissions(user_id); - --- +goose Down - -DROP TABLE admin_permissions; - -ALTER TABLE users - DROP COLUMN is_admin; diff --git a/db/migrations/20260316203138_byoc_teams.sql b/db/migrations/20260316203138_byoc_teams.sql deleted file mode 100644 index bb2c8ec..0000000 --- a/db/migrations/20260316203138_byoc_teams.sql +++ /dev/null @@ -1,9 +0,0 @@ --- +goose Up - -ALTER TABLE teams - ADD COLUMN is_byoc BOOLEAN NOT NULL DEFAULT FALSE; - --- +goose Down - -ALTER TABLE teams - DROP COLUMN is_byoc; diff --git a/db/migrations/20260316203142_hosts.sql b/db/migrations/20260316203142_hosts.sql deleted file mode 100644 index 372b380..0000000 --- a/db/migrations/20260316203142_hosts.sql +++ /dev/null @@ -1,47 +0,0 @@ --- +goose Up - -CREATE TABLE hosts ( - id TEXT PRIMARY KEY, - type TEXT NOT NULL DEFAULT 'regular', -- 'regular' or 'byoc' - team_id TEXT REFERENCES teams(id) ON DELETE SET NULL, - provider TEXT, - availability_zone TEXT, - arch TEXT, - cpu_cores INTEGER, - memory_mb INTEGER, - disk_gb INTEGER, - address TEXT, -- ip:port of host agent - status TEXT NOT NULL DEFAULT 'pending', -- 'pending', 'online', 'offline', 'draining' - last_heartbeat_at TIMESTAMPTZ, - metadata JSONB NOT NULL DEFAULT '{}', - created_by TEXT NOT NULL REFERENCES users(id), - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -CREATE TABLE host_tokens ( - id TEXT PRIMARY KEY, - host_id TEXT NOT NULL REFERENCES hosts(id) ON DELETE CASCADE, - created_by TEXT NOT NULL REFERENCES users(id), - created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - expires_at TIMESTAMPTZ NOT NULL, - used_at TIMESTAMPTZ -); - -CREATE TABLE host_tags ( - 
host_id TEXT NOT NULL REFERENCES hosts(id) ON DELETE CASCADE, - tag TEXT NOT NULL, - PRIMARY KEY (host_id, tag) -); - -CREATE INDEX idx_hosts_type ON hosts(type); -CREATE INDEX idx_hosts_team ON hosts(team_id); -CREATE INDEX idx_hosts_status ON hosts(status); -CREATE INDEX idx_host_tokens_host ON host_tokens(host_id); -CREATE INDEX idx_host_tags_tag ON host_tags(tag); - --- +goose Down - -DROP TABLE host_tags; -DROP TABLE host_tokens; -DROP TABLE hosts; diff --git a/db/migrations/20260316223629_host_mtls.sql b/db/migrations/20260316223629_host_mtls.sql deleted file mode 100644 index f56b923..0000000 --- a/db/migrations/20260316223629_host_mtls.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +goose Up - -ALTER TABLE hosts - ADD COLUMN cert_fingerprint TEXT, - ADD COLUMN mtls_enabled BOOLEAN NOT NULL DEFAULT FALSE; - --- +goose Down - -ALTER TABLE hosts - DROP COLUMN cert_fingerprint, - DROP COLUMN mtls_enabled; diff --git a/db/migrations/20260328162803_template_uuid_pk.sql b/db/migrations/20260328162803_template_uuid_pk.sql new file mode 100644 index 0000000..0bb6566 --- /dev/null +++ b/db/migrations/20260328162803_template_uuid_pk.sql @@ -0,0 +1,82 @@ +-- +goose Up + +-- 1. Add UUID id column to templates and make it the primary key. +ALTER TABLE templates ADD COLUMN id UUID DEFAULT gen_random_uuid(); +UPDATE templates SET id = gen_random_uuid() WHERE id IS NULL; +ALTER TABLE templates ALTER COLUMN id SET NOT NULL; +ALTER TABLE templates DROP CONSTRAINT templates_pkey; +ALTER TABLE templates ADD PRIMARY KEY (id); + +-- 2. Name becomes a display field with team-scoped uniqueness. +ALTER TABLE templates ADD CONSTRAINT uq_templates_team_name UNIQUE (team_id, name); + +-- 3. Prevent team templates from using names that belong to global (platform) templates. +-- A team template insert/update with a name matching any platform template is rejected. 
+-- +goose StatementBegin +CREATE OR REPLACE FUNCTION check_global_template_name_collision() +RETURNS TRIGGER AS $$ +BEGIN + IF NEW.team_id != '00000000-0000-0000-0000-000000000000' THEN + IF EXISTS ( + SELECT 1 FROM templates + WHERE name = NEW.name + AND team_id = '00000000-0000-0000-0000-000000000000' + ) THEN + RAISE EXCEPTION 'template name "%" is reserved by a global template', NEW.name + USING ERRCODE = 'unique_violation'; + END IF; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +-- +goose StatementEnd + +CREATE TRIGGER trg_check_global_template_name + BEFORE INSERT OR UPDATE ON templates + FOR EACH ROW + EXECUTE FUNCTION check_global_template_name_collision(); + +-- 4. Seed the built-in "minimal" template so it appears in all listings. +-- Both id and team_id are the all-zeros UUID (platform sentinel). +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) +VALUES ( + '00000000-0000-0000-0000-000000000000', + 'minimal', + 'base', + 1, + 512, + 0, + '00000000-0000-0000-0000-000000000000' +) ON CONFLICT DO NOTHING; + +-- 5. Add template UUID references to template_builds. +ALTER TABLE template_builds + ADD COLUMN template_id UUID, + ADD COLUMN team_id UUID; + +-- 6. Add template UUID references to sandboxes. +ALTER TABLE sandboxes + ADD COLUMN template_id UUID, + ADD COLUMN template_team_id UUID; + +-- +goose Down + +ALTER TABLE sandboxes + DROP COLUMN IF EXISTS template_team_id, + DROP COLUMN IF EXISTS template_id; + +ALTER TABLE template_builds + DROP COLUMN IF EXISTS team_id, + DROP COLUMN IF EXISTS template_id; + +-- Remove the seeded minimal template. 
+DELETE FROM templates WHERE id = '00000000-0000-0000-0000-000000000000'; + +DROP TRIGGER IF EXISTS trg_check_global_template_name ON templates; +DROP FUNCTION IF EXISTS check_global_template_name_collision(); + +ALTER TABLE templates DROP CONSTRAINT IF EXISTS uq_templates_team_name; + +ALTER TABLE templates DROP CONSTRAINT IF EXISTS templates_pkey; +ALTER TABLE templates ADD PRIMARY KEY (name); +ALTER TABLE templates DROP COLUMN IF EXISTS id; diff --git a/db/migrations/20260330112050_mtls_cert_expiry.sql b/db/migrations/20260330112050_mtls_cert_expiry.sql new file mode 100644 index 0000000..e7245d2 --- /dev/null +++ b/db/migrations/20260330112050_mtls_cert_expiry.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE hosts DROP COLUMN mtls_enabled; +ALTER TABLE hosts ADD COLUMN cert_expires_at TIMESTAMPTZ; + +-- +goose Down +ALTER TABLE hosts DROP COLUMN cert_expires_at; +ALTER TABLE hosts ADD COLUMN mtls_enabled BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/db/migrations/20260330150223_build_options.sql b/db/migrations/20260330150223_build_options.sql new file mode 100644 index 0000000..981ad06 --- /dev/null +++ b/db/migrations/20260330150223_build_options.sql @@ -0,0 +1,11 @@ +-- +goose Up + +-- Allow completed_at to be set when a build is cancelled. +-- (The UpdateBuildStatus query is updated in sqlc; no schema change needed for that.) + +-- Add skip_pre_post flag: when true, the pre-build and post-build command phases +-- are skipped for this build. 
+ALTER TABLE template_builds ADD COLUMN skip_pre_post BOOLEAN NOT NULL DEFAULT FALSE; + +-- +goose Down +ALTER TABLE template_builds DROP COLUMN skip_pre_post; diff --git a/db/migrations/20260409103357_add_channels.sql b/db/migrations/20260409103357_add_channels.sql new file mode 100644 index 0000000..dbe3cc3 --- /dev/null +++ b/db/migrations/20260409103357_add_channels.sql @@ -0,0 +1,19 @@ +-- +goose Up + +CREATE TABLE channels ( + id UUID PRIMARY KEY, + team_id UUID NOT NULL REFERENCES teams(id) ON DELETE CASCADE, + name TEXT NOT NULL, + provider TEXT NOT NULL, + config JSONB NOT NULL DEFAULT '{}', + event_types TEXT[] NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE (team_id, name) +); + +CREATE INDEX idx_channels_team ON channels(team_id); + +-- +goose Down + +DROP TABLE IF EXISTS channels; diff --git a/db/queries/audit.sql b/db/queries/audit.sql index e69de29..9250db7 100644 --- a/db/queries/audit.sql +++ b/db/queries/audit.sql @@ -0,0 +1,14 @@ +-- name: InsertAuditLog :exec +INSERT INTO audit_logs (id, team_id, actor_type, actor_id, actor_name, resource_type, resource_id, action, scope, status, metadata) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11); + +-- name: ListAuditLogs :many +SELECT * FROM audit_logs +WHERE team_id = $1 + AND scope = ANY($2::text[]) + AND (cardinality($3::text[]) = 0 OR resource_type = ANY($3::text[])) + AND (cardinality($4::text[]) = 0 OR action = ANY($4::text[])) + AND ($5::timestamptz IS NULL OR created_at < $5 + OR (created_at = $5 AND id < $6)) +ORDER BY created_at DESC, id DESC +LIMIT $7; diff --git a/db/queries/channels.sql b/db/queries/channels.sql new file mode 100644 index 0000000..5772c99 --- /dev/null +++ b/db/queries/channels.sql @@ -0,0 +1,29 @@ +-- name: InsertChannel :one +INSERT INTO channels (id, team_id, name, provider, config, event_types) +VALUES ($1, $2, $3, $4, $5, $6) +RETURNING *; + +-- name: ListChannelsByTeam :many 
+SELECT * FROM channels WHERE team_id = $1 ORDER BY created_at DESC; + +-- name: GetChannelByTeam :one +SELECT * FROM channels WHERE id = $1 AND team_id = $2; + +-- name: UpdateChannel :one +UPDATE channels SET name = $3, event_types = $4, updated_at = NOW() +WHERE id = $1 AND team_id = $2 +RETURNING *; + +-- name: UpdateChannelConfig :one +UPDATE channels SET config = $3, updated_at = NOW() +WHERE id = $1 AND team_id = $2 +RETURNING *; + +-- name: DeleteChannelByTeam :exec +DELETE FROM channels WHERE id = $1 AND team_id = $2; + +-- name: ListChannelsForEvent :many +SELECT * FROM channels +WHERE team_id = $1 + AND sqlc.arg(event_type)::text = ANY(event_types) +ORDER BY created_at; diff --git a/db/queries/host_refresh_tokens.sql b/db/queries/host_refresh_tokens.sql new file mode 100644 index 0000000..5a41164 --- /dev/null +++ b/db/queries/host_refresh_tokens.sql @@ -0,0 +1,19 @@ +-- name: InsertHostRefreshToken :one +INSERT INTO host_refresh_tokens (id, host_id, token_hash, expires_at) +VALUES ($1, $2, $3, $4) +RETURNING *; + +-- name: GetHostRefreshTokenByHash :one +SELECT * FROM host_refresh_tokens +WHERE token_hash = $1 AND revoked_at IS NULL AND expires_at > NOW(); + +-- name: RevokeHostRefreshToken :exec +UPDATE host_refresh_tokens SET revoked_at = NOW() WHERE id = $1; + +-- name: RevokeHostRefreshTokensByHost :exec +UPDATE host_refresh_tokens SET revoked_at = NOW() +WHERE host_id = $1 AND revoked_at IS NULL; + +-- name: DeleteExpiredHostRefreshTokens :exec +DELETE FROM host_refresh_tokens +WHERE expires_at < NOW() OR revoked_at IS NOT NULL; diff --git a/db/queries/hosts.sql b/db/queries/hosts.sql index 7f8c9e4..0a5a150 100644 --- a/db/queries/hosts.sql +++ b/db/queries/hosts.sql @@ -20,16 +20,25 @@ SELECT * FROM hosts WHERE status = $1 ORDER BY created_at DESC; -- name: RegisterHost :execrows UPDATE hosts -SET arch = $2, - cpu_cores = $3, - memory_mb = $4, - disk_gb = $5, - address = $6, - status = 'online', +SET arch = $2, + cpu_cores = $3, + memory_mb = $4, 
+ disk_gb = $5, + address = $6, + cert_fingerprint = $7, + cert_expires_at = $8, + status = 'online', last_heartbeat_at = NOW(), - updated_at = NOW() + updated_at = NOW() WHERE id = $1 AND status = 'pending'; +-- name: UpdateHostCert :exec +UPDATE hosts +SET cert_fingerprint = $2, + cert_expires_at = $3, + updated_at = NOW() +WHERE id = $1; + -- name: UpdateHostStatus :exec UPDATE hosts SET status = $2, updated_at = NOW() WHERE id = $1; @@ -67,3 +76,19 @@ SELECT * FROM host_tokens WHERE host_id = $1 ORDER BY created_at DESC; -- name: GetHostByTeam :one SELECT * FROM hosts WHERE id = $1 AND team_id = $2; + +-- name: ListActiveHosts :many +-- Returns all hosts that have completed registration (not pending/offline). +SELECT * FROM hosts WHERE status NOT IN ('pending', 'offline') ORDER BY created_at; + +-- name: UpdateHostHeartbeatAndStatus :execrows +-- Updates last_heartbeat_at and transitions unreachable hosts back to online. +-- Returns 0 if no host was found (deleted), which the caller treats as 404. +UPDATE hosts +SET last_heartbeat_at = NOW(), + status = CASE WHEN status = 'unreachable' THEN 'online' ELSE status END, + updated_at = NOW() +WHERE id = $1; + +-- name: MarkHostUnreachable :exec +UPDATE hosts SET status = 'unreachable', updated_at = NOW() WHERE id = $1; diff --git a/db/queries/metrics.sql b/db/queries/metrics.sql new file mode 100644 index 0000000..f58d480 --- /dev/null +++ b/db/queries/metrics.sql @@ -0,0 +1,68 @@ +-- name: InsertMetricsSnapshot :exec +INSERT INTO sandbox_metrics_snapshots (team_id, running_count, vcpus_reserved, memory_mb_reserved) +VALUES ($1, $2, $3, $4); + +-- name: GetLiveMetrics :one +-- Reads directly from sandboxes for accurate real-time current values. +-- CPU reserved = running + starting only (paused VMs release CPU). +-- RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling). 
+SELECT + (COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count, + (COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved, + (COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0) + + COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved +FROM sandboxes +WHERE team_id = $1; + +-- name: GetPeakMetrics :one +SELECT + COALESCE(MAX(running_count), 0)::INTEGER AS peak_running_count, + COALESCE(MAX(vcpus_reserved), 0)::INTEGER AS peak_vcpus, + COALESCE(MAX(memory_mb_reserved), 0)::INTEGER AS peak_memory_mb +FROM sandbox_metrics_snapshots +WHERE team_id = $1 + AND sampled_at > NOW() - INTERVAL '30 days'; + +-- name: PruneOldMetrics :exec +DELETE FROM sandbox_metrics_snapshots +WHERE sampled_at < NOW() - INTERVAL '60 days'; + +-- name: InsertSandboxMetricPoint :exec +INSERT INTO sandbox_metric_points (sandbox_id, tier, ts, cpu_pct, mem_bytes, disk_bytes) +VALUES ($1, $2, $3, $4, $5, $6) +ON CONFLICT (sandbox_id, tier, ts) DO NOTHING; + +-- name: GetSandboxMetricPoints :many +SELECT ts, cpu_pct, mem_bytes, disk_bytes +FROM sandbox_metric_points +WHERE sandbox_id = $1 AND tier = $2 AND ts >= $3 +ORDER BY ts ASC; + +-- name: DeleteSandboxMetricPoints :exec +DELETE FROM sandbox_metric_points +WHERE sandbox_id = $1; + +-- name: DeleteSandboxMetricPointsByTier :exec +DELETE FROM sandbox_metric_points +WHERE sandbox_id = $1 AND tier = $2; + +-- name: PruneSandboxMetricPoints :exec +-- Remove metric points older than 30 days (the query does not filter by sandbox +-- status, so this prunes old points for all sandboxes, destroyed or not). +DELETE FROM sandbox_metric_points +WHERE ts < EXTRACT(EPOCH FROM NOW() - INTERVAL '30 days')::BIGINT; + +-- name: SampleSandboxMetrics :many +-- Aggregates per-team resource usage from the live sandboxes table. 
+-- Groups by all teams that have any sandbox row (including stopped) so that +-- zero-value snapshots are recorded when all capsules are stopped, keeping the +-- time-series charts continuous rather than trailing off into empty space. +-- CPU reserved = running + starting only (paused VMs release CPU). +-- RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling). +SELECT + team_id, + (COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count, + (COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved, + (COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0) + + COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved +FROM sandboxes +GROUP BY team_id; diff --git a/db/queries/sandboxes.sql b/db/queries/sandboxes.sql index f2a5d51..2b19574 100644 --- a/db/queries/sandboxes.sql +++ b/db/queries/sandboxes.sql @@ -1,6 +1,6 @@ -- name: InsertSandbox :one -INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +INSERT INTO sandboxes (id, team_id, host_id, template, status, vcpus, memory_mb, timeout_sec, disk_size_mb, template_id, template_team_id) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING *; -- name: GetSandbox :one @@ -9,6 +9,14 @@ SELECT * FROM sandboxes WHERE id = $1; -- name: GetSandboxByTeam :one SELECT * FROM sandboxes WHERE id = $1 AND team_id = $2; +-- name: GetSandboxProxyTarget :one +-- Returns the sandbox status and its host's address in one query. +-- Used by SandboxProxyWrapper to avoid two round-trips. 
+SELECT s.status, h.address AS host_address +FROM sandboxes s +JOIN hosts h ON h.id = s.host_id +WHERE s.id = $1 AND s.team_id = $2; + -- name: ListSandboxes :many SELECT * FROM sandboxes ORDER BY created_at DESC; @@ -50,4 +58,26 @@ WHERE id = $1; UPDATE sandboxes SET status = $2, last_updated = NOW() -WHERE id = ANY($1::text[]); +WHERE id = ANY($1::uuid[]); + +-- name: ListActiveSandboxesByTeam :many +SELECT * FROM sandboxes +WHERE team_id = $1 AND status IN ('running', 'paused', 'starting') +ORDER BY created_at DESC; + +-- name: MarkSandboxesMissingByHost :exec +-- Called when the host monitor marks a host unreachable. +-- Marks running/starting/pending sandboxes on that host as 'missing' so users see +-- the sandbox is not currently reachable, without permanently losing the record. +UPDATE sandboxes +SET status = 'missing', + last_updated = NOW() +WHERE host_id = $1 AND status IN ('running', 'starting', 'pending'); + +-- name: BulkRestoreRunning :exec +-- Called by the reconciler when a host comes back online and its sandboxes are +-- confirmed alive. Restores only sandboxes that are in 'missing' state. 
+UPDATE sandboxes +SET status = 'running', + last_updated = NOW() +WHERE id = ANY($1::uuid[]) AND status = 'missing'; diff --git a/db/queries/teams.sql b/db/queries/teams.sql index 58985ab..2117e95 100644 --- a/db/queries/teams.sql +++ b/db/queries/teams.sql @@ -1,6 +1,6 @@ -- name: InsertTeam :one -INSERT INTO teams (id, name) -VALUES ($1, $2) +INSERT INTO teams (id, name, slug) +VALUES ($1, $2, $3) RETURNING *; -- name: GetTeam :one @@ -13,14 +13,43 @@ VALUES ($1, $2, $3, $4); -- name: GetDefaultTeamForUser :one SELECT t.* FROM teams t JOIN users_teams ut ON ut.team_id = t.id -WHERE ut.user_id = $1 AND ut.is_default = TRUE +WHERE ut.user_id = $1 AND ut.is_default = TRUE AND t.deleted_at IS NULL LIMIT 1; -- name: SetTeamBYOC :exec UPDATE teams SET is_byoc = $2 WHERE id = $1; -- name: GetBYOCTeams :many -SELECT * FROM teams WHERE is_byoc = TRUE ORDER BY created_at; +SELECT * FROM teams WHERE is_byoc = TRUE AND deleted_at IS NULL ORDER BY created_at; -- name: GetTeamMembership :one SELECT * FROM users_teams WHERE user_id = $1 AND team_id = $2; + +-- name: UpdateTeamName :exec +UPDATE teams SET name = $2 WHERE id = $1 AND deleted_at IS NULL; + +-- name: SoftDeleteTeam :exec +UPDATE teams SET deleted_at = NOW() WHERE id = $1; + +-- name: GetTeamBySlug :one +SELECT * FROM teams WHERE slug = $1 AND deleted_at IS NULL; + +-- name: GetTeamsForUser :many +SELECT t.id, t.name, t.slug, t.is_byoc, t.created_at, t.deleted_at, ut.role +FROM teams t +JOIN users_teams ut ON ut.team_id = t.id +WHERE ut.user_id = $1 AND t.deleted_at IS NULL +ORDER BY ut.created_at; + +-- name: GetTeamMembers :many +SELECT u.id, u.name, u.email, ut.role, ut.created_at AS joined_at +FROM users_teams ut +JOIN users u ON u.id = ut.user_id +WHERE ut.team_id = $1 +ORDER BY ut.created_at; + +-- name: UpdateMemberRole :exec +UPDATE users_teams SET role = $3 WHERE team_id = $1 AND user_id = $2; + +-- name: DeleteTeamMember :exec +DELETE FROM users_teams WHERE team_id = $1 AND user_id = $2; diff --git 
a/db/queries/template_builds.sql b/db/queries/template_builds.sql new file mode 100644 index 0000000..1fb07be --- /dev/null +++ b/db/queries/template_builds.sql @@ -0,0 +1,33 @@ +-- name: InsertTemplateBuild :one +INSERT INTO template_builds (id, name, base_template, recipe, healthcheck, vcpus, memory_mb, status, total_steps, template_id, team_id, skip_pre_post) +VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending', $8, $9, $10, $11) +RETURNING *; + +-- name: GetTemplateBuild :one +SELECT * FROM template_builds WHERE id = $1; + +-- name: ListTemplateBuilds :many +SELECT * FROM template_builds ORDER BY created_at DESC; + +-- name: UpdateBuildStatus :one +UPDATE template_builds +SET status = $2, + started_at = CASE WHEN $2 = 'running' AND started_at IS NULL THEN NOW() ELSE started_at END, + completed_at = CASE WHEN $2 IN ('success', 'failed', 'cancelled') THEN NOW() ELSE completed_at END +WHERE id = $1 +RETURNING *; + +-- name: UpdateBuildProgress :exec +UPDATE template_builds +SET current_step = $2, logs = $3 +WHERE id = $1; + +-- name: UpdateBuildSandbox :exec +UPDATE template_builds +SET sandbox_id = $2, host_id = $3 +WHERE id = $1; + +-- name: UpdateBuildError :exec +UPDATE template_builds +SET error = $2, status = 'failed', completed_at = NOW() +WHERE id = $1; diff --git a/db/queries/templates.sql b/db/queries/templates.sql index b17abc3..de4d6f2 100644 --- a/db/queries/templates.sql +++ b/db/queries/templates.sql @@ -1,13 +1,22 @@ -- name: InsertTemplate :one -INSERT INTO templates (name, type, vcpus, memory_mb, size_bytes, team_id) -VALUES ($1, $2, $3, $4, $5, $6) +INSERT INTO templates (id, name, type, vcpus, memory_mb, size_bytes, team_id) +VALUES ($1, $2, $3, $4, $5, $6, $7) RETURNING *; -- name: GetTemplate :one -SELECT * FROM templates WHERE name = $1; +SELECT * FROM templates WHERE id = $1; -- name: GetTemplateByTeam :one -SELECT * FROM templates WHERE name = $1 AND team_id = $2; +-- Platform templates (team_id = 00000000-...) are visible to all teams. 
+SELECT * FROM templates WHERE name = $1 AND (team_id = $2 OR team_id = '00000000-0000-0000-0000-000000000000'); + +-- name: GetTemplateByName :one +-- Look up a template by team_id and name (exact team match, no global fallback). +SELECT * FROM templates WHERE team_id = $1 AND name = $2; + +-- name: GetPlatformTemplateByName :one +-- Check if a global (platform) template exists with the given name. +SELECT * FROM templates WHERE team_id = '00000000-0000-0000-0000-000000000000' AND name = $1; -- name: ListTemplates :many SELECT * FROM templates ORDER BY created_at DESC; @@ -16,13 +25,23 @@ SELECT * FROM templates ORDER BY created_at DESC; SELECT * FROM templates WHERE type = $1 ORDER BY created_at DESC; -- name: ListTemplatesByTeam :many -SELECT * FROM templates WHERE team_id = $1 ORDER BY created_at DESC; +-- Platform templates are visible to all teams. +SELECT * FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') ORDER BY created_at DESC; -- name: ListTemplatesByTeamAndType :many -SELECT * FROM templates WHERE team_id = $1 AND type = $2 ORDER BY created_at DESC; +-- Platform templates are visible to all teams. +SELECT * FROM templates WHERE (team_id = $1 OR team_id = '00000000-0000-0000-0000-000000000000') AND type = $2 ORDER BY created_at DESC; -- name: DeleteTemplate :exec -DELETE FROM templates WHERE name = $1; +DELETE FROM templates WHERE id = $1; -- name: DeleteTemplateByTeam :exec DELETE FROM templates WHERE name = $1 AND team_id = $2; + +-- name: DeleteTemplatesByTeam :exec +-- Bulk delete all templates owned by a team (for team soft-delete cleanup). +DELETE FROM templates WHERE team_id = $1; + +-- name: ListTemplatesByTeamOnly :many +-- List templates owned by a specific team (NOT including platform templates). 
+SELECT * FROM templates WHERE team_id = $1 ORDER BY created_at DESC; diff --git a/db/queries/users.sql b/db/queries/users.sql index 3c2f4f0..a244fc9 100644 --- a/db/queries/users.sql +++ b/db/queries/users.sql @@ -1,6 +1,6 @@ -- name: InsertUser :one -INSERT INTO users (id, email, password_hash) -VALUES ($1, $2, $3) +INSERT INTO users (id, email, password_hash, name) +VALUES ($1, $2, $3, $4) RETURNING *; -- name: GetUserByEmail :one @@ -10,8 +10,8 @@ SELECT * FROM users WHERE email = $1; SELECT * FROM users WHERE id = $1; -- name: InsertUserOAuth :one -INSERT INTO users (id, email) -VALUES ($1, $2) +INSERT INTO users (id, email, name) +VALUES ($1, $2, $3) RETURNING *; -- name: SetUserAdmin :exec @@ -34,3 +34,9 @@ SELECT * FROM admin_permissions WHERE user_id = $1 ORDER BY permission; SELECT EXISTS( SELECT 1 FROM admin_permissions WHERE user_id = $1 AND permission = $2 ) AS has_permission; + +-- name: SearchUsersByEmailPrefix :many +SELECT id, email FROM users WHERE email LIKE $1 || '%' ORDER BY email LIMIT 10; + +-- name: UpdateUserName :exec +UPDATE users SET name = $2, updated_at = NOW() WHERE id = $1; diff --git a/deploy/Caddyfile.dev b/deploy/Caddyfile.dev new file mode 100644 index 0000000..789f8df --- /dev/null +++ b/deploy/Caddyfile.dev @@ -0,0 +1,41 @@ +# Sandbox port forwarding: {port}-{sandbox_id}.localhost +# Matches subdomains like 49999-sb-abcd1234.localhost and proxies them +# to the control plane, which inspects the Host header and routes to +# the correct host agent. +# +# NOTE: Wildcard *.localhost DNS resolution requires local setup. +# Option 1: Add entries to /etc/hosts for each sandbox +# Option 2: Use dnsmasq: address=/.localhost/127.0.0.1 +# Option 3: Use systemd-resolved (Ubuntu default — *.localhost resolves to 127.0.0.1) +http://*.localhost { + reverse_proxy host.docker.internal:8080 +} + +# Main entry point: API + frontend +http://localhost { + # API routes — strip /api prefix and proxy to the control plane. 
+ # The frontend calls /api/v1/... which becomes /v1/... at the CP. + handle_path /api/* { + reverse_proxy host.docker.internal:8080 + } + + # Backend routes served directly (SDK clients, OAuth initiation) + handle /v1/* { + reverse_proxy host.docker.internal:8080 + } + handle /openapi.yaml { + reverse_proxy host.docker.internal:8080 + } + handle /docs { + reverse_proxy host.docker.internal:8080 + } + handle /auth/oauth/* { + reverse_proxy host.docker.internal:8080 + } + + # Everything else — proxy to the frontend dev server + # This includes: /login, /dashboard/*, /admin/*, /auth/github/callback + handle { + reverse_proxy host.docker.internal:5173 + } +} diff --git a/deploy/docker-compose.dev.yml b/deploy/docker-compose.dev.yml index ebcd308..e16f3ff 100644 --- a/deploy/docker-compose.dev.yml +++ b/deploy/docker-compose.dev.yml @@ -10,24 +10,19 @@ services: volumes: - pgdata:/var/lib/postgresql/data - redis: - image: redis:7-alpine + keydb: + image: eqalpha/keydb:alpine ports: - "6379:6379" - prometheus: - image: prom/prometheus:latest + caddy: + image: caddy:2-alpine ports: - - "9090:9090" + - "8000:80" volumes: - - ./deploy/prometheus.yml:/etc/prometheus/prometheus.yml - - grafana: - image: grafana/grafana:latest - ports: - - "3001:3000" - environment: - GF_SECURITY_ADMIN_PASSWORD: admin + - ./Caddyfile.dev:/etc/caddy/Caddyfile:ro + extra_hosts: + - "host.docker.internal:host-gateway" volumes: pgdata: diff --git a/envd/LICENSE b/envd/LICENSE index ec47fef..00c83da 100644 --- a/envd/LICENSE +++ b/envd/LICENSE @@ -187,6 +187,7 @@ identification within third-party archives. Copyright 2023 FoundryLabs, Inc. + Modifications Copyright (c) 2026 M/S Omukk, Bangladesh Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/envd/go.mod b/envd/go.mod index be2c95a..c739bc6 100644 --- a/envd/go.mod +++ b/envd/go.mod @@ -1,6 +1,6 @@ module git.omukk.dev/wrenn/sandbox/envd -go 1.25.5 +go 1.25.8 require ( connectrpc.com/authn v0.1.0 diff --git a/envd/internal/api/init.go b/envd/internal/api/init.go index a489459..301400c 100644 --- a/envd/internal/api/init.go +++ b/envd/internal/api/init.go @@ -14,14 +14,12 @@ import ( "os/exec" "time" - "github.com/awnumar/memguard" - "github.com/rs/zerolog" - "github.com/txn2/txeh" - "golang.org/x/sys/unix" - "git.omukk.dev/wrenn/sandbox/envd/internal/host" "git.omukk.dev/wrenn/sandbox/envd/internal/logs" "git.omukk.dev/wrenn/sandbox/envd/internal/shared/keys" + "github.com/awnumar/memguard" + "github.com/rs/zerolog" + "github.com/txn2/txeh" ) var ( @@ -29,11 +27,6 @@ var ( ErrAccessTokenResetNotAuthorized = errors.New("access token reset not authorized") ) -const ( - maxTimeInPast = 50 * time.Millisecond - maxTimeInFuture = 5 * time.Second -) - // validateInitAccessToken validates the access token for /init requests. // Token is valid if it matches the existing token OR the MMDS hash. // If neither exists, first-time setup is allowed. 
@@ -172,20 +165,6 @@ func (a *API) SetData(ctx context.Context, logger zerolog.Logger, data PostInitJ return err } - if data.Timestamp != nil { - // Check if current time differs significantly from the received timestamp - if shouldSetSystemTime(time.Now(), *data.Timestamp) { - logger.Debug().Msgf("Setting sandbox start time to: %v", *data.Timestamp) - ts := unix.NsecToTimespec(data.Timestamp.UnixNano()) - err := unix.ClockSettime(unix.CLOCK_REALTIME, &ts) - if err != nil { - logger.Error().Msgf("Failed to set system time: %v", err) - } - } else { - logger.Debug().Msgf("Current time is within acceptable range of timestamp %v, not setting system time", *data.Timestamp) - } - } - if data.EnvVars != nil { logger.Debug().Msg(fmt.Sprintf("Setting %d env vars", len(*data.EnvVars))) @@ -308,10 +287,3 @@ func getIPFamily(address string) (txeh.IPFamily, error) { return txeh.IPFamilyV4, fmt.Errorf("%w: %s", ErrUnknownAddressFormat, address) } } - -// shouldSetSystemTime returns true if the current time differs significantly from the received timestamp, -// indicating the system clock should be adjusted. Returns true when the sandboxTime is more than -// maxTimeInPast before the hostTime or more than maxTimeInFuture after the hostTime. 
-func shouldSetSystemTime(sandboxTime, hostTime time.Time) bool { - return sandboxTime.Before(hostTime.Add(-maxTimeInPast)) || sandboxTime.After(hostTime.Add(maxTimeInFuture)) -} diff --git a/envd/internal/api/init_test.go b/envd/internal/api/init_test.go index c4b6f4b..f3db361 100644 --- a/envd/internal/api/init_test.go +++ b/envd/internal/api/init_test.go @@ -9,7 +9,6 @@ import ( "path/filepath" "strings" "testing" - "time" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -59,71 +58,6 @@ func TestSimpleCases(t *testing.T) { } } -func TestShouldSetSystemTime(t *testing.T) { - t.Parallel() - sandboxTime := time.Now() - - tests := []struct { - name string - hostTime time.Time - want bool - }{ - { - name: "sandbox time far ahead of host time (should set)", - hostTime: sandboxTime.Add(-10 * time.Second), - want: true, - }, - { - name: "sandbox time at maxTimeInPast boundary ahead of host time (should not set)", - hostTime: sandboxTime.Add(-50 * time.Millisecond), - want: false, - }, - { - name: "sandbox time just within maxTimeInPast ahead of host time (should not set)", - hostTime: sandboxTime.Add(-40 * time.Millisecond), - want: false, - }, - { - name: "sandbox time slightly ahead of host time (should not set)", - hostTime: sandboxTime.Add(-10 * time.Millisecond), - want: false, - }, - { - name: "sandbox time equals host time (should not set)", - hostTime: sandboxTime, - want: false, - }, - { - name: "sandbox time slightly behind host time (should not set)", - hostTime: sandboxTime.Add(1 * time.Second), - want: false, - }, - { - name: "sandbox time just within maxTimeInFuture behind host time (should not set)", - hostTime: sandboxTime.Add(4 * time.Second), - want: false, - }, - { - name: "sandbox time at maxTimeInFuture boundary behind host time (should not set)", - hostTime: sandboxTime.Add(5 * time.Second), - want: false, - }, - { - name: "sandbox time far behind host time (should set)", - hostTime: sandboxTime.Add(1 * time.Minute), - want: true, - 
}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - got := shouldSetSystemTime(tt.hostTime, sandboxTime) - assert.Equal(t, tt.want, got) - }) - } -} - func secureTokenPtr(s string) *SecureToken { token := &SecureToken{} _ = token.Set([]byte(s)) diff --git a/envd/internal/port/conn.go b/envd/internal/port/conn.go new file mode 100644 index 0000000..8a8c032 --- /dev/null +++ b/envd/internal/port/conn.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: Apache-2.0 + +package port + +import ( + "bufio" + "encoding/hex" + "fmt" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// ConnStat represents a single TCP connection read from /proc/net/tcp(6). +// It contains only the fields needed by the port scanner and forwarder. +type ConnStat struct { + LocalIP string + LocalPort uint32 + Status string + Family uint32 // syscall.AF_INET or syscall.AF_INET6 + Inode uint64 // socket inode, unique per connection +} + +// tcpStates maps the hex state values from /proc/net/tcp to string names +// matching the gopsutil convention used by ScannerFilter. +var tcpStates = map[string]string{ + "01": "ESTABLISHED", + "02": "SYN_SENT", + "03": "SYN_RECV", + "04": "FIN_WAIT1", + "05": "FIN_WAIT2", + "06": "TIME_WAIT", + "07": "CLOSE", + "08": "CLOSE_WAIT", + "09": "LAST_ACK", + "0A": "LISTEN", + "0B": "CLOSING", +} + +// ReadTCPConnections reads /proc/net/tcp and /proc/net/tcp6 and returns +// all TCP connections. This avoids the /proc/{pid}/fd walk that gopsutil +// performs, which is unsafe across Firecracker snapshot/restore boundaries. +func ReadTCPConnections() ([]ConnStat, error) { + var conns []ConnStat + + tcp4, err := parseProcNetTCP("/proc/net/tcp", syscall.AF_INET) + if err != nil { + return nil, fmt.Errorf("parse /proc/net/tcp: %w", err) + } + conns = append(conns, tcp4...) 
+ + tcp6, err := parseProcNetTCP("/proc/net/tcp6", syscall.AF_INET6) + if err != nil { + return nil, fmt.Errorf("parse /proc/net/tcp6: %w", err) + } + conns = append(conns, tcp6...) + + return conns, nil +} + +// parseProcNetTCP reads a single /proc/net/tcp or /proc/net/tcp6 file. +// +// Format (fields are whitespace-separated): +// +// sl local_address rem_address st tx_queue:rx_queue tr:tm->when retrnsmt uid timeout inode +// 0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 12345 +func parseProcNetTCP(path string, family uint32) ([]ConnStat, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var conns []ConnStat + scanner := bufio.NewScanner(f) + + // Skip header line. + scanner.Scan() + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" { + continue + } + + fields := strings.Fields(line) + if len(fields) < 10 { + continue + } + + // fields[1] = local_address (hex_ip:hex_port) + ip, port, err := parseHexAddr(fields[1], family) + if err != nil { + continue + } + + // fields[3] = state (hex) + state, ok := tcpStates[fields[3]] + if !ok { + state = "UNKNOWN" + } + + // fields[9] = inode + inode, err := strconv.ParseUint(fields[9], 10, 64) + if err != nil { + continue + } + + conns = append(conns, ConnStat{ + LocalIP: ip, + LocalPort: port, + Status: state, + Family: family, + Inode: inode, + }) + } + + return conns, scanner.Err() +} + +// parseHexAddr parses "HEXIP:HEXPORT" from /proc/net/tcp. +// IPv4 addresses are 8 hex chars (4 bytes, little-endian per 32-bit word). +// IPv6 addresses are 32 hex chars (16 bytes, little-endian per 32-bit word). 
+func parseHexAddr(s string, family uint32) (string, uint32, error) { + parts := strings.SplitN(s, ":", 2) + if len(parts) != 2 { + return "", 0, fmt.Errorf("invalid address: %s", s) + } + + port64, err := strconv.ParseUint(parts[1], 16, 32) + if err != nil { + return "", 0, err + } + + ipHex := parts[0] + ipBytes, err := hex.DecodeString(ipHex) + if err != nil { + return "", 0, err + } + + var ip net.IP + if family == syscall.AF_INET { + if len(ipBytes) != 4 { + return "", 0, fmt.Errorf("invalid IPv4 length: %d", len(ipBytes)) + } + // /proc/net/tcp stores IPv4 as a single little-endian 32-bit word. + ip = net.IPv4(ipBytes[3], ipBytes[2], ipBytes[1], ipBytes[0]) + } else { + if len(ipBytes) != 16 { + return "", 0, fmt.Errorf("invalid IPv6 length: %d", len(ipBytes)) + } + // /proc/net/tcp6 stores IPv6 as four little-endian 32-bit words. + ip = make(net.IP, 16) + for i := 0; i < 4; i++ { + ip[i*4+0] = ipBytes[i*4+3] + ip[i*4+1] = ipBytes[i*4+2] + ip[i*4+2] = ipBytes[i*4+1] + ip[i*4+3] = ipBytes[i*4+0] + } + } + + return ip.String(), uint32(port64), nil +} diff --git a/envd/internal/port/forward.go b/envd/internal/port/forward.go index e836519..bf516ff 100644 --- a/envd/internal/port/forward.go +++ b/envd/internal/port/forward.go @@ -31,8 +31,8 @@ var defaultGatewayIP = net.IPv4(169, 254, 0, 21) type PortToForward struct { socat *exec.Cmd - // Process ID of the process that's listening on port. - pid int32 + // Socket inode of the listening socket (unique per connection). + inode uint64 // family version of the ip. family uint32 state PortState @@ -94,7 +94,7 @@ func (f *Forwarder) StartForwarding(ctx context.Context) { // Let's refresh our map of currently forwarded ports and mark the currently opened ones with the "FORWARD" state. // This will make sure we won't delete them later. 
for _, p := range procs { - key := fmt.Sprintf("%d-%d", p.Pid, p.Laddr.Port) + key := fmt.Sprintf("%d-%d", p.Inode, p.LocalPort) // We check if the opened port is in our map of forwarded ports. val, portOk := f.ports[key] @@ -104,16 +104,16 @@ func (f *Forwarder) StartForwarding(ctx context.Context) { val.state = PortStateForward } else { f.logger.Debug(). - Str("ip", p.Laddr.IP). - Uint32("port", p.Laddr.Port). + Str("ip", p.LocalIP). + Uint32("port", p.LocalPort). Uint32("family", familyToIPVersion(p.Family)). Str("state", p.Status). Msg("Detected new opened port on localhost that is not forwarded") // The opened port wasn't in the map so we create a new PortToForward and start forwarding. ptf := &PortToForward{ - pid: p.Pid, - port: p.Laddr.Port, + inode: p.Inode, + port: p.LocalPort, state: PortStateForward, family: familyToIPVersion(p.Family), } @@ -153,7 +153,7 @@ func (f *Forwarder) startPortForwarding(ctx context.Context, p *PortToForward) { f.logger.Debug(). Str("socatCmd", cmd.String()). - Int32("pid", p.pid). + Uint64("inode", p.inode). Uint32("family", p.family). IPAddr("sourceIP", f.sourceIP.To4()). Uint32("port", p.port). @@ -191,7 +191,7 @@ func (f *Forwarder) stopPortForwarding(p *PortToForward) { logger := f.logger.With(). Str("socatCmd", p.socat.String()). - Int32("pid", p.pid). + Uint64("inode", p.inode). Uint32("family", p.family). IPAddr("sourceIP", f.sourceIP.To4()). Uint32("port", p.port). 
diff --git a/envd/internal/port/scan.go b/envd/internal/port/scan.go index 766202a..2b15523 100644 --- a/envd/internal/port/scan.go +++ b/envd/internal/port/scan.go @@ -3,19 +3,21 @@ package port import ( + "sync" "time" "github.com/rs/zerolog" - "github.com/shirou/gopsutil/v4/net" - - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/smap" ) type Scanner struct { - Processes chan net.ConnectionStat - scanExit chan struct{} - subs *smap.Map[*ScannerSubscriber] - period time.Duration + scanExit chan struct{} + period time.Duration + + // Plain mutex-protected map instead of concurrent-map. The concurrent-map + // library's Items() spawns goroutines and uses a WaitGroup internally, + // which corrupts Go runtime semaphore state across Firecracker snapshot/restore. + mu sync.RWMutex + subs map[string]*ScannerSubscriber } func (s *Scanner) Destroy() { @@ -24,33 +26,44 @@ func (s *Scanner) Destroy() { func NewScanner(period time.Duration) *Scanner { return &Scanner{ - period: period, - subs: smap.New[*ScannerSubscriber](), - scanExit: make(chan struct{}), - Processes: make(chan net.ConnectionStat), + period: period, + subs: make(map[string]*ScannerSubscriber), + scanExit: make(chan struct{}), } } func (s *Scanner) AddSubscriber(logger *zerolog.Logger, id string, filter *ScannerFilter) *ScannerSubscriber { subscriber := NewScannerSubscriber(logger, id, filter) - s.subs.Insert(id, subscriber) + + s.mu.Lock() + s.subs[id] = subscriber + s.mu.Unlock() return subscriber } func (s *Scanner) Unsubscribe(sub *ScannerSubscriber) { - s.subs.Remove(sub.ID()) + s.mu.Lock() + delete(s.subs, sub.ID()) + s.mu.Unlock() + sub.Destroy() } // ScanAndBroadcast starts scanning open TCP ports and broadcasts every open port to all subscribers. func (s *Scanner) ScanAndBroadcast() { for { - // tcp monitors both ipv4 and ipv6 connections. 
- processes, _ := net.Connections("tcp") - for _, sub := range s.subs.Items() { - sub.Signal(processes) + // Read directly from /proc/net/tcp and /proc/net/tcp6 instead of + // using gopsutil's net.Connections(), which walks /proc/{pid}/fd + // and causes Go runtime corruption after Firecracker snapshot/restore. + conns, _ := ReadTCPConnections() + + s.mu.RLock() + for _, sub := range s.subs { + sub.Signal(conns) } + s.mu.RUnlock() + select { case <-s.scanExit: return diff --git a/envd/internal/port/scanSubscriber.go b/envd/internal/port/scanSubscriber.go index 6a4f5b0..bad9908 100644 --- a/envd/internal/port/scanSubscriber.go +++ b/envd/internal/port/scanSubscriber.go @@ -4,7 +4,6 @@ package port import ( "github.com/rs/zerolog" - "github.com/shirou/gopsutil/v4/net" ) // If we want to create a listener/subscriber pattern somewhere else we should move @@ -13,7 +12,7 @@ import ( type ScannerSubscriber struct { logger *zerolog.Logger filter *ScannerFilter - Messages chan ([]net.ConnectionStat) + Messages chan ([]ConnStat) id string } @@ -22,7 +21,7 @@ func NewScannerSubscriber(logger *zerolog.Logger, id string, filter *ScannerFilt logger: logger, id: id, filter: filter, - Messages: make(chan []net.ConnectionStat), + Messages: make(chan []ConnStat), } } @@ -34,17 +33,17 @@ func (ss *ScannerSubscriber) Destroy() { close(ss.Messages) } -func (ss *ScannerSubscriber) Signal(proc []net.ConnectionStat) { +func (ss *ScannerSubscriber) Signal(conns []ConnStat) { // Filter isn't specified. Accept everything. if ss.filter == nil { - ss.Messages <- proc + ss.Messages <- conns } else { - filtered := []net.ConnectionStat{} - for i := range proc { + filtered := []ConnStat{} + for i := range conns { // We need to access the list directly otherwise there will be implicit memory aliasing - // If the filter matched a process, we will send it to a channel. 
- if ss.filter.Match(&proc[i]) { - filtered = append(filtered, proc[i]) + // If the filter matched a connection, we will send it to a channel. + if ss.filter.Match(&conns[i]) { + filtered = append(filtered, conns[i]) } } ss.Messages <- filtered diff --git a/envd/internal/port/scanfilter.go b/envd/internal/port/scanfilter.go index 941023d..f87667f 100644 --- a/envd/internal/port/scanfilter.go +++ b/envd/internal/port/scanfilter.go @@ -4,8 +4,6 @@ package port import ( "slices" - - "github.com/shirou/gopsutil/v4/net" ) type ScannerFilter struct { @@ -13,15 +11,15 @@ type ScannerFilter struct { IPs []string } -func (sf *ScannerFilter) Match(proc *net.ConnectionStat) bool { +func (sf *ScannerFilter) Match(conn *ConnStat) bool { // Filter is an empty struct. if sf.State == "" && len(sf.IPs) == 0 { return false } - ipMatch := slices.Contains(sf.IPs, proc.Laddr.IP) + ipMatch := slices.Contains(sf.IPs, conn.LocalIP) - if ipMatch && sf.State == proc.Status { + if ipMatch && sf.State == conn.Status { return true } diff --git a/frontend/package.json b/frontend/package.json index f694403..85030ec 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -26,5 +26,8 @@ "tailwindcss": "^4.2.1", "typescript": "^5.9.3", "vite": "^7.3.1" + }, + "dependencies": { + "chart.js": "^4.5.1" } } diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 9f0353e..5b60992 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -7,6 +7,10 @@ settings: importers: .: + dependencies: + chart.js: + specifier: ^4.5.1 + version: 4.5.1 devDependencies: '@fontsource-variable/jetbrains-mono': specifier: ^5.2.8 @@ -249,6 +253,9 @@ packages: '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + '@kurkle/color@0.3.4': + resolution: {integrity: sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==} + '@polka/url@1.0.0-next.29': 
resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==} @@ -547,6 +554,10 @@ packages: '@internationalized/date': ^3.8.1 svelte: ^5.33.0 + chart.js@4.5.1: + resolution: {integrity: sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==} + engines: {pnpm: '>=8'} + chokidar@4.0.3: resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} @@ -980,6 +991,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 + '@kurkle/color@0.3.4': {} + '@polka/url@1.0.0-next.29': {} '@rollup/rollup-android-arm-eabi@4.59.0': @@ -1203,6 +1216,10 @@ snapshots: transitivePeerDependencies: - '@sveltejs/kit' + chart.js@4.5.1: + dependencies: + '@kurkle/color': 0.3.4 + chokidar@4.0.3: dependencies: readdirp: 4.1.2 diff --git a/frontend/src/app.css b/frontend/src/app.css index 72fd49b..9c2e326 100644 --- a/frontend/src/app.css +++ b/frontend/src/app.css @@ -48,6 +48,20 @@ --font-mono: 'JetBrains Mono Variable', monospace; --font-brand: 'Alice', serif; + /* Type scale — rem-based (root = 87.5%, giving 14px at browser default) + Heading tokens carry a default line-height; body/UI tokens inherit the global 1.6. 
*/ + --text-display: 2.571rem; /* ~36px — auth/login section headings */ + --text-display--line-height: 1.1; + --text-page: 2rem; /* ~28px — page h1 titles */ + --text-page--line-height: 1.15; + --text-heading: 1.429rem; /* ~20px — dialog headings, empty-state */ + --text-heading--line-height: 1.25; + --text-body: 1rem; /* 14px — primary body, buttons, inputs */ + --text-ui: 0.929rem; /* ~13px — nav labels, table cells, secondary */ + --text-meta: 0.857rem; /* ~12px — key prefixes, minor info */ + --text-label: 0.786rem; /* ~11px — uppercase section labels */ + --text-badge: 0.714rem; /* ~10px — live badges, tiny indicators */ + /* Radii */ --radius-card: 8px; --radius-input: 5px; @@ -55,16 +69,22 @@ --radius-avatar: 5px; --radius-logo: 6px; - /* Shadows — flat aesthetic */ - --shadow-sm: 0 0 #0000; + /* Shadows */ + --shadow-sm: 0 1px 3px rgba(0, 0, 0, 0.35), 0 1px 2px rgba(0, 0, 0, 0.2); + --shadow-card: 0 4px 12px rgba(0, 0, 0, 0.4), 0 1px 3px rgba(0, 0, 0, 0.25); + --shadow-dialog: 0 16px 48px rgba(0, 0, 0, 0.6), 0 4px 12px rgba(0, 0, 0, 0.35); } /* Base styles */ html { font-family: var(--font-sans); - font-size: 14px; + font-size: 87.5%; /* 14px at browser default; scales with user text-size preferences */ + line-height: 1.6; color: var(--color-text-primary); background-color: var(--color-bg-0); + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; } body { @@ -72,6 +92,11 @@ body { min-height: 100vh; } +/* Tabular figures on all mono text — numbers align in tables and metric displays */ +.font-mono { + font-variant-numeric: tabular-nums; +} + /* Selection */ ::selection { background: rgba(94, 140, 88, 0.25); @@ -97,17 +122,35 @@ body { background: var(--color-bg-5); } -/* Live status dot glow animation */ +/* Live status dot pulse animation — opacity-only for GPU compositor, zero paint cost */ @keyframes wrenn-glow { 0%, 100% { - box-shadow: 0 0 6px rgba(94, 140, 88, 0.5); + opacity: 1; } 50% { - 
box-shadow: 0 0 14px rgba(94, 140, 88, 0.2); + opacity: 0.3; } } +/* Outward ring ripple — for live/running status dots; more delightful than opacity-only */ +@keyframes status-ping { + 0% { + transform: scale(1); + opacity: 0.8; + } + 80%, + 100% { + transform: scale(2.8); + opacity: 0; + } +} + +.animate-status-ping { + animation: status-ping 2s cubic-bezier(0, 0, 0.2, 1) infinite; + will-change: transform, opacity; +} + /* Fade-up entrance animation */ @keyframes fadeUp { from { @@ -119,3 +162,27 @@ body { transform: translateY(0); } } + +/* Refresh icon spin — one full rotation */ +@keyframes spin-once { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +/* Floating icon — used on empty-state icon containers */ +@keyframes iconFloat { + 0%, 100% { transform: translateY(0); } + 50% { transform: translateY(-6px); } +} + +/* Respect user motion preferences — covers both CSS class animations and inline style animations */ +@media (prefers-reduced-motion: reduce) { + *, + *::before, + *::after { + animation-duration: 0.01ms !important; + animation-iteration-count: 1 !important; + transition-duration: 0.01ms !important; + scroll-behavior: auto !important; + } +} diff --git a/frontend/src/lib/api/audit.ts b/frontend/src/lib/api/audit.ts new file mode 100644 index 0000000..7d4dfe1 --- /dev/null +++ b/frontend/src/lib/api/audit.ts @@ -0,0 +1,38 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type AuditLog = { + id: string; + actor_type: 'user' | 'api_key' | 'system'; + actor_id?: string; + actor_name?: string; + resource_type: string; + resource_id?: string; + action: string; + scope: 'team' | 'admin'; + status: 'success' | 'info' | 'warning' | 'error'; + metadata?: Record; + created_at: string; +}; + +export type AuditListResponse = { + items: AuditLog[]; + next_before?: string; + next_before_id?: string; +}; + +export async function listAuditLogs(params?: { + before?: string; + before_id?: string; + resource_types?: 
string[]; + actions?: string[]; + limit?: number; +}): Promise> { + const q = new URLSearchParams(); + if (params?.before) q.set('before', params.before); + if (params?.before_id) q.set('before_id', params.before_id); + params?.resource_types?.forEach((t) => q.append('resource_type', t)); + params?.actions?.forEach((a) => q.append('action', a)); + if (params?.limit != null) q.set('limit', String(params.limit)); + const qs = q.toString(); + return apiFetch('GET', `/api/v1/audit-logs${qs ? '?' + qs : ''}`); +} diff --git a/frontend/src/lib/api/auth.ts b/frontend/src/lib/api/auth.ts index 51b987a..845b8a3 100644 --- a/frontend/src/lib/api/auth.ts +++ b/frontend/src/lib/api/auth.ts @@ -3,6 +3,7 @@ export type AuthResponse = { user_id: string; team_id: string; email: string; + name: string; }; export type AuthResult = { ok: true; data: AuthResponse } | { ok: false; error: string }; @@ -11,8 +12,8 @@ export async function apiLogin(email: string, password: string): Promise { - return authFetch('/api/v1/auth/signup', { email, password }); +export async function apiSignup(email: string, password: string, name: string): Promise { + return authFetch('/api/v1/auth/signup', { email, password, name }); } async function authFetch(url: string, body: Record): Promise { diff --git a/frontend/src/lib/api/builds.ts b/frontend/src/lib/api/builds.ts new file mode 100644 index 0000000..1de23b8 --- /dev/null +++ b/frontend/src/lib/api/builds.ts @@ -0,0 +1,76 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type BuildLogEntry = { + step: number; + phase: string; // "pre-build", "recipe", or "post-build" + cmd: string; + stdout: string; + stderr: string; + exit: number; + ok: boolean; + elapsed_ms: number; +}; + +export type Build = { + id: string; + name: string; + base_template: string; + recipe: string[]; + healthcheck?: string; + vcpus: number; + memory_mb: number; + status: string; + current_step: number; + total_steps: number; + logs: BuildLogEntry[]; + 
error?: string; + sandbox_id?: string; + host_id?: string; + created_at: string; + started_at?: string; + completed_at?: string; +}; + +export type CreateBuildParams = { + name: string; + base_template?: string; + recipe: string[]; + healthcheck?: string; + vcpus?: number; + memory_mb?: number; + skip_pre_post?: boolean; +}; + +export async function createBuild(params: CreateBuildParams): Promise> { + return apiFetch('POST', '/api/v1/admin/builds', params); +} + +export async function listBuilds(): Promise> { + return apiFetch('GET', '/api/v1/admin/builds'); +} + +export async function getBuild(id: string): Promise> { + return apiFetch('GET', `/api/v1/admin/builds/${id}`); +} + +export type AdminTemplate = { + name: string; + type: string; + vcpus: number; + memory_mb: number; + size_bytes: number; + team_id: string; + created_at: string; +}; + +export async function listAdminTemplates(): Promise> { + return apiFetch('GET', '/api/v1/admin/templates'); +} + +export async function deleteAdminTemplate(name: string): Promise> { + return apiFetch('DELETE', `/api/v1/admin/templates/${name}`); +} + +export async function cancelBuild(id: string): Promise> { + return apiFetch('POST', `/api/v1/admin/builds/${id}/cancel`); +} diff --git a/frontend/src/lib/api/capsules.ts b/frontend/src/lib/api/capsules.ts index c51737a..565f14f 100644 --- a/frontend/src/lib/api/capsules.ts +++ b/frontend/src/lib/api/capsules.ts @@ -20,6 +20,10 @@ export async function listCapsules(): Promise> { return apiFetch('GET', '/api/v1/sandboxes'); } +export async function getCapsule(id: string): Promise> { + return apiFetch('GET', `/api/v1/sandboxes/${id}`); +} + export type CreateCapsuleParams = { template?: string; vcpus?: number; @@ -50,6 +54,7 @@ export type Snapshot = { memory_mb?: number; size_bytes: number; created_at: string; + platform: boolean; }; export async function createSnapshot(sandboxId: string, name?: string): Promise> { diff --git a/frontend/src/lib/api/channels.ts 
b/frontend/src/lib/api/channels.ts new file mode 100644 index 0000000..130a9a8 --- /dev/null +++ b/frontend/src/lib/api/channels.ts @@ -0,0 +1,72 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type Channel = { + id: string; + team_id: string; + name: string; + provider: string; + events: string[]; + created_at: string; + updated_at: string; + secret?: string; // only present immediately after creation (webhook provider) +}; + +export const PROVIDERS = [ + { value: 'discord', label: 'Discord', fields: ['webhook_url'] }, + { value: 'slack', label: 'Slack', fields: ['webhook_url'] }, + { value: 'teams', label: 'Teams', fields: ['webhook_url'] }, + { value: 'googlechat', label: 'Google Chat', fields: ['webhook_url'] }, + { value: 'telegram', label: 'Telegram', fields: ['bot_token', 'chat_id'] }, + { value: 'matrix', label: 'Matrix', fields: ['homeserver_url', 'access_token', 'room_id'] }, + { value: 'webhook', label: 'Webhook', fields: ['url'] } +] as const; + +export const EVENT_TYPES = [ + { value: 'capsule.created', group: 'Capsule' }, + { value: 'capsule.running', group: 'Capsule' }, + { value: 'capsule.paused', group: 'Capsule' }, + { value: 'capsule.destroyed', group: 'Capsule' }, + { value: 'template.snapshot.created', group: 'Template' }, + { value: 'template.snapshot.deleted', group: 'Template' }, + { value: 'host.up', group: 'Host' }, + { value: 'host.down', group: 'Host' } +] as const; + +export async function listChannels(): Promise> { + return apiFetch('GET', '/api/v1/channels'); +} + +export async function createChannel( + name: string, + provider: string, + config: Record, + events: string[] +): Promise> { + return apiFetch('POST', '/api/v1/channels', { name, provider, config, events }); +} + +export async function updateChannel( + id: string, + name: string, + events: string[] +): Promise> { + return apiFetch('PATCH', `/api/v1/channels/${id}`, { name, events }); +} + +export async function deleteChannel(id: string): Promise> { 
+ return apiFetch('DELETE', `/api/v1/channels/${id}`); +} + +export async function rotateConfig( + id: string, + config: Record +): Promise> { + return apiFetch('PUT', `/api/v1/channels/${id}/config`, { config }); +} + +export async function testChannel( + provider: string, + config: Record +): Promise> { + return apiFetch('POST', '/api/v1/channels/test', { provider, config }); +} diff --git a/frontend/src/lib/api/hosts.ts b/frontend/src/lib/api/hosts.ts new file mode 100644 index 0000000..031b7f0 --- /dev/null +++ b/frontend/src/lib/api/hosts.ts @@ -0,0 +1,84 @@ +import { apiFetch } from './client'; + +export type Host = { + id: string; + type: 'regular' | 'byoc'; + team_id?: string; + team_name?: string; + provider?: string; + availability_zone?: string; + arch?: string; + cpu_cores?: number; + memory_mb?: number; + disk_gb?: number; + address?: string; + status: 'pending' | 'online' | 'offline' | 'unreachable' | 'draining'; + last_heartbeat_at?: string; + created_by: string; + created_at: string; + updated_at: string; +}; + +export type CreateHostParams = { + type: 'regular' | 'byoc'; + team_id?: string; + provider?: string; + availability_zone?: string; +}; + +export type CreateHostResult = { + host: Host; + registration_token: string; +}; + +export async function listHosts(): Promise<{ ok: true; data: Host[] } | { ok: false; error: string }> { + return apiFetch('GET', '/api/v1/hosts'); +} + +export async function createHost( + params: CreateHostParams +): Promise<{ ok: true; data: CreateHostResult } | { ok: false; error: string }> { + return apiFetch('POST', '/api/v1/hosts', params); +} + +export async function deleteHost( + id: string, + force = false +): Promise<{ ok: true } | { ok: false; error: string; sandbox_ids?: string[] }> { + const url = `/api/v1/hosts/${id}${force ? 
'?force=true' : ''}`; + const res = await apiFetch('DELETE', url); + if (!res.ok) { + return res as { ok: false; error: string }; + } + return { ok: true }; +} + +export async function getDeletePreview( + id: string +): Promise<{ ok: true; data: { host: Host; sandbox_ids: string[] } } | { ok: false; error: string }> { + return apiFetch<{ host: Host; sandbox_ids: string[] }>('GET', `/api/v1/hosts/${id}/delete-preview`); +} + +export function statusColor(status: Host['status']): string { + switch (status) { + case 'online': + return 'var(--color-accent)'; + case 'pending': + return 'var(--color-amber)'; + case 'offline': + case 'unreachable': + return 'var(--color-red)'; + case 'draining': + return 'var(--color-blue)'; + default: + return 'var(--color-text-muted)'; + } +} + +export function formatSpecs(host: Host): string { + const parts: string[] = []; + if (host.cpu_cores) parts.push(`${host.cpu_cores} vCPU`); + if (host.memory_mb) parts.push(`${Math.round(host.memory_mb / 1024)}GB RAM`); + if (host.disk_gb) parts.push(`${host.disk_gb}GB disk`); + return parts.join(' · ') || '—'; +} diff --git a/frontend/src/lib/api/metrics.ts b/frontend/src/lib/api/metrics.ts new file mode 100644 index 0000000..baf9f11 --- /dev/null +++ b/frontend/src/lib/api/metrics.ts @@ -0,0 +1,25 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type MetricRange = '5m' | '10m' | '1h' | '6h' | '24h'; + +export type MetricPoint = { + timestamp_unix: number; + cpu_pct: number; + mem_bytes: number; + disk_bytes: number; +}; + +export type MetricsResponse = { + sandbox_id: string; + range: MetricRange; + points: MetricPoint[]; +}; + +export async function fetchSandboxMetrics(id: string, range: MetricRange): Promise> { + return apiFetch('GET', `/api/v1/sandboxes/${id}/metrics?range=${range}`); +} + +export const METRIC_RANGES: MetricRange[] = ['5m', '10m', '1h', '6h', '24h']; + +// All ranges poll every 10 seconds. 
+export const METRIC_POLL_INTERVAL = 10_000; diff --git a/frontend/src/lib/api/stats.ts b/frontend/src/lib/api/stats.ts new file mode 100644 index 0000000..3f85483 --- /dev/null +++ b/frontend/src/lib/api/stats.ts @@ -0,0 +1,44 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type TimeRange = '5m' | '1h' | '6h' | '24h' | '30d'; + +export type StatsResponse = { + range: TimeRange; + current: { + running_count: number; + vcpus_reserved: number; + memory_mb_reserved: number; + sampled_at?: string; + }; + peaks: { + running_count: number; + vcpus: number; + memory_mb: number; + }; + series: { + labels: string[]; + running: number[]; + vcpus: number[]; + memory_mb: number[]; + }; +}; + +export async function fetchStats(range: TimeRange): Promise> { + return apiFetch('GET', `/api/v1/sandboxes/stats?range=${range}`); +} + +export const POLL_INTERVALS: Record = { + '5m': 15_000, + '1h': 30_000, + '6h': 60_000, + '24h': 120_000, + '30d': 300_000, +}; + +export const RANGE_LABELS: Record = { + '5m': '5m', + '1h': '1h', + '6h': '6h', + '24h': '24h', + '30d': '30d', +}; diff --git a/frontend/src/lib/api/team.ts b/frontend/src/lib/api/team.ts new file mode 100644 index 0000000..0ffc4ed --- /dev/null +++ b/frontend/src/lib/api/team.ts @@ -0,0 +1,85 @@ +import { apiFetch, type ApiResult } from '$lib/api/client'; + +export type TeamMember = { + user_id: string; + name: string; + email: string; + role: 'owner' | 'admin' | 'member'; + joined_at: string; +}; + +export type TeamInfo = { + id: string; + name: string; + slug: string; + created_at: string; +}; + +export type TeamDetail = { + team: TeamInfo; + members: TeamMember[]; +}; + +export type UserSearchResult = { + user_id: string; + email: string; +}; + +export type TeamWithRole = { + id: string; + name: string; + slug: string; + is_byoc: boolean; + created_at: string; + role: string; +}; + +export async function listTeams(): Promise> { + return apiFetch('GET', '/api/v1/teams'); +} + +export async 
function createTeam(name: string): Promise> { + return apiFetch('POST', '/api/v1/teams', { name }); +} + +export async function switchTeam( + teamId: string +): Promise> { + return apiFetch('POST', '/api/v1/auth/switch-team', { team_id: teamId }); +} + +export async function getTeam(id: string): Promise> { + return apiFetch('GET', `/api/v1/teams/${id}`); +} + +export async function updateTeam(id: string, name: string): Promise> { + return apiFetch('PATCH', `/api/v1/teams/${id}`, { name }); +} + +export async function addMember(id: string, email: string): Promise> { + return apiFetch('POST', `/api/v1/teams/${id}/members`, { email }); +} + +export async function removeMember(id: string, userId: string): Promise> { + return apiFetch('DELETE', `/api/v1/teams/${id}/members/${userId}`); +} + +export async function updateMemberRole( + id: string, + userId: string, + role: 'admin' | 'member' +): Promise> { + return apiFetch('PATCH', `/api/v1/teams/${id}/members/${userId}`, { role }); +} + +export async function deleteTeam(id: string): Promise> { + return apiFetch('DELETE', `/api/v1/teams/${id}`); +} + +export async function leaveTeam(id: string): Promise> { + return apiFetch('POST', `/api/v1/teams/${id}/leave`); +} + +export async function searchUsers(email: string): Promise> { + return apiFetch('GET', `/api/v1/users/search?email=${encodeURIComponent(email)}`); +} diff --git a/frontend/src/lib/auth.svelte.ts b/frontend/src/lib/auth.svelte.ts index 86325df..d39a0f4 100644 --- a/frontend/src/lib/auth.svelte.ts +++ b/frontend/src/lib/auth.svelte.ts @@ -4,7 +4,8 @@ const STORAGE_KEYS = { token: 'wrenn_token', userId: 'wrenn_user_id', teamId: 'wrenn_team_id', - email: 'wrenn_email' + email: 'wrenn_email', + name: 'wrenn_name' } as const; function isTokenExpired(token: string): boolean { @@ -18,11 +19,23 @@ function isTokenExpired(token: string): boolean { } } +function decodeJWTPayload(token: string): Record { + try { + const payload = token.split('.')[1]; + return 
JSON.parse(atob(payload.replace(/-/g, '+').replace(/_/g, '/'))); + } catch { + return {}; + } +} + function createAuth() { let token = $state(null); let userId = $state(null); let teamId = $state(null); let email = $state(null); + let name = $state(null); + let isAdmin = $state(false); + let role = $state('member'); let initialized = $state(false); // Initialize from localStorage synchronously at module load. @@ -33,6 +46,10 @@ function createAuth() { userId = localStorage.getItem(STORAGE_KEYS.userId); teamId = localStorage.getItem(STORAGE_KEYS.teamId); email = localStorage.getItem(STORAGE_KEYS.email); + name = localStorage.getItem(STORAGE_KEYS.name); + const payload = decodeJWTPayload(stored); + isAdmin = Boolean(payload.is_admin); + role = String(payload.role || 'member'); } else if (stored) { // Expired — clean up. for (const key of Object.values(STORAGE_KEYS)) { @@ -57,6 +74,15 @@ function createAuth() { get email() { return email; }, + get name() { + return name; + }, + get isAdmin() { + return isAdmin; + }, + get role() { + return role; + }, get isAuthenticated() { return isAuthenticated; }, @@ -64,16 +90,21 @@ function createAuth() { return initialized; }, - login(data: { token: string; user_id: string; team_id: string; email: string }) { + login(data: { token: string; user_id: string; team_id: string; email: string; name: string }) { token = data.token; userId = data.user_id; teamId = data.team_id; email = data.email; + name = data.name; + const payload = decodeJWTPayload(data.token); + isAdmin = Boolean(payload.is_admin); + role = String(payload.role || 'member'); localStorage.setItem(STORAGE_KEYS.token, data.token); localStorage.setItem(STORAGE_KEYS.userId, data.user_id); localStorage.setItem(STORAGE_KEYS.teamId, data.team_id); localStorage.setItem(STORAGE_KEYS.email, data.email); + localStorage.setItem(STORAGE_KEYS.name, data.name); }, logout() { @@ -81,6 +112,9 @@ function createAuth() { userId = null; teamId = null; email = null; + name = null; + 
isAdmin = false; + role = 'member'; for (const key of Object.values(STORAGE_KEYS)) { localStorage.removeItem(key); diff --git a/frontend/src/lib/capsule-store.svelte.ts b/frontend/src/lib/capsule-store.svelte.ts new file mode 100644 index 0000000..acc0f60 --- /dev/null +++ b/frontend/src/lib/capsule-store.svelte.ts @@ -0,0 +1,3 @@ +// Shared state written by the list page and read by the capsules layout +// for the running count badge in the header. +export const capsuleRunningCount = $state({ value: 0 }); diff --git a/frontend/src/lib/components/AdminSidebar.svelte b/frontend/src/lib/components/AdminSidebar.svelte new file mode 100644 index 0000000..ebf4b64 --- /dev/null +++ b/frontend/src/lib/components/AdminSidebar.svelte @@ -0,0 +1,186 @@ + + + diff --git a/frontend/src/lib/components/AuthModal.svelte b/frontend/src/lib/components/AuthModal.svelte index 3ab217f..2f23a5c 100644 --- a/frontend/src/lib/components/AuthModal.svelte +++ b/frontend/src/lib/components/AuthModal.svelte @@ -61,12 +61,12 @@
{title} {subtitle} @@ -75,7 +75,7 @@
@@ -136,7 +136,7 @@ bind:value={password} placeholder="Password" autocomplete={mode === 'signin' ? 'current-password' : 'new-password'} - class="w-full rounded-[var(--radius-input)] border border-[var(--color-border)] bg-[var(--color-bg-2)] py-2.5 pl-9 pr-10 text-[13px] text-[var(--color-text-bright)] outline-none transition-all duration-150 placeholder:text-[var(--color-text-muted)] focus:border-[var(--color-accent)]" + class="w-full rounded-[var(--radius-input)] border border-[var(--color-border)] bg-[var(--color-bg-2)] py-2.5 pl-9 pr-10 text-ui text-[var(--color-text-bright)] outline-none transition-all duration-150 placeholder:text-[var(--color-text-muted)] focus:border-[var(--color-accent)]" /> @@ -165,14 +165,14 @@ -

+

{switchText} + + + + +{/if} diff --git a/frontend/src/lib/components/Sidebar.svelte b/frontend/src/lib/components/Sidebar.svelte index 0c165e2..4111dd8 100644 --- a/frontend/src/lib/components/Sidebar.svelte +++ b/frontend/src/lib/components/Sidebar.svelte @@ -1,7 +1,10 @@