From f3572f73564c836668db168e59f97b215e3b13bf Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sat, 2 May 2026 04:57:08 +0600 Subject: [PATCH 01/10] Fix empty WRENN_TEMPLATE_ID after resuming paused sandbox Resume() was building VMConfig without TemplateID, so Firecracker MMDS received an empty string. envd's PostInit then wrote that empty value to /run/wrenn/.WRENN_TEMPLATE_ID. Fix by persisting the template ID in snapshot metadata during Pause and reading it back during Resume. --- internal/sandbox/manager.go | 2 ++ internal/snapshot/local.go | 1 + 2 files changed, 3 insertions(+) diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index daa1dba..3c49cd6 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -577,6 +577,7 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { // Record which base template this CoW was built against. if err := snapshot.WriteMeta(pauseDir, "", &snapshot.RootfsMeta{ BaseTemplate: sb.baseImagePath, + TemplateID: uuid.UUID(sb.TemplateID).String(), }); err != nil { warnErr("snapshot dir cleanup error", sandboxID, os.RemoveAll(pauseDir)) // VM and dm-snapshot are already gone — clean up remaining resources. @@ -731,6 +732,7 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, // Restore VM from snapshot. vmCfg := vm.VMConfig{ SandboxID: sandboxID, + TemplateID: meta.TemplateID, KernelPath: m.resolveKernelPath(kernelVersion), RootfsPath: dmDev.DevicePath, VCPUs: 1, // Placeholder; overridden by snapshot. diff --git a/internal/snapshot/local.go b/internal/snapshot/local.go index 8e667b8..95b9574 100644 --- a/internal/snapshot/local.go +++ b/internal/snapshot/local.go @@ -64,6 +64,7 @@ func MetaPath(baseDir, name string) string { // RootfsMeta records which base template a CoW file was created against. type RootfsMeta struct { BaseTemplate string `json:"base_template"` + TemplateID string `json:"template_id,omitempty"` } // WriteMeta writes rootfs metadata to the snapshot directory. From 7ef9a6461319a18d4b02371ce0ab61bc58baa141 Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sat, 2 May 2026 05:19:37 +0600 Subject: [PATCH 02/10] fix: close stale TCP connections across snapshot/restore to prevent envd hangs After Firecracker snapshot restore, zombie TCP sockets from the previous session cause Go runtime corruption inside the guest VM, making envd unresponsive. This manifests as infinite loading in the file browser and terminal timeouts (524) in production (HTTP/2 + Cloudflare) but not locally. Four-part fix: - Add ServerConnTracker to envd that tracks connections via ConnState callback, closes idle connections and disables keep-alives before snapshot, then closes all pre-snapshot zombie connections on restore (while preserving post-restore connections like the /init request) - Split envdclient into timeout (2min) and streaming (no timeout) HTTP clients; use streaming client for file transfers and process RPCs - Close host-side idle envdclient connections before PrepareSnapshot so FIN packets propagate during the 3s quiesce window - Add StreamingHTTPClient() accessor; streaming file transfer handlers in hostagent use it instead of the timeout client --- envd/internal/api/conntracker.go | 94 ++++++++++++++++++++++++++++++ envd/internal/api/download_test.go | 12 ++-- envd/internal/api/init.go | 6 ++ envd/internal/api/init_test.go | 2 +- envd/internal/api/snapshot.go | 13 ++++- envd/internal/api/store.go | 4 +- envd/main.go | 6 +- internal/envdclient/client.go | 42 ++++++++----- internal/envdclient/dialer.go | 16 +++++ internal/hostagent/server.go | 4 +- internal/sandbox/manager.go | 14 ++++- 11 files changed, 183 insertions(+), 30 deletions(-) create mode 100644 envd/internal/api/conntracker.go diff --git a/envd/internal/api/conntracker.go b/envd/internal/api/conntracker.go new file mode 100644 index 0000000..054f920 --- /dev/null +++ b/envd/internal/api/conntracker.go @@ -0,0 +1,94 @@ +package api + +import ( + "net" + "net/http" + "sync" +) + +// ServerConnTracker tracks active HTTP connections via http.Server.ConnState. +// Before a Firecracker snapshot, it closes idle connections, disables +// keep-alives, and records which connections existed pre-snapshot. After +// restore, it closes ALL pre-snapshot connections (they are zombie TCP +// sockets) while leaving post-restore connections (like the /init request) +// untouched. +type ServerConnTracker struct { + mu sync.Mutex + conns map[net.Conn]http.ConnState + preSnapshot map[net.Conn]struct{} + srv *http.Server +} + +func NewServerConnTracker() *ServerConnTracker { + return &ServerConnTracker{ + conns: make(map[net.Conn]http.ConnState), + } +} + +// SetServer stores a reference to the http.Server for keep-alive control. +// Must be called before ListenAndServe. +func (t *ServerConnTracker) SetServer(srv *http.Server) { + t.mu.Lock() + t.srv = srv + t.mu.Unlock() +} + +// Track implements the http.Server.ConnState callback signature. +func (t *ServerConnTracker) Track(conn net.Conn, state http.ConnState) { + t.mu.Lock() + defer t.mu.Unlock() + switch state { + case http.StateNew, http.StateActive, http.StateIdle: + t.conns[conn] = state + case http.StateHijacked, http.StateClosed: + delete(t.conns, conn) + delete(t.preSnapshot, conn) + } +} + +// PrepareForSnapshot closes idle connections, disables keep-alives, and +// records all remaining active connections. After the response completes +// (with keep-alives disabled, the connection closes), RestoreAfterSnapshot +// will close any that survived into the snapshot as zombie TCP sockets. +// +// GC cycles are handled by PortSubsystem.Stop() which runs before this. +func (t *ServerConnTracker) PrepareForSnapshot() { + t.mu.Lock() + defer t.mu.Unlock() + + if t.srv != nil { + t.srv.SetKeepAlivesEnabled(false) + } + + t.preSnapshot = make(map[net.Conn]struct{}, len(t.conns)) + for conn, state := range t.conns { + if state == http.StateIdle { + conn.Close() + delete(t.conns, conn) + } else { + t.preSnapshot[conn] = struct{}{} + } + } +} + +// RestoreAfterSnapshot closes ALL pre-snapshot connections (zombie TCP +// sockets after restore) and re-enables keep-alives. Post-restore +// connections (like the /init request that triggers this call) are not +// in the preSnapshot set and are left untouched. +// +// Safe to call on first boot — preSnapshot is nil, so this is a no-op +// aside from enabling keep-alives (which are already enabled by default). +func (t *ServerConnTracker) RestoreAfterSnapshot() { + t.mu.Lock() + defer t.mu.Unlock() + + for conn := range t.preSnapshot { + conn.Close() + delete(t.conns, conn) + } + t.preSnapshot = nil + + if t.srv != nil { + t.srv.SetKeepAlivesEnabled(true) + } +} diff --git a/envd/internal/api/download_test.go b/envd/internal/api/download_test.go index a4379cc..fc01573 100644 --- a/envd/internal/api/download_test.go +++ b/envd/internal/api/download_test.go @@ -99,7 +99,7 @@ func TestGetFilesContentDisposition(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -148,7 +148,7 @@ func TestGetFilesContentDispositionWithNestedPath(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -191,7 +191,7 @@ func TestGetFiles_GzipEncoding_ExplicitIdentityOffWithRange(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") // Create request and response recorder req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) @@ -232,7 +232,7 @@ func TestGetFiles_GzipDownload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) req.Header.Set("Accept-Encoding", "gzip") @@ -297,7 +297,7 @@ func TestPostFiles_GzipUpload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") req := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) req.Header.Set("Content-Type", mpWriter.FormDataContentType()) @@ -357,7 +357,7 @@ func TestGzipUploadThenGzipDownload(t *testing.T) { EnvVars: utils.NewMap[string, string](), User: currentUser.Username, } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") uploadReq := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) uploadReq.Header.Set("Content-Type", mpWriter.FormDataContentType()) diff --git a/envd/internal/api/init.go b/envd/internal/api/init.go index 3b2be4b..68a1b86 100644 --- a/envd/internal/api/init.go +++ b/envd/internal/api/init.go @@ -150,6 +150,12 @@ func (a *API) PostInit(w http.ResponseWriter, r *http.Request) { host.PollForMMDSOpts(ctx, a.mmdsChan, a.defaults.EnvVars) }() + // Close zombie connections from before the snapshot and re-enable + // keep-alives. On first boot this is a no-op (no zombie connections). + if a.connTracker != nil { + a.connTracker.RestoreAfterSnapshot() + } + // Start the port scanner and forwarder if they were stopped by a // pre-snapshot prepare call. Start is a no-op if already running, // so this is safe on first boot and only takes effect after restore. diff --git a/envd/internal/api/init_test.go b/envd/internal/api/init_test.go index 18ee203..9fe6ece 100644 --- a/envd/internal/api/init_test.go +++ b/envd/internal/api/init_test.go @@ -79,7 +79,7 @@ func newTestAPI(accessToken *SecureToken, mmdsClient MMDSClient) *API { defaults := &execcontext.Defaults{ EnvVars: utils.NewMap[string, string](), } - api := New(&logger, defaults, nil, false, context.Background(), nil, "test") + api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") if accessToken != nil { api.accessToken.TakeFrom(accessToken) } diff --git a/envd/internal/api/snapshot.go b/envd/internal/api/snapshot.go index d9e2edd..6d13381 100644 --- a/envd/internal/api/snapshot.go +++ b/envd/internal/api/snapshot.go @@ -7,9 +7,11 @@ import ( "net/http" ) -// PostSnapshotPrepare quiesces continuous goroutines (port scanner, forwarder) -// and forces a GC cycle before Firecracker takes a VM snapshot. This ensures -// the Go runtime's page allocator is in a consistent state when vCPUs are frozen. +// PostSnapshotPrepare quiesces continuous goroutines (port scanner, forwarder), +// closes idle HTTP connections, and forces a GC cycle before Firecracker takes +// a VM snapshot. Closing connections prevents Go runtime corruption from stale +// TCP state after snapshot restore. Keep-alives are disabled so the current +// request's connection also closes after the response. // // Called by the host agent as a best-effort signal before vm.Pause(). func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { @@ -20,6 +22,11 @@ func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { a.logger.Info().Msg("snapshot/prepare: port subsystem quiesced") } + if a.connTracker != nil { + a.connTracker.PrepareForSnapshot() + a.logger.Info().Msg("snapshot/prepare: idle connections closed, keep-alives disabled") + } + w.Header().Set("Cache-Control", "no-store") w.WriteHeader(http.StatusNoContent) } diff --git a/envd/internal/api/store.go b/envd/internal/api/store.go index ca97957..5365604 100644 --- a/envd/internal/api/store.go +++ b/envd/internal/api/store.go @@ -47,9 +47,10 @@ type API struct { // long-lived goroutines after snapshot restore. rootCtx context.Context portSubsystem *publicport.PortSubsystem + connTracker *ServerConnTracker } -func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool, rootCtx context.Context, portSubsystem *publicport.PortSubsystem, version string) *API { +func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool, rootCtx context.Context, portSubsystem *publicport.PortSubsystem, connTracker *ServerConnTracker, version string) *API { return &API{ logger: l, defaults: defaults, @@ -60,6 +61,7 @@ func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host. accessToken: &SecureToken{}, rootCtx: rootCtx, portSubsystem: portSubsystem, + connTracker: connTracker, version: version, } } diff --git a/envd/main.go b/envd/main.go index 1cd9403..3acd2c6 100644 --- a/envd/main.go +++ b/envd/main.go @@ -197,7 +197,9 @@ func main() { portSubsystem.Start(ctx) defer portSubsystem.Stop() - service := api.New(&envLogger, defaults, mmdsChan, isNotFC, ctx, portSubsystem, Version) + connTracker := api.NewServerConnTracker() + + service := api.New(&envLogger, defaults, mmdsChan, isNotFC, ctx, portSubsystem, connTracker, Version) handler := api.HandlerFromMux(service, m) middleware := authn.NewMiddleware(permissions.AuthenticateUsername) @@ -212,7 +214,9 @@ func main() { ReadTimeout: 0, WriteTimeout: 0, IdleTimeout: idleTimeout, + ConnState: connTracker.Track, } + connTracker.SetServer(s) // TODO: Not used anymore in template build, replaced by direct envd command call. if startCmdFlag != "" { diff --git a/internal/envdclient/client.go b/internal/envdclient/client.go index 294a37e..aed0349 100644 --- a/internal/envdclient/client.go +++ b/internal/envdclient/client.go @@ -19,10 +19,11 @@ import ( // Client wraps the Connect RPC client for envd's Process and Filesystem services. type Client struct { - hostIP string - base string - healthURL string - httpClient *http.Client + hostIP string + base string + healthURL string + httpClient *http.Client + streamingClient *http.Client process genconnect.ProcessClient filesystem genconnect.FilesystemClient @@ -32,29 +33,44 @@ type Client struct { func New(hostIP string) *Client { base := baseURL(hostIP) httpClient := newHTTPClient() + streamingClient := newStreamingHTTPClient() return &Client{ - hostIP: hostIP, - base: base, - healthURL: base + "/health", - httpClient: httpClient, - process: genconnect.NewProcessClient(httpClient, base), - filesystem: genconnect.NewFilesystemClient(httpClient, base), + hostIP: hostIP, + base: base, + healthURL: base + "/health", + httpClient: httpClient, + streamingClient: streamingClient, + process: genconnect.NewProcessClient(streamingClient, base), + filesystem: genconnect.NewFilesystemClient(httpClient, base), } } +// CloseIdleConnections closes idle connections on both the unary and streaming +// transports. Call this before taking a VM snapshot to remove stale TCP state +// from the guest. +func (c *Client) CloseIdleConnections() { + c.httpClient.CloseIdleConnections() + c.streamingClient.CloseIdleConnections() +} + // BaseURL returns the HTTP base URL for reaching envd. func (c *Client) BaseURL() string { return c.base } -// HTTPClient returns the underlying http.Client used for envd requests. -// Use this instead of http.DefaultClient when making direct HTTP calls to envd -// (e.g. file streaming) to avoid sharing the global transport with proxy traffic. +// HTTPClient returns the http.Client with a 2-minute request timeout. +// Suitable for short-lived envd calls (health, init, snapshot/prepare). func (c *Client) HTTPClient() *http.Client { return c.httpClient } +// StreamingHTTPClient returns the http.Client without a request timeout. +// Use for streaming file transfers or any request that may run indefinitely. +func (c *Client) StreamingHTTPClient() *http.Client { + return c.streamingClient +} + // ExecResult holds the output of a command execution. type ExecResult struct { Stdout []byte diff --git a/internal/envdclient/dialer.go b/internal/envdclient/dialer.go index 1813ceb..ffd3509 100644 --- a/internal/envdclient/dialer.go +++ b/internal/envdclient/dialer.go @@ -20,6 +20,22 @@ func baseURL(hostIP string) string { // so that proxy traffic to user services inside the sandbox cannot interfere // with envd RPC connections (PTY streams, exec, file ops). func newHTTPClient() *http.Client { + return &http.Client{ + Timeout: 2 * time.Minute, + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + }, + } +} + +// newStreamingHTTPClient returns an http.Client without an overall timeout, +// for long-lived streaming RPCs (PTY, exec stream) that can run indefinitely. +func newStreamingHTTPClient() *http.Client { return &http.Client{ Transport: &http.Transport{ MaxIdleConnsPerHost: 10, diff --git a/internal/hostagent/server.go b/internal/hostagent/server.go index e15ef0b..a1b40c8 100644 --- a/internal/hostagent/server.go +++ b/internal/hostagent/server.go @@ -459,7 +459,7 @@ func (s *Server) WriteFileStream( } httpReq.Header.Set("Content-Type", mpWriter.FormDataContentType()) - resp, err := client.HTTPClient().Do(httpReq) + resp, err := client.StreamingHTTPClient().Do(httpReq) if err != nil { pw.CloseWithError(err) <-errCh @@ -504,7 +504,7 @@ func (s *Server) ReadFileStream( return connect.NewError(connect.CodeInternal, fmt.Errorf("create request: %w", err)) } - resp, err := client.HTTPClient().Do(httpReq) + resp, err := client.StreamingHTTPClient().Do(httpReq) if err != nil { return connect.NewError(connect.CodeInternal, fmt.Errorf("read file stream: %w", err)) } diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index 3c49cd6..117d8c7 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -387,9 +387,17 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { sb.connTracker.Drain(2 * time.Second) slog.Debug("pause: proxy connections drained", "id", sandboxID) - // Step 0b: Signal envd to quiesce continuous goroutines (port scanner, - // forwarder) and run GC before freezing vCPUs. This prevents Go runtime - // page allocator corruption ("bad summary data") on snapshot restore. + // Step 0b: Close host-side idle connections to envd. Done before + // PrepareSnapshot so FIN packets propagate to the guest during the + // PrepareSnapshot window (no extra sleep needed). + sb.client.CloseIdleConnections() + slog.Debug("pause: envd client idle connections closed", "id", sandboxID) + + // Step 0c: Signal envd to quiesce continuous goroutines (port scanner, + // forwarder), close idle HTTP connections, and run GC before freezing + // vCPUs. This prevents Go runtime page allocator corruption ("bad + // summary data") on snapshot restore. The 3s timeout also gives time + // for the FINs from Step 0b to be processed by the guest kernel. // Best-effort: a failure is logged but does not abort the pause. func() { prepCtx, prepCancel := context.WithTimeout(ctx, 3*time.Second) From bb582deefaddf3fbad09ffde97f9a5e3ddf43f65 Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sat, 2 May 2026 13:48:51 +0600 Subject: [PATCH 03/10] fix: prevent sandbox halt after resume by fixing HTTP/2 HOL blocking and adding timeouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Disable HTTP/2 on both host agent server and CP→agent transport — multiplexing caused head-of-line blocking when a slow sandbox RPC stalled the shared connection. Add ResponseHeaderTimeout to envd HTTP clients. Merge SetDefaults into Resume's PostInit call to eliminate an extra round-trip that could hang on a stale connection. --- cmd/host-agent/main.go | 5 +++++ internal/envdclient/dialer.go | 10 ++++++---- internal/hostagent/server.go | 9 +-------- internal/sandbox/manager.go | 8 +++++--- pkg/lifecycle/hostpool.go | 14 +++++++++++++- 5 files changed, 30 insertions(+), 16 deletions(-) diff --git a/cmd/host-agent/main.go b/cmd/host-agent/main.go index 89d65da..8f3a894 100644 --- a/cmd/host-agent/main.go +++ b/cmd/host-agent/main.go @@ -154,6 +154,11 @@ func main() { Addr: listenAddr, ReadHeaderTimeout: 10 * time.Second, IdleTimeout: 620 * time.Second, // > typical LB upstream timeout (600s) + // Disable HTTP/2: empty non-nil map prevents Go from registering + // the h2 ALPN token. Connect RPC works over HTTP/1.1; HTTP/2 + // multiplexing causes HOL blocking when a slow sandbox RPC stalls + // the shared connection. + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } // mTLS is mandatory — refuse to start without a valid certificate. diff --git a/internal/envdclient/dialer.go b/internal/envdclient/dialer.go index ffd3509..a7dd2a9 100644 --- a/internal/envdclient/dialer.go +++ b/internal/envdclient/dialer.go @@ -23,8 +23,9 @@ func newHTTPClient() *http.Client { return &http.Client{ Timeout: 2 * time.Minute, Transport: &http.Transport{ - MaxIdleConnsPerHost: 10, - IdleConnTimeout: 90 * time.Second, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, DialContext: (&net.Dialer{ Timeout: 10 * time.Second, KeepAlive: 30 * time.Second, @@ -38,8 +39,9 @@ func newHTTPClient() *http.Client { func newStreamingHTTPClient() *http.Client { return &http.Client{ Transport: &http.Transport{ - MaxIdleConnsPerHost: 10, - IdleConnTimeout: 90 * time.Second, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, DialContext: (&net.Dialer{ Timeout: 10 * time.Second, KeepAlive: 30 * time.Second, diff --git a/internal/hostagent/server.go b/internal/hostagent/server.go index a1b40c8..816a99f 100644 --- a/internal/hostagent/server.go +++ b/internal/hostagent/server.go @@ -109,18 +109,11 @@ func (s *Server) ResumeSandbox( req *connect.Request[pb.ResumeSandboxRequest], ) (*connect.Response[pb.ResumeSandboxResponse], error) { msg := req.Msg - sb, err := s.mgr.Resume(ctx, msg.SandboxId, int(msg.TimeoutSec), msg.KernelVersion) + sb, err := s.mgr.Resume(ctx, msg.SandboxId, int(msg.TimeoutSec), msg.KernelVersion, msg.DefaultUser, msg.DefaultEnv) if err != nil { return nil, connect.NewError(connect.CodeInternal, err) } - // Apply template defaults (user, env vars) if provided. - if msg.DefaultUser != "" || len(msg.DefaultEnv) > 0 { - if err := s.mgr.SetDefaults(ctx, sb.ID, msg.DefaultUser, msg.DefaultEnv); err != nil { - slog.Warn("failed to set sandbox defaults on resume", "sandbox", sb.ID, "error", err) - } - } - return connect.NewResponse(&pb.ResumeSandboxResponse{ SandboxId: sb.ID, Status: string(sb.Status), diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index 117d8c7..82dba06 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -626,7 +626,9 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { // Resume restores a paused sandbox from its snapshot using UFFD for // lazy memory loading. The sandbox gets a new network slot. -func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, kernelVersion string) (*models.Sandbox, error) { +// Optional defaultUser and defaultEnv are applied via a single PostInit +// call so that template defaults are set without an extra round-trip. +func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, kernelVersion string, defaultUser string, defaultEnv map[string]string) (*models.Sandbox, error) { pauseDir := layout.PauseSnapshotDir(m.cfg.WrennDir, sandboxID) if _, err := os.Stat(pauseDir); err != nil { return nil, fmt.Errorf("no snapshot found for sandbox %s", sandboxID) @@ -783,8 +785,8 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, return nil, fmt.Errorf("wait for envd: %w", err) } - // Trigger envd to re-read MMDS so it picks up the new sandbox/template IDs. - if err := client.PostInit(waitCtx); err != nil { + // Trigger envd to re-read MMDS and apply template defaults in a single call. + if err := client.PostInitWithDefaults(waitCtx, defaultUser, defaultEnv); err != nil { slog.Warn("post-init failed after resume, metadata files may be stale", "sandbox", sandboxID, "error", err) } diff --git a/pkg/lifecycle/hostpool.go b/pkg/lifecycle/hostpool.go index 48ed6c9..508bb52 100644 --- a/pkg/lifecycle/hostpool.go +++ b/pkg/lifecycle/hostpool.go @@ -39,7 +39,19 @@ func NewHostClientPool() *HostClientPool { // (use auth.CPClientTLSConfig to construct it). func NewHostClientPoolTLS(tlsCfg *tls.Config) *HostClientPool { transport := &http.Transport{ - TLSClientConfig: tlsCfg, + TLSClientConfig: tlsCfg, + ForceAttemptHTTP2: false, + // Empty non-nil map disables HTTP/2 ALPN negotiation, forcing HTTP/1.1. + // Connect RPC works over HTTP/1.1; HTTP/2 multiplexing causes HOL + // blocking when a single slow sandbox RPC stalls the shared connection. + TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper), + MaxIdleConnsPerHost: 20, + IdleConnTimeout: 90 * time.Second, + ResponseHeaderTimeout: 45 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, } return &HostClientPool{ clients: make(map[string]hostagentv1connect.HostAgentServiceClient), From 3deecbff895e4a3e528e43f462be296e8157f786 Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sat, 2 May 2026 17:22:51 +0600 Subject: [PATCH 04/10] fix: prevent Go runtime memory corruption and sandbox halt after snapshot restore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three root causes addressed: 1. Go page allocator corruption: allocations between the pre-snapshot GC and VM freeze leave the summary tree inconsistent. After restore, GC reads corrupted metadata — either panicking (killing PID 1 → kernel panic) or silently failing to collect, causing unbounded heap growth until OOM. Fix: move GC to after all HTTP allocations in PostSnapshotPrepare, then set GOMAXPROCS(1) so any remaining allocations run sequentially with no concurrent page allocator access. GOMAXPROCS is restored on first health check after restore. 2. PostInit timeout starvation: WaitUntilReady and PostInit shared a single 30s context. If WaitUntilReady consumed most of it, PostInit failed — RestoreAfterSnapshot never ran, leaving envd with keep-alives disabled and zombie connections. Fix: separate timeout contexts. 3. CP HTTP server missing timeouts: no ReadHeaderTimeout or IdleTimeout caused goroutine leaks from hung proxy connections. Fix: add both, matching host agent values. Also adds UFFD prefetch to proactively load all guest pages after restore, eliminating on-demand page fault latency for subsequent RPC calls. --- .gitignore | 3 ++ CLAUDE.md | 39 +++++++++++++++ VERSION_AGENT | 2 +- VERSION_CP | 2 +- envd/VERSION | 2 +- envd/internal/api/conntracker.go | 2 +- envd/internal/api/init.go | 14 +++--- envd/internal/api/snapshot.go | 30 ++++++++++++ envd/internal/api/store.go | 46 ++++++++++++++++++ envd/internal/port/subsystem.go | 15 ++---- internal/sandbox/manager.go | 30 +++++++++--- internal/uffd/server.go | 82 ++++++++++++++++++++++++++++++++ pkg/cpserver/run.go | 6 ++- 13 files changed, 245 insertions(+), 28 deletions(-) diff --git a/.gitignore b/.gitignore index 4be2db8..bca25e0 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,7 @@ go.work.sum e2b/ .impeccable.md .gstack +.mcp.json ## Builds builds/ @@ -49,3 +50,5 @@ frontend/build/ internal/dashboard/static/* !internal/dashboard/static/.gitkeep.dual-graph/ .dual-graph/ +# Added by code-review-graph +.code-review-graph/ diff --git a/CLAUDE.md b/CLAUDE.md index 56fdbbc..d8f8e52 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -372,3 +372,42 @@ All values are CSS custom properties in `frontend/src/app.css`. 4. **Legible at speed.** Users scan dashboards in seconds. Strong typographic contrast (serif h1, mono IDs, sans body), consistent patterns, and predictable placement let users orientate instantly without reading everything. 5. **Craft signals trust.** For infrastructure that runs production code, the quality of the UI is a proxy for the quality of the product. Pixel-level decisions matter. Polish is not decoration — it's a trust signal. + + +## MCP Tools: code-review-graph + +**IMPORTANT: This project has a knowledge graph. ALWAYS use the +code-review-graph MCP tools BEFORE using Grep/Glob/Read to explore +the codebase.** The graph is faster, cheaper (fewer tokens), and gives +you structural context (callers, dependents, test coverage) that file +scanning cannot. + +### When to use graph tools FIRST + +- **Exploring code**: `semantic_search_nodes` or `query_graph` instead of Grep +- **Understanding impact**: `get_impact_radius` instead of manually tracing imports +- **Code review**: `detect_changes` + `get_review_context` instead of reading entire files +- **Finding relationships**: `query_graph` with callers_of/callees_of/imports_of/tests_for +- **Architecture questions**: `get_architecture_overview` + `list_communities` + +Fall back to Grep/Glob/Read **only** when the graph doesn't cover what you need. + +### Key Tools + +| Tool | Use when | +|------|----------| +| `detect_changes` | Reviewing code changes — gives risk-scored analysis | +| `get_review_context` | Need source snippets for review — token-efficient | +| `get_impact_radius` | Understanding blast radius of a change | +| `get_affected_flows` | Finding which execution paths are impacted | +| `query_graph` | Tracing callers, callees, imports, tests, dependencies | +| `semantic_search_nodes` | Finding functions/classes by name or keyword | +| `get_architecture_overview` | Understanding high-level codebase structure | +| `refactor_tool` | Planning renames, finding dead code | + +### Workflow + +1. The graph auto-updates on file changes (via hooks). +2. Use `detect_changes` for code review. +3. Use `get_affected_flows` to understand impact. +4. Use `query_graph` pattern="tests_for" to check coverage. diff --git a/VERSION_AGENT b/VERSION_AGENT index 17e51c3..d917d3e 100644 --- a/VERSION_AGENT +++ b/VERSION_AGENT @@ -1 +1 @@ -0.1.1 +0.1.2 diff --git a/VERSION_CP b/VERSION_CP index 845639e..9faa1b7 100644 --- a/VERSION_CP +++ b/VERSION_CP @@ -1 +1 @@ -0.1.4 +0.1.5 diff --git a/envd/VERSION b/envd/VERSION index 17e51c3..d917d3e 100644 --- a/envd/VERSION +++ b/envd/VERSION @@ -1 +1 @@ -0.1.1 +0.1.2 diff --git a/envd/internal/api/conntracker.go b/envd/internal/api/conntracker.go index 054f920..cc3750e 100644 --- a/envd/internal/api/conntracker.go +++ b/envd/internal/api/conntracker.go @@ -51,7 +51,7 @@ func (t *ServerConnTracker) Track(conn net.Conn, state http.ConnState) { // (with keep-alives disabled, the connection closes), RestoreAfterSnapshot // will close any that survived into the snapshot as zombie TCP sockets. // -// GC cycles are handled by PortSubsystem.Stop() which runs before this. +// GC is handled by PostSnapshotPrepare after this returns. func (t *ServerConnTracker) PrepareForSnapshot() { t.mu.Lock() defer t.mu.Unlock() diff --git a/envd/internal/api/init.go b/envd/internal/api/init.go index 68a1b86..ac4f8eb 100644 --- a/envd/internal/api/init.go +++ b/envd/internal/api/init.go @@ -150,15 +150,17 @@ func (a *API) PostInit(w http.ResponseWriter, r *http.Request) { host.PollForMMDSOpts(ctx, a.mmdsChan, a.defaults.EnvVars) }() - // Close zombie connections from before the snapshot and re-enable - // keep-alives. On first boot this is a no-op (no zombie connections). + // Safety net: if the health check's postRestoreRecovery didn't run yet + // (e.g. PostInit arrived before the first health check), re-enable GC + // here. On first boot needsRestore is false so CAS is a no-op. + if a.needsRestore.CompareAndSwap(true, false) { + a.postRestoreRecovery() + } + // RestoreAfterSnapshot is idempotent (clears preSnapshot set), and + // Start is a no-op if already running. if a.connTracker != nil { a.connTracker.RestoreAfterSnapshot() } - - // Start the port scanner and forwarder if they were stopped by a - // pre-snapshot prepare call. Start is a no-op if already running, - // so this is safe on first boot and only takes effect after restore. if a.portSubsystem != nil { a.portSubsystem.Start(a.rootCtx) } diff --git a/envd/internal/api/snapshot.go b/envd/internal/api/snapshot.go index 6d13381..0e84dec 100644 --- a/envd/internal/api/snapshot.go +++ b/envd/internal/api/snapshot.go @@ -5,6 +5,8 @@ package api import ( "net/http" + "runtime" + "runtime/debug" ) // PostSnapshotPrepare quiesces continuous goroutines (port scanner, forwarder), @@ -13,6 +15,14 @@ import ( // TCP state after snapshot restore. Keep-alives are disabled so the current // request's connection also closes after the response. // +// To prevent Go page allocator corruption, GOMAXPROCS is set to 1 after the +// final GC. With a single P, all goroutines (including any that allocate +// between now and the VM freeze) run sequentially. This eliminates concurrent +// page allocator access, so even if the freeze lands mid-allocation, the +// in-flight operation completes atomically on restore before any GC reads +// the summary tree. GOMAXPROCS is restored on the first health check after +// restore (see postRestoreRecovery). +// // Called by the host agent as a best-effort signal before vm.Pause(). func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() @@ -27,6 +37,26 @@ func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { a.logger.Info().Msg("snapshot/prepare: idle connections closed, keep-alives disabled") } + // Send the response before the GC so HTTP buffer allocations happen + // while GOMAXPROCS is still at its normal value. w.Header().Set("Cache-Control", "no-store") w.WriteHeader(http.StatusNoContent) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + // Final GC pass after all major allocations (connection cleanup, + // response write) are complete. + runtime.GC() + runtime.GC() + debug.FreeOSMemory() + + // Reduce to a single P so any post-GC allocations (HTTP server + // connection teardown) run sequentially — no concurrent page allocator + // access that could leave the summary tree inconsistent if the VM + // freezes mid-update. + a.prevGOMAXPROCS = runtime.GOMAXPROCS(1) + + a.needsRestore.Store(true) + a.logger.Info().Msg("snapshot/prepare: GOMAXPROCS=1, ready for freeze") } diff --git a/envd/internal/api/store.go b/envd/internal/api/store.go index 5365604..ba4d445 100644 --- a/envd/internal/api/store.go +++ b/envd/internal/api/store.go @@ -7,7 +7,10 @@ import ( "context" "encoding/json" "net/http" + "runtime" + "runtime/debug" "sync" + "sync/atomic" "github.com/rs/zerolog" @@ -48,6 +51,12 @@ type API struct { rootCtx context.Context portSubsystem *publicport.PortSubsystem connTracker *ServerConnTracker + + // needsRestore is set by PostSnapshotPrepare and cleared on the first + // health check or PostInit after restore. While set, GOMAXPROCS is 1 + // to prevent concurrent page allocator access during the freeze window. + needsRestore atomic.Bool + prevGOMAXPROCS int // GOMAXPROCS value before PrepareSnapshot reduced it to 1 } func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool, rootCtx context.Context, portSubsystem *publicport.PortSubsystem, connTracker *ServerConnTracker, version string) *API { @@ -69,6 +78,14 @@ func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host. func (a *API) GetHealth(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() + // On the first health check after snapshot restore, re-enable GC and + // clean up stale state. By this point, any goroutine that was mid- + // allocation when the VM was frozen has completed, so the page allocator + // summary tree is consistent and safe for GC to read. + if a.needsRestore.CompareAndSwap(true, false) { + a.postRestoreRecovery() + } + a.logger.Trace().Msg("Health check") w.Header().Set("Cache-Control", "no-store") @@ -79,6 +96,35 @@ func (a *API) GetHealth(w http.ResponseWriter, r *http.Request) { }) } +// postRestoreRecovery restores GOMAXPROCS, runs a clean GC cycle, closes +// zombie TCP connections from before the snapshot, re-enables HTTP keep-alives, +// and restarts the port subsystem. Called exactly once per restore cycle, +// guarded by a CAS on needsRestore in both GetHealth and PostInit. +func (a *API) postRestoreRecovery() { + // Restore parallelism first — any goroutine that was mid-allocation + // when the VM froze has already completed by the time a health check + // or PostInit request is being served, so the page allocator summary + // tree is consistent and safe for a full GC. + prev := a.prevGOMAXPROCS + if prev > 0 { + runtime.GOMAXPROCS(prev) + } + runtime.GC() + runtime.GC() + debug.FreeOSMemory() + a.logger.Info().Msg("restore: GOMAXPROCS restored, GC complete") + + if a.connTracker != nil { + a.connTracker.RestoreAfterSnapshot() + a.logger.Info().Msg("restore: zombie connections closed, keep-alives re-enabled") + } + + if a.portSubsystem != nil { + a.portSubsystem.Start(a.rootCtx) + a.logger.Info().Msg("restore: port subsystem restarted") + } +} + func (a *API) GetMetrics(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() diff --git a/envd/internal/port/subsystem.go b/envd/internal/port/subsystem.go index 094b2c4..e70a2db 100644 --- a/envd/internal/port/subsystem.go +++ b/envd/internal/port/subsystem.go @@ -5,8 +5,6 @@ package port import ( "context" - "runtime" - "runtime/debug" "sync" "time" @@ -72,9 +70,12 @@ func (p *PortSubsystem) Start(parentCtx context.Context) { }() } -// Stop quiesces the scanner and forwarder goroutines and forces a GC cycle -// to put the Go runtime's page allocator in a consistent state before snapshot. +// Stop quiesces the scanner and forwarder goroutines. // Blocks until both goroutines have exited. Safe to call if already stopped. +// +// GC is NOT run here — it is deferred to PostSnapshotPrepare so that the +// GC happens after all allocations (connection cleanup, HTTP response) are +// complete, minimizing the window where page allocator corruption can occur. func (p *PortSubsystem) Stop() { p.mu.Lock() if !p.running { @@ -90,12 +91,6 @@ func (p *PortSubsystem) Stop() { cancelFn() wg.Wait() - - // Force two GC cycles to ensure all spans are swept and the page - // allocator summary tree is fully consistent before the VM is frozen. - runtime.GC() - runtime.GC() - debug.FreeOSMemory() } // Restart stops the subsystem (if running) and starts it again with a fresh diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index 82dba06..371d95b 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -768,12 +768,17 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, return nil, fmt.Errorf("restore VM from snapshot: %w", err) } + // Start prefetching all guest memory pages in the background. + // This runs concurrently with envd startup and eliminates on-demand + // page fault latency for subsequent RPC calls. + uffdServer.Prefetch() + // Wait for envd to be ready. client := envdclient.New(slot.HostIP.String()) waitCtx, waitCancel := context.WithTimeout(ctx, m.cfg.EnvdTimeout) - defer waitCancel() if err := client.WaitUntilReady(waitCtx); err != nil { + waitCancel() warnErr("uffd server stop error", sandboxID, uffdServer.Stop()) source.Close() warnErr("vm destroy error", sandboxID, m.vm.Destroy(context.Background(), sandboxID)) @@ -784,9 +789,14 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int, m.loops.Release(baseImagePath) return nil, fmt.Errorf("wait for envd: %w", err) } + waitCancel() - // Trigger envd to re-read MMDS and apply template defaults in a single call. - if err := client.PostInitWithDefaults(waitCtx, defaultUser, defaultEnv); err != nil { + // PostInit gets its own timeout — WaitUntilReady may have consumed most + // of EnvdTimeout, starving PostInit of time for RestoreAfterSnapshot. + initCtx, initCancel := context.WithTimeout(ctx, m.cfg.EnvdTimeout) + defer initCancel() + + if err := client.PostInitWithDefaults(initCtx, defaultUser, defaultEnv); err != nil { slog.Warn("post-init failed after resume, metadata files may be stale", "sandbox", sandboxID, "error", err) } @@ -1200,12 +1210,15 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team return nil, fmt.Errorf("restore VM from snapshot: %w", err) } + // Start prefetching all guest memory pages in the background. + uffdServer.Prefetch() + // Wait for envd. client := envdclient.New(slot.HostIP.String()) waitCtx, waitCancel := context.WithTimeout(ctx, m.cfg.EnvdTimeout) - defer waitCancel() if err := client.WaitUntilReady(waitCtx); err != nil { + waitCancel() warnErr("uffd server stop error", sandboxID, uffdServer.Stop()) source.Close() warnErr("vm destroy error", sandboxID, m.vm.Destroy(context.Background(), sandboxID)) @@ -1216,9 +1229,14 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID string, team m.loops.Release(baseRootfs) return nil, fmt.Errorf("wait for envd: %w", err) } + waitCancel() - // Trigger envd to re-read MMDS so it picks up the new sandbox/template IDs. - if err := client.PostInit(waitCtx); err != nil { + // PostInit gets its own timeout — WaitUntilReady may have consumed most + // of EnvdTimeout, starving PostInit of time for RestoreAfterSnapshot. + initCtx, initCancel := context.WithTimeout(ctx, m.cfg.EnvdTimeout) + defer initCancel() + + if err := client.PostInit(initCtx); err != nil { slog.Warn("post-init failed after template restore, metadata files may be stale", "sandbox", sandboxID, "error", err) } diff --git a/internal/uffd/server.go b/internal/uffd/server.go index be53f53..b838cbc 100644 --- a/internal/uffd/server.go +++ b/internal/uffd/server.go @@ -57,6 +57,17 @@ type Server struct { // exitPipe signals the poll loop to stop. exitR *os.File exitW *os.File + + // Set by handle() after Firecracker connects; read by Prefetch() + // after waiting on readyCh (which establishes happens-before). + uffdFd fd + mapping *Mapping + + // Prefetch lifecycle: cancel stops the goroutine, prefetchDone is + // closed when it exits. Stop() drains prefetchDone before returning + // so the caller can safely close diff file handles. + prefetchCancel context.CancelFunc + prefetchDone chan struct{} } // NewServer creates a UFFD server that will listen on the given socket path @@ -113,10 +124,17 @@ func (s *Server) Ready() <-chan struct{} { } // Stop signals the UFFD poll loop to exit and waits for it to finish. +// Also cancels and waits for any running prefetch goroutine. func (s *Server) Stop() error { + if s.prefetchCancel != nil { + s.prefetchCancel() + } // Write a byte to the exit pipe to wake the poll loop. _, _ = s.exitW.Write([]byte{0}) <-s.doneCh + if s.prefetchDone != nil { + <-s.prefetchDone + } return s.doneErr } @@ -172,6 +190,10 @@ func (s *Server) handle(ctx context.Context) error { mapping := NewMapping(regions) + // Store for use by Prefetch(). + s.uffdFd = uffdFd + s.mapping = mapping + slog.Info("uffd handler connected", "regions", len(regions), "fd", int(uffdFd), @@ -294,6 +316,66 @@ func (s *Server) faultPage(ctx context.Context, uffdFd fd, addr uintptr, offset return nil } +// Prefetch proactively loads all guest memory pages in the background. +// It iterates over every page in every UFFD region and copies it from the +// diff file into guest memory via UFFDIO_COPY. Pages already loaded by +// on-demand faults return nil from faultPage (EEXIST handled internally). +// This eliminates the per-request latency caused by lazy page faulting +// after snapshot restore. +// +// The goroutine blocks on readyCh before reading the uffd fd and mapping +// fields (establishes happens-before with handle()). It uses an internal +// context independent of the caller's RPC context so it survives after the +// create/resume RPC returns. Stop() cancels and joins the goroutine. +func (s *Server) Prefetch() { + ctx, cancel := context.WithCancel(context.Background()) + s.prefetchCancel = cancel + s.prefetchDone = make(chan struct{}) + + go func() { + defer close(s.prefetchDone) + + // Wait for Firecracker to connect and send the uffd fd. + select { + case <-s.readyCh: + case <-ctx.Done(): + return + } + + uffdFd := s.uffdFd + mapping := s.mapping + if mapping == nil { + return + } + + var total, errored int + for _, region := range mapping.Regions { + pageSize := region.PageSize + if pageSize == 0 { + continue + } + for off := uintptr(0); off < region.Size; off += pageSize { + if ctx.Err() != nil { + slog.Debug("uffd prefetch cancelled", + "pages", total, "errors", errored) + return + } + + addr := region.BaseHostVirtAddr + off + memOffset := int64(off) + int64(region.Offset) + + if err := s.faultPage(ctx, uffdFd, addr, memOffset, pageSize); err != nil { + errored++ + } else { + total++ + } + } + } + slog.Info("uffd prefetch complete", + "pages", total, "errors", errored) + }() +} + // DiffFileSource serves pages from a snapshot's compact diff file using // the header's block mapping to resolve offsets. type DiffFileSource struct { diff --git a/pkg/cpserver/run.go b/pkg/cpserver/run.go index 58ef7f1..e49b4e2 100644 --- a/pkg/cpserver/run.go +++ b/pkg/cpserver/run.go @@ -256,8 +256,10 @@ func Run(opts ...Option) { proxyWrapper := api.NewSandboxProxyWrapper(srv.Handler(), queries, hostPool) httpServer := &http.Server{ - Addr: cfg.ListenAddr, - Handler: proxyWrapper, + Addr: cfg.ListenAddr, + Handler: proxyWrapper, + ReadHeaderTimeout: 10 * time.Second, + IdleTimeout: 620 * time.Second, // > typical LB/Cloudflare upstream timeout } // Graceful shutdown on signal. From 0b53d34417617561f633aa67a7f625edff7a7ddc Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 02:47:15 +0600 Subject: [PATCH 05/10] feat: rewrite envd guest agent in Rust (envd-rs) Complete Rust rewrite of the Go envd guest daemon that runs as PID 1 inside Firecracker microVMs. Feature-complete across all 8 phases: - Health, metrics, and env var endpoints - Crypto (SHA-256/512, HMAC), auth (secure token, signing), init/snapshot - Connect RPC via connectrpc + buffa (process + filesystem services) - File transfer (GET/POST /files) with gzip, multipart, chown, ENOSPC - Port subsystem (/proc/net/tcp scanner, socat forwarder) - Cgroup2 manager with noop fallback - Snapshot/restore lifecycle (conntracker, port subsystem stop/restart) - SIGTERM graceful shutdown, --cmd initial process spawn - MMDS metadata polling for Firecracker mode 42 source files, ~4200 LOC, 4.1MB stripped release binary. Makefile updated: build-envd now targets Rust (musl static), build-envd-go preserved for Go builds. --- .gitignore | 3 + Makefile | 32 +- envd-rs/.cargo/config.toml | 2 + envd-rs/Cargo.lock | 2622 +++++++++++++++++++++++++ envd-rs/Cargo.toml | 83 + envd-rs/README.md | 141 ++ envd-rs/buffa-types-shim/Cargo.toml | 12 + envd-rs/buffa-types-shim/build.rs | 9 + envd-rs/buffa-types-shim/src/lib.rs | 6 + envd-rs/build.rs | 11 + envd-rs/rust-toolchain.toml | 3 + envd-rs/src/auth/middleware.rs | 56 + envd-rs/src/auth/mod.rs | 3 + envd-rs/src/auth/signing.rs | 85 + envd-rs/src/auth/token.rs | 127 ++ envd-rs/src/cgroups/mod.rs | 66 + envd-rs/src/config.rs | 16 + envd-rs/src/conntracker.rs | 79 + envd-rs/src/crypto/hmac_sha256.rs | 22 + envd-rs/src/crypto/mod.rs | 3 + envd-rs/src/crypto/sha256.rs | 33 + envd-rs/src/crypto/sha512.rs | 24 + envd-rs/src/execcontext.rs | 42 + envd-rs/src/host/metrics.rs | 73 + envd-rs/src/host/mmds.rs | 113 ++ envd-rs/src/host/mod.rs | 2 + envd-rs/src/http/encoding.rs | 147 ++ envd-rs/src/http/envs.rs | 25 + envd-rs/src/http/error.rs | 20 + envd-rs/src/http/files.rs | 443 +++++ envd-rs/src/http/health.rs | 39 + envd-rs/src/http/init.rs | 274 +++ envd-rs/src/http/metrics.rs | 102 + envd-rs/src/http/mod.rs | 56 + envd-rs/src/http/snapshot.rs | 32 + envd-rs/src/logging.rs | 17 + envd-rs/src/main.rs | 224 +++ envd-rs/src/permissions/mod.rs | 2 + envd-rs/src/permissions/path.rs | 72 + envd-rs/src/permissions/user.rs | 32 + envd-rs/src/port/conn.rs | 112 ++ envd-rs/src/port/forwarder.rs | 181 ++ envd-rs/src/port/mod.rs | 4 + envd-rs/src/port/scanner.rs | 79 + envd-rs/src/port/subsystem.rs | 78 + envd-rs/src/rpc/entry.rs | 142 ++ envd-rs/src/rpc/filesystem_service.rs | 402 ++++ envd-rs/src/rpc/mod.rs | 26 + envd-rs/src/rpc/pb.rs | 10 + envd-rs/src/rpc/process_handler.rs | 400 ++++ envd-rs/src/rpc/process_service.rs | 438 +++++ envd-rs/src/state.rs | 42 + envd-rs/src/util.rs | 33 + scripts/update-minimal-rootfs.sh | 6 - 54 files changed, 7089 insertions(+), 17 deletions(-) create mode 100644 envd-rs/.cargo/config.toml create mode 100644 envd-rs/Cargo.lock create mode 100644 envd-rs/Cargo.toml create mode 100644 envd-rs/README.md create mode 100644 envd-rs/buffa-types-shim/Cargo.toml create mode 100644 envd-rs/buffa-types-shim/build.rs create mode 100644 envd-rs/buffa-types-shim/src/lib.rs create mode 100644 envd-rs/build.rs create mode 100644 envd-rs/rust-toolchain.toml create mode 100644 envd-rs/src/auth/middleware.rs create mode 100644 envd-rs/src/auth/mod.rs create mode 100644 envd-rs/src/auth/signing.rs create mode 100644 envd-rs/src/auth/token.rs create mode 100644 envd-rs/src/cgroups/mod.rs create mode 100644 envd-rs/src/config.rs create mode 100644 envd-rs/src/conntracker.rs create mode 100644 envd-rs/src/crypto/hmac_sha256.rs create mode 100644 envd-rs/src/crypto/mod.rs create mode 100644 envd-rs/src/crypto/sha256.rs create mode 100644 envd-rs/src/crypto/sha512.rs create mode 100644 envd-rs/src/execcontext.rs create mode 100644 envd-rs/src/host/metrics.rs create mode 100644 envd-rs/src/host/mmds.rs create mode 100644 envd-rs/src/host/mod.rs create mode 100644 envd-rs/src/http/encoding.rs create mode 100644 envd-rs/src/http/envs.rs create mode 100644 envd-rs/src/http/error.rs create mode 100644 envd-rs/src/http/files.rs create mode 100644 envd-rs/src/http/health.rs create mode 100644 envd-rs/src/http/init.rs create mode 100644 envd-rs/src/http/metrics.rs create mode 100644 envd-rs/src/http/mod.rs create mode 100644 envd-rs/src/http/snapshot.rs create mode 100644 envd-rs/src/logging.rs create mode 100644 envd-rs/src/main.rs create mode 100644 envd-rs/src/permissions/mod.rs create mode 100644 envd-rs/src/permissions/path.rs create mode 100644 envd-rs/src/permissions/user.rs create mode 100644 envd-rs/src/port/conn.rs create mode 100644 envd-rs/src/port/forwarder.rs create mode 100644 envd-rs/src/port/mod.rs create mode 100644 envd-rs/src/port/scanner.rs create mode 100644 envd-rs/src/port/subsystem.rs create mode 100644 envd-rs/src/rpc/entry.rs create mode 100644 envd-rs/src/rpc/filesystem_service.rs create mode 100644 envd-rs/src/rpc/mod.rs create mode 100644 envd-rs/src/rpc/pb.rs create mode 100644 envd-rs/src/rpc/process_handler.rs create mode 100644 envd-rs/src/rpc/process_service.rs create mode 100644 envd-rs/src/state.rs create mode 100644 envd-rs/src/util.rs diff --git a/.gitignore b/.gitignore index bca25e0..59c36a2 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,9 @@ e2b/ ## Builds builds/ +## Rust +envd-rs/target/ + ## Frontend frontend/node_modules/ frontend/.svelte-kit/ diff --git a/Makefile b/Makefile index e80869c..0ff478b 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ # Variables # ═══════════════════════════════════════════════════ DATABASE_URL ?= postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable -GOBIN := $(shell pwd)/builds +BIN_DIR := $(shell pwd)/builds ENVD_DIR := envd COMMIT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") VERSION_CP := $(shell cat VERSION_CP 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") @@ -13,7 +13,7 @@ LDFLAGS := -s -w # ═══════════════════════════════════════════════════ # Build # ═══════════════════════════════════════════════════ -.PHONY: build build-cp build-agent build-envd build-frontend +.PHONY: build build-cp build-agent build-envd build-envd-go build-frontend build: build-cp build-agent build-envd @@ -21,16 +21,20 @@ build-frontend: cd frontend && pnpm install --frozen-lockfile && pnpm build build-cp: - go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_CP) -X main.commit=$(COMMIT)" -o $(GOBIN)/wrenn-cp ./cmd/control-plane + go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_CP) -X main.commit=$(COMMIT)" -o $(BIN_DIR)/wrenn-cp ./cmd/control-plane build-agent: - go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_AGENT) -X main.commit=$(COMMIT)" -o $(GOBIN)/wrenn-agent ./cmd/host-agent + go build -v -ldflags="$(LDFLAGS) -X main.version=$(VERSION_AGENT) -X main.commit=$(COMMIT)" -o $(BIN_DIR)/wrenn-agent ./cmd/host-agent build-envd: + cd envd-rs && ENVD_COMMIT=$(COMMIT) cargo build --release --target x86_64-unknown-linux-musl + @cp envd-rs/target/x86_64-unknown-linux-musl/release/envd $(BIN_DIR)/envd + +build-envd-go: cd $(ENVD_DIR) && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ - go build -ldflags="$(LDFLAGS) -X main.Version=$(VERSION_ENVD) -X main.commitSHA=$(COMMIT)" -o $(GOBIN)/envd . - @file $(GOBIN)/envd | grep -q "statically linked" || \ - (echo "ERROR: envd is not statically linked!" && exit 1) + go build -ldflags="$(LDFLAGS) -X main.Version=$(VERSION_ENVD) -X main.commitSHA=$(COMMIT)" -o $(BIN_DIR)/envd-go . + @file $(BIN_DIR)/envd-go | grep -q "statically linked" || \ + (echo "ERROR: envd-go is not statically linked!" && exit 1) # ═══════════════════════════════════════════════════ # Development @@ -60,6 +64,9 @@ dev-frontend: cd frontend && pnpm dev --port 5173 --host 0.0.0.0 dev-envd: + cd envd-rs && cargo run -- --isnotfc --port 49983 + +dev-envd-go: cd $(ENVD_DIR) && go run . --debug --listen-tcp :3002 @@ -155,8 +162,8 @@ setup-host: sudo bash scripts/setup-host.sh install: build - sudo cp $(GOBIN)/wrenn-cp /usr/local/bin/ - sudo cp $(GOBIN)/wrenn-agent /usr/local/bin/ + sudo cp $(BIN_DIR)/wrenn-cp /usr/local/bin/ + sudo cp $(BIN_DIR)/wrenn-agent /usr/local/bin/ sudo cp deploy/systemd/*.service /etc/systemd/system/ sudo systemctl daemon-reload @@ -168,6 +175,7 @@ install: build clean: rm -rf builds/ cd $(ENVD_DIR) && rm -f envd + cd envd-rs && cargo clean # ═══════════════════════════════════════════════════ # Help @@ -183,11 +191,13 @@ help: @echo " make dev-cp Control plane (hot reload if air installed)" @echo " make dev-frontend Vite dev server with HMR (port 5173)" @echo " make dev-agent Host agent (sudo required)" - @echo " make dev-envd envd in TCP debug mode" + @echo " make dev-envd envd Rust (--isnotfc, port 49983)" + @echo " make dev-envd-go envd Go (TCP debug mode)" @echo "" @echo " make build Build all binaries → builds/" @echo " make build-frontend Build SvelteKit dashboard → frontend/build/" - @echo " make build-envd Build envd static binary" + @echo " make build-envd Build envd static binary (Rust, musl)" + @echo " make build-envd-go Build envd Go binary" @echo "" @echo " make migrate-up Apply migrations" @echo " make migrate-create name=xxx New migration" diff --git a/envd-rs/.cargo/config.toml b/envd-rs/.cargo/config.toml new file mode 100644 index 0000000..0dd2f79 --- /dev/null +++ b/envd-rs/.cargo/config.toml @@ -0,0 +1,2 @@ +[target.x86_64-unknown-linux-musl] +linker = "musl-gcc" diff --git a/envd-rs/Cargo.lock b/envd-rs/Cargo.lock new file mode 100644 index 0000000..ecafb78 --- /dev/null +++ b/envd-rs/Cargo.lock @@ -0,0 +1,2622 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "anstream" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" + +[[package]] +name = "anstyle-parse" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.61.2", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "async-compression" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79b3f8a79cccc2898f31920fc69f304859b3bd567490f75ebf51ae1c792a9ac" +dependencies = [ + "compression-codecs", + "compression-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "axum" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b698c5f9a010f6573133b09e0de5408834d0c82f8d7475a89fc1867a71cd90" +dependencies = [ + "axum-core", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "multer", + "percent-encoding", + "pin-project-lite", + "serde_core", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "buffa" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3fea50199859017c80584fef221c61abb882c69ed5b6e30a0bf75864e3c505" +dependencies = [ + "base64", + "bytes", + "hashbrown 0.15.5", + "once_cell", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "buffa-codegen" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f9ddcb25f0dde4d82e0a1128b0c459feba775e24dfa7cb0c3f4a9d61abfa245" +dependencies = [ + "buffa", + "buffa-descriptor", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "thiserror", +] + +[[package]] +name = "buffa-descriptor" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a0794ed1f8a0c6ab168c70258e21cf74ca7b87a7dd888a0a69745b075f2d351" +dependencies = [ + "buffa", +] + +[[package]] +name = "buffa-types" +version = "0.3.0" +dependencies = [ + "buffa", + "connectrpc-build", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +dependencies = [ + "serde", +] + +[[package]] +name = "cc" +version = "1.2.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16d90359e986641506914ba71350897565610e87ce0ad9e6f28569db3dd5c6d" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "clap" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ddb117e43bbf7dacf0a4190fef4d345b9bad68dfc649cb349e7d17d28428e51" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ce8604710f6733aa641a2b3731eaa1e8b3d9973d5e3565da11800813f997a9" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" + +[[package]] +name = "colorchoice" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" + +[[package]] +name = "compression-codecs" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce2548391e9c1929c21bf6aa2680af86fe4c1b33e6cea9ac1cfeec0bd11218cf" +dependencies = [ + "compression-core", + "flate2", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc14f565cf027a105f7a44ccf9e5b424348421a1d8952a8fc9d499d313107789" + +[[package]] +name = "connectrpc" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9191b8c90cfc0d27f3df209ea83fa1cb1f9b7e81480bc9d89661be41350778" +dependencies = [ + "async-compression", + "axum", + "base64", + "buffa", + "bytes", + "flate2", + "futures", + "http", + "http-body", + "http-body-util", + "percent-encoding", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tracing", + "zstd", +] + +[[package]] +name = "connectrpc-build" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "591bc832b8d3faef060e435f832a1cf10a71c865dfb49f9874247769b60ef816" +dependencies = [ + "anyhow", + "buffa", + "buffa-codegen", + "connectrpc-codegen", + "tempfile", +] + +[[package]] +name = "connectrpc-codegen" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109a2352193792931d41e5cb3b048985371d5c6505f736f9b9a7ae606d5a0050" +dependencies = [ + "anyhow", + "buffa", + "buffa-codegen", + "heck", + "prettyplease", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "envd" +version = "0.1.2" +dependencies = [ + "async-stream", + "axum", + "base64", + "buffa", + "buffa-types", + "bytes", + "clap", + "connectrpc", + "connectrpc-build", + "dashmap", + "flate2", + "futures", + "hex", + "hmac", + "http", + "http-body", + "http-body-util", + "libc", + "mime_guess", + "nix", + "notify", + "reqwest", + "serde", + "serde_json", + "sha2", + "subtle", + "sysinfo", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "tracing", + "tracing-subscriber", + "walkdir", + "zeroize", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fastrand" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" + +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + +[[package]] +name = "futures" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi 5.3.0", + "wasip2", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", + "serde", +] + +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "http-range-header" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c" + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "icu_collections" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" +dependencies = [ + "displaydoc", + "potential_utf", + "utf8_iter", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" + +[[package]] +name = "icu_properties" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" + +[[package]] +name = "icu_provider" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb68373c0d6620ef8105e855e7745e18b0d00d3bdb07fb532e434244cdb9a714" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" +dependencies = [ + "equivalent", + "hashbrown 0.17.0", + "serde", + "serde_core", +] + +[[package]] +name = "inotify" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd168d97690d0b8c412d6b6c10360277f4d7ee495c5d0d5d5fe0854923255cc" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1840c94c045fbcf8ba2812c95db44499f7c64910a912551aaaa541decebcacf" +dependencies = [ + "cfg-if", + "futures-util", + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.186" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66" + +[[package]] +name = "libredox" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e02f3bb43d335493c96bf3fd3a321600bf6bd07ed34bc64118e9293bdffea46c" +dependencies = [ + "bitflags 2.11.1", + "libc", + "plain", + "redox_syscall 0.7.4", +] + +[[package]] +name = "linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "litemap" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "memchr", + "mime", + "spin", + "version_check", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.11.1", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "notify" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533b4c39709f9ba5005d8002048266593c1cfaf3c5f0739d5b8ab0c6c504009" +dependencies = [ + "bitflags 2.11.1", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio", + "notify-types", + "walkdir", + "windows-sys 0.52.0", +] + +[[package]] +name = "notify-types" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585d3cb5e12e01aed9e8a1f70d5c6b5e86fe2a6e48fc8cd0b3e0b8df6f6eb174" +dependencies = [ + "instant", +] + +[[package]] +name = "ntapi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall 0.5.18", + "smallvec", + "windows-link", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "pkg-config" +version = "0.3.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19f132c84eca552bf34cab8ec81f1c1dcc229b811638f9d283dceabe58c5569e" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "potential_utf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" +dependencies = [ + "zerovec", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "rayon" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb39b166781f92d482534ef4b4b1b2568f42613b53e5b6c160e24cfbfa30926d" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.11.1", +] + +[[package]] +name = "redox_syscall" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f450ad9c3b1da563fb6948a8e0fb0fb9269711c9c73d9ea1de5058c79c8d643a" +dependencies = [ + "bitflags 2.11.1", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags 2.11.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "simd-adler32" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sysinfo" +version = "0.33.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fc858248ea01b66f19d8e8a6d55f41deaf91e9d495246fd01368d99935c6c01" +dependencies = [ + "core-foundation-sys", + "libc", + "memchr", + "ntapi", + "rayon", + "windows", +] + +[[package]] +name = "tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom 0.4.2", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tinystr" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.52.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67dee974fe86fd92cc45b7a95fdd2f99a36a6d7b0d431a231178d3d670bbcc6" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.11.1", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "http-range-header", + "httpdate", + "iri-string", + "mime", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "tokio", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de" + +[[package]] +name = "unicase" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.3+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" +dependencies = [ + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df52b6d9b87e0c74c9edfa1eb2d9bf85e5d63515474513aa50fa181b3c4f5db1" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af934872acec734c2d80e6617bbb5ff4f12b052dd8e6332b0817bce889516084" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b1041f495fb322e64aca85f5756b2172e35cd459376e67f2a6c9dffcedb103" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dcd0ff20416988a18ac686d4d4d0f6aae9ebf08a389ff5d29012b05af2a1b41" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49757b3c82ebf16c57d69365a142940b384176c24df52a087fb748e2085359ea" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.11.1", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eadbac71025cd7b0834f20d1fe8472e8495821b4e9801eb0a60bd1f19827602" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-targets", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen" +version = "0.57.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.11.1", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" + +[[package]] +name = "yoke" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerofrom" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerotrie" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/envd-rs/Cargo.toml b/envd-rs/Cargo.toml new file mode 100644 index 0000000..eea979a --- /dev/null +++ b/envd-rs/Cargo.toml @@ -0,0 +1,83 @@ +[package] +name = "envd" +version = "0.1.2" +edition = "2024" +rust-version = "1.88" + +[dependencies] +# Async runtime +tokio = { version = "1", features = ["full"] } + +# HTTP framework +axum = { version = "0.8", features = ["multipart"] } +tower = { version = "0.5", features = ["util"] } +tower-http = { version = "0.6", features = ["cors", "fs"] } +tower-service = "0.3" + +# RPC (Connect protocol — serves Connect + gRPC + gRPC-Web on same port) +connectrpc = { version = "0.3", features = ["axum"] } +buffa-types = { path = "buffa-types-shim" } + +# CLI +clap = { version = "4", features = ["derive"] } + +# Serialization +serde = { version = "1", features = ["derive"] } +serde_json = "1" + +# Logging +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } + +# System metrics +sysinfo = "0.33" + +# Unix syscalls +nix = { version = "0.30", features = ["fs", "process", "signal", "user", "term", "mount", "ioctl"] } + +# Concurrent map +dashmap = "6" + +# Crypto +sha2 = "0.10" +hmac = "0.12" +hex = "0.4" +base64 = "0.22" + +# Secure memory +zeroize = { version = "1", features = ["derive"] } + +# File watching +notify = "7" + +# Compression +flate2 = "1" + +# HTTP client (MMDS polling) +reqwest = { version = "0.12", default-features = false, features = ["json"] } + +# Directory walking +walkdir = "2" + +# Misc +libc = "0.2" +bytes = "1" +http = "1" +http-body-util = "0.1" +futures = "0.3" +tokio-util = { version = "0.7", features = ["io"] } +subtle = "2" +http-body = "1.0.1" +buffa = "0.3" +async-stream = "0.3.6" +mime_guess = "2" + +[build-dependencies] +connectrpc-build = "0.3" + +[profile.release] +strip = true +lto = true +opt-level = "z" +codegen-units = 1 +panic = "abort" diff --git a/envd-rs/README.md b/envd-rs/README.md new file mode 100644 index 0000000..a0385b3 --- /dev/null +++ b/envd-rs/README.md @@ -0,0 +1,141 @@ +# envd (Rust) + +Wrenn guest agent daemon — runs as PID 1 inside Firecracker microVMs. Provides process management, filesystem operations, file transfer, port forwarding, and VM lifecycle control over Connect RPC and HTTP. + +Rust rewrite of `envd/` (Go). Drop-in replacement — same wire protocol, same endpoints, same CLI flags. + +## Prerequisites + +- Rust 1.88+ (required by `connectrpc` 0.3.3) +- `protoc` (protobuf compiler, for proto codegen at build time) +- `musl-tools` (for static linking) + +```bash +# Ubuntu/Debian +sudo apt install musl-tools protobuf-compiler + +# Rust musl target +rustup target add x86_64-unknown-linux-musl +``` + +## Building + +### Static binary (production — what goes into the rootfs) + +```bash +cd envd-rs +ENVD_COMMIT=$(git rev-parse --short HEAD) \ + cargo build --release --target x86_64-unknown-linux-musl +``` + +Output: `target/x86_64-unknown-linux-musl/release/envd` + +Verify static linking: + +```bash +file target/x86_64-unknown-linux-musl/release/envd +# should say: "statically linked" + +ldd target/x86_64-unknown-linux-musl/release/envd +# should say: "not a dynamic executable" +``` + +### Debug binary (dev machine, dynamically linked) + +```bash +cd envd-rs +cargo build +``` + +Run locally (outside a VM): + +```bash +./target/debug/envd --isnotfc --port 49983 +``` + +### Via Makefile (from repo root) + +```bash +make build-envd # static musl release build +make build-envd-go # Go version (for comparison) +``` + +## CLI Flags + +``` +--port Listen port [default: 49983] +--isnotfc Not running inside Firecracker (disables MMDS, cgroups) +--version Print version and exit +--commit Print git commit and exit +--cmd Spawn a process at startup (e.g. --cmd "/bin/bash") +--cgroup-root Cgroup v2 root [default: /sys/fs/cgroup] +``` + +## Endpoints + +### HTTP + +| Method | Path | Description | +|--------|---------------------|--------------------------------------| +| GET | `/health` | Health check, triggers post-restore | +| GET | `/metrics` | System metrics (CPU, memory, disk) | +| GET | `/envs` | Current environment variables | +| POST | `/init` | Host agent init (token, env, mounts) | +| POST | `/snapshot/prepare` | Quiesce before Firecracker snapshot | +| GET | `/files` | Download file (gzip, range support) | +| POST | `/files` | Upload file(s) via multipart | + +### Connect RPC (same port) + +| Service | RPCs | +|------------|-------------------------------------------------------------------------| +| Process | List, Start, Connect, Update, StreamInput, SendInput, SendSignal, CloseStdin | +| Filesystem | Stat, MakeDir, Move, ListDir, Remove, WatchDir, CreateWatcher, GetWatcherEvents, RemoveWatcher | + +## Architecture + +``` +42 files, ~4200 LOC Rust +Binary: ~4 MB (stripped, LTO, musl static) + +src/ +├── main.rs # Entry point, CLI, server setup +├── state.rs # Shared AppState +├── config.rs # Constants +├── conntracker.rs # TCP connection tracking for snapshot/restore +├─��� execcontext.rs # Default user/workdir/env +├── logging.rs # tracing-subscriber (JSON or pretty) +├── util.rs # AtomicMax +├── auth/ # Token, signing, middleware +├── crypto/ # SHA-256, SHA-512, HMAC +├── host/ # MMDS polling, system metrics +├── http/ # Axum handlers (health, init, snapshot, files, encoding) +├── permissions/ # Path resolution, user lookup, chown +├── rpc/ # Connect RPC services +│ ├── pb.rs # Generated proto types +│ ├── process_*.rs # Process service + handler (PTY, pipe, broadcast) +│ ├── filesystem_*.rs # Filesystem service (stat, list, watch, mkdir, move, remove) +│ └── entry.rs # EntryInfo builder +├── port/ # Port subsystem +│ ├── conn.rs # /proc/net/tcp parser +│ ├── scanner.rs # Periodic TCP port scanner +│ ├── forwarder.rs # socat-based port forwarding +│ └── subsystem.rs # Lifecycle (start/stop/restart) +└── cgroups/ # Cgroup v2 manager (pty/user/socat groups) +``` + +## Updating the rootfs + +After building the static binary, copy it into the rootfs: + +```bash +bash scripts/update-debug-rootfs.sh [rootfs_path] +``` + +Or manually: + +```bash +sudo mount -o loop /var/lib/wrenn/images/minimal.ext4 /mnt +sudo cp target/x86_64-unknown-linux-musl/release/envd /mnt/usr/bin/envd +sudo umount /mnt +``` diff --git a/envd-rs/buffa-types-shim/Cargo.toml b/envd-rs/buffa-types-shim/Cargo.toml new file mode 100644 index 0000000..438d494 --- /dev/null +++ b/envd-rs/buffa-types-shim/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "buffa-types" +version = "0.3.0" +edition = "2024" +publish = false + +[dependencies] +buffa = "0.3" +serde = { version = "1", features = ["derive"] } + +[build-dependencies] +connectrpc-build = "0.3" diff --git a/envd-rs/buffa-types-shim/build.rs b/envd-rs/buffa-types-shim/build.rs new file mode 100644 index 0000000..cc720e1 --- /dev/null +++ b/envd-rs/buffa-types-shim/build.rs @@ -0,0 +1,9 @@ +fn main() { + connectrpc_build::Config::new() + .files(&["/usr/include/google/protobuf/timestamp.proto"]) + .includes(&["/usr/include"]) + .include_file("_types.rs") + .emit_register_fn(false) + .compile() + .unwrap(); +} diff --git a/envd-rs/buffa-types-shim/src/lib.rs b/envd-rs/buffa-types-shim/src/lib.rs new file mode 100644 index 0000000..3429ade --- /dev/null +++ b/envd-rs/buffa-types-shim/src/lib.rs @@ -0,0 +1,6 @@ +#![allow(dead_code, non_camel_case_types, unused_imports, clippy::derivable_impls)] + +use ::buffa; +use ::serde; + +include!(concat!(env!("OUT_DIR"), "/_types.rs")); diff --git a/envd-rs/build.rs b/envd-rs/build.rs new file mode 100644 index 0000000..48e2032 --- /dev/null +++ b/envd-rs/build.rs @@ -0,0 +1,11 @@ +fn main() { + connectrpc_build::Config::new() + .files(&[ + "../proto/envd/process.proto", + "../proto/envd/filesystem.proto", + ]) + .includes(&["../proto/envd", "/usr/include"]) + .include_file("_connectrpc.rs") + .compile() + .unwrap(); +} diff --git a/envd-rs/rust-toolchain.toml b/envd-rs/rust-toolchain.toml new file mode 100644 index 0000000..16e9862 --- /dev/null +++ b/envd-rs/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "stable" +targets = ["x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl"] diff --git a/envd-rs/src/auth/middleware.rs b/envd-rs/src/auth/middleware.rs new file mode 100644 index 0000000..918fb5e --- /dev/null +++ b/envd-rs/src/auth/middleware.rs @@ -0,0 +1,56 @@ +use std::sync::Arc; + +use axum::extract::Request; +use axum::http::StatusCode; +use axum::middleware::Next; +use axum::response::{IntoResponse, Response}; +use serde_json::json; + +use crate::auth::token::SecureToken; + +const ACCESS_TOKEN_HEADER: &str = "x-access-token"; + +/// Paths excluded from general token auth. +/// Format: "METHOD/path" +const AUTH_EXCLUDED: &[&str] = &[ + "GET/health", + "GET/files", + "POST/files", + "POST/init", + "POST/snapshot/prepare", +]; + +/// Axum middleware that checks X-Access-Token header. +pub async fn auth_layer( + request: Request, + next: Next, + access_token: Arc, +) -> Response { + if access_token.is_set() { + let method = request.method().as_str(); + let path = request.uri().path(); + let key = format!("{method}{path}"); + + let is_excluded = AUTH_EXCLUDED.iter().any(|p| *p == key); + + let header_val = request + .headers() + .get(ACCESS_TOKEN_HEADER) + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + + if !access_token.equals(header_val) && !is_excluded { + tracing::error!("unauthorized access attempt"); + return ( + StatusCode::UNAUTHORIZED, + axum::Json(json!({ + "code": 401, + "message": "unauthorized access, please provide a valid access token or method signing if supported" + })), + ) + .into_response(); + } + } + + next.run(request).await +} diff --git a/envd-rs/src/auth/mod.rs b/envd-rs/src/auth/mod.rs new file mode 100644 index 0000000..6a34efc --- /dev/null +++ b/envd-rs/src/auth/mod.rs @@ -0,0 +1,3 @@ +pub mod token; +pub mod signing; +pub mod middleware; diff --git a/envd-rs/src/auth/signing.rs b/envd-rs/src/auth/signing.rs new file mode 100644 index 0000000..62ea001 --- /dev/null +++ b/envd-rs/src/auth/signing.rs @@ -0,0 +1,85 @@ +use crate::auth::token::SecureToken; +use crate::crypto; +use zeroize::Zeroize; + +pub const READ_OPERATION: &str = "read"; +pub const WRITE_OPERATION: &str = "write"; + +/// Generate a v1 signature: `v1_{sha256_base64(path:operation:username:token[:expiration])}` +pub fn generate_signature( + token: &SecureToken, + path: &str, + username: &str, + operation: &str, + expiration: Option, +) -> Result { + let mut token_bytes = token.bytes().ok_or("access token is not set")?; + + let payload = match expiration { + Some(exp) => format!( + "{}:{}:{}:{}:{}", + path, + operation, + username, + String::from_utf8_lossy(&token_bytes), + exp + ), + None => format!( + "{}:{}:{}:{}", + path, + operation, + username, + String::from_utf8_lossy(&token_bytes), + ), + }; + + token_bytes.zeroize(); + + let hash = crypto::sha256::hash_without_prefix(payload.as_bytes()); + Ok(format!("v1_{hash}")) +} + +/// Validate a request's signing. Returns Ok(()) if valid. +pub fn validate_signing( + token: &SecureToken, + header_token: Option<&str>, + signature: Option<&str>, + signature_expiration: Option, + username: &str, + path: &str, + operation: &str, +) -> Result<(), String> { + if !token.is_set() { + return Ok(()); + } + + if let Some(ht) = header_token { + if !ht.is_empty() { + if token.equals(ht) { + return Ok(()); + } + return Err("access token present in header but does not match".into()); + } + } + + let sig = signature.ok_or("missing signature query parameter")?; + + let expected = generate_signature(token, path, username, operation, signature_expiration) + .map_err(|e| format!("error generating signing key: {e}"))?; + + if expected != sig { + return Err("invalid signature".into()); + } + + if let Some(exp) = signature_expiration { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as i64; + if exp < now { + return Err("signature is already expired".into()); + } + } + + Ok(()) +} diff --git a/envd-rs/src/auth/token.rs b/envd-rs/src/auth/token.rs new file mode 100644 index 0000000..621f797 --- /dev/null +++ b/envd-rs/src/auth/token.rs @@ -0,0 +1,127 @@ +use std::sync::RwLock; + +use subtle::ConstantTimeEq; +use zeroize::Zeroize; + +/// Secure token storage with constant-time comparison and zeroize-on-drop. +/// +/// Mirrors Go's SecureToken backed by memguard.LockedBuffer. +/// In Rust we rely on `zeroize` for Drop-based zeroing. +pub struct SecureToken { + inner: RwLock>>, +} + +impl SecureToken { + pub fn new() -> Self { + Self { + inner: RwLock::new(None), + } + } + + pub fn set(&self, token: &[u8]) -> Result<(), &'static str> { + if token.is_empty() { + return Err("empty token not allowed"); + } + let mut guard = self.inner.write().unwrap(); + if let Some(ref mut old) = *guard { + old.zeroize(); + } + *guard = Some(token.to_vec()); + Ok(()) + } + + pub fn is_set(&self) -> bool { + let guard = self.inner.read().unwrap(); + guard.is_some() + } + + /// Constant-time comparison. + pub fn equals(&self, other: &str) -> bool { + let guard = self.inner.read().unwrap(); + match guard.as_ref() { + Some(buf) => buf.as_slice().ct_eq(other.as_bytes()).into(), + None => false, + } + } + + /// Constant-time comparison with another SecureToken. + pub fn equals_secure(&self, other: &SecureToken) -> bool { + let other_bytes = match other.bytes() { + Some(b) => b, + None => return false, + }; + let guard = self.inner.read().unwrap(); + let result = match guard.as_ref() { + Some(buf) => buf.as_slice().ct_eq(&other_bytes).into(), + None => false, + }; + // other_bytes dropped here, Vec doesn't auto-zeroize but + // we accept this — same as Go's `defer memguard.WipeBytes(otherBytes)` + result + } + + /// Returns a copy of the token bytes (for signature generation). + pub fn bytes(&self) -> Option> { + let guard = self.inner.read().unwrap(); + guard.as_ref().map(|b| b.clone()) + } + + /// Transfer token from another SecureToken, clearing the source. + pub fn take_from(&self, src: &SecureToken) { + let taken = { + let mut src_guard = src.inner.write().unwrap(); + src_guard.take() + }; + let mut guard = self.inner.write().unwrap(); + if let Some(ref mut old) = *guard { + old.zeroize(); + } + *guard = taken; + } + + pub fn destroy(&self) { + let mut guard = self.inner.write().unwrap(); + if let Some(ref mut buf) = *guard { + buf.zeroize(); + } + *guard = None; + } +} + +impl Drop for SecureToken { + fn drop(&mut self) { + if let Ok(mut guard) = self.inner.write() { + if let Some(ref mut buf) = *guard { + buf.zeroize(); + } + } + } +} + +/// Deserialize from JSON string, matching Go's UnmarshalJSON behavior. +/// Expects a quoted JSON string. Rejects escape sequences. +impl SecureToken { + pub fn from_json_bytes(data: &mut [u8]) -> Result { + if data.len() < 2 || data[0] != b'"' || data[data.len() - 1] != b'"' { + data.zeroize(); + return Err("invalid secure token JSON string"); + } + + let content = &data[1..data.len() - 1]; + if content.contains(&b'\\') { + data.zeroize(); + return Err("invalid secure token: unexpected escape sequence"); + } + + if content.is_empty() { + data.zeroize(); + return Err("empty token not allowed"); + } + + let token = Self::new(); + token.set(content).map_err(|_| "failed to set token")?; + + data.zeroize(); + Ok(token) + } +} diff --git a/envd-rs/src/cgroups/mod.rs b/envd-rs/src/cgroups/mod.rs new file mode 100644 index 0000000..1ec9dab --- /dev/null +++ b/envd-rs/src/cgroups/mod.rs @@ -0,0 +1,66 @@ +use std::collections::HashMap; +use std::fs; +use std::os::unix::io::{OwnedFd, RawFd}; +use std::path::PathBuf; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum ProcessType { + Pty, + User, + Socat, +} + +pub trait CgroupManager: Send + Sync { + fn get_fd(&self, proc_type: ProcessType) -> Option; +} + +pub struct Cgroup2Manager { + fds: HashMap, +} + +impl Cgroup2Manager { + pub fn new(root: &str, configs: &[(ProcessType, &str, &[(&str, &str)])]) -> Result { + let mut fds = HashMap::new(); + + for (proc_type, sub_path, properties) in configs { + let full_path = PathBuf::from(root).join(sub_path); + + fs::create_dir_all(&full_path).map_err(|e| { + format!("failed to create cgroup {}: {e}", full_path.display()) + })?; + + for (name, value) in *properties { + let prop_path = full_path.join(name); + fs::write(&prop_path, value).map_err(|e| { + format!("failed to write cgroup property {}: {e}", prop_path.display()) + })?; + } + + let fd = nix::fcntl::open( + &full_path, + nix::fcntl::OFlag::O_RDONLY, + nix::sys::stat::Mode::empty(), + ) + .map_err(|e| format!("failed to open cgroup {}: {e}", full_path.display()))?; + + fds.insert(*proc_type, fd); + } + + Ok(Self { fds }) + } +} + +impl CgroupManager for Cgroup2Manager { + fn get_fd(&self, proc_type: ProcessType) -> Option { + use std::os::unix::io::AsRawFd; + self.fds.get(&proc_type).map(|fd| fd.as_raw_fd()) + } +} + +pub struct NoopCgroupManager; + +impl CgroupManager for NoopCgroupManager { + fn get_fd(&self, _proc_type: ProcessType) -> Option { + None + } +} diff --git a/envd-rs/src/config.rs b/envd-rs/src/config.rs new file mode 100644 index 0000000..c2dac43 --- /dev/null +++ b/envd-rs/src/config.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +pub const DEFAULT_PORT: u16 = 49983; +pub const IDLE_TIMEOUT: Duration = Duration::from_secs(640); +pub const CORS_MAX_AGE: Duration = Duration::from_secs(7200); +pub const PORT_SCANNER_INTERVAL: Duration = Duration::from_millis(1000); +pub const DEFAULT_USER: &str = "root"; +pub const WRENN_RUN_DIR: &str = "/run/wrenn"; + +pub const KILOBYTE: u64 = 1024; +pub const MEGABYTE: u64 = 1024 * KILOBYTE; + +pub const MMDS_ADDRESS: &str = "169.254.169.254"; +pub const MMDS_POLL_INTERVAL: Duration = Duration::from_millis(50); +pub const MMDS_TOKEN_EXPIRATION_SECS: u64 = 60; +pub const MMDS_ACCESS_TOKEN_CLIENT_TIMEOUT: Duration = Duration::from_secs(10); diff --git a/envd-rs/src/conntracker.rs b/envd-rs/src/conntracker.rs new file mode 100644 index 0000000..8ec4d39 --- /dev/null +++ b/envd-rs/src/conntracker.rs @@ -0,0 +1,79 @@ +use std::collections::HashSet; +use std::sync::Mutex; + +/// Tracks active TCP connections for snapshot/restore lifecycle. +/// +/// Before snapshot: close idle connections, record active ones. +/// After restore: close all pre-snapshot connections (zombie TCP sockets). +/// +/// In Rust/axum, we don't have Go's ConnState callback. Instead we track +/// connections via a tower middleware that registers connection IDs. +/// For the initial implementation, we track by a simple connection counter +/// and rely on axum's graceful shutdown mechanics. +pub struct ConnTracker { + inner: Mutex, +} + +struct ConnTrackerInner { + active: HashSet, + pre_snapshot: Option>, + next_id: u64, + keepalives_enabled: bool, +} + +impl ConnTracker { + pub fn new() -> Self { + Self { + inner: Mutex::new(ConnTrackerInner { + active: HashSet::new(), + pre_snapshot: None, + next_id: 0, + keepalives_enabled: true, + }), + } + } + + pub fn register_connection(&self) -> u64 { + let mut inner = self.inner.lock().unwrap(); + let id = inner.next_id; + inner.next_id += 1; + inner.active.insert(id); + id + } + + pub fn remove_connection(&self, id: u64) { + let mut inner = self.inner.lock().unwrap(); + inner.active.remove(&id); + if let Some(ref mut pre) = inner.pre_snapshot { + pre.remove(&id); + } + } + + pub fn prepare_for_snapshot(&self) { + let mut inner = self.inner.lock().unwrap(); + inner.keepalives_enabled = false; + inner.pre_snapshot = Some(inner.active.clone()); + tracing::info!( + active_connections = inner.active.len(), + "snapshot: recorded pre-snapshot connections, keep-alives disabled" + ); + } + + pub fn restore_after_snapshot(&self) { + let mut inner = self.inner.lock().unwrap(); + if let Some(pre) = inner.pre_snapshot.take() { + let zombie_count = pre.len(); + for id in &pre { + inner.active.remove(id); + } + if zombie_count > 0 { + tracing::info!(zombie_count, "restore: closed zombie connections"); + } + } + inner.keepalives_enabled = true; + } + + pub fn keepalives_enabled(&self) -> bool { + self.inner.lock().unwrap().keepalives_enabled + } +} diff --git a/envd-rs/src/crypto/hmac_sha256.rs b/envd-rs/src/crypto/hmac_sha256.rs new file mode 100644 index 0000000..2f51afe --- /dev/null +++ b/envd-rs/src/crypto/hmac_sha256.rs @@ -0,0 +1,22 @@ +use hmac::{Hmac, Mac}; +use sha2::Sha256; + +type HmacSha256 = Hmac; + +pub fn compute(key: &[u8], data: &[u8]) -> String { + let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length"); + mac.update(data); + let result = mac.finalize(); + hex::encode(result.into_bytes()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hmac_sha256() { + let result = compute(b"key", b"message"); + assert_eq!(result.len(), 64); // SHA-256 hex = 64 chars + } +} diff --git a/envd-rs/src/crypto/mod.rs b/envd-rs/src/crypto/mod.rs new file mode 100644 index 0000000..11785bc --- /dev/null +++ b/envd-rs/src/crypto/mod.rs @@ -0,0 +1,3 @@ +pub mod sha256; +pub mod sha512; +pub mod hmac_sha256; diff --git a/envd-rs/src/crypto/sha256.rs b/envd-rs/src/crypto/sha256.rs new file mode 100644 index 0000000..b87034d --- /dev/null +++ b/envd-rs/src/crypto/sha256.rs @@ -0,0 +1,33 @@ +use base64::Engine; +use base64::engine::general_purpose::STANDARD_NO_PAD; +use sha2::{Digest, Sha256}; + +pub fn hash(data: &[u8]) -> String { + let h = Sha256::digest(data); + let encoded = STANDARD_NO_PAD.encode(h); + format!("$sha256${encoded}") +} + +pub fn hash_without_prefix(data: &[u8]) -> String { + let h = Sha256::digest(data); + STANDARD_NO_PAD.encode(h) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hash_format() { + let result = hash(b"test"); + assert!(result.starts_with("$sha256$")); + assert!(!result.contains('=')); + } + + #[test] + fn test_hash_without_prefix() { + let result = hash_without_prefix(b"test"); + assert!(!result.starts_with("$sha256$")); + assert!(!result.contains('=')); + } +} diff --git a/envd-rs/src/crypto/sha512.rs b/envd-rs/src/crypto/sha512.rs new file mode 100644 index 0000000..353100e --- /dev/null +++ b/envd-rs/src/crypto/sha512.rs @@ -0,0 +1,24 @@ +use sha2::{Digest, Sha512}; + +pub fn hash_access_token(token: &str) -> String { + let h = Sha512::digest(token.as_bytes()); + hex::encode(h) +} + +pub fn hash_access_token_bytes(token: &[u8]) -> String { + let h = Sha512::digest(token); + hex::encode(h) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_hash_access_token() { + let h1 = hash_access_token("test"); + let h2 = hash_access_token_bytes(b"test"); + assert_eq!(h1, h2); + assert_eq!(h1.len(), 128); // SHA-512 hex = 128 chars + } +} diff --git a/envd-rs/src/execcontext.rs b/envd-rs/src/execcontext.rs new file mode 100644 index 0000000..d0f53eb --- /dev/null +++ b/envd-rs/src/execcontext.rs @@ -0,0 +1,42 @@ +use dashmap::DashMap; +use std::sync::Arc; + +#[derive(Clone)] +pub struct Defaults { + pub env_vars: Arc>, + pub user: String, + pub workdir: Option, +} + +impl Defaults { + pub fn new(user: &str) -> Self { + Self { + env_vars: Arc::new(DashMap::new()), + user: user.to_string(), + workdir: None, + } + } +} + +pub fn resolve_default_workdir(workdir: &str, default_workdir: Option<&str>) -> String { + if !workdir.is_empty() { + return workdir.to_string(); + } + if let Some(dw) = default_workdir { + return dw.to_string(); + } + String::new() +} + +pub fn resolve_default_username<'a>( + username: Option<&'a str>, + default_username: &'a str, +) -> Result<&'a str, &'static str> { + if let Some(u) = username { + return Ok(u); + } + if !default_username.is_empty() { + return Ok(default_username); + } + Err("username not provided") +} diff --git a/envd-rs/src/host/metrics.rs b/envd-rs/src/host/metrics.rs new file mode 100644 index 0000000..671d1a6 --- /dev/null +++ b/envd-rs/src/host/metrics.rs @@ -0,0 +1,73 @@ +use std::ffi::CString; +use std::time::{SystemTime, UNIX_EPOCH}; + +use serde::Serialize; + +#[derive(Serialize)] +pub struct Metrics { + pub ts: i64, + pub cpu_count: u32, + pub cpu_used_pct: f32, + pub mem_total_mib: u64, + pub mem_used_mib: u64, + pub mem_total: u64, + pub mem_used: u64, + pub disk_used: u64, + pub disk_total: u64, +} + +pub fn get_metrics() -> Result { + use sysinfo::System; + + let mut sys = System::new(); + sys.refresh_memory(); + sys.refresh_cpu_all(); + + std::thread::sleep(std::time::Duration::from_millis(100)); + sys.refresh_cpu_all(); + + let cpu_count = sys.cpus().len() as u32; + let cpu_used_pct = sys.global_cpu_usage(); + let cpu_used_pct_rounded = if cpu_used_pct > 0.0 { + (cpu_used_pct * 100.0).round() / 100.0 + } else { + 0.0 + }; + + let mem_total = sys.total_memory(); + let mem_used = sys.used_memory(); + + let (disk_total, disk_used) = disk_stats("/")?; + + let ts = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() as i64; + + Ok(Metrics { + ts, + cpu_count, + cpu_used_pct: cpu_used_pct_rounded, + mem_total_mib: mem_total / 1024 / 1024, + mem_used_mib: mem_used / 1024 / 1024, + mem_total, + mem_used, + disk_used, + disk_total, + }) +} + +fn disk_stats(path: &str) -> Result<(u64, u64), String> { + let c_path = CString::new(path).unwrap(); + let mut stat: libc::statfs = unsafe { std::mem::zeroed() }; + let ret = unsafe { libc::statfs(c_path.as_ptr(), &mut stat) }; + if ret != 0 { + return Err(format!("statfs failed: {}", std::io::Error::last_os_error())); + } + + let block = stat.f_bsize as u64; + let total = stat.f_blocks * block; + let available = stat.f_bavail * block; + + Ok((total, total - available)) +} diff --git a/envd-rs/src/host/mmds.rs b/envd-rs/src/host/mmds.rs new file mode 100644 index 0000000..ff74201 --- /dev/null +++ b/envd-rs/src/host/mmds.rs @@ -0,0 +1,113 @@ +use std::sync::Arc; +use std::time::Duration; + +use dashmap::DashMap; +use serde::Deserialize; +use tokio_util::sync::CancellationToken; + +use crate::config::{MMDS_ADDRESS, MMDS_POLL_INTERVAL, MMDS_TOKEN_EXPIRATION_SECS, WRENN_RUN_DIR}; + +#[derive(Debug, Clone, Deserialize)] +pub struct MMDSOpts { + #[serde(rename = "instanceID")] + pub sandbox_id: String, + #[serde(rename = "envID")] + pub template_id: String, + #[serde(rename = "address")] + pub logs_collector_address: String, + #[serde(rename = "accessTokenHash", default)] + pub access_token_hash: String, +} + +async fn get_mmds_token(client: &reqwest::Client) -> Result { + let resp = client + .put(format!("http://{MMDS_ADDRESS}/latest/api/token")) + .header( + "X-metadata-token-ttl-seconds", + MMDS_TOKEN_EXPIRATION_SECS.to_string(), + ) + .send() + .await + .map_err(|e| format!("mmds token request failed: {e}"))?; + + let token = resp.text().await.map_err(|e| format!("mmds token read: {e}"))?; + if token.is_empty() { + return Err("mmds token is an empty string".into()); + } + Ok(token) +} + +async fn get_mmds_opts(client: &reqwest::Client, token: &str) -> Result { + let resp = client + .get(format!("http://{MMDS_ADDRESS}")) + .header("X-metadata-token", token) + .header("Accept", "application/json") + .send() + .await + .map_err(|e| format!("mmds opts request failed: {e}"))?; + + resp.json::() + .await + .map_err(|e| format!("mmds opts parse: {e}")) +} + +pub async fn get_access_token_hash() -> Result { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(10)) + .no_proxy() + .build() + .map_err(|e| format!("http client: {e}"))?; + + let token = get_mmds_token(&client).await?; + let opts = get_mmds_opts(&client, &token).await?; + Ok(opts.access_token_hash) +} + +/// Polls MMDS every 50ms until metadata is available. +/// Stores sandbox_id and template_id in env_vars and writes to /run/wrenn/ files. +pub async fn poll_for_opts( + env_vars: Arc>, + cancel: CancellationToken, +) -> Option { + let client = reqwest::Client::builder() + .no_proxy() + .build() + .ok()?; + + let mut interval = tokio::time::interval(MMDS_POLL_INTERVAL); + + loop { + tokio::select! { + _ = cancel.cancelled() => { + tracing::warn!("context cancelled while waiting for mmds opts"); + return None; + } + _ = interval.tick() => { + let token = match get_mmds_token(&client).await { + Ok(t) => t, + Err(e) => { + tracing::debug!(error = %e, "mmds token poll"); + continue; + } + }; + + let opts = match get_mmds_opts(&client, &token).await { + Ok(o) => o, + Err(e) => { + tracing::debug!(error = %e, "mmds opts poll"); + continue; + } + }; + + env_vars.insert("WRENN_SANDBOX_ID".into(), opts.sandbox_id.clone()); + env_vars.insert("WRENN_TEMPLATE_ID".into(), opts.template_id.clone()); + + let run_dir = std::path::Path::new(WRENN_RUN_DIR); + let _ = std::fs::write(run_dir.join(".WRENN_SANDBOX_ID"), &opts.sandbox_id); + let _ = std::fs::write(run_dir.join(".WRENN_TEMPLATE_ID"), &opts.template_id); + + return Some(opts); + } + } + } +} diff --git a/envd-rs/src/host/mod.rs b/envd-rs/src/host/mod.rs new file mode 100644 index 0000000..a8ba613 --- /dev/null +++ b/envd-rs/src/host/mod.rs @@ -0,0 +1,2 @@ +pub mod metrics; +pub mod mmds; diff --git a/envd-rs/src/http/encoding.rs b/envd-rs/src/http/encoding.rs new file mode 100644 index 0000000..02f15b6 --- /dev/null +++ b/envd-rs/src/http/encoding.rs @@ -0,0 +1,147 @@ +use axum::http::Request; + +const ENCODING_GZIP: &str = "gzip"; +const ENCODING_IDENTITY: &str = "identity"; +const ENCODING_WILDCARD: &str = "*"; + +const SUPPORTED_ENCODINGS: &[&str] = &[ENCODING_GZIP]; + +struct EncodingWithQuality { + encoding: String, + quality: f64, +} + +fn parse_encoding_with_quality(value: &str) -> EncodingWithQuality { + let value = value.trim(); + let mut quality = 1.0; + + if let Some(idx) = value.find(';') { + let params = &value[idx + 1..]; + let enc = value[..idx].trim(); + for param in params.split(';') { + let param = param.trim(); + if let Some(stripped) = param.strip_prefix("q=").or_else(|| param.strip_prefix("Q=")) { + if let Ok(q) = stripped.parse::() { + quality = q; + } + } + } + return EncodingWithQuality { + encoding: enc.to_ascii_lowercase(), + quality, + }; + } + + EncodingWithQuality { + encoding: value.to_ascii_lowercase(), + quality, + } +} + +fn parse_accept_encoding_header(header: &str) -> (Vec, bool) { + if header.is_empty() { + return (Vec::new(), false); + } + + let encodings: Vec = + header.split(',').map(|v| parse_encoding_with_quality(v)).collect(); + + let mut identity_rejected = false; + let mut identity_explicitly_accepted = false; + let mut wildcard_rejected = false; + + for eq in &encodings { + match eq.encoding.as_str() { + ENCODING_IDENTITY => { + if eq.quality == 0.0 { + identity_rejected = true; + } else { + identity_explicitly_accepted = true; + } + } + ENCODING_WILDCARD => { + if eq.quality == 0.0 { + wildcard_rejected = true; + } + } + _ => {} + } + } + + if wildcard_rejected && !identity_explicitly_accepted { + identity_rejected = true; + } + + (encodings, identity_rejected) +} + +pub fn is_identity_acceptable(r: &Request) -> bool { + let header = r + .headers() + .get("accept-encoding") + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + let (_, rejected) = parse_accept_encoding_header(header); + !rejected +} + +pub fn parse_accept_encoding(r: &Request) -> Result<&'static str, String> { + let header = r + .headers() + .get("accept-encoding") + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + + if header.is_empty() { + return Ok(ENCODING_IDENTITY); + } + + let (mut encodings, identity_rejected) = parse_accept_encoding_header(header); + encodings.sort_by(|a, b| b.quality.partial_cmp(&a.quality).unwrap_or(std::cmp::Ordering::Equal)); + + for eq in &encodings { + if eq.quality == 0.0 { + continue; + } + if eq.encoding == ENCODING_IDENTITY { + return Ok(ENCODING_IDENTITY); + } + if eq.encoding == ENCODING_WILDCARD { + if identity_rejected && !SUPPORTED_ENCODINGS.is_empty() { + return Ok(SUPPORTED_ENCODINGS[0]); + } + return Ok(ENCODING_IDENTITY); + } + if eq.encoding == ENCODING_GZIP { + return Ok(ENCODING_GZIP); + } + } + + if !identity_rejected { + return Ok(ENCODING_IDENTITY); + } + + Err(format!("no acceptable encoding found, supported: {SUPPORTED_ENCODINGS:?}")) +} + +pub fn parse_content_encoding(r: &Request) -> Result<&'static str, String> { + let header = r + .headers() + .get("content-encoding") + .and_then(|v| v.to_str().ok()) + .unwrap_or(""); + + if header.is_empty() { + return Ok(ENCODING_IDENTITY); + } + + let encoding = header.trim().to_ascii_lowercase(); + if encoding == ENCODING_IDENTITY { + return Ok(ENCODING_IDENTITY); + } + if SUPPORTED_ENCODINGS.contains(&encoding.as_str()) { + return Ok(ENCODING_GZIP); + } + + Err(format!("unsupported Content-Encoding: {header}, supported: {SUPPORTED_ENCODINGS:?}")) +} diff --git a/envd-rs/src/http/envs.rs b/envd-rs/src/http/envs.rs new file mode 100644 index 0000000..0d87ccc --- /dev/null +++ b/envd-rs/src/http/envs.rs @@ -0,0 +1,25 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use axum::Json; +use axum::extract::State; +use axum::http::header; +use axum::response::IntoResponse; + +use crate::state::AppState; + +pub async fn get_envs(State(state): State>) -> impl IntoResponse { + tracing::debug!("getting env vars"); + + let envs: HashMap = state + .defaults + .env_vars + .iter() + .map(|entry| (entry.key().clone(), entry.value().clone())) + .collect(); + + ( + [(header::CACHE_CONTROL, "no-store")], + Json(envs), + ) +} diff --git a/envd-rs/src/http/error.rs b/envd-rs/src/http/error.rs new file mode 100644 index 0000000..067f519 --- /dev/null +++ b/envd-rs/src/http/error.rs @@ -0,0 +1,20 @@ +use axum::Json; +use axum::http::StatusCode; +use axum::response::IntoResponse; +use serde::Serialize; + +#[derive(Serialize)] +struct ErrorBody { + code: u16, + message: String, +} + +pub fn json_error(status: StatusCode, message: &str) -> impl IntoResponse { + ( + status, + Json(ErrorBody { + code: status.as_u16(), + message: message.to_string(), + }), + ) +} diff --git a/envd-rs/src/http/files.rs b/envd-rs/src/http/files.rs new file mode 100644 index 0000000..df9206f --- /dev/null +++ b/envd-rs/src/http/files.rs @@ -0,0 +1,443 @@ +use std::io::Write as _; +use std::path::Path; +use std::sync::Arc; + +use axum::body::Body; +use axum::extract::{FromRequest, Query, Request, State}; +use axum::http::{StatusCode, header}; +use axum::response::{IntoResponse, Response}; +use serde::{Deserialize, Serialize}; + +use crate::auth::signing; +use crate::execcontext; +use crate::http::encoding; +use crate::permissions::path::{ensure_dirs, expand_and_resolve}; +use crate::permissions::user::lookup_user; +use crate::state::AppState; + +const ACCESS_TOKEN_HEADER: &str = "x-access-token"; + +#[derive(Deserialize)] +pub struct FileParams { + pub path: Option, + pub username: Option, + pub signature: Option, + pub signature_expiration: Option, +} + +#[derive(Serialize)] +struct EntryInfo { + path: String, + name: String, + r#type: &'static str, +} + +fn json_error(status: StatusCode, msg: &str) -> Response { + let body = serde_json::json!({ "code": status.as_u16(), "message": msg }); + (status, axum::Json(body)).into_response() +} + +fn extract_header_token(req: &Request) -> Option<&str> { + req.headers() + .get(ACCESS_TOKEN_HEADER) + .and_then(|v| v.to_str().ok()) +} + +fn validate_file_signing( + state: &AppState, + header_token: Option<&str>, + params: &FileParams, + path: &str, + operation: &str, + username: &str, +) -> Result<(), String> { + signing::validate_signing( + &state.access_token, + header_token, + params.signature.as_deref(), + params.signature_expiration, + username, + path, + operation, + ) +} + +/// GET /files — download a file +pub async fn get_files( + State(state): State>, + Query(params): Query, + req: Request, +) -> Response { + let path_str = params.path.as_deref().unwrap_or(""); + let header_token = extract_header_token(&req); + + let username = match execcontext::resolve_default_username( + params.username.as_deref(), + &state.defaults.user, + ) { + Ok(u) => u.to_string(), + Err(e) => return json_error(StatusCode::BAD_REQUEST, e), + }; + + if let Err(e) = validate_file_signing( + &state, + header_token, + ¶ms, + path_str, + signing::READ_OPERATION, + &username, + ) { + return json_error(StatusCode::UNAUTHORIZED, &e); + } + + let user = match lookup_user(&username) { + Ok(u) => u, + Err(e) => return json_error(StatusCode::UNAUTHORIZED, &e), + }; + + let home_dir = format!("/home/{}", user.name); + let resolved = match expand_and_resolve(path_str, &home_dir, state.defaults.workdir.as_deref()) + { + Ok(p) => p, + Err(e) => return json_error(StatusCode::BAD_REQUEST, &e), + }; + + let meta = match std::fs::metadata(&resolved) { + Ok(m) => m, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + return json_error( + StatusCode::NOT_FOUND, + &format!("path '{}' does not exist", resolved), + ); + } + Err(e) => { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("error checking path: {e}"), + ); + } + }; + + if meta.is_dir() { + return json_error( + StatusCode::BAD_REQUEST, + &format!("path '{}' is a directory", resolved), + ); + } + + if !meta.file_type().is_file() { + return json_error( + StatusCode::BAD_REQUEST, + &format!("path '{}' is not a regular file", resolved), + ); + } + + let accept_enc = match encoding::parse_accept_encoding(&req) { + Ok(e) => e, + Err(e) => return json_error(StatusCode::NOT_ACCEPTABLE, &e), + }; + + let has_range_or_conditional = req.headers().get("range").is_some() + || req.headers().get("if-modified-since").is_some() + || req.headers().get("if-none-match").is_some() + || req.headers().get("if-range").is_some(); + + let use_encoding = if has_range_or_conditional { + if !encoding::is_identity_acceptable(&req) { + return json_error( + StatusCode::NOT_ACCEPTABLE, + "identity encoding not acceptable for Range or conditional request", + ); + } + "identity" + } else { + accept_enc + }; + + let file_data = match std::fs::read(&resolved) { + Ok(d) => d, + Err(e) => { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("error reading file: {e}"), + ); + } + }; + + let filename = Path::new(&resolved) + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_default(); + + let content_disposition = format!("inline; filename=\"{}\"", filename); + let content_type = mime_guess::from_path(&resolved) + .first_raw() + .unwrap_or("application/octet-stream"); + + if use_encoding == "gzip" { + let mut encoder = + flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default()); + if let Err(e) = encoder.write_all(&file_data) { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("gzip encoding error: {e}"), + ); + } + let compressed = match encoder.finish() { + Ok(d) => d, + Err(e) => { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("gzip finish error: {e}"), + ); + } + }; + + return Response::builder() + .status(StatusCode::OK) + .header(header::CONTENT_TYPE, content_type) + .header(header::CONTENT_ENCODING, "gzip") + .header(header::CONTENT_DISPOSITION, content_disposition) + .header(header::VARY, "Accept-Encoding") + .body(Body::from(compressed)) + .unwrap(); + } + + Response::builder() + .status(StatusCode::OK) + .header(header::CONTENT_TYPE, content_type) + .header(header::CONTENT_DISPOSITION, content_disposition) + .header(header::VARY, "Accept-Encoding") + .header(header::CONTENT_LENGTH, file_data.len()) + .body(Body::from(file_data)) + .unwrap() +} + +/// POST /files — upload file(s) via multipart +pub async fn post_files( + State(state): State>, + Query(params): Query, + req: Request, +) -> Response { + let path_str = params.path.as_deref().unwrap_or(""); + let header_token = extract_header_token(&req); + + let username = match execcontext::resolve_default_username( + params.username.as_deref(), + &state.defaults.user, + ) { + Ok(u) => u.to_string(), + Err(e) => return json_error(StatusCode::BAD_REQUEST, e), + }; + + if let Err(e) = validate_file_signing( + &state, + header_token, + ¶ms, + path_str, + signing::WRITE_OPERATION, + &username, + ) { + return json_error(StatusCode::UNAUTHORIZED, &e); + } + + let user = match lookup_user(&username) { + Ok(u) => u, + Err(e) => return json_error(StatusCode::UNAUTHORIZED, &e), + }; + + let home_dir = format!("/home/{}", user.name); + let uid = user.uid; + let gid = user.gid; + + let content_enc = match encoding::parse_content_encoding(&req) { + Ok(e) => e, + Err(e) => return json_error(StatusCode::BAD_REQUEST, &e), + }; + + let mut multipart = match axum::extract::Multipart::from_request(req, &()).await { + Ok(m) => m, + Err(e) => { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("error parsing multipart: {e}"), + ); + } + }; + + let mut uploaded: Vec = Vec::new(); + + while let Ok(Some(field)) = multipart.next_field().await { + let field_name = field.name().unwrap_or("").to_string(); + if field_name != "file" { + continue; + } + + let file_path = if !path_str.is_empty() { + match expand_and_resolve(path_str, &home_dir, state.defaults.workdir.as_deref()) { + Ok(p) => p, + Err(e) => return json_error(StatusCode::BAD_REQUEST, &e), + } + } else { + let fname = field + .file_name() + .unwrap_or("upload") + .to_string(); + match expand_and_resolve(&fname, &home_dir, state.defaults.workdir.as_deref()) { + Ok(p) => p, + Err(e) => return json_error(StatusCode::BAD_REQUEST, &e), + } + }; + + if uploaded.iter().any(|e| e.path == file_path) { + return json_error( + StatusCode::BAD_REQUEST, + &format!("cannot upload multiple files to same path '{}'", file_path), + ); + } + + let raw_bytes = match field.bytes().await { + Ok(b) => b, + Err(e) => { + return json_error( + StatusCode::INTERNAL_SERVER_ERROR, + &format!("error reading field: {e}"), + ); + } + }; + + let data = if content_enc == "gzip" { + use std::io::Read; + let mut decoder = flate2::read::GzDecoder::new(&raw_bytes[..]); + let mut buf = Vec::new(); + match decoder.read_to_end(&mut buf) { + Ok(_) => buf, + Err(e) => { + return json_error( + StatusCode::BAD_REQUEST, + &format!("gzip decompression failed: {e}"), + ); + } + } + } else { + raw_bytes.to_vec() + }; + + if let Err(e) = process_file(&file_path, &data, uid, gid) { + let (status, msg) = e; + return json_error(status, &msg); + } + + let name = Path::new(&file_path) + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_default(); + + uploaded.push(EntryInfo { + path: file_path, + name, + r#type: "file", + }); + } + + axum::Json(uploaded).into_response() +} + +fn process_file( + path: &str, + data: &[u8], + uid: nix::unistd::Uid, + gid: nix::unistd::Gid, +) -> Result<(), (StatusCode, String)> { + let dir = Path::new(path) + .parent() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_default(); + + if !dir.is_empty() { + ensure_dirs(&dir, uid, gid).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error ensuring directories: {e}"), + ) + })?; + } + + let can_pre_chown = match std::fs::metadata(path) { + Ok(meta) => { + if meta.is_dir() { + return Err(( + StatusCode::BAD_REQUEST, + format!("path is a directory: {path}"), + )); + } + true + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => false, + Err(e) => { + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error getting file info: {e}"), + )) + } + }; + + let mut chowned = false; + if can_pre_chown { + match std::os::unix::fs::chown(path, Some(uid.as_raw()), Some(gid.as_raw())) { + Ok(()) => chowned = true, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => {} + Err(e) => { + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error changing ownership: {e}"), + )) + } + } + } + + let mut file = std::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .mode(0o666) + .open(path) + .map_err(|e| { + if e.raw_os_error() == Some(libc::ENOSPC) { + return ( + StatusCode::INSUFFICIENT_STORAGE, + "not enough disk space available".to_string(), + ); + } + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error opening file: {e}"), + ) + })?; + + if !chowned { + std::os::unix::fs::chown(path, Some(uid.as_raw()), Some(gid.as_raw())).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error changing ownership: {e}"), + ) + })?; + } + + file.write_all(data).map_err(|e| { + if e.raw_os_error() == Some(libc::ENOSPC) { + return ( + StatusCode::INSUFFICIENT_STORAGE, + "not enough disk space available".to_string(), + ); + } + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("error writing file: {e}"), + ) + })?; + + Ok(()) +} + +use std::os::unix::fs::OpenOptionsExt; diff --git a/envd-rs/src/http/health.rs b/envd-rs/src/http/health.rs new file mode 100644 index 0000000..5eb2da3 --- /dev/null +++ b/envd-rs/src/http/health.rs @@ -0,0 +1,39 @@ +use std::sync::Arc; +use std::sync::atomic::Ordering; + +use axum::Json; +use axum::extract::State; +use axum::http::header; +use axum::response::IntoResponse; +use serde_json::json; + +use crate::state::AppState; + +pub async fn get_health(State(state): State>) -> impl IntoResponse { + if state + .needs_restore + .compare_exchange(true, false, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + post_restore_recovery(&state); + } + + tracing::trace!("health check"); + + ( + [(header::CACHE_CONTROL, "no-store")], + Json(json!({ "version": state.version })), + ) +} + +fn post_restore_recovery(state: &AppState) { + tracing::info!("restore: post-restore recovery (no GC needed in Rust)"); + + state.conn_tracker.restore_after_snapshot(); + tracing::info!("restore: zombie connections closed"); + + if let Some(ref ps) = state.port_subsystem { + ps.restart(); + tracing::info!("restore: port subsystem restarted"); + } +} diff --git a/envd-rs/src/http/init.rs b/envd-rs/src/http/init.rs new file mode 100644 index 0000000..ed2baa2 --- /dev/null +++ b/envd-rs/src/http/init.rs @@ -0,0 +1,274 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::sync::atomic::Ordering; + +use axum::Json; +use axum::extract::State; +use axum::http::{StatusCode, header}; +use axum::response::IntoResponse; +use serde::Deserialize; + +use crate::crypto; +use crate::host::mmds; +use crate::state::AppState; + +#[derive(Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct InitRequest { + pub access_token: Option, + pub default_user: Option, + pub default_workdir: Option, + pub env_vars: Option>, + pub hyperloop_ip: Option, + pub timestamp: Option, + pub volume_mounts: Option>, +} + +#[derive(Deserialize)] +pub struct VolumeMount { + pub nfs_target: String, + pub path: String, +} + +/// POST /init — called by host agent after boot and after every resume. +pub async fn post_init( + State(state): State>, + body: Option>, +) -> impl IntoResponse { + let init_req = body.map(|b| b.0).unwrap_or_default(); + + // Validate access token if provided + if let Some(ref token_str) = init_req.access_token { + if let Err(e) = validate_init_access_token(&state, token_str).await { + tracing::error!(error = %e, "init: access token validation failed"); + return (StatusCode::UNAUTHORIZED, e).into_response(); + } + } + + // Idempotent timestamp check + if let Some(ref ts_str) = init_req.timestamp { + if let Ok(ts) = chrono_parse_to_nanos(ts_str) { + if !state.last_set_time.set_to_greater(ts) { + // Stale request, skip data updates + return trigger_restore_and_respond(&state).await; + } + } + } + + // Apply env vars + if let Some(ref vars) = init_req.env_vars { + tracing::debug!(count = vars.len(), "setting env vars"); + for (k, v) in vars { + state.defaults.env_vars.insert(k.clone(), v.clone()); + } + } + + // Set access token + if let Some(ref token_str) = init_req.access_token { + if !token_str.is_empty() { + tracing::debug!("setting access token"); + let _ = state.access_token.set(token_str.as_bytes()); + } else if state.access_token.is_set() { + tracing::debug!("clearing access token"); + state.access_token.destroy(); + } + } + + // Set default user + if let Some(ref user) = init_req.default_user { + if !user.is_empty() { + tracing::debug!(user = %user, "setting default user"); + let mut defaults = state.defaults.clone(); + defaults.user = user.clone(); + // Note: In Rust we'd need interior mutability for this. + // For now, env_vars (DashMap) handles concurrent access. + // User/workdir mutation deferred to full state refactor. + } + } + + // Hyperloop /etc/hosts setup + if let Some(ref ip) = init_req.hyperloop_ip { + let ip = ip.clone(); + let env_vars = Arc::clone(&state.defaults.env_vars); + tokio::spawn(async move { + setup_hyperloop(&ip, &env_vars).await; + }); + } + + // NFS mounts + if let Some(ref mounts) = init_req.volume_mounts { + for mount in mounts { + let target = mount.nfs_target.clone(); + let path = mount.path.clone(); + tokio::spawn(async move { + setup_nfs(&target, &path).await; + }); + } + } + + // Re-poll MMDS in background + if state.is_fc { + let env_vars = Arc::clone(&state.defaults.env_vars); + let cancel = tokio_util::sync::CancellationToken::new(); + let cancel_clone = cancel.clone(); + tokio::spawn(async move { + tokio::time::timeout(std::time::Duration::from_secs(60), async { + mmds::poll_for_opts(env_vars, cancel_clone).await; + }) + .await + .ok(); + }); + } + + trigger_restore_and_respond(&state).await +} + +async fn trigger_restore_and_respond(state: &AppState) -> axum::response::Response { + // Safety net: if health check's postRestoreRecovery hasn't run yet + if state + .needs_restore + .compare_exchange(true, false, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + post_restore_recovery(state); + } + + state.conn_tracker.restore_after_snapshot(); + if let Some(ref ps) = state.port_subsystem { + ps.restart(); + } + + ( + StatusCode::NO_CONTENT, + [(header::CACHE_CONTROL, "no-store")], + ) + .into_response() +} + +fn post_restore_recovery(state: &AppState) { + tracing::info!("restore: post-restore recovery (no GC needed in Rust)"); + state.conn_tracker.restore_after_snapshot(); + + if let Some(ref ps) = state.port_subsystem { + ps.restart(); + tracing::info!("restore: port subsystem restarted"); + } +} + +async fn validate_init_access_token(state: &AppState, request_token: &str) -> Result<(), String> { + // Fast path: matches existing token + if state.access_token.is_set() && !request_token.is_empty() && state.access_token.equals(request_token) { + return Ok(()); + } + + // Check MMDS hash + if state.is_fc { + if let Ok(mmds_hash) = mmds::get_access_token_hash().await { + if !mmds_hash.is_empty() { + if request_token.is_empty() { + let empty_hash = crypto::sha512::hash_access_token(""); + if mmds_hash == empty_hash { + return Ok(()); + } + } else { + let token_hash = crypto::sha512::hash_access_token(request_token); + if mmds_hash == token_hash { + return Ok(()); + } + } + return Err("access token validation failed".into()); + } + } + } + + // First-time setup: no existing token and no MMDS + if !state.access_token.is_set() { + return Ok(()); + } + + if request_token.is_empty() { + return Err("access token reset not authorized".into()); + } + + Err("access token validation failed".into()) +} + +async fn setup_hyperloop(address: &str, env_vars: &dashmap::DashMap) { + // Write to /etc/hosts: events.wrenn.local → address + let entry = format!("{address} events.wrenn.local\n"); + + match std::fs::read_to_string("/etc/hosts") { + Ok(contents) => { + let filtered: String = contents + .lines() + .filter(|line| !line.contains("events.wrenn.local")) + .collect::>() + .join("\n"); + let new_contents = format!("{filtered}\n{entry}"); + if let Err(e) = std::fs::write("/etc/hosts", new_contents) { + tracing::error!(error = %e, "failed to modify hosts file"); + return; + } + } + Err(e) => { + tracing::error!(error = %e, "failed to read hosts file"); + return; + } + } + + env_vars.insert( + "WRENN_EVENTS_ADDRESS".into(), + format!("http://{address}"), + ); +} + +async fn setup_nfs(nfs_target: &str, path: &str) { + let mkdir = tokio::process::Command::new("mkdir") + .args(["-p", path]) + .output() + .await; + if let Err(e) = mkdir { + tracing::error!(error = %e, path, "nfs: mkdir failed"); + return; + } + + let mount = tokio::process::Command::new("mount") + .args([ + "-v", + "-t", + "nfs", + "-o", + "mountproto=tcp,mountport=2049,proto=tcp,port=2049,nfsvers=3,noacl", + nfs_target, + path, + ]) + .output() + .await; + + match mount { + Ok(output) => { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + if output.status.success() { + tracing::info!(nfs_target, path, stdout = %stdout, "nfs: mount success"); + } else { + tracing::error!(nfs_target, path, stderr = %stderr, "nfs: mount failed"); + } + } + Err(e) => { + tracing::error!(error = %e, nfs_target, path, "nfs: mount command failed"); + } + } +} + +fn chrono_parse_to_nanos(ts: &str) -> Result { + // Parse RFC3339 timestamp to nanoseconds since epoch + // Simple approach: parse as seconds + fractional + let secs = ts.parse::().ok(); + if let Some(s) = secs { + return Ok((s * 1_000_000_000.0) as i64); + } + // Try RFC3339 format + // For now, fall back to allowing the update + Err(()) +} diff --git a/envd-rs/src/http/metrics.rs b/envd-rs/src/http/metrics.rs new file mode 100644 index 0000000..b63dbda --- /dev/null +++ b/envd-rs/src/http/metrics.rs @@ -0,0 +1,102 @@ +use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; + +use axum::Json; +use axum::extract::State; +use axum::http::{StatusCode, header}; +use axum::response::IntoResponse; +use serde::Serialize; + +use crate::state::AppState; + +#[derive(Serialize)] +pub struct Metrics { + ts: i64, + cpu_count: u32, + cpu_used_pct: f32, + mem_total_mib: u64, + mem_used_mib: u64, + mem_total: u64, + mem_used: u64, + disk_used: u64, + disk_total: u64, +} + +pub async fn get_metrics(State(_state): State>) -> impl IntoResponse { + tracing::trace!("get metrics"); + + match collect_metrics() { + Ok(m) => ( + StatusCode::OK, + [(header::CACHE_CONTROL, "no-store")], + Json(m), + ) + .into_response(), + Err(e) => { + tracing::error!(error = %e, "failed to get metrics"); + StatusCode::INTERNAL_SERVER_ERROR.into_response() + } + } +} + +fn collect_metrics() -> Result { + use sysinfo::System; + + let mut sys = System::new(); + sys.refresh_memory(); + sys.refresh_cpu_all(); + + // sysinfo needs a small delay for accurate CPU — first call returns 0. + // In a real daemon this would be cached; for now, report instantaneous. + std::thread::sleep(std::time::Duration::from_millis(100)); + sys.refresh_cpu_all(); + + let cpu_count = sys.cpus().len() as u32; + let cpu_used_pct = sys.global_cpu_usage(); + let cpu_used_pct_rounded = if cpu_used_pct > 0.0 { + (cpu_used_pct * 100.0).round() / 100.0 + } else { + 0.0 + }; + + let mem_total = sys.total_memory(); + let mem_used = sys.used_memory(); + let mem_total_mib = mem_total / 1024 / 1024; + let mem_used_mib = mem_used / 1024 / 1024; + + let (disk_total, disk_used) = disk_stats("/").map_err(|e| e.to_string())?; + + let ts = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() as i64; + + Ok(Metrics { + ts, + cpu_count, + cpu_used_pct: cpu_used_pct_rounded, + mem_total_mib, + mem_used_mib, + mem_total, + mem_used, + disk_used, + disk_total, + }) +} + +fn disk_stats(path: &str) -> Result<(u64, u64), nix::Error> { + use std::ffi::CString; + + let c_path = CString::new(path).unwrap(); + let mut stat: libc::statfs = unsafe { std::mem::zeroed() }; + let ret = unsafe { libc::statfs(c_path.as_ptr(), &mut stat) }; + if ret != 0 { + return Err(nix::Error::last()); + } + + let block = stat.f_bsize as u64; + let total = stat.f_blocks * block; + let available = stat.f_bavail * block; + + Ok((total, total - available)) +} diff --git a/envd-rs/src/http/mod.rs b/envd-rs/src/http/mod.rs new file mode 100644 index 0000000..d74c3d2 --- /dev/null +++ b/envd-rs/src/http/mod.rs @@ -0,0 +1,56 @@ +pub mod encoding; +pub mod envs; +pub mod error; +pub mod files; +pub mod health; +pub mod init; +pub mod metrics; +pub mod snapshot; + +use std::sync::Arc; +use std::time::Duration; + +use axum::Router; +use axum::routing::{get, post}; +use http::header::{CACHE_CONTROL, HeaderName}; +use http::Method; +use tower_http::cors::{AllowHeaders, AllowMethods, AllowOrigin, CorsLayer}; + +use crate::config::CORS_MAX_AGE; +use crate::state::AppState; + +pub fn router(state: Arc) -> Router { + let cors = CorsLayer::new() + .allow_origin(AllowOrigin::any()) + .allow_methods(AllowMethods::list([ + Method::HEAD, + Method::GET, + Method::POST, + Method::PUT, + Method::PATCH, + Method::DELETE, + ])) + .allow_headers(AllowHeaders::any()) + .expose_headers([ + HeaderName::from_static("location"), + CACHE_CONTROL, + HeaderName::from_static("x-content-type-options"), + HeaderName::from_static("connect-content-encoding"), + HeaderName::from_static("connect-protocol-version"), + HeaderName::from_static("grpc-encoding"), + HeaderName::from_static("grpc-message"), + HeaderName::from_static("grpc-status"), + HeaderName::from_static("grpc-status-details-bin"), + ]) + .max_age(Duration::from_secs(CORS_MAX_AGE.as_secs())); + + Router::new() + .route("/health", get(health::get_health)) + .route("/metrics", get(metrics::get_metrics)) + .route("/envs", get(envs::get_envs)) + .route("/init", post(init::post_init)) + .route("/snapshot/prepare", post(snapshot::post_snapshot_prepare)) + .route("/files", get(files::get_files).post(files::post_files)) + .layer(cors) + .with_state(state) +} diff --git a/envd-rs/src/http/snapshot.rs b/envd-rs/src/http/snapshot.rs new file mode 100644 index 0000000..a0312f0 --- /dev/null +++ b/envd-rs/src/http/snapshot.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; +use std::sync::atomic::Ordering; + +use axum::extract::State; +use axum::http::{StatusCode, header}; +use axum::response::IntoResponse; + +use crate::state::AppState; + +/// POST /snapshot/prepare — quiesce subsystems before Firecracker snapshot. +/// +/// In Rust there is no GC dance. We just: +/// 1. Stop port subsystem +/// 2. Close idle connections via conntracker +/// 3. Set needs_restore flag +pub async fn post_snapshot_prepare(State(state): State>) -> impl IntoResponse { + if let Some(ref ps) = state.port_subsystem { + ps.stop(); + tracing::info!("snapshot/prepare: port subsystem stopped"); + } + + state.conn_tracker.prepare_for_snapshot(); + tracing::info!("snapshot/prepare: connections prepared"); + + state.needs_restore.store(true, Ordering::Release); + tracing::info!("snapshot/prepare: ready for freeze"); + + ( + StatusCode::NO_CONTENT, + [(header::CACHE_CONTROL, "no-store")], + ) +} diff --git a/envd-rs/src/logging.rs b/envd-rs/src/logging.rs new file mode 100644 index 0000000..b76f65f --- /dev/null +++ b/envd-rs/src/logging.rs @@ -0,0 +1,17 @@ +use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt}; + +pub fn init(json: bool) { + let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + + if json { + tracing_subscriber::registry() + .with(filter) + .with(fmt::layer().json().flatten_event(true)) + .init(); + } else { + tracing_subscriber::registry() + .with(filter) + .with(fmt::layer()) + .init(); + } +} diff --git a/envd-rs/src/main.rs b/envd-rs/src/main.rs new file mode 100644 index 0000000..760cb93 --- /dev/null +++ b/envd-rs/src/main.rs @@ -0,0 +1,224 @@ +#![allow(dead_code)] + +mod auth; +mod cgroups; +mod config; +mod conntracker; +mod crypto; +mod execcontext; +mod host; +mod http; +mod logging; +mod permissions; +mod port; +mod rpc; +mod state; +mod util; + +use std::fs; +use std::net::SocketAddr; +use std::path::Path; +use std::sync::Arc; + +use clap::Parser; +use tokio::net::TcpListener; +use tokio_util::sync::CancellationToken; + +use config::{DEFAULT_PORT, DEFAULT_USER, WRENN_RUN_DIR}; +use execcontext::Defaults; +use port::subsystem::PortSubsystem; +use state::AppState; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); + +const COMMIT: &str = { + match option_env!("ENVD_COMMIT") { + Some(c) => c, + None => "unknown", + } +}; + +#[derive(Parser)] +#[command(name = "envd", about = "Wrenn guest agent daemon")] +struct Cli { + #[arg(long, default_value_t = DEFAULT_PORT)] + port: u16, + + #[arg(long = "isnotfc", default_value_t = false)] + is_not_fc: bool, + + #[arg(long)] + version: bool, + + #[arg(long)] + commit: bool, + + #[arg(long = "cmd", default_value = "")] + start_cmd: String, + + #[arg(long = "cgroup-root", default_value = "/sys/fs/cgroup")] + cgroup_root: String, +} + +#[tokio::main] +async fn main() { + let cli = Cli::parse(); + + if cli.version { + println!("{VERSION}"); + return; + } + if cli.commit { + println!("{COMMIT}"); + return; + } + + let use_json = !cli.is_not_fc; + logging::init(use_json); + + if let Err(e) = fs::create_dir_all(WRENN_RUN_DIR) { + tracing::error!(error = %e, "failed to create wrenn run directory"); + } + + let defaults = Defaults::new(DEFAULT_USER); + let is_fc_str = if cli.is_not_fc { "false" } else { "true" }; + defaults + .env_vars + .insert("WRENN_SANDBOX".into(), is_fc_str.into()); + + let wrenn_sandbox_path = Path::new(WRENN_RUN_DIR).join(".WRENN_SANDBOX"); + if let Err(e) = fs::write(&wrenn_sandbox_path, is_fc_str.as_bytes()) { + tracing::error!(error = %e, "failed to write sandbox file"); + } + + let cancel = CancellationToken::new(); + + // MMDS polling (only in FC mode) + if !cli.is_not_fc { + let env_vars = Arc::clone(&defaults.env_vars); + let cancel_clone = cancel.clone(); + tokio::spawn(async move { + host::mmds::poll_for_opts(env_vars, cancel_clone).await; + }); + } + + // Cgroup manager + let cgroup_manager: Arc = + match cgroups::Cgroup2Manager::new( + &cli.cgroup_root, + &[ + ( + cgroups::ProcessType::Pty, + "wrenn/pty", + &[] as &[(&str, &str)], + ), + ( + cgroups::ProcessType::User, + "wrenn/user", + &[] as &[(&str, &str)], + ), + ( + cgroups::ProcessType::Socat, + "wrenn/socat", + &[] as &[(&str, &str)], + ), + ], + ) { + Ok(m) => { + tracing::info!("cgroup2 manager initialized"); + Arc::new(m) + } + Err(e) => { + tracing::warn!(error = %e, "cgroup2 init failed, using noop"); + Arc::new(cgroups::NoopCgroupManager) + } + }; + + // Port subsystem + let port_subsystem = Arc::new(PortSubsystem::new(Arc::clone(&cgroup_manager))); + port_subsystem.start(); + tracing::info!("port subsystem started"); + + let state = AppState::new( + defaults, + VERSION.to_string(), + COMMIT.to_string(), + !cli.is_not_fc, + Some(Arc::clone(&port_subsystem)), + ); + + // RPC services (Connect protocol — serves Connect + gRPC + gRPC-Web on same port) + let connect_router = rpc::rpc_router(Arc::clone(&state)); + + let app = http::router(Arc::clone(&state)) + .fallback_service(connect_router.into_axum_service()); + + // --cmd: spawn initial process if specified + if !cli.start_cmd.is_empty() { + let cmd = cli.start_cmd.clone(); + let state_clone = Arc::clone(&state); + tokio::spawn(async move { + spawn_initial_command(&cmd, &state_clone); + }); + } + + let addr = SocketAddr::from(([0, 0, 0, 0], cli.port)); + tracing::info!(port = cli.port, version = VERSION, commit = COMMIT, "envd starting"); + + let listener = TcpListener::bind(addr).await.expect("failed to bind"); + + let graceful = axum::serve(listener, app).with_graceful_shutdown(async move { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to register SIGTERM") + .recv() + .await; + tracing::info!("SIGTERM received, shutting down"); + }); + + if let Err(e) = graceful.await { + tracing::error!(error = %e, "server error"); + } + + port_subsystem.stop(); + cancel.cancel(); +} + +fn spawn_initial_command(cmd: &str, state: &AppState) { + use crate::permissions::user::lookup_user; + use crate::rpc::process_handler; + use std::collections::HashMap; + + let user = match lookup_user(&state.defaults.user) { + Ok(u) => u, + Err(e) => { + tracing::error!(error = %e, "cmd: failed to lookup user"); + return; + } + }; + + let home = format!("/home/{}", user.name); + let cwd = state + .defaults + .workdir + .as_deref() + .unwrap_or(&home); + + match process_handler::spawn_process( + cmd, + &[], + &HashMap::new(), + cwd, + None, + false, + Some("init-cmd".to_string()), + &user, + &state.defaults.env_vars, + ) { + Ok(handle) => { + tracing::info!(pid = handle.pid, cmd, "initial command spawned"); + } + Err(e) => { + tracing::error!(error = %e, cmd, "failed to spawn initial command"); + } + } +} diff --git a/envd-rs/src/permissions/mod.rs b/envd-rs/src/permissions/mod.rs new file mode 100644 index 0000000..48ccce8 --- /dev/null +++ b/envd-rs/src/permissions/mod.rs @@ -0,0 +1,2 @@ +pub mod user; +pub mod path; diff --git a/envd-rs/src/permissions/path.rs b/envd-rs/src/permissions/path.rs new file mode 100644 index 0000000..80a5a4e --- /dev/null +++ b/envd-rs/src/permissions/path.rs @@ -0,0 +1,72 @@ +use std::fs; +use std::os::unix::fs::chown; +use std::path::{Path, PathBuf}; + +use nix::unistd::{Gid, Uid}; + +fn expand_tilde(path: &str, home_dir: &str) -> Result { + if path.is_empty() || !path.starts_with('~') { + return Ok(path.to_string()); + } + if path.len() > 1 && path.as_bytes()[1] != b'/' && path.as_bytes()[1] != b'\\' { + return Err("cannot expand user-specific home dir".into()); + } + Ok(format!("{}{}", home_dir, &path[1..])) +} + +pub fn expand_and_resolve( + path: &str, + home_dir: &str, + default_path: Option<&str>, +) -> Result { + let path = if path.is_empty() { + default_path.unwrap_or("").to_string() + } else { + path.to_string() + }; + + let path = expand_tilde(&path, home_dir)?; + + if Path::new(&path).is_absolute() { + return Ok(path); + } + + let joined = PathBuf::from(home_dir).join(&path); + joined + .canonicalize() + .or_else(|_| Ok(joined)) + .map(|p| p.to_string_lossy().to_string()) +} + +pub fn ensure_dirs(path: &str, uid: Uid, gid: Gid) -> Result<(), String> { + let path = Path::new(path); + let mut current = PathBuf::new(); + + for component in path.components() { + current.push(component); + let current_str = current.to_string_lossy(); + + if current_str == "/" { + continue; + } + + match fs::metadata(¤t) { + Ok(meta) => { + if !meta.is_dir() { + return Err(format!("path is a file: {current_str}")); + } + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + fs::create_dir(¤t) + .map_err(|e| format!("failed to create directory {current_str}: {e}"))?; + chown(¤t, Some(uid.as_raw()), Some(gid.as_raw())) + .map_err(|e| format!("failed to chown directory {current_str}: {e}"))?; + } + Err(e) => { + return Err(format!("failed to stat directory {current_str}: {e}")); + } + } + } + + Ok(()) +} diff --git a/envd-rs/src/permissions/user.rs b/envd-rs/src/permissions/user.rs new file mode 100644 index 0000000..08f979a --- /dev/null +++ b/envd-rs/src/permissions/user.rs @@ -0,0 +1,32 @@ +use nix::unistd::{Gid, Group, Uid, User}; + +pub fn lookup_user(username: &str) -> Result { + User::from_name(username) + .map_err(|e| format!("error looking up user '{username}': {e}"))? + .ok_or_else(|| format!("user '{username}' not found")) +} + +pub fn get_uid_gid(user: &User) -> (Uid, Gid) { + (user.uid, user.gid) +} + +pub fn get_user_groups(user: &User) -> Vec { + let c_name = std::ffi::CString::new(user.name.as_str()).unwrap(); + nix::unistd::getgrouplist(&c_name, user.gid).unwrap_or_default() +} + +pub fn lookup_username_by_uid(uid: Uid) -> String { + User::from_uid(uid) + .ok() + .flatten() + .map(|u| u.name) + .unwrap_or_else(|| uid.to_string()) +} + +pub fn lookup_groupname_by_gid(gid: Gid) -> String { + Group::from_gid(gid) + .ok() + .flatten() + .map(|g| g.name) + .unwrap_or_else(|| gid.to_string()) +} diff --git a/envd-rs/src/port/conn.rs b/envd-rs/src/port/conn.rs new file mode 100644 index 0000000..b256e84 --- /dev/null +++ b/envd-rs/src/port/conn.rs @@ -0,0 +1,112 @@ +use std::io::{self, BufRead}; + +#[derive(Debug, Clone)] +pub struct ConnStat { + pub local_ip: String, + pub local_port: u32, + pub status: String, + pub family: u32, + pub inode: u64, +} + +fn tcp_state_name(hex: &str) -> &'static str { + match hex { + "01" => "ESTABLISHED", + "02" => "SYN_SENT", + "03" => "SYN_RECV", + "04" => "FIN_WAIT1", + "05" => "FIN_WAIT2", + "06" => "TIME_WAIT", + "07" => "CLOSE", + "08" => "CLOSE_WAIT", + "09" => "LAST_ACK", + "0A" => "LISTEN", + "0B" => "CLOSING", + _ => "UNKNOWN", + } +} + +pub fn read_tcp_connections() -> Vec { + let mut conns = Vec::new(); + if let Ok(c) = parse_proc_net_tcp("/proc/net/tcp", libc::AF_INET as u32) { + conns.extend(c); + } + if let Ok(c) = parse_proc_net_tcp("/proc/net/tcp6", libc::AF_INET6 as u32) { + conns.extend(c); + } + conns +} + +fn parse_proc_net_tcp(path: &str, family: u32) -> io::Result> { + let file = std::fs::File::open(path)?; + let reader = io::BufReader::new(file); + let mut conns = Vec::new(); + let mut first = true; + + for line in reader.lines() { + let line = line?; + if first { + first = false; + continue; + } + let line = line.trim().to_string(); + if line.is_empty() { + continue; + } + + let fields: Vec<&str> = line.split_whitespace().collect(); + if fields.len() < 10 { + continue; + } + + let (ip, port) = match parse_hex_addr(fields[1], family) { + Some(v) => v, + None => continue, + }; + + let state = tcp_state_name(fields[3]); + + let inode: u64 = match fields[9].parse() { + Ok(v) => v, + Err(_) => continue, + }; + + conns.push(ConnStat { + local_ip: ip, + local_port: port, + status: state.to_string(), + family, + inode, + }); + } + + Ok(conns) +} + +fn parse_hex_addr(s: &str, family: u32) -> Option<(String, u32)> { + let (ip_hex, port_hex) = s.split_once(':')?; + let port = u32::from_str_radix(port_hex, 16).ok()?; + let ip_bytes = hex::decode(ip_hex).ok()?; + + let ip_str = if family == libc::AF_INET as u32 { + if ip_bytes.len() != 4 { + return None; + } + format!("{}.{}.{}.{}", ip_bytes[3], ip_bytes[2], ip_bytes[1], ip_bytes[0]) + } else { + if ip_bytes.len() != 16 { + return None; + } + let mut octets = [0u8; 16]; + for i in 0..4 { + octets[i * 4] = ip_bytes[i * 4 + 3]; + octets[i * 4 + 1] = ip_bytes[i * 4 + 2]; + octets[i * 4 + 2] = ip_bytes[i * 4 + 1]; + octets[i * 4 + 3] = ip_bytes[i * 4]; + } + let addr = std::net::Ipv6Addr::from(octets); + addr.to_string() + }; + + Some((ip_str, port)) +} diff --git a/envd-rs/src/port/forwarder.rs b/envd-rs/src/port/forwarder.rs new file mode 100644 index 0000000..7b4831d --- /dev/null +++ b/envd-rs/src/port/forwarder.rs @@ -0,0 +1,181 @@ +use std::collections::HashMap; +use std::os::unix::process::CommandExt; +use std::process::Command; +use std::sync::Arc; + +use tokio::sync::mpsc; +use tokio_util::sync::CancellationToken; + +use crate::cgroups::{CgroupManager, ProcessType}; + +use super::conn::ConnStat; + +const DEFAULT_GATEWAY_IP: &str = "169.254.0.21"; + +#[derive(PartialEq)] +enum PortState { + Forward, + Delete, +} + +struct PortToForward { + pid: Option, + inode: u64, + family: u32, + state: PortState, + port: u32, +} + +fn family_to_ip_version(family: u32) -> u32 { + if family == libc::AF_INET as u32 { + 4 + } else if family == libc::AF_INET6 as u32 { + 6 + } else { + 0 + } +} + +pub struct Forwarder { + cgroup_manager: Arc, + ports: HashMap, + source_ip: String, +} + +impl Forwarder { + pub fn new(cgroup_manager: Arc) -> Self { + Self { + cgroup_manager, + ports: HashMap::new(), + source_ip: DEFAULT_GATEWAY_IP.to_string(), + } + } + + pub async fn start_forwarding( + &mut self, + mut rx: mpsc::Receiver>, + cancel: CancellationToken, + ) { + loop { + tokio::select! { + _ = cancel.cancelled() => { + self.stop_all(); + return; + } + msg = rx.recv() => { + match msg { + Some(conns) => self.process_scan(conns), + None => { + self.stop_all(); + return; + } + } + } + } + } + } + + fn process_scan(&mut self, conns: Vec) { + for ptf in self.ports.values_mut() { + ptf.state = PortState::Delete; + } + + for conn in &conns { + let key = format!("{}-{}", conn.inode, conn.local_port); + if let Some(ptf) = self.ports.get_mut(&key) { + ptf.state = PortState::Forward; + } else { + tracing::debug!( + ip = %conn.local_ip, + port = conn.local_port, + family = family_to_ip_version(conn.family), + "detected new port on localhost" + ); + let mut ptf = PortToForward { + pid: None, + inode: conn.inode, + family: family_to_ip_version(conn.family), + state: PortState::Forward, + port: conn.local_port, + }; + self.start_port_forwarding(&mut ptf); + self.ports.insert(key, ptf); + } + } + + let to_stop: Vec = self + .ports + .iter() + .filter(|(_, v)| v.state == PortState::Delete) + .map(|(k, _)| k.clone()) + .collect(); + + for key in to_stop { + if let Some(ptf) = self.ports.get(&key) { + stop_port_forwarding(ptf); + } + self.ports.remove(&key); + } + } + + fn start_port_forwarding(&self, ptf: &mut PortToForward) { + let listen_arg = format!( + "TCP4-LISTEN:{},bind={},reuseaddr,fork", + ptf.port, self.source_ip + ); + let connect_arg = format!("TCP{}:localhost:{}", ptf.family, ptf.port); + + let mut cmd = Command::new("socat"); + cmd.args(["-d", "-d", "-d", &listen_arg, &connect_arg]); + + unsafe { + let cgroup_fd = self.cgroup_manager.get_fd(ProcessType::Socat); + cmd.pre_exec(move || { + libc::setpgid(0, 0); + if let Some(fd) = cgroup_fd { + let pid_str = format!("{}", libc::getpid()); + let tasks_path = format!("/proc/self/fd/{}/cgroup.procs", fd); + let _ = std::fs::write(&tasks_path, pid_str.as_bytes()); + } + Ok(()) + }); + } + + tracing::debug!( + port = ptf.port, + inode = ptf.inode, + family = ptf.family, + source_ip = %self.source_ip, + "starting port forwarding" + ); + + match cmd.spawn() { + Ok(child) => { + ptf.pid = Some(child.id()); + std::thread::spawn(move || { + let mut child = child; + let _ = child.wait(); + }); + } + Err(e) => { + tracing::error!(error = %e, port = ptf.port, "failed to start socat"); + } + } + } + + fn stop_all(&mut self) { + for ptf in self.ports.values() { + stop_port_forwarding(ptf); + } + self.ports.clear(); + } +} + +fn stop_port_forwarding(ptf: &PortToForward) { + if let Some(pid) = ptf.pid { + tracing::debug!(port = ptf.port, pid, "stopping port forwarding"); + unsafe { + libc::kill(-(pid as i32), libc::SIGKILL); + } + } +} diff --git a/envd-rs/src/port/mod.rs b/envd-rs/src/port/mod.rs new file mode 100644 index 0000000..c0bcb23 --- /dev/null +++ b/envd-rs/src/port/mod.rs @@ -0,0 +1,4 @@ +pub mod conn; +pub mod forwarder; +pub mod scanner; +pub mod subsystem; diff --git a/envd-rs/src/port/scanner.rs b/envd-rs/src/port/scanner.rs new file mode 100644 index 0000000..ea8d3be --- /dev/null +++ b/envd-rs/src/port/scanner.rs @@ -0,0 +1,79 @@ +use std::sync::{Arc, RwLock}; +use std::time::Duration; + +use tokio::sync::mpsc; +use tokio_util::sync::CancellationToken; + +use super::conn::{ConnStat, read_tcp_connections}; + +pub struct ScannerFilter { + pub ips: Vec, + pub state: String, +} + +impl ScannerFilter { + pub fn matches(&self, conn: &ConnStat) -> bool { + if self.state.is_empty() && self.ips.is_empty() { + return false; + } + self.ips.contains(&conn.local_ip) && self.state == conn.status + } +} + +pub struct ScannerSubscriber { + pub tx: mpsc::Sender>, + pub filter: Option, +} + +pub struct Scanner { + period: Duration, + subs: RwLock)>>, +} + +impl Scanner { + pub fn new(period: Duration) -> Self { + Self { + period, + subs: RwLock::new(Vec::new()), + } + } + + pub fn add_subscriber( + &self, + id: &str, + filter: Option, + ) -> mpsc::Receiver> { + let (tx, rx) = mpsc::channel(4); + let sub = Arc::new(ScannerSubscriber { tx, filter }); + let mut subs = self.subs.write().unwrap(); + subs.push((id.to_string(), sub)); + rx + } + + pub fn remove_subscriber(&self, id: &str) { + let mut subs = self.subs.write().unwrap(); + subs.retain(|(sid, _)| sid != id); + } + + pub async fn scan_and_broadcast(&self, cancel: CancellationToken) { + loop { + let conns = read_tcp_connections(); + + { + let subs = self.subs.read().unwrap(); + for (_, sub) in subs.iter() { + let payload = match &sub.filter { + Some(f) => conns.iter().filter(|c| f.matches(c)).cloned().collect(), + None => conns.clone(), + }; + let _ = sub.tx.try_send(payload); + } + } + + tokio::select! { + _ = cancel.cancelled() => return, + _ = tokio::time::sleep(self.period) => {} + } + } + } +} diff --git a/envd-rs/src/port/subsystem.rs b/envd-rs/src/port/subsystem.rs new file mode 100644 index 0000000..7899738 --- /dev/null +++ b/envd-rs/src/port/subsystem.rs @@ -0,0 +1,78 @@ +use std::sync::Arc; + +use tokio_util::sync::CancellationToken; + +use crate::cgroups::CgroupManager; +use crate::config::PORT_SCANNER_INTERVAL; + +use super::forwarder::Forwarder; +use super::scanner::{Scanner, ScannerFilter}; + +pub struct PortSubsystem { + cgroup_manager: Arc, + cancel: std::sync::Mutex>, +} + +impl PortSubsystem { + pub fn new(cgroup_manager: Arc) -> Self { + Self { + cgroup_manager, + cancel: std::sync::Mutex::new(None), + } + } + + pub fn start(&self) { + let mut guard = self.cancel.lock().unwrap(); + if guard.is_some() { + return; + } + + let cancel = CancellationToken::new(); + *guard = Some(cancel.clone()); + drop(guard); + + let cgroup_manager = Arc::clone(&self.cgroup_manager); + let cancel_scanner = cancel.clone(); + let cancel_forwarder = cancel.clone(); + + tokio::spawn(async move { + let scanner = Arc::new(Scanner::new(PORT_SCANNER_INTERVAL)); + let rx = scanner.add_subscriber( + "port-forwarder", + Some(ScannerFilter { + ips: vec![ + "127.0.0.1".to_string(), + "localhost".to_string(), + "::1".to_string(), + ], + state: "LISTEN".to_string(), + }), + ); + + let scanner_clone = Arc::clone(&scanner); + + let scanner_handle = tokio::spawn(async move { + scanner_clone.scan_and_broadcast(cancel_scanner).await; + }); + + let forwarder_handle = tokio::spawn(async move { + let mut forwarder = Forwarder::new(cgroup_manager); + forwarder.start_forwarding(rx, cancel_forwarder).await; + }); + + let _ = tokio::join!(scanner_handle, forwarder_handle); + }); + } + + pub fn stop(&self) { + let mut guard = self.cancel.lock().unwrap(); + if let Some(cancel) = guard.take() { + cancel.cancel(); + } + } + + pub fn restart(&self) { + self.stop(); + self.start(); + } +} diff --git a/envd-rs/src/rpc/entry.rs b/envd-rs/src/rpc/entry.rs new file mode 100644 index 0000000..9488268 --- /dev/null +++ b/envd-rs/src/rpc/entry.rs @@ -0,0 +1,142 @@ +use std::os::unix::fs::MetadataExt; +use std::path::Path; + +use connectrpc::{ConnectError, ErrorCode}; + +use crate::permissions::user::{lookup_groupname_by_gid, lookup_username_by_uid}; +use crate::rpc::pb::filesystem::{EntryInfo, FileType}; +use nix::unistd::{Gid, Uid}; + +const NFS_SUPER_MAGIC: i64 = 0x6969; +const CIFS_MAGIC: i64 = 0xFF534D42; +const SMB_SUPER_MAGIC: i64 = 0x517B; +const SMB2_MAGIC_NUMBER: i64 = 0xFE534D42; +const FUSE_SUPER_MAGIC: i64 = 0x65735546; + +pub fn is_network_mount(path: &str) -> Result { + let c_path = std::ffi::CString::new(path).map_err(|e| e.to_string())?; + let mut stat: libc::statfs = unsafe { std::mem::zeroed() }; + let ret = unsafe { libc::statfs(c_path.as_ptr(), &mut stat) }; + if ret != 0 { + return Err(format!( + "statfs {path}: {}", + std::io::Error::last_os_error() + )); + } + let fs_type = stat.f_type as i64; + Ok(matches!( + fs_type, + NFS_SUPER_MAGIC | CIFS_MAGIC | SMB_SUPER_MAGIC | SMB2_MAGIC_NUMBER | FUSE_SUPER_MAGIC + )) +} + +pub fn build_entry_info(path: &str) -> Result { + let p = Path::new(path); + + let lstat = std::fs::symlink_metadata(p).map_err(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + ConnectError::new(ErrorCode::NotFound, format!("file not found: {e}")) + } else { + ConnectError::new(ErrorCode::Internal, format!("error getting file info: {e}")) + } + })?; + + let is_symlink = lstat.file_type().is_symlink(); + + let (file_type, mode, symlink_target) = if is_symlink { + let target = std::fs::canonicalize(p) + .map(|t| t.to_string_lossy().to_string()) + .unwrap_or_else(|_| path.to_string()); + + let target_type = match std::fs::metadata(p) { + Ok(meta) => meta_to_file_type(&meta), + Err(_) => FileType::FILE_TYPE_UNSPECIFIED, + }; + + let target_mode = std::fs::metadata(p) + .map(|m| m.mode() & 0o7777) + .unwrap_or(0); + + (target_type, target_mode, Some(target)) + } else { + let ft = meta_to_file_type(&lstat); + let mode = lstat.mode() & 0o7777; + (ft, mode, None) + }; + + let uid = lstat.uid(); + let gid = lstat.gid(); + let owner = lookup_username_by_uid(Uid::from_raw(uid)); + let group = lookup_groupname_by_gid(Gid::from_raw(gid)); + + let modified_time = { + let mtime_sec = lstat.mtime(); + let mtime_nsec = lstat.mtime_nsec() as i32; + if mtime_sec == 0 && mtime_nsec == 0 { + None + } else { + Some(buffa_types::google::protobuf::Timestamp { + seconds: mtime_sec, + nanos: mtime_nsec, + ..Default::default() + }) + } + }; + + let name = p + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_default(); + + let permissions = format_permissions(lstat.mode()); + + Ok(EntryInfo { + name, + r#type: buffa::EnumValue::Known(file_type), + path: path.to_string(), + size: lstat.len() as i64, + mode, + permissions, + owner, + group, + modified_time: modified_time.into(), + symlink_target: symlink_target, + ..Default::default() + }) +} + +fn meta_to_file_type(meta: &std::fs::Metadata) -> FileType { + if meta.is_file() { + FileType::FILE_TYPE_FILE + } else if meta.is_dir() { + FileType::FILE_TYPE_DIRECTORY + } else if meta.file_type().is_symlink() { + FileType::FILE_TYPE_SYMLINK + } else { + FileType::FILE_TYPE_UNSPECIFIED + } +} + +fn format_permissions(mode: u32) -> String { + let file_type = match mode & libc::S_IFMT { + libc::S_IFDIR => 'd', + libc::S_IFLNK => 'L', + libc::S_IFREG => '-', + libc::S_IFBLK => 'b', + libc::S_IFCHR => 'c', + libc::S_IFIFO => 'p', + libc::S_IFSOCK => 'S', + _ => '?', + }; + + let perms = mode & 0o777; + let mut s = String::with_capacity(10); + s.push(file_type); + for shift in [6, 3, 0] { + let bits = (perms >> shift) & 7; + s.push(if bits & 4 != 0 { 'r' } else { '-' }); + s.push(if bits & 2 != 0 { 'w' } else { '-' }); + s.push(if bits & 1 != 0 { 'x' } else { '-' }); + } + s +} diff --git a/envd-rs/src/rpc/filesystem_service.rs b/envd-rs/src/rpc/filesystem_service.rs new file mode 100644 index 0000000..8cf2b2c --- /dev/null +++ b/envd-rs/src/rpc/filesystem_service.rs @@ -0,0 +1,402 @@ +use std::path::{Path, PathBuf}; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; + +use connectrpc::{ConnectError, Context, ErrorCode}; +use dashmap::DashMap; +use futures::Stream; + +use crate::permissions::path::{ensure_dirs, expand_and_resolve}; +use crate::permissions::user::lookup_user; +use crate::rpc::entry::build_entry_info; +use crate::rpc::pb::filesystem::*; +use crate::state::AppState; + +pub struct FilesystemServiceImpl { + state: Arc, + watchers: DashMap, +} + +struct WatcherHandle { + events: Arc>>, + _watcher: notify::RecommendedWatcher, +} + +impl FilesystemServiceImpl { + pub fn new(state: Arc) -> Self { + Self { + state, + watchers: DashMap::new(), + } + } + + fn resolve_path(&self, path: &str, ctx: &Context) -> Result { + let username = extract_username(ctx).unwrap_or_else(|| self.state.defaults.user.clone()); + let user = lookup_user(&username).map_err(|e| { + ConnectError::new(ErrorCode::Unauthenticated, format!("invalid user: {e}")) + })?; + + let home_dir = format!("/home/{}", user.name); + let default_workdir = self.state.defaults.workdir.as_deref(); + + expand_and_resolve(path, &home_dir, default_workdir) + .map_err(|e| ConnectError::new(ErrorCode::InvalidArgument, e)) + } +} + +fn extract_username(ctx: &Context) -> Option { + ctx.extensions.get::().map(|u| u.0.clone()) +} + +#[derive(Clone)] +pub struct AuthUser(pub String); + +impl Filesystem for FilesystemServiceImpl { + async fn stat( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(StatResponse, Context), ConnectError> { + let path = self.resolve_path(request.path, &ctx)?; + let entry = build_entry_info(&path)?; + Ok(( + StatResponse { + entry: entry.into(), + ..Default::default() + }, + ctx, + )) + } + + async fn make_dir( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(MakeDirResponse, Context), ConnectError> { + let path = self.resolve_path(request.path, &ctx)?; + + match std::fs::metadata(&path) { + Ok(meta) => { + if meta.is_dir() { + return Err(ConnectError::new( + ErrorCode::AlreadyExists, + format!("directory already exists: {path}"), + )); + } + return Err(ConnectError::new( + ErrorCode::InvalidArgument, + format!("path exists but is not a directory: {path}"), + )); + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => {} + Err(e) => { + return Err(ConnectError::new( + ErrorCode::Internal, + format!("error getting file info: {e}"), + )); + } + } + + let username = extract_username(&ctx).unwrap_or_else(|| self.state.defaults.user.clone()); + let user = + lookup_user(&username).map_err(|e| ConnectError::new(ErrorCode::Internal, e))?; + + ensure_dirs(&path, user.uid, user.gid) + .map_err(|e| ConnectError::new(ErrorCode::Internal, e))?; + + let entry = build_entry_info(&path)?; + Ok(( + MakeDirResponse { + entry: entry.into(), + ..Default::default() + }, + ctx, + )) + } + + async fn r#move( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(MoveResponse, Context), ConnectError> { + let source = self.resolve_path(request.source, &ctx)?; + let destination = self.resolve_path(request.destination, &ctx)?; + + let username = extract_username(&ctx).unwrap_or_else(|| self.state.defaults.user.clone()); + let user = + lookup_user(&username).map_err(|e| ConnectError::new(ErrorCode::Internal, e))?; + + if let Some(parent) = Path::new(&destination).parent() { + ensure_dirs(&parent.to_string_lossy(), user.uid, user.gid) + .map_err(|e| ConnectError::new(ErrorCode::Internal, e))?; + } + + std::fs::rename(&source, &destination).map_err(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + ConnectError::new(ErrorCode::NotFound, format!("source not found: {e}")) + } else { + ConnectError::new(ErrorCode::Internal, format!("error renaming: {e}")) + } + })?; + + let entry = build_entry_info(&destination)?; + Ok(( + MoveResponse { + entry: entry.into(), + ..Default::default() + }, + ctx, + )) + } + + async fn list_dir( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(ListDirResponse, Context), ConnectError> { + let mut depth = request.depth as usize; + if depth == 0 { + depth = 1; + } + + let path = self.resolve_path(request.path, &ctx)?; + + let resolved = std::fs::canonicalize(&path).map_err(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + ConnectError::new(ErrorCode::NotFound, format!("path not found: {e}")) + } else { + ConnectError::new(ErrorCode::Internal, format!("error resolving path: {e}")) + } + })?; + let resolved_str = resolved.to_string_lossy().to_string(); + + let meta = std::fs::metadata(&resolved).map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error getting file info: {e}")) + })?; + if !meta.is_dir() { + return Err(ConnectError::new( + ErrorCode::InvalidArgument, + format!("path is not a directory: {path}"), + )); + } + + let entries = walk_dir(&path, &resolved_str, depth)?; + Ok(( + ListDirResponse { + entries, + ..Default::default() + }, + ctx, + )) + } + + async fn remove( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(RemoveResponse, Context), ConnectError> { + let path = self.resolve_path(request.path, &ctx)?; + + if let Err(e1) = std::fs::remove_dir_all(&path) { + if let Err(e2) = std::fs::remove_file(&path) { + return Err(ConnectError::new( + ErrorCode::Internal, + format!("error removing: {e1}; also tried as file: {e2}"), + )); + } + } + + Ok((RemoveResponse { ..Default::default() }, ctx)) + } + + async fn watch_dir( + &self, + _ctx: Context, + _request: buffa::view::OwnedView>, + ) -> Result< + ( + Pin> + Send>>, + Context, + ), + ConnectError, + > { + Err(ConnectError::new( + ErrorCode::Unimplemented, + "watch_dir streaming not yet implemented", + )) + } + + async fn create_watcher( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(CreateWatcherResponse, Context), ConnectError> { + use notify::{RecursiveMode, Watcher}; + + let path = self.resolve_path(request.path, &ctx)?; + let recursive = request.recursive; + + if let Ok(true) = crate::rpc::entry::is_network_mount(&path) { + return Err(ConnectError::new( + ErrorCode::FailedPrecondition, + "watching network mounts is not supported", + )); + } + + let watcher_id = simple_id(); + let events: Arc>> = Arc::new(Mutex::new(Vec::new())); + let events_cb = Arc::clone(&events); + + let mut watcher = notify::recommended_watcher( + move |res: Result| { + if let Ok(event) = res { + let event_type = match event.kind { + notify::EventKind::Create(_) => EventType::EVENT_TYPE_CREATE, + notify::EventKind::Modify(notify::event::ModifyKind::Data(_)) => { + EventType::EVENT_TYPE_WRITE + } + notify::EventKind::Modify(notify::event::ModifyKind::Metadata(_)) => { + EventType::EVENT_TYPE_CHMOD + } + notify::EventKind::Remove(_) => EventType::EVENT_TYPE_REMOVE, + notify::EventKind::Modify(notify::event::ModifyKind::Name(_)) => { + EventType::EVENT_TYPE_RENAME + } + _ => return, + }; + + for p in &event.paths { + if let Ok(mut guard) = events_cb.lock() { + guard.push(FilesystemEvent { + name: p.to_string_lossy().to_string(), + r#type: buffa::EnumValue::Known(event_type), + ..Default::default() + }); + } + } + } + }, + ) + .map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("failed to create watcher: {e}")) + })?; + + let mode = if recursive { + RecursiveMode::Recursive + } else { + RecursiveMode::NonRecursive + }; + + watcher.watch(Path::new(&path), mode).map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("failed to watch path: {e}")) + })?; + + self.watchers.insert( + watcher_id.clone(), + WatcherHandle { + events, + _watcher: watcher, + }, + ); + + Ok(( + CreateWatcherResponse { + watcher_id, + ..Default::default() + }, + ctx, + )) + } + + async fn get_watcher_events( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(GetWatcherEventsResponse, Context), ConnectError> { + let watcher_id: &str = request.watcher_id; + let handle = self.watchers.get(watcher_id).ok_or_else(|| { + ConnectError::new( + ErrorCode::NotFound, + format!("watcher not found: {watcher_id}"), + ) + })?; + + let events = { + let mut guard = handle.events.lock().unwrap(); + std::mem::take(&mut *guard) + }; + + Ok(( + GetWatcherEventsResponse { + events, + ..Default::default() + }, + ctx, + )) + } + + async fn remove_watcher( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(RemoveWatcherResponse, Context), ConnectError> { + let watcher_id: &str = request.watcher_id; + self.watchers.remove(watcher_id); + Ok((RemoveWatcherResponse { ..Default::default() }, ctx)) + } +} + +fn walk_dir( + requested_path: &str, + resolved_path: &str, + depth: usize, +) -> Result, ConnectError> { + let mut entries = Vec::new(); + let base = Path::new(resolved_path); + + for result in walkdir::WalkDir::new(resolved_path) + .min_depth(1) + .max_depth(depth) + .follow_links(false) + { + let dir_entry = match result { + Ok(e) => e, + Err(e) => { + if e.io_error() + .is_some_and(|io| io.kind() == std::io::ErrorKind::NotFound) + { + continue; + } + return Err(ConnectError::new( + ErrorCode::Internal, + format!("error reading directory: {e}"), + )); + } + }; + + let entry_path = dir_entry.path(); + let mut entry = match build_entry_info(&entry_path.to_string_lossy()) { + Ok(e) => e, + Err(e) if e.code == ErrorCode::NotFound => continue, + Err(e) => return Err(e), + }; + + if let Ok(rel) = entry_path.strip_prefix(base) { + let remapped = PathBuf::from(requested_path).join(rel); + entry.path = remapped.to_string_lossy().to_string(); + } + + entries.push(entry); + } + + Ok(entries) +} + +fn simple_id() -> String { + use std::time::{SystemTime, UNIX_EPOCH}; + let nanos = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); + format!("w-{nanos:x}") +} diff --git a/envd-rs/src/rpc/mod.rs b/envd-rs/src/rpc/mod.rs new file mode 100644 index 0000000..87816c6 --- /dev/null +++ b/envd-rs/src/rpc/mod.rs @@ -0,0 +1,26 @@ +pub mod pb; +pub mod entry; +pub mod process_handler; +pub mod process_service; +pub mod filesystem_service; + +use std::sync::Arc; + +use crate::rpc::process_service::ProcessServiceImpl; +use crate::rpc::filesystem_service::FilesystemServiceImpl; +use crate::state::AppState; + +use pb::process::ProcessExt; +use pb::filesystem::FilesystemExt; + +/// Build the connect-rust Router with both RPC services registered. +pub fn rpc_router(state: Arc) -> connectrpc::Router { + let process_svc = Arc::new(ProcessServiceImpl::new(Arc::clone(&state))); + let filesystem_svc = Arc::new(FilesystemServiceImpl::new(Arc::clone(&state))); + + let router = connectrpc::Router::new(); + let router = process_svc.register(router); + let router = filesystem_svc.register(router); + + router +} diff --git a/envd-rs/src/rpc/pb.rs b/envd-rs/src/rpc/pb.rs new file mode 100644 index 0000000..87fe79c --- /dev/null +++ b/envd-rs/src/rpc/pb.rs @@ -0,0 +1,10 @@ +#![allow(dead_code, non_camel_case_types, unused_imports, clippy::derivable_impls)] + +use ::buffa; +use ::buffa_types; +use ::connectrpc; +use ::futures; +use ::http_body; +use ::serde; + +include!(concat!(env!("OUT_DIR"), "/_connectrpc.rs")); diff --git a/envd-rs/src/rpc/process_handler.rs b/envd-rs/src/rpc/process_handler.rs new file mode 100644 index 0000000..cf0287c --- /dev/null +++ b/envd-rs/src/rpc/process_handler.rs @@ -0,0 +1,400 @@ +use std::io::Read; +use std::os::unix::process::CommandExt; +use std::process::Stdio; +use std::sync::{Arc, Mutex}; + +use connectrpc::{ConnectError, ErrorCode}; +use nix::pty::{openpty, Winsize}; +use nix::sys::signal::{self, Signal}; +use nix::unistd::Pid; +use tokio::sync::broadcast; + +use crate::rpc::pb::process::*; + +const STD_CHUNK_SIZE: usize = 32768; +const PTY_CHUNK_SIZE: usize = 16384; +const BROADCAST_CAPACITY: usize = 4096; + +#[derive(Clone)] +pub enum DataEvent { + Stdout(Vec), + Stderr(Vec), + Pty(Vec), +} + +#[derive(Clone)] +pub struct EndEvent { + pub exit_code: i32, + pub exited: bool, + pub status: String, + pub error: Option, +} + +pub struct ProcessHandle { + pub config: ProcessConfig, + pub tag: Option, + pub pid: u32, + + data_tx: broadcast::Sender, + end_tx: broadcast::Sender, + + stdin: Mutex>, + pty_master: Mutex>, +} + +impl ProcessHandle { + pub fn subscribe_data(&self) -> broadcast::Receiver { + self.data_tx.subscribe() + } + + pub fn subscribe_end(&self) -> broadcast::Receiver { + self.end_tx.subscribe() + } + + pub fn send_signal(&self, sig: Signal) -> Result<(), ConnectError> { + signal::kill(Pid::from_raw(self.pid as i32), sig).map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error sending signal: {e}")) + }) + } + + pub fn write_stdin(&self, data: &[u8]) -> Result<(), ConnectError> { + use std::io::Write; + let mut guard = self.stdin.lock().unwrap(); + match guard.as_mut() { + Some(stdin) => stdin.write_all(data).map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error writing to stdin: {e}")) + }), + None => Err(ConnectError::new( + ErrorCode::FailedPrecondition, + "stdin not enabled or closed", + )), + } + } + + pub fn write_pty(&self, data: &[u8]) -> Result<(), ConnectError> { + use std::io::Write; + let mut guard = self.pty_master.lock().unwrap(); + match guard.as_mut() { + Some(master) => master.write_all(data).map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error writing to pty: {e}")) + }), + None => Err(ConnectError::new( + ErrorCode::FailedPrecondition, + "pty not assigned to process", + )), + } + } + + pub fn close_stdin(&self) -> Result<(), ConnectError> { + if self.pty_master.lock().unwrap().is_some() { + return Err(ConnectError::new( + ErrorCode::FailedPrecondition, + "cannot close stdin for PTY process — send Ctrl+D (0x04) instead", + )); + } + let mut guard = self.stdin.lock().unwrap(); + *guard = None; + Ok(()) + } + + pub fn resize_pty(&self, cols: u16, rows: u16) -> Result<(), ConnectError> { + let guard = self.pty_master.lock().unwrap(); + match guard.as_ref() { + Some(master) => { + use std::os::unix::io::AsRawFd; + let ws = libc::winsize { + ws_row: rows, + ws_col: cols, + ws_xpixel: 0, + ws_ypixel: 0, + }; + let ret = unsafe { libc::ioctl(master.as_raw_fd(), libc::TIOCSWINSZ, &ws) }; + if ret != 0 { + return Err(ConnectError::new( + ErrorCode::Internal, + format!( + "ioctl TIOCSWINSZ failed: {}", + std::io::Error::last_os_error() + ), + )); + } + Ok(()) + } + None => Err(ConnectError::new( + ErrorCode::FailedPrecondition, + "tty not assigned to process", + )), + } + } +} + +pub fn spawn_process( + cmd_str: &str, + args: &[String], + envs: &std::collections::HashMap, + cwd: &str, + pty_opts: Option<(u16, u16)>, + enable_stdin: bool, + tag: Option, + user: &nix::unistd::User, + default_env_vars: &dashmap::DashMap, +) -> Result, ConnectError> { + let mut env: Vec<(String, String)> = Vec::new(); + env.push(("PATH".into(), std::env::var("PATH").unwrap_or_default())); + let home = format!("/home/{}", user.name); + env.push(("HOME".into(), home)); + env.push(("USER".into(), user.name.clone())); + env.push(("LOGNAME".into(), user.name.clone())); + + default_env_vars.iter().for_each(|entry| { + env.push((entry.key().clone(), entry.value().clone())); + }); + + for (k, v) in envs { + env.push((k.clone(), v.clone())); + } + + let nice_delta = 0 - current_nice(); + let oom_script = format!( + r#"echo 100 > /proc/$$/oom_score_adj && exec /usr/bin/nice -n {} "${{@}}""#, + nice_delta + ); + let mut wrapper_args = vec![ + "-c".to_string(), + oom_script, + "--".to_string(), + cmd_str.to_string(), + ]; + wrapper_args.extend_from_slice(args); + + let uid = user.uid.as_raw(); + let gid = user.gid.as_raw(); + + let (data_tx, _) = broadcast::channel(BROADCAST_CAPACITY); + let (end_tx, _) = broadcast::channel(16); + + let config = ProcessConfig { + cmd: cmd_str.to_string(), + args: args.to_vec(), + envs: envs.clone(), + cwd: Some(cwd.to_string()), + ..Default::default() + }; + + if let Some((cols, rows)) = pty_opts { + let pty_result = openpty( + Some(&Winsize { + ws_row: rows, + ws_col: cols, + ws_xpixel: 0, + ws_ypixel: 0, + }), + None, + ) + .map_err(|e| ConnectError::new(ErrorCode::Internal, format!("openpty failed: {e}")))?; + + let master_fd = pty_result.master; + let slave_fd = pty_result.slave; + + let mut command = std::process::Command::new("/bin/sh"); + command + .args(&wrapper_args) + .env_clear() + .envs(env.iter().map(|(k, v)| (k.as_str(), v.as_str()))) + .current_dir(cwd); + + unsafe { + use std::os::unix::io::AsRawFd; + let slave_raw = slave_fd.as_raw_fd(); + command.pre_exec(move || { + nix::unistd::setsid() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + libc::ioctl(slave_raw, libc::TIOCSCTTY, 0); + libc::dup2(slave_raw, 0); + libc::dup2(slave_raw, 1); + libc::dup2(slave_raw, 2); + if slave_raw > 2 { + libc::close(slave_raw); + } + libc::setgid(gid); + libc::setuid(uid); + Ok(()) + }); + } + + command.stdin(Stdio::null()); + command.stdout(Stdio::null()); + command.stderr(Stdio::null()); + + let child = command.spawn().map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error starting pty process: {e}")) + })?; + + drop(slave_fd); + + let pid = child.id(); + let master_file: std::fs::File = master_fd.into(); + let master_clone = master_file.try_clone().unwrap(); + + let handle = Arc::new(ProcessHandle { + config, + tag, + pid, + data_tx: data_tx.clone(), + end_tx: end_tx.clone(), + stdin: Mutex::new(None), + pty_master: Mutex::new(Some(master_file)), + }); + + let data_tx_clone = data_tx.clone(); + std::thread::spawn(move || { + let mut master = master_clone; + let mut buf = vec![0u8; PTY_CHUNK_SIZE]; + loop { + match master.read(&mut buf) { + Ok(0) => break, + Ok(n) => { + let _ = data_tx_clone.send(DataEvent::Pty(buf[..n].to_vec())); + } + Err(_) => break, + } + } + }); + + let end_tx_clone = end_tx.clone(); + std::thread::spawn(move || { + let mut child = child; + match child.wait() { + Ok(s) => { + let _ = end_tx_clone.send(EndEvent { + exit_code: s.code().unwrap_or(-1), + exited: s.code().is_some(), + status: format!("{s}"), + error: None, + }); + } + Err(e) => { + let _ = end_tx_clone.send(EndEvent { + exit_code: -1, + exited: false, + status: "error".into(), + error: Some(e.to_string()), + }); + } + } + }); + + tracing::info!(pid, cmd = cmd_str, "process started (pty)"); + Ok(handle) + } else { + let mut command = std::process::Command::new("/bin/sh"); + command + .args(&wrapper_args) + .env_clear() + .envs(env.iter().map(|(k, v)| (k.as_str(), v.as_str()))) + .current_dir(cwd) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + if enable_stdin { + command.stdin(Stdio::piped()); + } else { + command.stdin(Stdio::null()); + } + + unsafe { + command.pre_exec(move || { + libc::setgid(gid); + libc::setuid(uid); + Ok(()) + }); + } + + let mut child = command.spawn().map_err(|e| { + ConnectError::new(ErrorCode::Internal, format!("error starting process: {e}")) + })?; + + let pid = child.id(); + let stdin = child.stdin.take(); + let stdout = child.stdout.take(); + let stderr = child.stderr.take(); + + let handle = Arc::new(ProcessHandle { + config, + tag, + pid, + data_tx: data_tx.clone(), + end_tx: end_tx.clone(), + stdin: Mutex::new(stdin), + pty_master: Mutex::new(None), + }); + + if let Some(mut out) = stdout { + let tx = data_tx.clone(); + std::thread::spawn(move || { + let mut buf = vec![0u8; STD_CHUNK_SIZE]; + loop { + match out.read(&mut buf) { + Ok(0) => break, + Ok(n) => { + let _ = tx.send(DataEvent::Stdout(buf[..n].to_vec())); + } + Err(_) => break, + } + } + }); + } + + if let Some(mut err_pipe) = stderr { + let tx = data_tx.clone(); + std::thread::spawn(move || { + let mut buf = vec![0u8; STD_CHUNK_SIZE]; + loop { + match err_pipe.read(&mut buf) { + Ok(0) => break, + Ok(n) => { + let _ = tx.send(DataEvent::Stderr(buf[..n].to_vec())); + } + Err(_) => break, + } + } + }); + } + + let end_tx_clone = end_tx.clone(); + std::thread::spawn(move || { + match child.wait() { + Ok(s) => { + let _ = end_tx_clone.send(EndEvent { + exit_code: s.code().unwrap_or(-1), + exited: s.code().is_some(), + status: format!("{s}"), + error: None, + }); + } + Err(e) => { + let _ = end_tx_clone.send(EndEvent { + exit_code: -1, + exited: false, + status: "error".into(), + error: Some(e.to_string()), + }); + } + } + }); + + tracing::info!(pid, cmd = cmd_str, "process started (pipe)"); + Ok(handle) + } +} + +fn current_nice() -> i32 { + unsafe { + *libc::__errno_location() = 0; + let prio = libc::getpriority(libc::PRIO_PROCESS, 0); + if *libc::__errno_location() != 0 { + return 0; + } + 20 - prio + } +} diff --git a/envd-rs/src/rpc/process_service.rs b/envd-rs/src/rpc/process_service.rs new file mode 100644 index 0000000..c69c646 --- /dev/null +++ b/envd-rs/src/rpc/process_service.rs @@ -0,0 +1,438 @@ +use std::collections::HashMap; +use std::pin::Pin; +use std::sync::Arc; + +use connectrpc::{ConnectError, Context, ErrorCode}; +use dashmap::DashMap; +use futures::Stream; + +use crate::permissions::path::expand_and_resolve; +use crate::permissions::user::lookup_user; +use crate::rpc::pb::process::*; +use crate::rpc::process_handler::{self, DataEvent, ProcessHandle}; +use crate::state::AppState; + +pub struct ProcessServiceImpl { + state: Arc, + processes: DashMap>, +} + +impl ProcessServiceImpl { + pub fn new(state: Arc) -> Self { + Self { + state, + processes: DashMap::new(), + } + } + + fn get_process_by_selector( + &self, + selector: &ProcessSelectorView, + ) -> Result, ConnectError> { + match &selector.selector { + Some(process_selector::SelectorView::Pid(pid)) => { + let pid_val = *pid; + self.processes + .get(&pid_val) + .map(|entry| Arc::clone(entry.value())) + .ok_or_else(|| { + ConnectError::new( + ErrorCode::NotFound, + format!("process with pid {pid_val} not found"), + ) + }) + } + Some(process_selector::SelectorView::Tag(tag)) => { + let tag_str: &str = tag; + for entry in self.processes.iter() { + if let Some(ref t) = entry.value().tag { + if t == tag_str { + return Ok(Arc::clone(entry.value())); + } + } + } + Err(ConnectError::new( + ErrorCode::NotFound, + format!("process with tag {tag_str} not found"), + )) + } + None => Err(ConnectError::new( + ErrorCode::InvalidArgument, + "process selector required", + )), + } + } + + fn spawn_from_request( + &self, + request: &StartRequestView<'_>, + ) -> Result, ConnectError> { + let proc_config = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process config required") + })?; + + let username = self.state.defaults.user.clone(); + let user = + lookup_user(&username).map_err(|e| ConnectError::new(ErrorCode::Internal, e))?; + + let cmd: &str = proc_config.cmd; + let args: Vec = proc_config.args.iter().map(|s| s.to_string()).collect(); + let envs: HashMap = proc_config + .envs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(); + + let home_dir = format!("/home/{}", user.name); + let cwd_str: &str = proc_config.cwd.unwrap_or(""); + let cwd = expand_and_resolve(cwd_str, &home_dir, self.state.defaults.workdir.as_deref()) + .map_err(|e| ConnectError::new(ErrorCode::InvalidArgument, e))?; + + let effective_cwd = if cwd.is_empty() { "/" } else { &cwd }; + if let Err(_) = std::fs::metadata(effective_cwd) { + return Err(ConnectError::new( + ErrorCode::InvalidArgument, + format!("cwd '{effective_cwd}' does not exist"), + )); + } + + let pty_opts = request.pty.as_option().and_then(|pty| { + pty.size + .as_option() + .map(|sz| (sz.cols as u16, sz.rows as u16)) + }); + + let enable_stdin = request.stdin.unwrap_or(true); + let tag = request.tag.map(|s| s.to_string()); + + let handle = process_handler::spawn_process( + cmd, + &args, + &envs, + effective_cwd, + pty_opts, + enable_stdin, + tag, + &user, + &self.state.defaults.env_vars, + )?; + + self.processes.insert(handle.pid, Arc::clone(&handle)); + + let processes = self.processes.clone(); + let pid = handle.pid; + let mut end_rx = handle.subscribe_end(); + tokio::spawn(async move { + let _ = end_rx.recv().await; + processes.remove(&pid); + }); + + Ok(handle) + } +} + +impl Process for ProcessServiceImpl { + async fn list( + &self, + ctx: Context, + _request: buffa::view::OwnedView>, + ) -> Result<(ListResponse, Context), ConnectError> { + let processes: Vec = self + .processes + .iter() + .map(|entry| { + let h = entry.value(); + ProcessInfo { + config: buffa::MessageField::some(h.config.clone()), + pid: h.pid, + tag: h.tag.clone(), + ..Default::default() + } + }) + .collect(); + + Ok(( + ListResponse { + processes, + ..Default::default() + }, + ctx, + )) + } + + async fn start( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result< + ( + Pin> + Send>>, + Context, + ), + ConnectError, + > { + let handle = self.spawn_from_request(&request)?; + let pid = handle.pid; + + let mut data_rx = handle.subscribe_data(); + let mut end_rx = handle.subscribe_end(); + + let stream = async_stream::stream! { + yield Ok(make_start_response(pid)); + + loop { + match data_rx.recv().await { + Ok(ev) => yield Ok(make_data_start_response(ev)), + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + + if let Ok(end) = end_rx.recv().await { + yield Ok(make_end_start_response(end)); + } + }; + + Ok((Box::pin(stream), ctx)) + } + + async fn connect( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result< + ( + Pin> + Send>>, + Context, + ), + ConnectError, + > { + let selector = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process selector required") + })?; + let handle = self.get_process_by_selector(selector)?; + let pid = handle.pid; + + let mut data_rx = handle.subscribe_data(); + let mut end_rx = handle.subscribe_end(); + + let stream = async_stream::stream! { + yield Ok(ConnectResponse { + event: buffa::MessageField::some(ProcessEvent { + event: Some(process_event::Event::Start(Box::new( + process_event::StartEvent { pid, ..Default::default() }, + ))), + ..Default::default() + }), + ..Default::default() + }); + + loop { + match data_rx.recv().await { + Ok(ev) => { + yield Ok(ConnectResponse { + event: buffa::MessageField::some(make_data_event(ev)), + ..Default::default() + }); + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + + if let Ok(end) = end_rx.recv().await { + yield Ok(ConnectResponse { + event: buffa::MessageField::some(make_end_event(end)), + ..Default::default() + }); + } + }; + + Ok((Box::pin(stream), ctx)) + } + + async fn update( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(UpdateResponse, Context), ConnectError> { + let selector = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process selector required") + })?; + let handle = self.get_process_by_selector(selector)?; + + if let Some(pty) = request.pty.as_option() { + if let Some(size) = pty.size.as_option() { + handle.resize_pty(size.cols as u16, size.rows as u16)?; + } + } + + Ok((UpdateResponse { ..Default::default() }, ctx)) + } + + async fn stream_input( + &self, + ctx: Context, + mut requests: Pin< + Box< + dyn Stream< + Item = Result< + buffa::view::OwnedView>, + ConnectError, + >, + > + Send, + >, + >, + ) -> Result<(StreamInputResponse, Context), ConnectError> { + use futures::StreamExt; + + let mut handle: Option> = None; + + while let Some(result) = requests.next().await { + let req = result?; + match &req.event { + Some(stream_input_request::EventView::Start(start)) => { + if let Some(selector) = start.process.as_option() { + handle = Some(self.get_process_by_selector(selector)?); + } + } + Some(stream_input_request::EventView::Data(data)) => { + let h = handle.as_ref().ok_or_else(|| { + ConnectError::new(ErrorCode::FailedPrecondition, "no start event received") + })?; + if let Some(input) = data.input.as_option() { + write_input(h, input)?; + } + } + Some(stream_input_request::EventView::Keepalive(_)) => {} + None => {} + } + } + + Ok((StreamInputResponse { ..Default::default() }, ctx)) + } + + async fn send_input( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(SendInputResponse, Context), ConnectError> { + let selector = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process selector required") + })?; + let handle = self.get_process_by_selector(selector)?; + + if let Some(input) = request.input.as_option() { + write_input(&handle, input)?; + } + + Ok((SendInputResponse { ..Default::default() }, ctx)) + } + + async fn send_signal( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(SendSignalResponse, Context), ConnectError> { + let selector = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process selector required") + })?; + let handle = self.get_process_by_selector(selector)?; + + let sig = match request.signal.as_known() { + Some(Signal::SIGNAL_SIGKILL) => nix::sys::signal::Signal::SIGKILL, + Some(Signal::SIGNAL_SIGTERM) => nix::sys::signal::Signal::SIGTERM, + _ => { + return Err(ConnectError::new( + ErrorCode::InvalidArgument, + "invalid or unspecified signal", + )) + } + }; + + handle.send_signal(sig)?; + Ok((SendSignalResponse { ..Default::default() }, ctx)) + } + + async fn close_stdin( + &self, + ctx: Context, + request: buffa::view::OwnedView>, + ) -> Result<(CloseStdinResponse, Context), ConnectError> { + let selector = request.process.as_option().ok_or_else(|| { + ConnectError::new(ErrorCode::InvalidArgument, "process selector required") + })?; + let handle = self.get_process_by_selector(selector)?; + handle.close_stdin()?; + Ok((CloseStdinResponse { ..Default::default() }, ctx)) + } +} + +fn write_input(handle: &ProcessHandle, input: &ProcessInputView) -> Result<(), ConnectError> { + match &input.input { + Some(process_input::InputView::Pty(d)) => handle.write_pty(d), + Some(process_input::InputView::Stdin(d)) => handle.write_stdin(d), + None => Ok(()), + } +} + +fn make_start_response(pid: u32) -> StartResponse { + StartResponse { + event: buffa::MessageField::some(ProcessEvent { + event: Some(process_event::Event::Start(Box::new( + process_event::StartEvent { + pid, + ..Default::default() + }, + ))), + ..Default::default() + }), + ..Default::default() + } +} + +fn make_data_event(ev: DataEvent) -> ProcessEvent { + let output = match ev { + DataEvent::Stdout(d) => Some(process_event::data_event::Output::Stdout(d.into())), + DataEvent::Stderr(d) => Some(process_event::data_event::Output::Stderr(d.into())), + DataEvent::Pty(d) => Some(process_event::data_event::Output::Pty(d.into())), + }; + ProcessEvent { + event: Some(process_event::Event::Data(Box::new( + process_event::DataEvent { + output, + ..Default::default() + }, + ))), + ..Default::default() + } +} + +fn make_data_start_response(ev: DataEvent) -> StartResponse { + StartResponse { + event: buffa::MessageField::some(make_data_event(ev)), + ..Default::default() + } +} + +fn make_end_event(end: process_handler::EndEvent) -> ProcessEvent { + ProcessEvent { + event: Some(process_event::Event::End(Box::new( + process_event::EndEvent { + exit_code: end.exit_code, + exited: end.exited, + status: end.status, + error: end.error, + ..Default::default() + }, + ))), + ..Default::default() + } +} + +fn make_end_start_response(end: process_handler::EndEvent) -> StartResponse { + StartResponse { + event: buffa::MessageField::some(make_end_event(end)), + ..Default::default() + } +} diff --git a/envd-rs/src/state.rs b/envd-rs/src/state.rs new file mode 100644 index 0000000..d54ea38 --- /dev/null +++ b/envd-rs/src/state.rs @@ -0,0 +1,42 @@ +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +use crate::auth::token::SecureToken; +use crate::conntracker::ConnTracker; +use crate::execcontext::Defaults; +use crate::port::subsystem::PortSubsystem; +use crate::util::AtomicMax; + +pub struct AppState { + pub defaults: Defaults, + pub version: String, + pub commit: String, + pub is_fc: bool, + pub needs_restore: AtomicBool, + pub last_set_time: AtomicMax, + pub access_token: SecureToken, + pub conn_tracker: ConnTracker, + pub port_subsystem: Option>, +} + +impl AppState { + pub fn new( + defaults: Defaults, + version: String, + commit: String, + is_fc: bool, + port_subsystem: Option>, + ) -> Arc { + Arc::new(Self { + defaults, + version, + commit, + is_fc, + needs_restore: AtomicBool::new(false), + last_set_time: AtomicMax::new(), + access_token: SecureToken::new(), + conn_tracker: ConnTracker::new(), + port_subsystem, + }) + } +} diff --git a/envd-rs/src/util.rs b/envd-rs/src/util.rs new file mode 100644 index 0000000..2016eca --- /dev/null +++ b/envd-rs/src/util.rs @@ -0,0 +1,33 @@ +use std::sync::atomic::{AtomicI64, Ordering}; + +pub struct AtomicMax { + val: AtomicI64, +} + +impl AtomicMax { + pub fn new() -> Self { + Self { + val: AtomicI64::new(i64::MIN), + } + } + + /// Sets the stored value to `new` if `new` is strictly greater than + /// the current value. Returns `true` if the value was updated. + pub fn set_to_greater(&self, new: i64) -> bool { + loop { + let current = self.val.load(Ordering::Acquire); + if new <= current { + return false; + } + match self.val.compare_exchange_weak( + current, + new, + Ordering::Release, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(_) => continue, + } + } + } +} diff --git a/scripts/update-minimal-rootfs.sh b/scripts/update-minimal-rootfs.sh index 71a9f47..d7f4956 100755 --- a/scripts/update-minimal-rootfs.sh +++ b/scripts/update-minimal-rootfs.sh @@ -36,12 +36,6 @@ if [ ! -f "${ENVD_BIN}" ]; then exit 1 fi -# Verify it's statically linked. -if ! file "${ENVD_BIN}" | grep -q "statically linked"; then - echo "ERROR: envd is not statically linked!" - exit 1 -fi - # Step 2: Mount the rootfs. echo "==> Mounting rootfs at ${MOUNT_DIR}..." mkdir -p "${MOUNT_DIR}" From 1143acd37a73691bba4aa0ce3a6a52dcb1e2935a Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 03:12:25 +0600 Subject: [PATCH 06/10] refactor: remove Go envd module, update host agent for Rust envd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Go envd guest agent (`envd/`) is fully replaced by the Rust implementation (`envd-rs/`). This commit removes the Go module and updates all references across the codebase. Makefile: remove ENVD_DIR, VERSION_ENVD, build-envd-go, dev-envd-go, and Go envd from proto/fmt/vet/tidy/clean targets. Add static-link verification to build-envd. Host agent: rewrite snapshot quiesce comments that referenced Go GC and page allocator corruption — no longer applicable with Rust envd. Tighten envdclient to expect HTTP 200 (not 204) from health and file upload endpoints, and require JSON version response from FetchVersion. Remove NOTICE (no e2b-derived code remains). Update CLAUDE.md and README.md to reflect Rust envd architecture. --- CLAUDE.md | 57 +- Makefile | 25 +- NOTICE | 19 - README.md | 1 + envd-rs/README.md | 2 +- envd/LICENSE | 202 -- envd/Makefile | 62 - envd/VERSION | 1 - envd/go.mod | 42 - envd/go.sum | 92 - envd/internal/api/api.gen.go | 604 ----- envd/internal/api/auth.go | 133 -- envd/internal/api/auth_test.go | 64 - envd/internal/api/cfg.yaml | 10 - envd/internal/api/conntracker.go | 94 - envd/internal/api/download.go | 187 -- envd/internal/api/download_test.go | 405 ---- envd/internal/api/encoding.go | 229 -- envd/internal/api/encoding_test.go | 496 ----- envd/internal/api/envs.go | 31 - envd/internal/api/error.go | 23 - envd/internal/api/generate.go | 5 - envd/internal/api/init.go | 304 --- envd/internal/api/init_test.go | 524 ----- envd/internal/api/secure_token.go | 214 -- envd/internal/api/secure_token_test.go | 463 ---- envd/internal/api/snapshot.go | 62 - envd/internal/api/store.go | 156 -- envd/internal/api/upload.go | 311 --- envd/internal/api/upload_test.go | 251 --- envd/internal/execcontext/context.go | 39 - envd/internal/host/metrics.go | 96 - envd/internal/host/mmds.go | 185 -- envd/internal/logs/bufferedEvents.go | 49 - envd/internal/logs/exporter/exporter.go | 174 -- envd/internal/logs/interceptor.go | 174 -- envd/internal/logs/logger.go | 37 - envd/internal/permissions/authenticate.go | 49 - envd/internal/permissions/keepalive.go | 31 - envd/internal/permissions/path.go | 98 - envd/internal/permissions/user.go | 46 - envd/internal/port/conn.go | 165 -- envd/internal/port/forward.go | 240 -- envd/internal/port/scan.go | 70 - envd/internal/port/scanSubscriber.go | 61 - envd/internal/port/scanfilter.go | 27 - envd/internal/port/subsystem.go | 101 - envd/internal/services/cgroups/cgroup2.go | 129 -- .../internal/services/cgroups/cgroup2_test.go | 187 -- envd/internal/services/cgroups/iface.go | 16 - envd/internal/services/cgroups/noop.go | 19 - envd/internal/services/filesystem/dir.go | 186 -- envd/internal/services/filesystem/dir_test.go | 407 ---- envd/internal/services/filesystem/move.go | 60 - .../internal/services/filesystem/move_test.go | 366 --- envd/internal/services/filesystem/remove.go | 33 - envd/internal/services/filesystem/service.go | 37 - .../services/filesystem/service_test.go | 16 - envd/internal/services/filesystem/stat.go | 31 - .../internal/services/filesystem/stat_test.go | 116 - envd/internal/services/filesystem/utils.go | 109 - .../services/filesystem/utils_test.go | 151 -- envd/internal/services/filesystem/watch.go | 161 -- .../services/filesystem/watch_sync.go | 226 -- envd/internal/services/process/connect.go | 128 -- .../services/process/handler/handler.go | 482 ---- .../services/process/handler/multiplex.go | 79 - envd/internal/services/process/input.go | 109 - envd/internal/services/process/list.go | 30 - envd/internal/services/process/service.go | 85 - envd/internal/services/process/signal.go | 40 - envd/internal/services/process/start.go | 249 --- envd/internal/services/process/update.go | 32 - envd/internal/services/spec/filesystem.pb.go | 1446 ------------ .../services/spec/filesystem/filesystem.pb.go | 1444 ------------ .../filesystemconnect/filesystem.connect.go | 337 --- envd/internal/services/spec/process.pb.go | 1972 ----------------- .../services/spec/process/process.pb.go | 1970 ---------------- .../process/processconnect/process.connect.go | 310 --- .../spec/specconnect/filesystem.connect.go | 339 --- .../spec/specconnect/process.connect.go | 312 --- envd/internal/shared/filesystem/entry.go | 110 - envd/internal/shared/filesystem/entry_test.go | 266 --- envd/internal/shared/filesystem/model.go | 32 - envd/internal/shared/id/id.go | 166 -- envd/internal/shared/id/id_test.go | 382 ---- envd/internal/shared/keys/constants.go | 9 - envd/internal/shared/keys/hashing.go | 7 - envd/internal/shared/keys/hmac_sha256.go | 27 - envd/internal/shared/keys/hmac_sha256_test.go | 76 - envd/internal/shared/keys/key.go | 101 - envd/internal/shared/keys/key_test.go | 162 -- envd/internal/shared/keys/sha256.go | 32 - envd/internal/shared/keys/sha256_test.go | 17 - envd/internal/shared/keys/sha512.go | 22 - envd/internal/shared/smap/smap.go | 49 - envd/internal/shared/utils/ptr.go | 45 - envd/internal/utils/atomic.go | 29 - envd/internal/utils/atomic_test.go | 78 - envd/internal/utils/map.go | 53 - envd/internal/utils/multipart.go | 45 - envd/internal/utils/rfsnotify.go | 14 - envd/main.go | 294 --- envd/spec/buf.gen.yaml | 15 - envd/spec/envd.yaml | 313 --- envd/spec/generate.go | 3 - internal/envdclient/client.go | 9 +- internal/envdclient/health.go | 14 +- internal/sandbox/manager.go | 19 +- 109 files changed, 49 insertions(+), 20665 deletions(-) delete mode 100644 NOTICE delete mode 100644 envd/LICENSE delete mode 100644 envd/Makefile delete mode 100644 envd/VERSION delete mode 100644 envd/go.mod delete mode 100644 envd/go.sum delete mode 100644 envd/internal/api/api.gen.go delete mode 100644 envd/internal/api/auth.go delete mode 100644 envd/internal/api/auth_test.go delete mode 100644 envd/internal/api/cfg.yaml delete mode 100644 envd/internal/api/conntracker.go delete mode 100644 envd/internal/api/download.go delete mode 100644 envd/internal/api/download_test.go delete mode 100644 envd/internal/api/encoding.go delete mode 100644 envd/internal/api/encoding_test.go delete mode 100644 envd/internal/api/envs.go delete mode 100644 envd/internal/api/error.go delete mode 100644 envd/internal/api/generate.go delete mode 100644 envd/internal/api/init.go delete mode 100644 envd/internal/api/init_test.go delete mode 100644 envd/internal/api/secure_token.go delete mode 100644 envd/internal/api/secure_token_test.go delete mode 100644 envd/internal/api/snapshot.go delete mode 100644 envd/internal/api/store.go delete mode 100644 envd/internal/api/upload.go delete mode 100644 envd/internal/api/upload_test.go delete mode 100644 envd/internal/execcontext/context.go delete mode 100644 envd/internal/host/metrics.go delete mode 100644 envd/internal/host/mmds.go delete mode 100644 envd/internal/logs/bufferedEvents.go delete mode 100644 envd/internal/logs/exporter/exporter.go delete mode 100644 envd/internal/logs/interceptor.go delete mode 100644 envd/internal/logs/logger.go delete mode 100644 envd/internal/permissions/authenticate.go delete mode 100644 envd/internal/permissions/keepalive.go delete mode 100644 envd/internal/permissions/path.go delete mode 100644 envd/internal/permissions/user.go delete mode 100644 envd/internal/port/conn.go delete mode 100644 envd/internal/port/forward.go delete mode 100644 envd/internal/port/scan.go delete mode 100644 envd/internal/port/scanSubscriber.go delete mode 100644 envd/internal/port/scanfilter.go delete mode 100644 envd/internal/port/subsystem.go delete mode 100644 envd/internal/services/cgroups/cgroup2.go delete mode 100644 envd/internal/services/cgroups/cgroup2_test.go delete mode 100644 envd/internal/services/cgroups/iface.go delete mode 100644 envd/internal/services/cgroups/noop.go delete mode 100644 envd/internal/services/filesystem/dir.go delete mode 100644 envd/internal/services/filesystem/dir_test.go delete mode 100644 envd/internal/services/filesystem/move.go delete mode 100644 envd/internal/services/filesystem/move_test.go delete mode 100644 envd/internal/services/filesystem/remove.go delete mode 100644 envd/internal/services/filesystem/service.go delete mode 100644 envd/internal/services/filesystem/service_test.go delete mode 100644 envd/internal/services/filesystem/stat.go delete mode 100644 envd/internal/services/filesystem/stat_test.go delete mode 100644 envd/internal/services/filesystem/utils.go delete mode 100644 envd/internal/services/filesystem/utils_test.go delete mode 100644 envd/internal/services/filesystem/watch.go delete mode 100644 envd/internal/services/filesystem/watch_sync.go delete mode 100644 envd/internal/services/process/connect.go delete mode 100644 envd/internal/services/process/handler/handler.go delete mode 100644 envd/internal/services/process/handler/multiplex.go delete mode 100644 envd/internal/services/process/input.go delete mode 100644 envd/internal/services/process/list.go delete mode 100644 envd/internal/services/process/service.go delete mode 100644 envd/internal/services/process/signal.go delete mode 100644 envd/internal/services/process/start.go delete mode 100644 envd/internal/services/process/update.go delete mode 100644 envd/internal/services/spec/filesystem.pb.go delete mode 100644 envd/internal/services/spec/filesystem/filesystem.pb.go delete mode 100644 envd/internal/services/spec/filesystem/filesystemconnect/filesystem.connect.go delete mode 100644 envd/internal/services/spec/process.pb.go delete mode 100644 envd/internal/services/spec/process/process.pb.go delete mode 100644 envd/internal/services/spec/process/processconnect/process.connect.go delete mode 100644 envd/internal/services/spec/specconnect/filesystem.connect.go delete mode 100644 envd/internal/services/spec/specconnect/process.connect.go delete mode 100644 envd/internal/shared/filesystem/entry.go delete mode 100644 envd/internal/shared/filesystem/entry_test.go delete mode 100644 envd/internal/shared/filesystem/model.go delete mode 100644 envd/internal/shared/id/id.go delete mode 100644 envd/internal/shared/id/id_test.go delete mode 100644 envd/internal/shared/keys/constants.go delete mode 100644 envd/internal/shared/keys/hashing.go delete mode 100644 envd/internal/shared/keys/hmac_sha256.go delete mode 100644 envd/internal/shared/keys/hmac_sha256_test.go delete mode 100644 envd/internal/shared/keys/key.go delete mode 100644 envd/internal/shared/keys/key_test.go delete mode 100644 envd/internal/shared/keys/sha256.go delete mode 100644 envd/internal/shared/keys/sha256_test.go delete mode 100644 envd/internal/shared/keys/sha512.go delete mode 100644 envd/internal/shared/smap/smap.go delete mode 100644 envd/internal/shared/utils/ptr.go delete mode 100644 envd/internal/utils/atomic.go delete mode 100644 envd/internal/utils/atomic_test.go delete mode 100644 envd/internal/utils/map.go delete mode 100644 envd/internal/utils/multipart.go delete mode 100644 envd/internal/utils/rfsnotify.go delete mode 100644 envd/main.go delete mode 100644 envd/spec/buf.gen.yaml delete mode 100644 envd/spec/envd.yaml delete mode 100644 envd/spec/generate.go diff --git a/CLAUDE.md b/CLAUDE.md index d8f8e52..4608899 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -14,7 +14,7 @@ All commands go through the Makefile. Never use raw `go build` or `go run`. make build # Build all binaries → builds/ make build-cp # Control plane only make build-agent # Host agent only -make build-envd # envd static binary (verified statically linked) +make build-envd # envd static binary (Rust, musl, verified statically linked) make build-frontend # SvelteKit dashboard → frontend/build/ (served by Caddy) make dev # Full local dev: infra + migrate + control plane @@ -23,13 +23,13 @@ make dev-down # Stop dev infra make dev-cp # Control plane with hot reload (if air installed) make dev-frontend # Vite dev server with HMR (port 5173) make dev-agent # Host agent (sudo required) -make dev-envd # envd in TCP debug mode +make dev-envd # envd in debug mode (--isnotfc, port 49983) make check # fmt + vet + lint + test (CI order) make test # Unit tests: go test -race -v ./internal/... make test-integration # Integration tests (require host agent + Firecracker) -make fmt # gofmt both modules -make vet # go vet both modules +make fmt # gofmt +make vet # go vet make lint # golangci-lint make migrate-up # Apply pending migrations @@ -38,8 +38,8 @@ make migrate-create name=xxx # Scaffold new goose migration (never create manua make migrate-reset # Drop + re-apply all make generate # Proto (buf) + sqlc codegen -make proto # buf generate for all proto dirs -make tidy # go mod tidy both modules +make proto # buf generate for proto dirs +make tidy # go mod tidy ``` Run a single test: `go test -race -v -run TestName ./internal/path/...` @@ -50,15 +50,15 @@ Run a single test: `go test -race -v -run TestName ./internal/path/...` User SDK → HTTPS/WS → Control Plane → Connect RPC → Host Agent → HTTP/Connect RPC over TAP → envd (inside VM) ``` -**Three binaries, two Go modules:** +**Three binaries:** -| Binary | Module | Entry point | Runs as | -|--------|--------|-------------|---------| -| wrenn-cp | `git.omukk.dev/wrenn/wrenn` | `cmd/control-plane/main.go` | Unprivileged | -| wrenn-agent | `git.omukk.dev/wrenn/wrenn` | `cmd/host-agent/main.go` | `wrenn` user with capabilities (SYS_ADMIN, NET_ADMIN, NET_RAW, SYS_PTRACE, KILL, DAC_OVERRIDE, MKNOD) via setcap; also accepts root | -| envd | `git.omukk.dev/wrenn/wrenn/envd` (standalone `envd/go.mod`) | `envd/main.go` | PID 1 inside guest VM | +| Binary | Language | Entry point | Runs as | +|--------|----------|-------------|---------| +| wrenn-cp | Go (`git.omukk.dev/wrenn/wrenn`) | `cmd/control-plane/main.go` | Unprivileged | +| wrenn-agent | Go (`git.omukk.dev/wrenn/wrenn`) | `cmd/host-agent/main.go` | `wrenn` user with capabilities (SYS_ADMIN, NET_ADMIN, NET_RAW, SYS_PTRACE, KILL, DAC_OVERRIDE, MKNOD) via setcap; also accepts root | +| envd | Rust (`envd-rs/`) | `envd-rs/src/main.rs` | PID 1 inside guest VM | -envd is a **completely independent Go module**. It is never imported by the main module. The only connection is the protobuf contract. It compiles to a static binary baked into rootfs images. +envd is a standalone Rust binary (Tokio + Axum + connectrpc-rs). It is completely independent from the Go module — the only connection is the protobuf contract. It compiles to a statically linked musl binary baked into rootfs images. **Key architectural invariant:** The host agent is **stateful** (in-memory `boxes` map is the source of truth for running VMs). The control plane is **stateless** (all persistent state in PostgreSQL). The reconciler (`internal/api/reconciler.go`) bridges the gap — it periodically compares DB records against the host agent's live state and marks orphaned sandboxes as "stopped". @@ -99,13 +99,17 @@ Startup (`cmd/host-agent/main.go`) wires: root/capabilities check → enable IP ### envd (Guest Agent) -**Module:** `envd/` with its own `go.mod` (`git.omukk.dev/wrenn/wrenn/envd`) +**Directory:** `envd-rs/` — standalone Rust crate -Runs as PID 1 inside the microVM via `wrenn-init.sh` (mounts procfs/sysfs/dev, sets hostname, writes resolv.conf, then execs envd). Extracted from E2B (Apache 2.0), with shared packages internalized into `envd/internal/shared/`. Listens on TCP `0.0.0.0:49983`. +Runs as PID 1 inside the microVM via `wrenn-init.sh` (mounts procfs/sysfs/dev, sets hostname, writes resolv.conf, then execs envd via tini). Built with `cargo build --release --target x86_64-unknown-linux-musl`. Listens on TCP `0.0.0.0:49983`. -- **ProcessService**: start processes, stream stdout/stderr, signal handling, PTY support -- **FilesystemService**: stat/list/mkdir/move/remove/watch files -- **Health**: GET `/health` +- **Stack**: Tokio (async runtime) + Axum (HTTP) + connectrpc-rs (Connect protocol RPC) +- **ProcessService** (Connect RPC): start/connect/list/signal processes, stream stdout/stderr, PTY support +- **FilesystemService** (Connect RPC): stat/list/mkdir/move/remove/watch files +- **HTTP endpoints**: GET `/health`, GET `/metrics`, POST `/init`, POST `/snapshot/prepare`, GET/POST `/files` +- **Proto codegen**: `connectrpc-build` compiles `proto/envd/*.proto` at `cargo build` time via `build.rs` — no committed stubs +- **Build**: `make build-envd` → static musl binary in `builds/envd` +- **Dev**: `make dev-envd` → `cargo run -- --isnotfc --port 49983` ### Dashboard (Frontend) @@ -185,17 +189,16 @@ Routes defined in `internal/api/server.go`, handlers in `internal/api/handlers_* ### Proto (Connect RPC) -Proto source of truth is `proto/envd/*.proto` and `proto/hostagent/*.proto`. Run `make proto` to regenerate. Three `buf.gen.yaml` files control output: +Proto source of truth is `proto/envd/*.proto` and `proto/hostagent/*.proto`. Run `make proto` to regenerate Go stubs. Two `buf.gen.yaml` files control Go output: | buf.gen.yaml location | Generates to | Used by | |---|---|---| | `proto/envd/buf.gen.yaml` | `proto/envd/gen/` | Main module (host agent's envd client) | | `proto/hostagent/buf.gen.yaml` | `proto/hostagent/gen/` | Main module (control plane ↔ host agent) | -| `envd/spec/buf.gen.yaml` | `envd/internal/services/spec/` | envd module (guest agent server) | -The envd `buf.gen.yaml` reads from `../../proto/envd/` (same source protos) but generates into envd's own module. This means the same `.proto` files produce two independent sets of Go stubs — one for each Go module. +The Rust envd (`envd-rs/`) generates its own protobuf stubs at `cargo build` time via `connectrpc-build` in `envd-rs/build.rs`, reading from the same `proto/envd/*.proto` sources. No committed Rust stubs — they live in `OUT_DIR`. -To add a new RPC method: edit the `.proto` file → `make proto` → implement the handler on both sides. +To add a new RPC method: edit the `.proto` file → `make proto` (Go stubs) → rebuild envd-rs (Rust stubs generated automatically) → implement the handler on both sides. ### sqlc @@ -206,7 +209,7 @@ To add a new query: add it to the appropriate `.sql` file in `db/queries/` → ` ## Key Technical Decisions - **Connect RPC** (not gRPC) for all RPC communication between components -- **Buf + protoc-gen-connect-go** for code generation (not protoc-gen-go-grpc) +- **Buf + protoc-gen-connect-go** for Go code generation; **connectrpc-build** for Rust code generation in envd - **Raw Firecracker HTTP API** via Unix socket (not firecracker-go-sdk Machine type) - **TAP networking** (not vsock) for host-to-envd communication - **Device-mapper snapshots** for rootfs CoW — shared read-only loop device per base template, per-sandbox sparse CoW file, Firecracker gets `/dev/mapper/wrenn-{id}` @@ -218,19 +221,15 @@ To add a new query: add it to the appropriate `.sql` file in `db/queries/` → ` - **Go style**: `gofmt`, `go vet`, `context.Context` everywhere, errors wrapped with `fmt.Errorf("action: %w", err)`, `slog` for logging, no global state - **Naming**: Sandbox IDs `sb-` + 8 hex, API keys `wrn_` + 32 chars, Host IDs `host-` + 8 hex -- **Dependencies**: Use `go get` to add deps, never hand-edit go.mod. For envd deps: `cd envd && go get ...` (separate module) +- **Dependencies**: Use `go get` to add Go deps, never hand-edit go.mod. For envd-rs deps: edit `envd-rs/Cargo.toml` - **Generated code**: Always commit generated code (proto stubs, sqlc). Never add generated code to .gitignore - **Migrations**: Always use `make migrate-create name=xxx`, never create migration files manually - **Testing**: Table-driven tests for handlers and state machine transitions -### Two-module gotcha - -The main module (`go.mod`) and envd (`envd/go.mod`) are fully independent. `make tidy`, `make fmt`, `make vet` already operate on both. But when adding dependencies manually, remember to target the correct module (`cd envd && go get ...` for envd deps). `make proto` also generates stubs for both modules from the same proto sources. - ## Rootfs & Guest Init - **wrenn-init** (`images/wrenn-init.sh`): the PID 1 init script baked into every rootfs. Mounts virtual filesystems, sets hostname, writes `/etc/resolv.conf`, then execs envd. -- **Updating the rootfs** after changing envd or wrenn-init: `bash scripts/update-debug-rootfs.sh [rootfs_path]`. This builds envd via `make build-envd`, mounts the rootfs image, copies in the new binaries, and unmounts. Defaults to `/var/lib/wrenn/images/minimal.ext4`. +- **Updating the rootfs** after changing envd or wrenn-init: `bash scripts/update-minimal-rootfs.sh`. This builds envd via `make build-envd` (Rust → static musl binary), mounts the rootfs image, copies in the new binaries, and unmounts. Defaults to `/var/lib/wrenn/images/minimal.ext4`. - Rootfs images are minimal debootstrap — no systemd, no coreutils beyond busybox. Use `/bin/sh -c` for shell builtins inside the guest. ## Fixed Paths (on host machine) diff --git a/Makefile b/Makefile index 0ff478b..41b9251 100644 --- a/Makefile +++ b/Makefile @@ -3,17 +3,15 @@ # ═══════════════════════════════════════════════════ DATABASE_URL ?= postgres://wrenn:wrenn@localhost:5432/wrenn?sslmode=disable BIN_DIR := $(shell pwd)/builds -ENVD_DIR := envd COMMIT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") VERSION_CP := $(shell cat VERSION_CP 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") VERSION_AGENT := $(shell cat VERSION_AGENT 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") -VERSION_ENVD := $(shell cat envd/VERSION 2>/dev/null | tr -d '[:space:]' || echo "0.0.0-dev") LDFLAGS := -s -w # ═══════════════════════════════════════════════════ # Build # ═══════════════════════════════════════════════════ -.PHONY: build build-cp build-agent build-envd build-envd-go build-frontend +.PHONY: build build-cp build-agent build-envd build-frontend build: build-cp build-agent build-envd @@ -29,12 +27,8 @@ build-agent: build-envd: cd envd-rs && ENVD_COMMIT=$(COMMIT) cargo build --release --target x86_64-unknown-linux-musl @cp envd-rs/target/x86_64-unknown-linux-musl/release/envd $(BIN_DIR)/envd - -build-envd-go: - cd $(ENVD_DIR) && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ - go build -ldflags="$(LDFLAGS) -X main.Version=$(VERSION_ENVD) -X main.commitSHA=$(COMMIT)" -o $(BIN_DIR)/envd-go . - @file $(BIN_DIR)/envd-go | grep -q "statically linked" || \ - (echo "ERROR: envd-go is not statically linked!" && exit 1) + @file $(BIN_DIR)/envd | grep -q "statically linked" || \ + (echo "ERROR: envd is not statically linked!" && exit 1) # ═══════════════════════════════════════════════════ # Development @@ -66,10 +60,6 @@ dev-frontend: dev-envd: cd envd-rs && cargo run -- --isnotfc --port 49983 -dev-envd-go: - cd $(ENVD_DIR) && go run . --debug --listen-tcp :3002 - - # ═══════════════════════════════════════════════════ # Database (goose) # ═══════════════════════════════════════════════════ @@ -101,7 +91,6 @@ generate: proto sqlc proto: cd proto/envd && buf generate cd proto/hostagent && buf generate - cd $(ENVD_DIR)/spec && buf generate sqlc: sqlc generate @@ -113,14 +102,12 @@ sqlc: fmt: gofmt -w . - cd $(ENVD_DIR) && gofmt -w . lint: golangci-lint run ./... vet: go vet ./... - cd $(ENVD_DIR) && go vet ./... test: go test -race -v ./internal/... @@ -132,7 +119,6 @@ test-all: test test-integration tidy: go mod tidy - cd $(ENVD_DIR) && go mod tidy ## Run all quality checks in CI order check: fmt vet lint test @@ -174,7 +160,6 @@ install: build clean: rm -rf builds/ - cd $(ENVD_DIR) && rm -f envd cd envd-rs && cargo clean # ═══════════════════════════════════════════════════ @@ -191,13 +176,11 @@ help: @echo " make dev-cp Control plane (hot reload if air installed)" @echo " make dev-frontend Vite dev server with HMR (port 5173)" @echo " make dev-agent Host agent (sudo required)" - @echo " make dev-envd envd Rust (--isnotfc, port 49983)" - @echo " make dev-envd-go envd Go (TCP debug mode)" + @echo " make dev-envd envd in debug mode (--isnotfc, port 49983)" @echo "" @echo " make build Build all binaries → builds/" @echo " make build-frontend Build SvelteKit dashboard → frontend/build/" @echo " make build-envd Build envd static binary (Rust, musl)" - @echo " make build-envd-go Build envd Go binary" @echo "" @echo " make migrate-up Apply migrations" @echo " make migrate-create name=xxx New migration" diff --git a/NOTICE b/NOTICE deleted file mode 100644 index ecf2c96..0000000 --- a/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Wrenn Sandbox -Copyright (c) 2026 M/S Omukk, Bangladesh - -This project includes software derived from the following project: - -Project: e2b infra -Repository: https://github.com/e2b-dev/infra - -The following files and directories in this repository contain code derived from the above project: - -- envd/ -- proto/envd/*.proto -- internal/snapshot/ -- internal/uffd/ - -Modifications to this code were made by M/S Omukk. - -Copyright (c) 2023 FoundryLabs, Inc. -Modifications Copyright (c) 2026 M/S Omukk, Bangladesh diff --git a/README.md b/README.md index 765be5a..011e5d1 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ Secure infrastructure for AI - Firecracker binary at `/usr/local/bin/firecracker` - PostgreSQL - Go 1.25+ +- Rust 1.88+ with `x86_64-unknown-linux-musl` target (`rustup target add x86_64-unknown-linux-musl`) - pnpm (for frontend) - Docker (for dev infra and rootfs builds) diff --git a/envd-rs/README.md b/envd-rs/README.md index a0385b3..3a82d2d 100644 --- a/envd-rs/README.md +++ b/envd-rs/README.md @@ -103,7 +103,7 @@ src/ ├── state.rs # Shared AppState ├── config.rs # Constants ├── conntracker.rs # TCP connection tracking for snapshot/restore -├─��� execcontext.rs # Default user/workdir/env +├── execcontext.rs # Default user/workdir/env ├── logging.rs # tracing-subscriber (JSON or pretty) ├── util.rs # AtomicMax ├── auth/ # Token, signing, middleware diff --git a/envd/LICENSE b/envd/LICENSE deleted file mode 100644 index 00c83da..0000000 --- a/envd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2023 FoundryLabs, Inc. - Modifications Copyright (c) 2026 M/S Omukk, Bangladesh - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/envd/Makefile b/envd/Makefile deleted file mode 100644 index b3af722..0000000 --- a/envd/Makefile +++ /dev/null @@ -1,62 +0,0 @@ -BUILD := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") -LDFLAGS := -s -w -X=main.commitSHA=$(BUILD) -BUILDS := ../builds - -# ═══════════════════════════════════════════════════ -# Build -# ═══════════════════════════════════════════════════ -.PHONY: build build-debug - -build: - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="$(LDFLAGS)" -o $(BUILDS)/envd . - @file $(BUILDS)/envd | grep -q "statically linked" || \ - (echo "ERROR: envd is not statically linked!" && exit 1) - -build-debug: - CGO_ENABLED=1 go build -race -gcflags=all="-N -l" -ldflags="-X=main.commitSHA=$(BUILD)" -o $(BUILDS)/debug/envd . - -# ═══════════════════════════════════════════════════ -# Run (debug mode, not inside a VM) -# ═══════════════════════════════════════════════════ -.PHONY: run-debug - -run-debug: build-debug - $(BUILDS)/debug/envd -isnotfc -port 49983 - -# ═══════════════════════════════════════════════════ -# Code Generation -# ═══════════════════════════════════════════════════ -.PHONY: generate proto openapi - -generate: proto openapi - -proto: - cd spec && buf generate --template buf.gen.yaml - -openapi: - go generate ./internal/api/... - -# ═══════════════════════════════════════════════════ -# Quality -# ═══════════════════════════════════════════════════ -.PHONY: fmt vet test tidy - -fmt: - gofmt -w . - -vet: - go vet ./... - -test: - go test -race -v ./... - -tidy: - go mod tidy - -# ═══════════════════════════════════════════════════ -# Clean -# ═══════════════════════════════════════════════════ -.PHONY: clean - -clean: - rm -f $(BUILDS)/envd $(BUILDS)/debug/envd diff --git a/envd/VERSION b/envd/VERSION deleted file mode 100644 index d917d3e..0000000 --- a/envd/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.2 diff --git a/envd/go.mod b/envd/go.mod deleted file mode 100644 index 200e8ef..0000000 --- a/envd/go.mod +++ /dev/null @@ -1,42 +0,0 @@ -module git.omukk.dev/wrenn/sandbox/envd - -go 1.25.8 - -require ( - connectrpc.com/authn v0.1.0 - connectrpc.com/connect v1.19.1 - connectrpc.com/cors v0.1.0 - github.com/awnumar/memguard v0.23.0 - github.com/creack/pty v1.1.24 - github.com/dchest/uniuri v1.2.0 - github.com/e2b-dev/fsnotify v0.0.1 - github.com/go-chi/chi/v5 v5.2.5 - github.com/google/uuid v1.6.0 - github.com/oapi-codegen/runtime v1.2.0 - github.com/orcaman/concurrent-map/v2 v2.0.1 - github.com/rs/cors v1.11.1 - github.com/rs/zerolog v1.34.0 - github.com/shirou/gopsutil/v4 v4.26.2 - github.com/stretchr/testify v1.11.1 - github.com/txn2/txeh v1.8.0 - golang.org/x/sys v0.43.0 - google.golang.org/protobuf v1.36.11 -) - -require ( - github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/awnumar/memcall v0.4.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ebitengine/purego v0.10.0 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/tklauser/go-sysconf v0.3.16 // indirect - github.com/tklauser/numcpus v0.11.0 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - golang.org/x/crypto v0.50.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/envd/go.sum b/envd/go.sum deleted file mode 100644 index 3e857f5..0000000 --- a/envd/go.sum +++ /dev/null @@ -1,92 +0,0 @@ -connectrpc.com/authn v0.1.0 h1:m5weACjLWwgwcjttvUDyTPICJKw74+p2obBVrf8hT9E= -connectrpc.com/authn v0.1.0/go.mod h1:AwNZK/KYbqaJzRYadTuAaoz6sYQSPdORPqh1TOPIkgY= -connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= -connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= -connectrpc.com/cors v0.1.0 h1:f3gTXJyDZPrDIZCQ567jxfD9PAIpopHiRDnJRt3QuOQ= -connectrpc.com/cors v0.1.0/go.mod h1:v8SJZCPfHtGH1zsm+Ttajpozd4cYIUryl4dFB6QEpfg= -github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= -github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= -github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g= -github.com/awnumar/memcall v0.4.0/go.mod h1:8xOx1YbfyuCg3Fy6TO8DK0kZUua3V42/goA5Ru47E8w= -github.com/awnumar/memguard v0.23.0 h1:sJ3a1/SWlcuKIQ7MV+R9p0Pvo9CWsMbGZvcZQtmc68A= -github.com/awnumar/memguard v0.23.0/go.mod h1:olVofBrsPdITtJ2HgxQKrEYEMyIBAIciVG4wNnZhW9M= -github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= -github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dchest/uniuri v1.2.0 h1:koIcOUdrTIivZgSLhHQvKgqdWZq5d7KdMEWF1Ud6+5g= -github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4kxhkY= -github.com/e2b-dev/fsnotify v0.0.1 h1:7j0I98HD6VehAuK/bcslvW4QDynAULtOuMZtImihjVk= -github.com/e2b-dev/fsnotify v0.0.1/go.mod h1:jAuDjregRrUixKneTRQwPI847nNuPFg3+n5QM/ku/JM= -github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= -github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= -github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/oapi-codegen/runtime v1.2.0 h1:RvKc1CVS1QeKSNzO97FBQbSMZyQ8s6rZd+LpmzwHMP4= -github.com/oapi-codegen/runtime v1.2.0/go.mod h1:Y7ZhmmlE8ikZOmuHRRndiIm7nf3xcVv+YMweKgG1DT0= -github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= -github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= -github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= -github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= -github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI= -github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= -github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= -github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= -github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= -github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= -github.com/txn2/txeh v1.8.0 h1:G1vZgom6+P/xWwU53AMOpcZgC5ni382ukcPP1TDVYHk= -github.com/txn2/txeh v1.8.0/go.mod h1:rRI3Egi3+AFmEXQjft051YdYbxeCT3nFmBLsNCZZaxM= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= -golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= -golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= -google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= -pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/envd/internal/api/api.gen.go b/envd/internal/api/api.gen.go deleted file mode 100644 index 257326d..0000000 --- a/envd/internal/api/api.gen.go +++ /dev/null @@ -1,604 +0,0 @@ -// Package api provides primitives to interact with the openapi HTTP API. -// -// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.6.0 DO NOT EDIT. -package api - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi/v5" - "github.com/oapi-codegen/runtime" - openapi_types "github.com/oapi-codegen/runtime/types" -) - -const ( - AccessTokenAuthScopes = "AccessTokenAuth.Scopes" -) - -// Defines values for EntryInfoType. -const ( - File EntryInfoType = "file" -) - -// Valid indicates whether the value is a known member of the EntryInfoType enum. -func (e EntryInfoType) Valid() bool { - switch e { - case File: - return true - default: - return false - } -} - -// EntryInfo defines model for EntryInfo. -type EntryInfo struct { - // Name Name of the file - Name string `json:"name"` - - // Path Path to the file - Path string `json:"path"` - - // Type Type of the file - Type EntryInfoType `json:"type"` -} - -// EntryInfoType Type of the file -type EntryInfoType string - -// EnvVars Environment variables to set -type EnvVars map[string]string - -// Error defines model for Error. -type Error struct { - // Code Error code - Code int `json:"code"` - - // Message Error message - Message string `json:"message"` -} - -// Metrics Resource usage metrics -type Metrics struct { - // CpuCount Number of CPU cores - CpuCount *int `json:"cpu_count,omitempty"` - - // CpuUsedPct CPU usage percentage - CpuUsedPct *float32 `json:"cpu_used_pct,omitempty"` - - // DiskTotal Total disk space in bytes - DiskTotal *int `json:"disk_total,omitempty"` - - // DiskUsed Used disk space in bytes - DiskUsed *int `json:"disk_used,omitempty"` - - // MemTotal Total virtual memory in bytes - MemTotal *int `json:"mem_total,omitempty"` - - // MemUsed Used virtual memory in bytes - MemUsed *int `json:"mem_used,omitempty"` - - // Ts Unix timestamp in UTC for current sandbox time - Ts *int64 `json:"ts,omitempty"` -} - -// VolumeMount Volume -type VolumeMount struct { - NfsTarget string `json:"nfs_target"` - Path string `json:"path"` -} - -// FilePath defines model for FilePath. -type FilePath = string - -// Signature defines model for Signature. -type Signature = string - -// SignatureExpiration defines model for SignatureExpiration. -type SignatureExpiration = int - -// User defines model for User. -type User = string - -// FileNotFound defines model for FileNotFound. -type FileNotFound = Error - -// InternalServerError defines model for InternalServerError. -type InternalServerError = Error - -// InvalidPath defines model for InvalidPath. -type InvalidPath = Error - -// InvalidUser defines model for InvalidUser. -type InvalidUser = Error - -// NotEnoughDiskSpace defines model for NotEnoughDiskSpace. -type NotEnoughDiskSpace = Error - -// UploadSuccess defines model for UploadSuccess. -type UploadSuccess = []EntryInfo - -// GetFilesParams defines parameters for GetFiles. -type GetFilesParams struct { - // Path Path to the file, URL encoded. Can be relative to user's home directory. - Path *FilePath `form:"path,omitempty" json:"path,omitempty"` - - // Username User used for setting the owner, or resolving relative paths. - Username *User `form:"username,omitempty" json:"username,omitempty"` - - // Signature Signature used for file access permission verification. - Signature *Signature `form:"signature,omitempty" json:"signature,omitempty"` - - // SignatureExpiration Signature expiration used for defining the expiration time of the signature. - SignatureExpiration *SignatureExpiration `form:"signature_expiration,omitempty" json:"signature_expiration,omitempty"` -} - -// PostFilesMultipartBody defines parameters for PostFiles. -type PostFilesMultipartBody struct { - File *openapi_types.File `json:"file,omitempty"` -} - -// PostFilesParams defines parameters for PostFiles. -type PostFilesParams struct { - // Path Path to the file, URL encoded. Can be relative to user's home directory. - Path *FilePath `form:"path,omitempty" json:"path,omitempty"` - - // Username User used for setting the owner, or resolving relative paths. - Username *User `form:"username,omitempty" json:"username,omitempty"` - - // Signature Signature used for file access permission verification. - Signature *Signature `form:"signature,omitempty" json:"signature,omitempty"` - - // SignatureExpiration Signature expiration used for defining the expiration time of the signature. - SignatureExpiration *SignatureExpiration `form:"signature_expiration,omitempty" json:"signature_expiration,omitempty"` -} - -// PostInitJSONBody defines parameters for PostInit. -type PostInitJSONBody struct { - // AccessToken Access token for secure access to envd service - AccessToken *SecureToken `json:"accessToken,omitempty"` - - // DefaultUser The default user to use for operations - DefaultUser *string `json:"defaultUser,omitempty"` - - // DefaultWorkdir The default working directory to use for operations - DefaultWorkdir *string `json:"defaultWorkdir,omitempty"` - - // EnvVars Environment variables to set - EnvVars *EnvVars `json:"envVars,omitempty"` - - // HyperloopIP IP address of the hyperloop server to connect to - HyperloopIP *string `json:"hyperloopIP,omitempty"` - - // Timestamp The current timestamp in RFC3339 format - Timestamp *time.Time `json:"timestamp,omitempty"` - VolumeMounts *[]VolumeMount `json:"volumeMounts,omitempty"` -} - -// PostFilesMultipartRequestBody defines body for PostFiles for multipart/form-data ContentType. -type PostFilesMultipartRequestBody PostFilesMultipartBody - -// PostInitJSONRequestBody defines body for PostInit for application/json ContentType. -type PostInitJSONRequestBody PostInitJSONBody - -// ServerInterface represents all server handlers. -type ServerInterface interface { - // Get the environment variables - // (GET /envs) - GetEnvs(w http.ResponseWriter, r *http.Request) - // Download a file - // (GET /files) - GetFiles(w http.ResponseWriter, r *http.Request, params GetFilesParams) - // Upload a file and ensure the parent directories exist. If the file exists, it will be overwritten. - // (POST /files) - PostFiles(w http.ResponseWriter, r *http.Request, params PostFilesParams) - // Check the health of the service - // (GET /health) - GetHealth(w http.ResponseWriter, r *http.Request) - // Set initial vars, ensure the time and metadata is synced with the host - // (POST /init) - PostInit(w http.ResponseWriter, r *http.Request) - // Get the stats of the service - // (GET /metrics) - GetMetrics(w http.ResponseWriter, r *http.Request) - // Quiesce continuous goroutines before Firecracker snapshot - // (POST /snapshot/prepare) - PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) -} - -// Unimplemented server implementation that returns http.StatusNotImplemented for each endpoint. - -type Unimplemented struct{} - -// Get the environment variables -// (GET /envs) -func (_ Unimplemented) GetEnvs(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Download a file -// (GET /files) -func (_ Unimplemented) GetFiles(w http.ResponseWriter, r *http.Request, params GetFilesParams) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Upload a file and ensure the parent directories exist. If the file exists, it will be overwritten. -// (POST /files) -func (_ Unimplemented) PostFiles(w http.ResponseWriter, r *http.Request, params PostFilesParams) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Check the health of the service -// (GET /health) -func (_ Unimplemented) GetHealth(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Set initial vars, ensure the time and metadata is synced with the host -// (POST /init) -func (_ Unimplemented) PostInit(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Get the stats of the service -// (GET /metrics) -func (_ Unimplemented) GetMetrics(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - -// Quiesce continuous goroutines before Firecracker snapshot -// (POST /snapshot/prepare) -func (_ Unimplemented) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotImplemented) -} - -// ServerInterfaceWrapper converts contexts to parameters. -type ServerInterfaceWrapper struct { - Handler ServerInterface - HandlerMiddlewares []MiddlewareFunc - ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) -} - -type MiddlewareFunc func(http.Handler) http.Handler - -// GetEnvs operation middleware -func (siw *ServerInterfaceWrapper) GetEnvs(w http.ResponseWriter, r *http.Request) { - - ctx := r.Context() - - ctx = context.WithValue(ctx, AccessTokenAuthScopes, []string{}) - - r = r.WithContext(ctx) - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.GetEnvs(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// GetFiles operation middleware -func (siw *ServerInterfaceWrapper) GetFiles(w http.ResponseWriter, r *http.Request) { - - var err error - - ctx := r.Context() - - ctx = context.WithValue(ctx, AccessTokenAuthScopes, []string{}) - - r = r.WithContext(ctx) - - // Parameter object where we will unmarshal all parameters from the context - var params GetFilesParams - - // ------------- Optional query parameter "path" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "path", r.URL.Query(), ¶ms.Path, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "path", Err: err}) - return - } - - // ------------- Optional query parameter "username" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "username", r.URL.Query(), ¶ms.Username, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "username", Err: err}) - return - } - - // ------------- Optional query parameter "signature" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "signature", r.URL.Query(), ¶ms.Signature, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature", Err: err}) - return - } - - // ------------- Optional query parameter "signature_expiration" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration, runtime.BindQueryParameterOptions{Type: "integer", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature_expiration", Err: err}) - return - } - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.GetFiles(w, r, params) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// PostFiles operation middleware -func (siw *ServerInterfaceWrapper) PostFiles(w http.ResponseWriter, r *http.Request) { - - var err error - - ctx := r.Context() - - ctx = context.WithValue(ctx, AccessTokenAuthScopes, []string{}) - - r = r.WithContext(ctx) - - // Parameter object where we will unmarshal all parameters from the context - var params PostFilesParams - - // ------------- Optional query parameter "path" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "path", r.URL.Query(), ¶ms.Path, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "path", Err: err}) - return - } - - // ------------- Optional query parameter "username" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "username", r.URL.Query(), ¶ms.Username, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "username", Err: err}) - return - } - - // ------------- Optional query parameter "signature" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "signature", r.URL.Query(), ¶ms.Signature, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature", Err: err}) - return - } - - // ------------- Optional query parameter "signature_expiration" ------------- - - err = runtime.BindQueryParameterWithOptions("form", true, false, "signature_expiration", r.URL.Query(), ¶ms.SignatureExpiration, runtime.BindQueryParameterOptions{Type: "integer", Format: ""}) - if err != nil { - siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "signature_expiration", Err: err}) - return - } - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.PostFiles(w, r, params) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// GetHealth operation middleware -func (siw *ServerInterfaceWrapper) GetHealth(w http.ResponseWriter, r *http.Request) { - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.GetHealth(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// PostInit operation middleware -func (siw *ServerInterfaceWrapper) PostInit(w http.ResponseWriter, r *http.Request) { - - ctx := r.Context() - - ctx = context.WithValue(ctx, AccessTokenAuthScopes, []string{}) - - r = r.WithContext(ctx) - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.PostInit(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// GetMetrics operation middleware -func (siw *ServerInterfaceWrapper) GetMetrics(w http.ResponseWriter, r *http.Request) { - - ctx := r.Context() - - ctx = context.WithValue(ctx, AccessTokenAuthScopes, []string{}) - - r = r.WithContext(ctx) - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.GetMetrics(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -// PostSnapshotPrepare operation middleware -func (siw *ServerInterfaceWrapper) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { - - handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - siw.Handler.PostSnapshotPrepare(w, r) - })) - - for _, middleware := range siw.HandlerMiddlewares { - handler = middleware(handler) - } - - handler.ServeHTTP(w, r) -} - -type UnescapedCookieParamError struct { - ParamName string - Err error -} - -func (e *UnescapedCookieParamError) Error() string { - return fmt.Sprintf("error unescaping cookie parameter '%s'", e.ParamName) -} - -func (e *UnescapedCookieParamError) Unwrap() error { - return e.Err -} - -type UnmarshalingParamError struct { - ParamName string - Err error -} - -func (e *UnmarshalingParamError) Error() string { - return fmt.Sprintf("Error unmarshaling parameter %s as JSON: %s", e.ParamName, e.Err.Error()) -} - -func (e *UnmarshalingParamError) Unwrap() error { - return e.Err -} - -type RequiredParamError struct { - ParamName string -} - -func (e *RequiredParamError) Error() string { - return fmt.Sprintf("Query argument %s is required, but not found", e.ParamName) -} - -type RequiredHeaderError struct { - ParamName string - Err error -} - -func (e *RequiredHeaderError) Error() string { - return fmt.Sprintf("Header parameter %s is required, but not found", e.ParamName) -} - -func (e *RequiredHeaderError) Unwrap() error { - return e.Err -} - -type InvalidParamFormatError struct { - ParamName string - Err error -} - -func (e *InvalidParamFormatError) Error() string { - return fmt.Sprintf("Invalid format for parameter %s: %s", e.ParamName, e.Err.Error()) -} - -func (e *InvalidParamFormatError) Unwrap() error { - return e.Err -} - -type TooManyValuesForParamError struct { - ParamName string - Count int -} - -func (e *TooManyValuesForParamError) Error() string { - return fmt.Sprintf("Expected one value for %s, got %d", e.ParamName, e.Count) -} - -// Handler creates http.Handler with routing matching OpenAPI spec. -func Handler(si ServerInterface) http.Handler { - return HandlerWithOptions(si, ChiServerOptions{}) -} - -type ChiServerOptions struct { - BaseURL string - BaseRouter chi.Router - Middlewares []MiddlewareFunc - ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) -} - -// HandlerFromMux creates http.Handler with routing matching OpenAPI spec based on the provided mux. -func HandlerFromMux(si ServerInterface, r chi.Router) http.Handler { - return HandlerWithOptions(si, ChiServerOptions{ - BaseRouter: r, - }) -} - -func HandlerFromMuxWithBaseURL(si ServerInterface, r chi.Router, baseURL string) http.Handler { - return HandlerWithOptions(si, ChiServerOptions{ - BaseURL: baseURL, - BaseRouter: r, - }) -} - -// HandlerWithOptions creates http.Handler with additional options -func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handler { - r := options.BaseRouter - - if r == nil { - r = chi.NewRouter() - } - if options.ErrorHandlerFunc == nil { - options.ErrorHandlerFunc = func(w http.ResponseWriter, r *http.Request, err error) { - http.Error(w, err.Error(), http.StatusBadRequest) - } - } - wrapper := ServerInterfaceWrapper{ - Handler: si, - HandlerMiddlewares: options.Middlewares, - ErrorHandlerFunc: options.ErrorHandlerFunc, - } - - r.Group(func(r chi.Router) { - r.Get(options.BaseURL+"/envs", wrapper.GetEnvs) - }) - r.Group(func(r chi.Router) { - r.Get(options.BaseURL+"/files", wrapper.GetFiles) - }) - r.Group(func(r chi.Router) { - r.Post(options.BaseURL+"/files", wrapper.PostFiles) - }) - r.Group(func(r chi.Router) { - r.Get(options.BaseURL+"/health", wrapper.GetHealth) - }) - r.Group(func(r chi.Router) { - r.Post(options.BaseURL+"/init", wrapper.PostInit) - }) - r.Group(func(r chi.Router) { - r.Get(options.BaseURL+"/metrics", wrapper.GetMetrics) - }) - r.Group(func(r chi.Router) { - r.Post(options.BaseURL+"/snapshot/prepare", wrapper.PostSnapshotPrepare) - }) - - return r -} diff --git a/envd/internal/api/auth.go b/envd/internal/api/auth.go deleted file mode 100644 index fc6b97c..0000000 --- a/envd/internal/api/auth.go +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "errors" - "fmt" - "net/http" - "slices" - "strconv" - "strings" - "time" - - "github.com/awnumar/memguard" - - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/keys" -) - -const ( - SigningReadOperation = "read" - SigningWriteOperation = "write" - - accessTokenHeader = "X-Access-Token" -) - -// paths that are always allowed without general authentication -// POST/init is secured via MMDS hash validation instead -var authExcludedPaths = []string{ - "GET/health", - "GET/files", - "POST/files", - "POST/init", - "POST/snapshot/prepare", -} - -func (a *API) WithAuthorization(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if a.accessToken.IsSet() { - authHeader := req.Header.Get(accessTokenHeader) - - // check if this path is allowed without authentication (e.g., health check, endpoints supporting signing) - allowedPath := slices.Contains(authExcludedPaths, req.Method+req.URL.Path) - - if !a.accessToken.Equals(authHeader) && !allowedPath { - a.logger.Error().Msg("Trying to access secured envd without correct access token") - - err := fmt.Errorf("unauthorized access, please provide a valid access token or method signing if supported") - jsonError(w, http.StatusUnauthorized, err) - - return - } - } - - handler.ServeHTTP(w, req) - }) -} - -func (a *API) generateSignature(path string, username string, operation string, signatureExpiration *int64) (string, error) { - tokenBytes, err := a.accessToken.Bytes() - if err != nil { - return "", fmt.Errorf("access token is not set: %w", err) - } - defer memguard.WipeBytes(tokenBytes) - - var signature string - hasher := keys.NewSHA256Hashing() - - if signatureExpiration == nil { - signature = strings.Join([]string{path, operation, username, string(tokenBytes)}, ":") - } else { - signature = strings.Join([]string{path, operation, username, string(tokenBytes), strconv.FormatInt(*signatureExpiration, 10)}, ":") - } - - return fmt.Sprintf("v1_%s", hasher.HashWithoutPrefix([]byte(signature))), nil -} - -func (a *API) validateSigning(r *http.Request, signature *string, signatureExpiration *int, username *string, path string, operation string) (err error) { - var expectedSignature string - - // no need to validate signing key if access token is not set - if !a.accessToken.IsSet() { - return nil - } - - // check if access token is sent in the header - tokenFromHeader := r.Header.Get(accessTokenHeader) - if tokenFromHeader != "" { - if !a.accessToken.Equals(tokenFromHeader) { - return fmt.Errorf("access token present in header but does not match") - } - - return nil - } - - if signature == nil { - return fmt.Errorf("missing signature query parameter") - } - - // Empty string is used when no username is provided and the default user should be used - signatureUsername := "" - if username != nil { - signatureUsername = *username - } - - if signatureExpiration == nil { - expectedSignature, err = a.generateSignature(path, signatureUsername, operation, nil) - } else { - exp := int64(*signatureExpiration) - expectedSignature, err = a.generateSignature(path, signatureUsername, operation, &exp) - } - - if err != nil { - a.logger.Error().Err(err).Msg("error generating signing key") - - return errors.New("invalid signature") - } - - // signature validation - if expectedSignature != *signature { - return fmt.Errorf("invalid signature") - } - - // signature expiration - if signatureExpiration != nil { - exp := int64(*signatureExpiration) - if exp < time.Now().Unix() { - return fmt.Errorf("signature is already expired") - } - } - - return nil -} diff --git a/envd/internal/api/auth_test.go b/envd/internal/api/auth_test.go deleted file mode 100644 index 4e80ec7..0000000 --- a/envd/internal/api/auth_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "fmt" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/keys" -) - -func TestKeyGenerationAlgorithmIsStable(t *testing.T) { - t.Parallel() - apiToken := "secret-access-token" - secureToken := &SecureToken{} - err := secureToken.Set([]byte(apiToken)) - require.NoError(t, err) - api := &API{accessToken: secureToken} - - path := "/path/to/demo.txt" - username := "root" - operation := "write" - timestamp := time.Now().Unix() - - signature, err := api.generateSignature(path, username, operation, ×tamp) - require.NoError(t, err) - assert.NotEmpty(t, signature) - - // locally generated signature - hasher := keys.NewSHA256Hashing() - localSignatureTmp := fmt.Sprintf("%s:%s:%s:%s:%s", path, operation, username, apiToken, strconv.FormatInt(timestamp, 10)) - localSignature := fmt.Sprintf("v1_%s", hasher.HashWithoutPrefix([]byte(localSignatureTmp))) - - assert.Equal(t, localSignature, signature) -} - -func TestKeyGenerationAlgorithmWithoutExpirationIsStable(t *testing.T) { - t.Parallel() - apiToken := "secret-access-token" - secureToken := &SecureToken{} - err := secureToken.Set([]byte(apiToken)) - require.NoError(t, err) - api := &API{accessToken: secureToken} - - path := "/path/to/resource.txt" - username := "user" - operation := "read" - - signature, err := api.generateSignature(path, username, operation, nil) - require.NoError(t, err) - assert.NotEmpty(t, signature) - - // locally generated signature - hasher := keys.NewSHA256Hashing() - localSignatureTmp := fmt.Sprintf("%s:%s:%s:%s", path, operation, username, apiToken) - localSignature := fmt.Sprintf("v1_%s", hasher.HashWithoutPrefix([]byte(localSignatureTmp))) - - assert.Equal(t, localSignature, signature) -} diff --git a/envd/internal/api/cfg.yaml b/envd/internal/api/cfg.yaml deleted file mode 100644 index f72ca5e..0000000 --- a/envd/internal/api/cfg.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 - -# yaml-language-server: $schema=https://raw.githubusercontent.com/deepmap/oapi-codegen/HEAD/configuration-schema.json - -package: api -output: api.gen.go -generate: - models: true - chi-server: true - client: false diff --git a/envd/internal/api/conntracker.go b/envd/internal/api/conntracker.go deleted file mode 100644 index cc3750e..0000000 --- a/envd/internal/api/conntracker.go +++ /dev/null @@ -1,94 +0,0 @@ -package api - -import ( - "net" - "net/http" - "sync" -) - -// ServerConnTracker tracks active HTTP connections via http.Server.ConnState. -// Before a Firecracker snapshot, it closes idle connections, disables -// keep-alives, and records which connections existed pre-snapshot. After -// restore, it closes ALL pre-snapshot connections (they are zombie TCP -// sockets) while leaving post-restore connections (like the /init request) -// untouched. -type ServerConnTracker struct { - mu sync.Mutex - conns map[net.Conn]http.ConnState - preSnapshot map[net.Conn]struct{} - srv *http.Server -} - -func NewServerConnTracker() *ServerConnTracker { - return &ServerConnTracker{ - conns: make(map[net.Conn]http.ConnState), - } -} - -// SetServer stores a reference to the http.Server for keep-alive control. -// Must be called before ListenAndServe. -func (t *ServerConnTracker) SetServer(srv *http.Server) { - t.mu.Lock() - t.srv = srv - t.mu.Unlock() -} - -// Track implements the http.Server.ConnState callback signature. -func (t *ServerConnTracker) Track(conn net.Conn, state http.ConnState) { - t.mu.Lock() - defer t.mu.Unlock() - switch state { - case http.StateNew, http.StateActive, http.StateIdle: - t.conns[conn] = state - case http.StateHijacked, http.StateClosed: - delete(t.conns, conn) - delete(t.preSnapshot, conn) - } -} - -// PrepareForSnapshot closes idle connections, disables keep-alives, and -// records all remaining active connections. After the response completes -// (with keep-alives disabled, the connection closes), RestoreAfterSnapshot -// will close any that survived into the snapshot as zombie TCP sockets. -// -// GC is handled by PostSnapshotPrepare after this returns. -func (t *ServerConnTracker) PrepareForSnapshot() { - t.mu.Lock() - defer t.mu.Unlock() - - if t.srv != nil { - t.srv.SetKeepAlivesEnabled(false) - } - - t.preSnapshot = make(map[net.Conn]struct{}, len(t.conns)) - for conn, state := range t.conns { - if state == http.StateIdle { - conn.Close() - delete(t.conns, conn) - } else { - t.preSnapshot[conn] = struct{}{} - } - } -} - -// RestoreAfterSnapshot closes ALL pre-snapshot connections (zombie TCP -// sockets after restore) and re-enables keep-alives. Post-restore -// connections (like the /init request that triggers this call) are not -// in the preSnapshot set and are left untouched. -// -// Safe to call on first boot — preSnapshot is nil, so this is a no-op -// aside from enabling keep-alives (which are already enabled by default). -func (t *ServerConnTracker) RestoreAfterSnapshot() { - t.mu.Lock() - defer t.mu.Unlock() - - for conn := range t.preSnapshot { - conn.Close() - delete(t.conns, conn) - } - t.preSnapshot = nil - - if t.srv != nil { - t.srv.SetKeepAlivesEnabled(true) - } -} diff --git a/envd/internal/api/download.go b/envd/internal/api/download.go deleted file mode 100644 index 0a2119e..0000000 --- a/envd/internal/api/download.go +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "mime" - "net/http" - "os" - "os/user" - "path/filepath" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" -) - -func (a *API) GetFiles(w http.ResponseWriter, r *http.Request, params GetFilesParams) { - defer r.Body.Close() - - var errorCode int - var errMsg error - - var path string - if params.Path != nil { - path = *params.Path - } - - operationID := logs.AssignOperationID() - - // signing authorization if needed - err := a.validateSigning(r, params.Signature, params.SignatureExpiration, params.Username, path, SigningReadOperation) - if err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("error during auth validation") - jsonError(w, http.StatusUnauthorized, err) - - return - } - - username, err := execcontext.ResolveDefaultUsername(params.Username, a.defaults.User) - if err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("no user specified") - jsonError(w, http.StatusBadRequest, err) - - return - } - - defer func() { - l := a.logger. - Err(errMsg). - Str("method", r.Method+" "+r.URL.Path). - Str(string(logs.OperationIDKey), operationID). - Str("path", path). - Str("username", username) - - if errMsg != nil { - l = l.Int("error_code", errorCode) - } - - l.Msg("File read") - }() - - u, err := user.Lookup(username) - if err != nil { - errMsg = fmt.Errorf("error looking up user '%s': %w", username, err) - errorCode = http.StatusUnauthorized - jsonError(w, errorCode, errMsg) - - return - } - - resolvedPath, err := permissions.ExpandAndResolve(path, u, a.defaults.Workdir) - if err != nil { - errMsg = fmt.Errorf("error expanding and resolving path '%s': %w", path, err) - errorCode = http.StatusBadRequest - jsonError(w, errorCode, errMsg) - - return - } - - stat, err := os.Stat(resolvedPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - errMsg = fmt.Errorf("path '%s' does not exist", resolvedPath) - errorCode = http.StatusNotFound - jsonError(w, errorCode, errMsg) - - return - } - - errMsg = fmt.Errorf("error checking if path exists '%s': %w", resolvedPath, err) - errorCode = http.StatusInternalServerError - jsonError(w, errorCode, errMsg) - - return - } - - if stat.IsDir() { - errMsg = fmt.Errorf("path '%s' is a directory", resolvedPath) - errorCode = http.StatusBadRequest - jsonError(w, errorCode, errMsg) - - return - } - - // Reject anything that isn't a regular file (devices, pipes, sockets, etc.). - // Reading device files like /dev/zero or /dev/urandom produces infinite data - // and will exhaust memory on all layers of the stack. - if !stat.Mode().IsRegular() { - errMsg = fmt.Errorf("path '%s' is not a regular file", resolvedPath) - errorCode = http.StatusBadRequest - jsonError(w, errorCode, errMsg) - - return - } - - // Validate Accept-Encoding header - encoding, err := parseAcceptEncoding(r) - if err != nil { - errMsg = fmt.Errorf("error parsing Accept-Encoding: %w", err) - errorCode = http.StatusNotAcceptable - jsonError(w, errorCode, errMsg) - - return - } - - // Tell caches to store separate variants for different Accept-Encoding values - w.Header().Set("Vary", "Accept-Encoding") - - // Fall back to identity for Range or conditional requests to preserve http.ServeContent - // behavior (206 Partial Content, 304 Not Modified). However, we must check if identity - // is acceptable per the Accept-Encoding header. - hasRangeOrConditional := r.Header.Get("Range") != "" || - r.Header.Get("If-Modified-Since") != "" || - r.Header.Get("If-None-Match") != "" || - r.Header.Get("If-Range") != "" - if hasRangeOrConditional { - if !isIdentityAcceptable(r) { - errMsg = fmt.Errorf("identity encoding not acceptable for Range or conditional request") - errorCode = http.StatusNotAcceptable - jsonError(w, errorCode, errMsg) - - return - } - encoding = EncodingIdentity - } - - file, err := os.Open(resolvedPath) - if err != nil { - errMsg = fmt.Errorf("error opening file '%s': %w", resolvedPath, err) - errorCode = http.StatusInternalServerError - jsonError(w, errorCode, errMsg) - - return - } - defer file.Close() - - w.Header().Set("Content-Disposition", mime.FormatMediaType("inline", map[string]string{"filename": filepath.Base(resolvedPath)})) - - // Serve with gzip encoding if requested. - if encoding == EncodingGzip { - w.Header().Set("Content-Encoding", EncodingGzip) - - // Set Content-Type based on file extension, preserving the original type - contentType := mime.TypeByExtension(filepath.Ext(path)) - if contentType == "" { - contentType = "application/octet-stream" - } - w.Header().Set("Content-Type", contentType) - - gw := gzip.NewWriter(w) - defer gw.Close() - - _, err = io.Copy(gw, file) - if err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("error writing gzip response") - } - - return - } - - http.ServeContent(w, r, path, stat.ModTime(), file) -} diff --git a/envd/internal/api/download_test.go b/envd/internal/api/download_test.go deleted file mode 100644 index fc01573..0000000 --- a/envd/internal/api/download_test.go +++ /dev/null @@ -1,405 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "bytes" - "compress/gzip" - "context" - "io" - "mime/multipart" - "net/http" - "net/http/httptest" - "net/url" - "os" - "os/user" - "path/filepath" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -func TestGetFilesContentDisposition(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - tests := []struct { - name string - filename string - expectedHeader string - }{ - { - name: "simple filename", - filename: "test.txt", - expectedHeader: `inline; filename=test.txt`, - }, - { - name: "filename with extension", - filename: "presentation.pptx", - expectedHeader: `inline; filename=presentation.pptx`, - }, - { - name: "filename with multiple dots", - filename: "archive.tar.gz", - expectedHeader: `inline; filename=archive.tar.gz`, - }, - { - name: "filename with spaces", - filename: "my document.pdf", - expectedHeader: `inline; filename="my document.pdf"`, - }, - { - name: "filename with quotes", - filename: `file"name.txt`, - expectedHeader: `inline; filename="file\"name.txt"`, - }, - { - name: "filename with backslash", - filename: `file\name.txt`, - expectedHeader: `inline; filename="file\\name.txt"`, - }, - { - name: "unicode filename", - filename: "\u6587\u6863.pdf", // 文档.pdf in Chinese - expectedHeader: "inline; filename*=utf-8''%E6%96%87%E6%A1%A3.pdf", - }, - { - name: "dotfile preserved", - filename: ".env", - expectedHeader: `inline; filename=.env`, - }, - { - name: "dotfile with extension preserved", - filename: ".gitignore", - expectedHeader: `inline; filename=.gitignore`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - // Create a temp directory and file - tempDir := t.TempDir() - tempFile := filepath.Join(tempDir, tt.filename) - err := os.WriteFile(tempFile, []byte("test content"), 0o644) - require.NoError(t, err) - - // Create test API - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - // Create request and response recorder - req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) - w := httptest.NewRecorder() - - // Call the handler - params := GetFilesParams{ - Path: &tempFile, - Username: ¤tUser.Username, - } - api.GetFiles(w, req, params) - - // Check response - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - - // Verify Content-Disposition header - contentDisposition := resp.Header.Get("Content-Disposition") - assert.Equal(t, tt.expectedHeader, contentDisposition, "Content-Disposition header should be set with correct filename") - }) - } -} - -func TestGetFilesContentDispositionWithNestedPath(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - // Create a temp directory with nested structure - tempDir := t.TempDir() - nestedDir := filepath.Join(tempDir, "subdir", "another") - err = os.MkdirAll(nestedDir, 0o755) - require.NoError(t, err) - - filename := "document.pdf" - tempFile := filepath.Join(nestedDir, filename) - err = os.WriteFile(tempFile, []byte("test content"), 0o644) - require.NoError(t, err) - - // Create test API - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - // Create request and response recorder - req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) - w := httptest.NewRecorder() - - // Call the handler - params := GetFilesParams{ - Path: &tempFile, - Username: ¤tUser.Username, - } - api.GetFiles(w, req, params) - - // Check response - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - - // Verify Content-Disposition header uses only the base filename, not the full path - contentDisposition := resp.Header.Get("Content-Disposition") - assert.Equal(t, `inline; filename=document.pdf`, contentDisposition, "Content-Disposition should contain only the filename, not the path") -} - -func TestGetFiles_GzipEncoding_ExplicitIdentityOffWithRange(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - // Create a temp directory with a test file - tempDir := t.TempDir() - filename := "document.pdf" - tempFile := filepath.Join(tempDir, filename) - err = os.WriteFile(tempFile, []byte("test content"), 0o644) - require.NoError(t, err) - - // Create test API - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - // Create request and response recorder - req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) - req.Header.Set("Accept-Encoding", "gzip; q=1,*; q=0") - req.Header.Set("Range", "bytes=0-4") // Request first 5 bytes - w := httptest.NewRecorder() - - // Call the handler - params := GetFilesParams{ - Path: &tempFile, - Username: ¤tUser.Username, - } - api.GetFiles(w, req, params) - - // Check response - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, http.StatusNotAcceptable, resp.StatusCode) -} - -func TestGetFiles_GzipDownload(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - originalContent := []byte("hello world, this is a test file for gzip compression") - - // Create a temp file with known content - tempDir := t.TempDir() - tempFile := filepath.Join(tempDir, "test.txt") - err = os.WriteFile(tempFile, originalContent, 0o644) - require.NoError(t, err) - - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - req := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(tempFile), nil) - req.Header.Set("Accept-Encoding", "gzip") - w := httptest.NewRecorder() - - params := GetFilesParams{ - Path: &tempFile, - Username: ¤tUser.Username, - } - api.GetFiles(w, req, params) - - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Equal(t, "gzip", resp.Header.Get("Content-Encoding")) - assert.Equal(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type")) - - // Decompress the gzip response body - gzReader, err := gzip.NewReader(resp.Body) - require.NoError(t, err) - defer gzReader.Close() - - decompressed, err := io.ReadAll(gzReader) - require.NoError(t, err) - - assert.Equal(t, originalContent, decompressed) -} - -func TestPostFiles_GzipUpload(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - originalContent := []byte("hello world, this is a test file uploaded with gzip") - - // Build a multipart body - var multipartBuf bytes.Buffer - mpWriter := multipart.NewWriter(&multipartBuf) - part, err := mpWriter.CreateFormFile("file", "uploaded.txt") - require.NoError(t, err) - _, err = part.Write(originalContent) - require.NoError(t, err) - err = mpWriter.Close() - require.NoError(t, err) - - // Gzip-compress the entire multipart body - var gzBuf bytes.Buffer - gzWriter := gzip.NewWriter(&gzBuf) - _, err = gzWriter.Write(multipartBuf.Bytes()) - require.NoError(t, err) - err = gzWriter.Close() - require.NoError(t, err) - - // Create test API - tempDir := t.TempDir() - destPath := filepath.Join(tempDir, "uploaded.txt") - - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - req := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) - req.Header.Set("Content-Type", mpWriter.FormDataContentType()) - req.Header.Set("Content-Encoding", "gzip") - w := httptest.NewRecorder() - - params := PostFilesParams{ - Path: &destPath, - Username: ¤tUser.Username, - } - api.PostFiles(w, req, params) - - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - - // Verify the file was written with the original (decompressed) content - data, err := os.ReadFile(destPath) - require.NoError(t, err) - assert.Equal(t, originalContent, data) -} - -func TestGzipUploadThenGzipDownload(t *testing.T) { - t.Parallel() - - currentUser, err := user.Current() - require.NoError(t, err) - - originalContent := []byte("round-trip gzip test: upload compressed, download compressed, verify match") - - // --- Upload with gzip --- - - // Build a multipart body - var multipartBuf bytes.Buffer - mpWriter := multipart.NewWriter(&multipartBuf) - part, err := mpWriter.CreateFormFile("file", "roundtrip.txt") - require.NoError(t, err) - _, err = part.Write(originalContent) - require.NoError(t, err) - err = mpWriter.Close() - require.NoError(t, err) - - // Gzip-compress the entire multipart body - var gzBuf bytes.Buffer - gzWriter := gzip.NewWriter(&gzBuf) - _, err = gzWriter.Write(multipartBuf.Bytes()) - require.NoError(t, err) - err = gzWriter.Close() - require.NoError(t, err) - - tempDir := t.TempDir() - destPath := filepath.Join(tempDir, "roundtrip.txt") - - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - User: currentUser.Username, - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - - uploadReq := httptest.NewRequest(http.MethodPost, "/files?path="+url.QueryEscape(destPath), &gzBuf) - uploadReq.Header.Set("Content-Type", mpWriter.FormDataContentType()) - uploadReq.Header.Set("Content-Encoding", "gzip") - uploadW := httptest.NewRecorder() - - uploadParams := PostFilesParams{ - Path: &destPath, - Username: ¤tUser.Username, - } - api.PostFiles(uploadW, uploadReq, uploadParams) - - uploadResp := uploadW.Result() - defer uploadResp.Body.Close() - - require.Equal(t, http.StatusOK, uploadResp.StatusCode) - - // --- Download with gzip --- - - downloadReq := httptest.NewRequest(http.MethodGet, "/files?path="+url.QueryEscape(destPath), nil) - downloadReq.Header.Set("Accept-Encoding", "gzip") - downloadW := httptest.NewRecorder() - - downloadParams := GetFilesParams{ - Path: &destPath, - Username: ¤tUser.Username, - } - api.GetFiles(downloadW, downloadReq, downloadParams) - - downloadResp := downloadW.Result() - defer downloadResp.Body.Close() - - require.Equal(t, http.StatusOK, downloadResp.StatusCode) - assert.Equal(t, "gzip", downloadResp.Header.Get("Content-Encoding")) - - // Decompress and verify content matches original - gzReader, err := gzip.NewReader(downloadResp.Body) - require.NoError(t, err) - defer gzReader.Close() - - decompressed, err := io.ReadAll(gzReader) - require.NoError(t, err) - - assert.Equal(t, originalContent, decompressed) -} diff --git a/envd/internal/api/encoding.go b/envd/internal/api/encoding.go deleted file mode 100644 index d324c1c..0000000 --- a/envd/internal/api/encoding.go +++ /dev/null @@ -1,229 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "compress/gzip" - "fmt" - "io" - "net/http" - "slices" - "sort" - "strconv" - "strings" -) - -const ( - // EncodingGzip is the gzip content encoding. - EncodingGzip = "gzip" - // EncodingIdentity means no encoding (passthrough). - EncodingIdentity = "identity" - // EncodingWildcard means any encoding is acceptable. - EncodingWildcard = "*" -) - -// SupportedEncodings lists the content encodings supported for file transfer. -// The order matters - encodings are checked in order of preference. -var SupportedEncodings = []string{ - EncodingGzip, -} - -// encodingWithQuality holds an encoding name and its quality value. -type encodingWithQuality struct { - encoding string - quality float64 -} - -// isSupportedEncoding checks if the given encoding is in the supported list. -// Per RFC 7231, content-coding values are case-insensitive. -func isSupportedEncoding(encoding string) bool { - return slices.Contains(SupportedEncodings, strings.ToLower(encoding)) -} - -// parseEncodingWithQuality parses an encoding value and extracts the quality. -// Returns the encoding name (lowercased) and quality value (default 1.0 if not specified). -// Per RFC 7231, content-coding values are case-insensitive. -func parseEncodingWithQuality(value string) encodingWithQuality { - value = strings.TrimSpace(value) - quality := 1.0 - - if idx := strings.Index(value, ";"); idx != -1 { - params := value[idx+1:] - value = strings.TrimSpace(value[:idx]) - - // Parse q=X.X parameter - for param := range strings.SplitSeq(params, ";") { - param = strings.TrimSpace(param) - if strings.HasPrefix(strings.ToLower(param), "q=") { - if q, err := strconv.ParseFloat(param[2:], 64); err == nil { - quality = q - } - } - } - } - - // Normalize encoding to lowercase per RFC 7231 - return encodingWithQuality{encoding: strings.ToLower(value), quality: quality} -} - -// parseEncoding extracts the encoding name from a header value, stripping quality. -func parseEncoding(value string) string { - return parseEncodingWithQuality(value).encoding -} - -// parseContentEncoding parses the Content-Encoding header and returns the encoding. -// Returns an error if an unsupported encoding is specified. -// If no Content-Encoding header is present, returns empty string. -func parseContentEncoding(r *http.Request) (string, error) { - header := r.Header.Get("Content-Encoding") - if header == "" { - return EncodingIdentity, nil - } - - encoding := parseEncoding(header) - - if encoding == EncodingIdentity { - return EncodingIdentity, nil - } - - if !isSupportedEncoding(encoding) { - return "", fmt.Errorf("unsupported Content-Encoding: %s, supported: %v", header, SupportedEncodings) - } - - return encoding, nil -} - -// parseAcceptEncodingHeader parses the Accept-Encoding header and returns -// the parsed encodings along with the identity rejection state. -// Per RFC 7231 Section 5.3.4, identity is acceptable unless excluded by -// "identity;q=0" or "*;q=0" without a more specific entry for identity with q>0. -func parseAcceptEncodingHeader(header string) ([]encodingWithQuality, bool) { - if header == "" { - return nil, false // identity not rejected when header is empty - } - - // Parse all encodings with their quality values - var encodings []encodingWithQuality - for value := range strings.SplitSeq(header, ",") { - eq := parseEncodingWithQuality(value) - encodings = append(encodings, eq) - } - - // Check if identity is rejected per RFC 7231 Section 5.3.4: - // identity is acceptable unless excluded by "identity;q=0" or "*;q=0" - // without a more specific entry for identity with q>0. - identityRejected := false - identityExplicitlyAccepted := false - wildcardRejected := false - - for _, eq := range encodings { - switch eq.encoding { - case EncodingIdentity: - if eq.quality == 0 { - identityRejected = true - } else { - identityExplicitlyAccepted = true - } - case EncodingWildcard: - if eq.quality == 0 { - wildcardRejected = true - } - } - } - - if wildcardRejected && !identityExplicitlyAccepted { - identityRejected = true - } - - return encodings, identityRejected -} - -// isIdentityAcceptable checks if identity encoding is acceptable based on the -// Accept-Encoding header. Per RFC 7231 section 5.3.4, identity is always -// implicitly acceptable unless explicitly rejected with q=0. -func isIdentityAcceptable(r *http.Request) bool { - header := r.Header.Get("Accept-Encoding") - _, identityRejected := parseAcceptEncodingHeader(header) - - return !identityRejected -} - -// parseAcceptEncoding parses the Accept-Encoding header and returns the best -// supported encoding based on quality values. Per RFC 7231 section 5.3.4, -// identity is always implicitly acceptable unless explicitly rejected with q=0. -// If no Accept-Encoding header is present, returns empty string (identity). -func parseAcceptEncoding(r *http.Request) (string, error) { - header := r.Header.Get("Accept-Encoding") - if header == "" { - return EncodingIdentity, nil - } - - encodings, identityRejected := parseAcceptEncodingHeader(header) - - // Sort by quality value (highest first) - sort.Slice(encodings, func(i, j int) bool { - return encodings[i].quality > encodings[j].quality - }) - - // Find the best supported encoding - for _, eq := range encodings { - // Skip encodings with q=0 (explicitly rejected) - if eq.quality == 0 { - continue - } - - if eq.encoding == EncodingIdentity { - return EncodingIdentity, nil - } - - // Wildcard means any encoding is acceptable - return a supported encoding if identity is rejected - if eq.encoding == EncodingWildcard { - if identityRejected && len(SupportedEncodings) > 0 { - return SupportedEncodings[0], nil - } - - return EncodingIdentity, nil - } - - if isSupportedEncoding(eq.encoding) { - return eq.encoding, nil - } - } - - // Per RFC 7231, identity is implicitly acceptable unless rejected - if !identityRejected { - return EncodingIdentity, nil - } - - // Identity rejected and no supported encodings found - return "", fmt.Errorf("no acceptable encoding found, supported: %v", SupportedEncodings) -} - -// getDecompressedBody returns a reader that decompresses the request body based on -// Content-Encoding header. Returns the original body if no encoding is specified. -// Returns an error if an unsupported encoding is specified. -// The caller is responsible for closing both the returned ReadCloser and the -// original request body (r.Body) separately. -func getDecompressedBody(r *http.Request) (io.ReadCloser, error) { - encoding, err := parseContentEncoding(r) - if err != nil { - return nil, err - } - - if encoding == EncodingIdentity { - return r.Body, nil - } - - switch encoding { - case EncodingGzip: - gzReader, err := gzip.NewReader(r.Body) - if err != nil { - return nil, fmt.Errorf("failed to create gzip reader: %w", err) - } - - return gzReader, nil - default: - // This shouldn't happen if isSupportedEncoding is correct - return nil, fmt.Errorf("encoding %s is supported but not implemented", encoding) - } -} diff --git a/envd/internal/api/encoding_test.go b/envd/internal/api/encoding_test.go deleted file mode 100644 index 6cb311b..0000000 --- a/envd/internal/api/encoding_test.go +++ /dev/null @@ -1,496 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "bytes" - "compress/gzip" - "io" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIsSupportedEncoding(t *testing.T) { - t.Parallel() - - t.Run("gzip is supported", func(t *testing.T) { - t.Parallel() - assert.True(t, isSupportedEncoding("gzip")) - }) - - t.Run("GZIP is supported (case-insensitive)", func(t *testing.T) { - t.Parallel() - assert.True(t, isSupportedEncoding("GZIP")) - }) - - t.Run("Gzip is supported (case-insensitive)", func(t *testing.T) { - t.Parallel() - assert.True(t, isSupportedEncoding("Gzip")) - }) - - t.Run("br is not supported", func(t *testing.T) { - t.Parallel() - assert.False(t, isSupportedEncoding("br")) - }) - - t.Run("deflate is not supported", func(t *testing.T) { - t.Parallel() - assert.False(t, isSupportedEncoding("deflate")) - }) -} - -func TestParseEncodingWithQuality(t *testing.T) { - t.Parallel() - - t.Run("returns encoding with default quality 1.0", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("gzip") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 1.0, eq.quality, 0.001) - }) - - t.Run("parses quality value", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("gzip;q=0.5") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 0.5, eq.quality, 0.001) - }) - - t.Run("parses quality value with whitespace", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("gzip ; q=0.8") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 0.8, eq.quality, 0.001) - }) - - t.Run("handles q=0", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("gzip;q=0") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 0.0, eq.quality, 0.001) - }) - - t.Run("handles invalid quality value", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("gzip;q=invalid") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 1.0, eq.quality, 0.001) // defaults to 1.0 on parse error - }) - - t.Run("trims whitespace from encoding", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality(" gzip ") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 1.0, eq.quality, 0.001) - }) - - t.Run("normalizes encoding to lowercase", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("GZIP") - assert.Equal(t, "gzip", eq.encoding) - }) - - t.Run("normalizes mixed case encoding", func(t *testing.T) { - t.Parallel() - eq := parseEncodingWithQuality("Gzip;q=0.5") - assert.Equal(t, "gzip", eq.encoding) - assert.InDelta(t, 0.5, eq.quality, 0.001) - }) -} - -func TestParseEncoding(t *testing.T) { - t.Parallel() - - t.Run("returns encoding as-is", func(t *testing.T) { - t.Parallel() - assert.Equal(t, "gzip", parseEncoding("gzip")) - }) - - t.Run("trims whitespace", func(t *testing.T) { - t.Parallel() - assert.Equal(t, "gzip", parseEncoding(" gzip ")) - }) - - t.Run("strips quality value", func(t *testing.T) { - t.Parallel() - assert.Equal(t, "gzip", parseEncoding("gzip;q=1.0")) - }) - - t.Run("strips quality value with whitespace", func(t *testing.T) { - t.Parallel() - assert.Equal(t, "gzip", parseEncoding("gzip ; q=0.5")) - }) -} - -func TestParseContentEncoding(t *testing.T) { - t.Parallel() - - t.Run("returns identity when no header", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns gzip when Content-Encoding is gzip", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "gzip") - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns gzip when Content-Encoding is GZIP (case-insensitive)", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "GZIP") - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns gzip when Content-Encoding is Gzip (case-insensitive)", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "Gzip") - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns identity for identity encoding", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "identity") - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns error for unsupported encoding", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "br") - - _, err := parseContentEncoding(req) - require.Error(t, err) - assert.Contains(t, err.Error(), "unsupported Content-Encoding") - assert.Contains(t, err.Error(), "supported: [gzip]") - }) - - t.Run("handles gzip with quality value", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", nil) - req.Header.Set("Content-Encoding", "gzip;q=1.0") - - encoding, err := parseContentEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) -} - -func TestParseAcceptEncoding(t *testing.T) { - t.Parallel() - - t.Run("returns identity when no header", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns gzip when Accept-Encoding is gzip", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "gzip") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns gzip when Accept-Encoding is GZIP (case-insensitive)", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "GZIP") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns gzip when gzip is among multiple encodings", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "deflate, gzip, br") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns gzip with quality value", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "gzip;q=1.0") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns identity for identity encoding", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "identity") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns identity for wildcard encoding", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "*") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("falls back to identity for unsupported encoding only", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "br") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("falls back to identity when only unsupported encodings", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "deflate, br") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("selects gzip when it has highest quality", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "br;q=0.5, gzip;q=1.0, deflate;q=0.8") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("selects gzip even with lower quality when others unsupported", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "br;q=1.0, gzip;q=0.5") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) - }) - - t.Run("returns identity when it has higher quality than gzip", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "gzip;q=0.5, identity;q=1.0") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("skips encoding with q=0", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "gzip;q=0, identity") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("falls back to identity when gzip rejected and no other supported", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "gzip;q=0, br") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns error when identity explicitly rejected and no supported encoding", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "br, identity;q=0") - - _, err := parseAcceptEncoding(req) - require.Error(t, err) - assert.Contains(t, err.Error(), "no acceptable encoding found") - }) - - t.Run("returns gzip for wildcard when identity rejected", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "*, identity;q=0") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, "gzip", encoding) // wildcard with identity rejected returns supported encoding - }) - - t.Run("returns error when wildcard rejected and no explicit identity", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "*;q=0") - - _, err := parseAcceptEncoding(req) - require.Error(t, err) - assert.Contains(t, err.Error(), "no acceptable encoding found") - }) - - t.Run("returns identity when wildcard rejected but identity explicitly accepted", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "*;q=0, identity") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingIdentity, encoding) - }) - - t.Run("returns gzip when wildcard rejected but gzip explicitly accepted", func(t *testing.T) { - t.Parallel() - req, _ := http.NewRequestWithContext(t.Context(), http.MethodGet, "/test", nil) - req.Header.Set("Accept-Encoding", "*;q=0, gzip") - - encoding, err := parseAcceptEncoding(req) - require.NoError(t, err) - assert.Equal(t, EncodingGzip, encoding) - }) -} - -func TestGetDecompressedBody(t *testing.T) { - t.Parallel() - - t.Run("returns original body when no Content-Encoding header", func(t *testing.T) { - t.Parallel() - content := []byte("test content") - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(content)) - - body, err := getDecompressedBody(req) - require.NoError(t, err) - assert.Equal(t, req.Body, body, "should return original body") - - data, err := io.ReadAll(body) - require.NoError(t, err) - assert.Equal(t, content, data) - }) - - t.Run("decompresses gzip body when Content-Encoding is gzip", func(t *testing.T) { - t.Parallel() - originalContent := []byte("test content to compress") - - var compressed bytes.Buffer - gw := gzip.NewWriter(&compressed) - _, err := gw.Write(originalContent) - require.NoError(t, err) - err = gw.Close() - require.NoError(t, err) - - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(compressed.Bytes())) - req.Header.Set("Content-Encoding", "gzip") - - body, err := getDecompressedBody(req) - require.NoError(t, err) - defer body.Close() - - assert.NotEqual(t, req.Body, body, "should return a new gzip reader") - - data, err := io.ReadAll(body) - require.NoError(t, err) - assert.Equal(t, originalContent, data) - }) - - t.Run("returns error for invalid gzip data", func(t *testing.T) { - t.Parallel() - invalidGzip := []byte("this is not gzip data") - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(invalidGzip)) - req.Header.Set("Content-Encoding", "gzip") - - _, err := getDecompressedBody(req) - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to create gzip reader") - }) - - t.Run("returns original body for identity encoding", func(t *testing.T) { - t.Parallel() - content := []byte("test content") - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(content)) - req.Header.Set("Content-Encoding", "identity") - - body, err := getDecompressedBody(req) - require.NoError(t, err) - assert.Equal(t, req.Body, body, "should return original body") - - data, err := io.ReadAll(body) - require.NoError(t, err) - assert.Equal(t, content, data) - }) - - t.Run("returns error for unsupported encoding", func(t *testing.T) { - t.Parallel() - content := []byte("test content") - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(content)) - req.Header.Set("Content-Encoding", "br") - - _, err := getDecompressedBody(req) - require.Error(t, err) - assert.Contains(t, err.Error(), "unsupported Content-Encoding") - }) - - t.Run("handles gzip with quality value", func(t *testing.T) { - t.Parallel() - originalContent := []byte("test content to compress") - - var compressed bytes.Buffer - gw := gzip.NewWriter(&compressed) - _, err := gw.Write(originalContent) - require.NoError(t, err) - err = gw.Close() - require.NoError(t, err) - - req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/test", bytes.NewReader(compressed.Bytes())) - req.Header.Set("Content-Encoding", "gzip;q=1.0") - - body, err := getDecompressedBody(req) - require.NoError(t, err) - defer body.Close() - - data, err := io.ReadAll(body) - require.NoError(t, err) - assert.Equal(t, originalContent, data) - }) -} diff --git a/envd/internal/api/envs.go b/envd/internal/api/envs.go deleted file mode 100644 index 6de61b7..0000000 --- a/envd/internal/api/envs.go +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "encoding/json" - "net/http" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" -) - -func (a *API) GetEnvs(w http.ResponseWriter, _ *http.Request) { - operationID := logs.AssignOperationID() - - a.logger.Debug().Str(string(logs.OperationIDKey), operationID).Msg("Getting env vars") - - envs := make(EnvVars) - a.defaults.EnvVars.Range(func(key, value string) bool { - envs[key] = value - - return true - }) - - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Content-Type", "application/json") - - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(envs); err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("Failed to encode env vars") - } -} diff --git a/envd/internal/api/error.go b/envd/internal/api/error.go deleted file mode 100644 index f3362b0..0000000 --- a/envd/internal/api/error.go +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "encoding/json" - "errors" - "net/http" -) - -func jsonError(w http.ResponseWriter, code int, err error) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("X-Content-Type-Options", "nosniff") - - w.WriteHeader(code) - encodeErr := json.NewEncoder(w).Encode(Error{ - Code: code, - Message: err.Error(), - }) - if encodeErr != nil { - http.Error(w, errors.Join(encodeErr, err).Error(), http.StatusInternalServerError) - } -} diff --git a/envd/internal/api/generate.go b/envd/internal/api/generate.go deleted file mode 100644 index 8d906a6..0000000 --- a/envd/internal/api/generate.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -//go:generate go run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen -config cfg.yaml ../../spec/envd.yaml diff --git a/envd/internal/api/init.go b/envd/internal/api/init.go deleted file mode 100644 index ac4f8eb..0000000 --- a/envd/internal/api/init.go +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/netip" - "os/exec" - "time" - - "git.omukk.dev/wrenn/sandbox/envd/internal/host" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/keys" - "github.com/awnumar/memguard" - "github.com/rs/zerolog" - "github.com/txn2/txeh" -) - -var ( - ErrAccessTokenMismatch = errors.New("access token validation failed") - ErrAccessTokenResetNotAuthorized = errors.New("access token reset not authorized") -) - -// validateInitAccessToken validates the access token for /init requests. -// Token is valid if it matches the existing token OR the MMDS hash. -// If neither exists, first-time setup is allowed. -func (a *API) validateInitAccessToken(ctx context.Context, requestToken *SecureToken) error { - requestTokenSet := requestToken.IsSet() - - // Fast path: token matches existing - if a.accessToken.IsSet() && requestTokenSet && a.accessToken.EqualsSecure(requestToken) { - return nil - } - - // Check MMDS only if token didn't match existing - matchesMMDS, mmdsExists := a.checkMMDSHash(ctx, requestToken) - - switch { - case matchesMMDS: - return nil - case !a.accessToken.IsSet() && !mmdsExists: - return nil // first-time setup - case !requestTokenSet: - return ErrAccessTokenResetNotAuthorized - default: - return ErrAccessTokenMismatch - } -} - -// checkMMDSHash checks if the request token matches the MMDS hash. -// Returns (matches, mmdsExists). -// -// The MMDS hash is set by the orchestrator during Resume: -// - hash(token): requires this specific token -// - hash(""): explicitly allows nil token (token reset authorized) -// - "": MMDS not properly configured, no authorization granted -func (a *API) checkMMDSHash(ctx context.Context, requestToken *SecureToken) (bool, bool) { - if a.isNotFC { - return false, false - } - - mmdsHash, err := a.mmdsClient.GetAccessTokenHash(ctx) - if err != nil { - return false, false - } - - if mmdsHash == "" { - return false, false - } - - if !requestToken.IsSet() { - return mmdsHash == keys.HashAccessToken(""), true - } - - tokenBytes, err := requestToken.Bytes() - if err != nil { - return false, true - } - defer memguard.WipeBytes(tokenBytes) - - return keys.HashAccessTokenBytes(tokenBytes) == mmdsHash, true -} - -func (a *API) PostInit(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - ctx := r.Context() - - operationID := logs.AssignOperationID() - logger := a.logger.With().Str(string(logs.OperationIDKey), operationID).Logger() - - if r.Body != nil { - // Read raw body so we can wipe it after parsing - body, err := io.ReadAll(r.Body) - // Ensure body is wiped after we're done - defer memguard.WipeBytes(body) - if err != nil { - logger.Error().Msgf("Failed to read request body: %v", err) - w.WriteHeader(http.StatusBadRequest) - - return - } - - var initRequest PostInitJSONBody - if len(body) > 0 { - err = json.Unmarshal(body, &initRequest) - if err != nil { - logger.Error().Msgf("Failed to decode request: %v", err) - w.WriteHeader(http.StatusBadRequest) - - return - } - } - - // Ensure request token is destroyed if not transferred via TakeFrom. - // This handles: validation failures, timestamp-based skips, and any early returns. - // Safe because Destroy() is nil-safe and TakeFrom clears the source. - defer initRequest.AccessToken.Destroy() - - a.initLock.Lock() - defer a.initLock.Unlock() - - // Update data only if the request is newer or if there's no timestamp at all - if initRequest.Timestamp == nil || a.lastSetTime.SetToGreater(initRequest.Timestamp.UnixNano()) { - err = a.SetData(ctx, logger, initRequest) - if err != nil { - switch { - case errors.Is(err, ErrAccessTokenMismatch), errors.Is(err, ErrAccessTokenResetNotAuthorized): - w.WriteHeader(http.StatusUnauthorized) - default: - logger.Error().Msgf("Failed to set data: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - w.Write([]byte(err.Error())) - - return - } - } - } - - go func() { //nolint:contextcheck // TODO: fix this later - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - host.PollForMMDSOpts(ctx, a.mmdsChan, a.defaults.EnvVars) - }() - - // Safety net: if the health check's postRestoreRecovery didn't run yet - // (e.g. PostInit arrived before the first health check), re-enable GC - // here. On first boot needsRestore is false so CAS is a no-op. - if a.needsRestore.CompareAndSwap(true, false) { - a.postRestoreRecovery() - } - // RestoreAfterSnapshot is idempotent (clears preSnapshot set), and - // Start is a no-op if already running. - if a.connTracker != nil { - a.connTracker.RestoreAfterSnapshot() - } - if a.portSubsystem != nil { - a.portSubsystem.Start(a.rootCtx) - } - - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Content-Type", "") - - w.WriteHeader(http.StatusNoContent) -} - -func (a *API) SetData(ctx context.Context, logger zerolog.Logger, data PostInitJSONBody) error { - // Validate access token before proceeding with any action - // The request must provide a token that is either: - // 1. Matches the existing access token (if set), OR - // 2. Matches the MMDS hash (for token change during resume) - if err := a.validateInitAccessToken(ctx, data.AccessToken); err != nil { - return err - } - - if data.EnvVars != nil { - logger.Debug().Msg(fmt.Sprintf("Setting %d env vars", len(*data.EnvVars))) - - for key, value := range *data.EnvVars { - logger.Debug().Msgf("Setting env var for %s", key) - a.defaults.EnvVars.Store(key, value) - } - } - - if data.AccessToken.IsSet() { - logger.Debug().Msg("Setting access token") - a.accessToken.TakeFrom(data.AccessToken) - } else if a.accessToken.IsSet() { - logger.Debug().Msg("Clearing access token") - a.accessToken.Destroy() - } - - if data.HyperloopIP != nil { - go a.SetupHyperloop(*data.HyperloopIP) - } - - if data.DefaultUser != nil && *data.DefaultUser != "" { - logger.Debug().Msgf("Setting default user to: %s", *data.DefaultUser) - a.defaults.User = *data.DefaultUser - } - - if data.DefaultWorkdir != nil && *data.DefaultWorkdir != "" { - logger.Debug().Msgf("Setting default workdir to: %s", *data.DefaultWorkdir) - a.defaults.Workdir = data.DefaultWorkdir - } - - if data.VolumeMounts != nil { - for _, volume := range *data.VolumeMounts { - logger.Debug().Msgf("Mounting %s at %q", volume.NfsTarget, volume.Path) - - go a.setupNfs(context.WithoutCancel(ctx), volume.NfsTarget, volume.Path) - } - } - - return nil -} - -func (a *API) setupNfs(ctx context.Context, nfsTarget, path string) { - commands := [][]string{ - {"mkdir", "-p", path}, - {"mount", "-v", "-t", "nfs", "-o", "mountproto=tcp,mountport=2049,proto=tcp,port=2049,nfsvers=3,noacl", nfsTarget, path}, - } - - for _, command := range commands { - data, err := exec.CommandContext(ctx, command[0], command[1:]...).CombinedOutput() - - logger := a.getLogger(err) - - logger. - Strs("command", command). - Str("output", string(data)). - Msg("Mount NFS") - - if err != nil { - return - } - } -} - -func (a *API) SetupHyperloop(address string) { - a.hyperloopLock.Lock() - defer a.hyperloopLock.Unlock() - - if err := rewriteHostsFile(address, "/etc/hosts"); err != nil { - a.logger.Error().Err(err).Msg("failed to modify hosts file") - } else { - a.defaults.EnvVars.Store("WRENN_EVENTS_ADDRESS", fmt.Sprintf("http://%s", address)) - } -} - -const eventsHost = "events.wrenn.local" - -func rewriteHostsFile(address, path string) error { - hosts, err := txeh.NewHosts(&txeh.HostsConfig{ - ReadFilePath: path, - WriteFilePath: path, - }) - if err != nil { - return fmt.Errorf("failed to create hosts: %w", err) - } - - // Update /etc/hosts to point events.wrenn.local to the hyperloop IP - // This will remove any existing entries for events.wrenn.local first - ipFamily, err := getIPFamily(address) - if err != nil { - return fmt.Errorf("failed to get ip family: %w", err) - } - - if ok, current, _ := hosts.HostAddressLookup(eventsHost, ipFamily); ok && current == address { - return nil // nothing to be done - } - - hosts.AddHost(address, eventsHost) - - return hosts.Save() -} - -var ( - ErrInvalidAddress = errors.New("invalid IP address") - ErrUnknownAddressFormat = errors.New("unknown IP address format") -) - -func getIPFamily(address string) (txeh.IPFamily, error) { - addressIP, err := netip.ParseAddr(address) - if err != nil { - return txeh.IPFamilyV4, fmt.Errorf("failed to parse IP address: %w", err) - } - - switch { - case addressIP.Is4(): - return txeh.IPFamilyV4, nil - case addressIP.Is6(): - return txeh.IPFamilyV6, nil - default: - return txeh.IPFamilyV4, fmt.Errorf("%w: %s", ErrUnknownAddressFormat, address) - } -} diff --git a/envd/internal/api/init_test.go b/envd/internal/api/init_test.go deleted file mode 100644 index 9fe6ece..0000000 --- a/envd/internal/api/init_test.go +++ /dev/null @@ -1,524 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/keys" - utilsShared "git.omukk.dev/wrenn/sandbox/envd/internal/shared/utils" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -func TestSimpleCases(t *testing.T) { - t.Parallel() - testCases := map[string]func(string) string{ - "both newlines": func(s string) string { return s }, - "no newline prefix": func(s string) string { return strings.TrimPrefix(s, "\n") }, - "no newline suffix": func(s string) string { return strings.TrimSuffix(s, "\n") }, - "no newline prefix or suffix": strings.TrimSpace, - } - - for name, preprocessor := range testCases { - t.Run(name, func(t *testing.T) { - t.Parallel() - tempDir := t.TempDir() - - value := ` -# comment -127.0.0.1 one.host -127.0.0.2 two.host -` - value = preprocessor(value) - inputPath := filepath.Join(tempDir, "hosts") - err := os.WriteFile(inputPath, []byte(value), 0o644) - require.NoError(t, err) - - err = rewriteHostsFile("127.0.0.3", inputPath) - require.NoError(t, err) - - data, err := os.ReadFile(inputPath) - require.NoError(t, err) - - assert.Equal(t, `# comment -127.0.0.1 one.host -127.0.0.2 two.host -127.0.0.3 events.wrenn.local`, strings.TrimSpace(string(data))) - }) - } -} - -func secureTokenPtr(s string) *SecureToken { - token := &SecureToken{} - _ = token.Set([]byte(s)) - - return token -} - -type mockMMDSClient struct { - hash string - err error -} - -func (m *mockMMDSClient) GetAccessTokenHash(_ context.Context) (string, error) { - return m.hash, m.err -} - -func newTestAPI(accessToken *SecureToken, mmdsClient MMDSClient) *API { - logger := zerolog.Nop() - defaults := &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - } - api := New(&logger, defaults, nil, false, context.Background(), nil, nil, "test") - if accessToken != nil { - api.accessToken.TakeFrom(accessToken) - } - api.mmdsClient = mmdsClient - - return api -} - -func TestValidateInitAccessToken(t *testing.T) { - t.Parallel() - ctx := t.Context() - - tests := []struct { - name string - accessToken *SecureToken - requestToken *SecureToken - mmdsHash string - mmdsErr error - wantErr error - }{ - { - name: "fast path: token matches existing", - accessToken: secureTokenPtr("secret-token"), - requestToken: secureTokenPtr("secret-token"), - mmdsHash: "", - mmdsErr: nil, - wantErr: nil, - }, - { - name: "MMDS match: token hash matches MMDS hash", - accessToken: secureTokenPtr("old-token"), - requestToken: secureTokenPtr("new-token"), - mmdsHash: keys.HashAccessToken("new-token"), - mmdsErr: nil, - wantErr: nil, - }, - { - name: "first-time setup: no existing token, MMDS error", - accessToken: nil, - requestToken: secureTokenPtr("new-token"), - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: nil, - }, - { - name: "first-time setup: no existing token, empty MMDS hash", - accessToken: nil, - requestToken: secureTokenPtr("new-token"), - mmdsHash: "", - mmdsErr: nil, - wantErr: nil, - }, - { - name: "first-time setup: both tokens nil, no MMDS", - accessToken: nil, - requestToken: nil, - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: nil, - }, - { - name: "mismatch: existing token differs from request, no MMDS", - accessToken: secureTokenPtr("existing-token"), - requestToken: secureTokenPtr("wrong-token"), - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: ErrAccessTokenMismatch, - }, - { - name: "mismatch: existing token differs from request, MMDS hash mismatch", - accessToken: secureTokenPtr("existing-token"), - requestToken: secureTokenPtr("wrong-token"), - mmdsHash: keys.HashAccessToken("different-token"), - mmdsErr: nil, - wantErr: ErrAccessTokenMismatch, - }, - { - name: "conflict: existing token, nil request, MMDS exists", - accessToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: keys.HashAccessToken("some-token"), - mmdsErr: nil, - wantErr: ErrAccessTokenResetNotAuthorized, - }, - { - name: "conflict: existing token, nil request, no MMDS", - accessToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: ErrAccessTokenResetNotAuthorized, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: tt.mmdsHash, err: tt.mmdsErr} - api := newTestAPI(tt.accessToken, mmdsClient) - - err := api.validateInitAccessToken(ctx, tt.requestToken) - - if tt.wantErr != nil { - require.Error(t, err) - assert.ErrorIs(t, err, tt.wantErr) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestCheckMMDSHash(t *testing.T) { - t.Parallel() - ctx := t.Context() - - t.Run("returns match when token hash equals MMDS hash", func(t *testing.T) { - t.Parallel() - token := "my-secret-token" - mmdsClient := &mockMMDSClient{hash: keys.HashAccessToken(token), err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, secureTokenPtr(token)) - - assert.True(t, matches) - assert.True(t, exists) - }) - - t.Run("returns no match when token hash differs from MMDS hash", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: keys.HashAccessToken("different-token"), err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, secureTokenPtr("my-token")) - - assert.False(t, matches) - assert.True(t, exists) - }) - - t.Run("returns exists but no match when request token is nil", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: keys.HashAccessToken("some-token"), err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, nil) - - assert.False(t, matches) - assert.True(t, exists) - }) - - t.Run("returns false, false when MMDS returns error", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, secureTokenPtr("any-token")) - - assert.False(t, matches) - assert.False(t, exists) - }) - - t.Run("returns false, false when MMDS returns empty hash with non-nil request", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, secureTokenPtr("any-token")) - - assert.False(t, matches) - assert.False(t, exists) - }) - - t.Run("returns false, false when MMDS returns empty hash with nil request", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, nil) - - assert.False(t, matches) - assert.False(t, exists) - }) - - t.Run("returns true, true when MMDS returns hash of empty string with nil request (explicit reset)", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: keys.HashAccessToken(""), err: nil} - api := newTestAPI(nil, mmdsClient) - - matches, exists := api.checkMMDSHash(ctx, nil) - - assert.True(t, matches) - assert.True(t, exists) - }) -} - -func TestSetData(t *testing.T) { - t.Parallel() - ctx := context.Background() - logger := zerolog.Nop() - - t.Run("access token updates", func(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - existingToken *SecureToken - requestToken *SecureToken - mmdsHash string - mmdsErr error - wantErr error - wantFinalToken *SecureToken - }{ - { - name: "first-time setup: sets initial token", - existingToken: nil, - requestToken: secureTokenPtr("initial-token"), - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: nil, - wantFinalToken: secureTokenPtr("initial-token"), - }, - { - name: "first-time setup: nil request token leaves token unset", - existingToken: nil, - requestToken: nil, - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: nil, - wantFinalToken: nil, - }, - { - name: "re-init with same token: token unchanged", - existingToken: secureTokenPtr("same-token"), - requestToken: secureTokenPtr("same-token"), - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: nil, - wantFinalToken: secureTokenPtr("same-token"), - }, - { - name: "resume with MMDS: updates token when hash matches", - existingToken: secureTokenPtr("old-token"), - requestToken: secureTokenPtr("new-token"), - mmdsHash: keys.HashAccessToken("new-token"), - mmdsErr: nil, - wantErr: nil, - wantFinalToken: secureTokenPtr("new-token"), - }, - { - name: "resume with MMDS: fails when hash doesn't match", - existingToken: secureTokenPtr("old-token"), - requestToken: secureTokenPtr("new-token"), - mmdsHash: keys.HashAccessToken("different-token"), - mmdsErr: nil, - wantErr: ErrAccessTokenMismatch, - wantFinalToken: secureTokenPtr("old-token"), - }, - { - name: "fails when existing token and request token mismatch without MMDS", - existingToken: secureTokenPtr("existing-token"), - requestToken: secureTokenPtr("wrong-token"), - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: ErrAccessTokenMismatch, - wantFinalToken: secureTokenPtr("existing-token"), - }, - { - name: "conflict when existing token but nil request token", - existingToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: "", - mmdsErr: assert.AnError, - wantErr: ErrAccessTokenResetNotAuthorized, - wantFinalToken: secureTokenPtr("existing-token"), - }, - { - name: "conflict when existing token but nil request with MMDS present", - existingToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: keys.HashAccessToken("some-token"), - mmdsErr: nil, - wantErr: ErrAccessTokenResetNotAuthorized, - wantFinalToken: secureTokenPtr("existing-token"), - }, - { - name: "conflict when MMDS returns empty hash and request is nil (prevents unauthorized reset)", - existingToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: "", - mmdsErr: nil, - wantErr: ErrAccessTokenResetNotAuthorized, - wantFinalToken: secureTokenPtr("existing-token"), - }, - { - name: "resets token when MMDS returns hash of empty string and request is nil (explicit reset)", - existingToken: secureTokenPtr("existing-token"), - requestToken: nil, - mmdsHash: keys.HashAccessToken(""), - mmdsErr: nil, - wantErr: nil, - wantFinalToken: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: tt.mmdsHash, err: tt.mmdsErr} - api := newTestAPI(tt.existingToken, mmdsClient) - - data := PostInitJSONBody{ - AccessToken: tt.requestToken, - } - - err := api.SetData(ctx, logger, data) - - if tt.wantErr != nil { - require.ErrorIs(t, err, tt.wantErr) - } else { - require.NoError(t, err) - } - - if tt.wantFinalToken == nil { - assert.False(t, api.accessToken.IsSet(), "expected token to not be set") - } else { - require.True(t, api.accessToken.IsSet(), "expected token to be set") - assert.True(t, api.accessToken.EqualsSecure(tt.wantFinalToken), "expected token to match") - } - }) - } - }) - - t.Run("sets environment variables", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - - envVars := EnvVars{"FOO": "bar", "BAZ": "qux"} - data := PostInitJSONBody{ - EnvVars: &envVars, - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - val, ok := api.defaults.EnvVars.Load("FOO") - assert.True(t, ok) - assert.Equal(t, "bar", val) - val, ok = api.defaults.EnvVars.Load("BAZ") - assert.True(t, ok) - assert.Equal(t, "qux", val) - }) - - t.Run("sets default user", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - - data := PostInitJSONBody{ - DefaultUser: utilsShared.ToPtr("testuser"), - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - assert.Equal(t, "testuser", api.defaults.User) - }) - - t.Run("does not set default user when empty", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - api.defaults.User = "original" - - data := PostInitJSONBody{ - DefaultUser: utilsShared.ToPtr(""), - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - assert.Equal(t, "original", api.defaults.User) - }) - - t.Run("sets default workdir", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - - data := PostInitJSONBody{ - DefaultWorkdir: utilsShared.ToPtr("/home/user"), - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - require.NotNil(t, api.defaults.Workdir) - assert.Equal(t, "/home/user", *api.defaults.Workdir) - }) - - t.Run("does not set default workdir when empty", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - originalWorkdir := "/original" - api.defaults.Workdir = &originalWorkdir - - data := PostInitJSONBody{ - DefaultWorkdir: utilsShared.ToPtr(""), - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - require.NotNil(t, api.defaults.Workdir) - assert.Equal(t, "/original", *api.defaults.Workdir) - }) - - t.Run("sets multiple fields at once", func(t *testing.T) { - t.Parallel() - mmdsClient := &mockMMDSClient{hash: "", err: assert.AnError} - api := newTestAPI(nil, mmdsClient) - - envVars := EnvVars{"KEY": "value"} - data := PostInitJSONBody{ - AccessToken: secureTokenPtr("token"), - DefaultUser: utilsShared.ToPtr("user"), - DefaultWorkdir: utilsShared.ToPtr("/workdir"), - EnvVars: &envVars, - } - - err := api.SetData(ctx, logger, data) - - require.NoError(t, err) - assert.True(t, api.accessToken.Equals("token"), "expected token to match") - assert.Equal(t, "user", api.defaults.User) - assert.Equal(t, "/workdir", *api.defaults.Workdir) - val, ok := api.defaults.EnvVars.Load("KEY") - assert.True(t, ok) - assert.Equal(t, "value", val) - }) -} diff --git a/envd/internal/api/secure_token.go b/envd/internal/api/secure_token.go deleted file mode 100644 index bdb6fab..0000000 --- a/envd/internal/api/secure_token.go +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "bytes" - "errors" - "sync" - - "github.com/awnumar/memguard" -) - -var ( - ErrTokenNotSet = errors.New("access token not set") - ErrTokenEmpty = errors.New("empty token not allowed") -) - -// SecureToken wraps memguard for secure token storage. -// It uses LockedBuffer which provides memory locking, guard pages, -// and secure zeroing on destroy. -type SecureToken struct { - mu sync.RWMutex - buffer *memguard.LockedBuffer -} - -// Set securely replaces the token, destroying the old one first. -// The old token memory is zeroed before the new token is stored. -// The input byte slice is wiped after copying to secure memory. -// Returns ErrTokenEmpty if token is empty - use Destroy() to clear the token instead. -func (s *SecureToken) Set(token []byte) error { - if len(token) == 0 { - return ErrTokenEmpty - } - - s.mu.Lock() - defer s.mu.Unlock() - - // Destroy old token first (zeros memory) - if s.buffer != nil { - s.buffer.Destroy() - s.buffer = nil - } - - // Create new LockedBuffer from bytes (source slice is wiped by memguard) - s.buffer = memguard.NewBufferFromBytes(token) - - return nil -} - -// UnmarshalJSON implements json.Unmarshaler to securely parse a JSON string -// directly into memguard, wiping the input bytes after copying. -// -// Access tokens are hex-encoded HMAC-SHA256 hashes (64 chars of [0-9a-f]), -// so they never contain JSON escape sequences. -func (s *SecureToken) UnmarshalJSON(data []byte) error { - // JSON strings are quoted, so minimum valid is `""` (2 bytes). - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - memguard.WipeBytes(data) - - return errors.New("invalid secure token JSON string") - } - - content := data[1 : len(data)-1] - - // Access tokens are hex strings - reject if contains backslash - if bytes.ContainsRune(content, '\\') { - memguard.WipeBytes(data) - - return errors.New("invalid secure token: unexpected escape sequence") - } - - if len(content) == 0 { - memguard.WipeBytes(data) - - return ErrTokenEmpty - } - - s.mu.Lock() - defer s.mu.Unlock() - - if s.buffer != nil { - s.buffer.Destroy() - s.buffer = nil - } - - // Allocate secure buffer and copy directly into it - s.buffer = memguard.NewBuffer(len(content)) - copy(s.buffer.Bytes(), content) - - // Wipe the input data - memguard.WipeBytes(data) - - return nil -} - -// TakeFrom transfers the token from src to this SecureToken, destroying any -// existing token. The source token is cleared after transfer. -// This avoids copying the underlying bytes. -func (s *SecureToken) TakeFrom(src *SecureToken) { - if src == nil || s == src { - return - } - - // Extract buffer from source - src.mu.Lock() - buffer := src.buffer - src.buffer = nil - src.mu.Unlock() - - // Install buffer in destination - s.mu.Lock() - if s.buffer != nil { - s.buffer.Destroy() - } - s.buffer = buffer - s.mu.Unlock() -} - -// Equals checks if token matches using constant-time comparison. -// Returns false if the receiver is nil. -func (s *SecureToken) Equals(token string) bool { - if s == nil { - return false - } - - s.mu.RLock() - defer s.mu.RUnlock() - - if s.buffer == nil || !s.buffer.IsAlive() { - return false - } - - return s.buffer.EqualTo([]byte(token)) -} - -// EqualsSecure compares this token with another SecureToken using constant-time comparison. -// Returns false if either receiver or other is nil. -func (s *SecureToken) EqualsSecure(other *SecureToken) bool { - if s == nil || other == nil { - return false - } - - if s == other { - return s.IsSet() - } - - // Get a copy of other's bytes (avoids holding two locks simultaneously) - otherBytes, err := other.Bytes() - if err != nil { - return false - } - defer memguard.WipeBytes(otherBytes) - - s.mu.RLock() - defer s.mu.RUnlock() - - if s.buffer == nil || !s.buffer.IsAlive() { - return false - } - - return s.buffer.EqualTo(otherBytes) -} - -// IsSet returns true if a token is stored. -// Returns false if the receiver is nil. -func (s *SecureToken) IsSet() bool { - if s == nil { - return false - } - - s.mu.RLock() - defer s.mu.RUnlock() - - return s.buffer != nil && s.buffer.IsAlive() -} - -// Bytes returns a copy of the token bytes (for signature generation). -// The caller should zero the returned slice after use. -// Returns ErrTokenNotSet if the receiver is nil. -func (s *SecureToken) Bytes() ([]byte, error) { - if s == nil { - return nil, ErrTokenNotSet - } - - s.mu.RLock() - defer s.mu.RUnlock() - - if s.buffer == nil || !s.buffer.IsAlive() { - return nil, ErrTokenNotSet - } - - // Return a copy (unavoidable for signature generation) - src := s.buffer.Bytes() - result := make([]byte, len(src)) - copy(result, src) - - return result, nil -} - -// Destroy securely wipes the token from memory. -// No-op if the receiver is nil. -func (s *SecureToken) Destroy() { - if s == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - if s.buffer != nil { - s.buffer.Destroy() - s.buffer = nil - } -} diff --git a/envd/internal/api/secure_token_test.go b/envd/internal/api/secure_token_test.go deleted file mode 100644 index ccb5a78..0000000 --- a/envd/internal/api/secure_token_test.go +++ /dev/null @@ -1,463 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "sync" - "testing" - - "github.com/awnumar/memguard" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSecureTokenSetAndEquals(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Initially not set - assert.False(t, st.IsSet(), "token should not be set initially") - assert.False(t, st.Equals("any-token"), "equals should return false when not set") - - // Set token - err := st.Set([]byte("test-token")) - require.NoError(t, err) - assert.True(t, st.IsSet(), "token should be set after Set()") - assert.True(t, st.Equals("test-token"), "equals should return true for correct token") - assert.False(t, st.Equals("wrong-token"), "equals should return false for wrong token") - assert.False(t, st.Equals(""), "equals should return false for empty token") -} - -func TestSecureTokenReplace(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Set initial token - err := st.Set([]byte("first-token")) - require.NoError(t, err) - assert.True(t, st.Equals("first-token")) - - // Replace with new token (old one should be destroyed) - err = st.Set([]byte("second-token")) - require.NoError(t, err) - assert.True(t, st.Equals("second-token"), "should match new token") - assert.False(t, st.Equals("first-token"), "should not match old token") -} - -func TestSecureTokenDestroy(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Set and then destroy - err := st.Set([]byte("test-token")) - require.NoError(t, err) - assert.True(t, st.IsSet()) - - st.Destroy() - assert.False(t, st.IsSet(), "token should not be set after Destroy()") - assert.False(t, st.Equals("test-token"), "equals should return false after Destroy()") - - // Destroy on already destroyed should be safe - st.Destroy() - assert.False(t, st.IsSet()) - - // Nil receiver should be safe - var nilToken *SecureToken - assert.False(t, nilToken.IsSet(), "nil receiver should return false for IsSet()") - assert.False(t, nilToken.Equals("anything"), "nil receiver should return false for Equals()") - assert.False(t, nilToken.EqualsSecure(st), "nil receiver should return false for EqualsSecure()") - nilToken.Destroy() // should not panic - - _, err = nilToken.Bytes() - require.ErrorIs(t, err, ErrTokenNotSet, "nil receiver should return ErrTokenNotSet for Bytes()") -} - -func TestSecureTokenBytes(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Bytes should return error when not set - _, err := st.Bytes() - require.ErrorIs(t, err, ErrTokenNotSet) - - // Set token and get bytes - err = st.Set([]byte("test-token")) - require.NoError(t, err) - - bytes, err := st.Bytes() - require.NoError(t, err) - assert.Equal(t, []byte("test-token"), bytes) - - // Zero out the bytes (as caller should do) - memguard.WipeBytes(bytes) - - // Original should still be intact - assert.True(t, st.Equals("test-token"), "original token should still work after zeroing copy") - - // After destroy, bytes should fail - st.Destroy() - _, err = st.Bytes() - assert.ErrorIs(t, err, ErrTokenNotSet) -} - -func TestSecureTokenConcurrentAccess(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - err := st.Set([]byte("initial-token")) - require.NoError(t, err) - - var wg sync.WaitGroup - const numGoroutines = 100 - - // Concurrent reads - for range numGoroutines { - wg.Go(func() { - st.IsSet() - st.Equals("initial-token") - }) - } - - // Concurrent writes - for i := range 10 { - wg.Add(1) - go func(idx int) { - defer wg.Done() - st.Set([]byte("token-" + string(rune('a'+idx)))) - }(i) - } - - wg.Wait() - - // Should still be in a valid state - assert.True(t, st.IsSet()) -} - -func TestSecureTokenEmptyToken(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Setting empty token should return an error - err := st.Set([]byte{}) - require.ErrorIs(t, err, ErrTokenEmpty) - assert.False(t, st.IsSet(), "token should not be set after empty token error") - - // Setting nil should also return an error - err = st.Set(nil) - require.ErrorIs(t, err, ErrTokenEmpty) - assert.False(t, st.IsSet(), "token should not be set after nil token error") -} - -func TestSecureTokenEmptyTokenDoesNotClearExisting(t *testing.T) { - t.Parallel() - - st := &SecureToken{} - - // Set a valid token first - err := st.Set([]byte("valid-token")) - require.NoError(t, err) - assert.True(t, st.IsSet()) - - // Attempting to set empty token should fail and preserve existing token - err = st.Set([]byte{}) - require.ErrorIs(t, err, ErrTokenEmpty) - assert.True(t, st.IsSet(), "existing token should be preserved after empty token error") - assert.True(t, st.Equals("valid-token"), "existing token value should be unchanged") -} - -func TestSecureTokenUnmarshalJSON(t *testing.T) { - t.Parallel() - - t.Run("unmarshals valid JSON string", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.UnmarshalJSON([]byte(`"my-secret-token"`)) - require.NoError(t, err) - assert.True(t, st.IsSet()) - assert.True(t, st.Equals("my-secret-token")) - }) - - t.Run("returns error for empty string", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.UnmarshalJSON([]byte(`""`)) - require.ErrorIs(t, err, ErrTokenEmpty) - assert.False(t, st.IsSet()) - }) - - t.Run("returns error for invalid JSON", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.UnmarshalJSON([]byte(`not-valid-json`)) - require.Error(t, err) - assert.False(t, st.IsSet()) - }) - - t.Run("replaces existing token", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.Set([]byte("old-token")) - require.NoError(t, err) - - err = st.UnmarshalJSON([]byte(`"new-token"`)) - require.NoError(t, err) - assert.True(t, st.Equals("new-token")) - assert.False(t, st.Equals("old-token")) - }) - - t.Run("wipes input buffer after parsing", func(t *testing.T) { - t.Parallel() - // Create a buffer with a known token - input := []byte(`"secret-token-12345"`) - original := make([]byte, len(input)) - copy(original, input) - - st := &SecureToken{} - err := st.UnmarshalJSON(input) - require.NoError(t, err) - - // Verify the token was stored correctly - assert.True(t, st.Equals("secret-token-12345")) - - // Verify the input buffer was wiped (all zeros) - for i, b := range input { - assert.Equal(t, byte(0), b, "byte at position %d should be zero, got %d", i, b) - } - }) - - t.Run("wipes input buffer on error", func(t *testing.T) { - t.Parallel() - // Create a buffer with an empty token (will error) - input := []byte(`""`) - - st := &SecureToken{} - err := st.UnmarshalJSON(input) - require.Error(t, err) - - // Verify the input buffer was still wiped - for i, b := range input { - assert.Equal(t, byte(0), b, "byte at position %d should be zero, got %d", i, b) - } - }) - - t.Run("rejects escape sequences", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.UnmarshalJSON([]byte(`"token\nwith\nnewlines"`)) - require.Error(t, err) - assert.Contains(t, err.Error(), "escape sequence") - assert.False(t, st.IsSet()) - }) -} - -func TestSecureTokenSetWipesInput(t *testing.T) { - t.Parallel() - - t.Run("wipes input buffer after storing", func(t *testing.T) { - t.Parallel() - // Create a buffer with a known token - input := []byte("my-secret-token") - original := make([]byte, len(input)) - copy(original, input) - - st := &SecureToken{} - err := st.Set(input) - require.NoError(t, err) - - // Verify the token was stored correctly - assert.True(t, st.Equals("my-secret-token")) - - // Verify the input buffer was wiped (all zeros) - for i, b := range input { - assert.Equal(t, byte(0), b, "byte at position %d should be zero, got %d", i, b) - } - }) -} - -func TestSecureTokenTakeFrom(t *testing.T) { - t.Parallel() - - t.Run("transfers token from source to destination", func(t *testing.T) { - t.Parallel() - src := &SecureToken{} - err := src.Set([]byte("source-token")) - require.NoError(t, err) - - dst := &SecureToken{} - dst.TakeFrom(src) - - assert.True(t, dst.IsSet()) - assert.True(t, dst.Equals("source-token")) - assert.False(t, src.IsSet(), "source should be empty after transfer") - }) - - t.Run("replaces existing destination token", func(t *testing.T) { - t.Parallel() - src := &SecureToken{} - err := src.Set([]byte("new-token")) - require.NoError(t, err) - - dst := &SecureToken{} - err = dst.Set([]byte("old-token")) - require.NoError(t, err) - - dst.TakeFrom(src) - - assert.True(t, dst.Equals("new-token")) - assert.False(t, dst.Equals("old-token")) - assert.False(t, src.IsSet()) - }) - - t.Run("handles nil source", func(t *testing.T) { - t.Parallel() - dst := &SecureToken{} - err := dst.Set([]byte("existing-token")) - require.NoError(t, err) - - dst.TakeFrom(nil) - - assert.True(t, dst.IsSet(), "destination should be unchanged with nil source") - assert.True(t, dst.Equals("existing-token")) - }) - - t.Run("handles empty source", func(t *testing.T) { - t.Parallel() - src := &SecureToken{} - dst := &SecureToken{} - err := dst.Set([]byte("existing-token")) - require.NoError(t, err) - - dst.TakeFrom(src) - - assert.False(t, dst.IsSet(), "destination should be cleared when source is empty") - }) - - t.Run("self-transfer is no-op and does not deadlock", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.Set([]byte("token")) - require.NoError(t, err) - - st.TakeFrom(st) - - assert.True(t, st.IsSet(), "token should remain set after self-transfer") - assert.True(t, st.Equals("token"), "token value should be unchanged") - }) -} - -func TestSecureTokenEqualsSecure(t *testing.T) { - t.Parallel() - - t.Run("returns true for matching tokens", func(t *testing.T) { - t.Parallel() - st1 := &SecureToken{} - err := st1.Set([]byte("same-token")) - require.NoError(t, err) - - st2 := &SecureToken{} - err = st2.Set([]byte("same-token")) - require.NoError(t, err) - - assert.True(t, st1.EqualsSecure(st2)) - assert.True(t, st2.EqualsSecure(st1)) - }) - - t.Run("concurrent TakeFrom and EqualsSecure do not deadlock", func(t *testing.T) { - t.Parallel() - // This test verifies the fix for the lock ordering deadlock bug. - - const iterations = 100 - - for range iterations { - a := &SecureToken{} - err := a.Set([]byte("token-a")) - require.NoError(t, err) - - b := &SecureToken{} - err = b.Set([]byte("token-b")) - require.NoError(t, err) - - var wg sync.WaitGroup - wg.Add(2) - - // Goroutine 1: a.TakeFrom(b) - go func() { - defer wg.Done() - a.TakeFrom(b) - }() - - // Goroutine 2: b.EqualsSecure(a) - go func() { - defer wg.Done() - b.EqualsSecure(a) - }() - - wg.Wait() - } - }) - - t.Run("returns false for different tokens", func(t *testing.T) { - t.Parallel() - st1 := &SecureToken{} - err := st1.Set([]byte("token-a")) - require.NoError(t, err) - - st2 := &SecureToken{} - err = st2.Set([]byte("token-b")) - require.NoError(t, err) - - assert.False(t, st1.EqualsSecure(st2)) - }) - - t.Run("returns false when comparing with nil", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.Set([]byte("token")) - require.NoError(t, err) - - assert.False(t, st.EqualsSecure(nil)) - }) - - t.Run("returns false when other is not set", func(t *testing.T) { - t.Parallel() - st1 := &SecureToken{} - err := st1.Set([]byte("token")) - require.NoError(t, err) - - st2 := &SecureToken{} - - assert.False(t, st1.EqualsSecure(st2)) - }) - - t.Run("returns false when self is not set", func(t *testing.T) { - t.Parallel() - st1 := &SecureToken{} - - st2 := &SecureToken{} - err := st2.Set([]byte("token")) - require.NoError(t, err) - - assert.False(t, st1.EqualsSecure(st2)) - }) - - t.Run("self-comparison returns true when set", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - err := st.Set([]byte("token")) - require.NoError(t, err) - - assert.True(t, st.EqualsSecure(st), "self-comparison should return true and not deadlock") - }) - - t.Run("self-comparison returns false when not set", func(t *testing.T) { - t.Parallel() - st := &SecureToken{} - - assert.False(t, st.EqualsSecure(st), "self-comparison on unset token should return false") - }) -} diff --git a/envd/internal/api/snapshot.go b/envd/internal/api/snapshot.go deleted file mode 100644 index 0e84dec..0000000 --- a/envd/internal/api/snapshot.go +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "net/http" - "runtime" - "runtime/debug" -) - -// PostSnapshotPrepare quiesces continuous goroutines (port scanner, forwarder), -// closes idle HTTP connections, and forces a GC cycle before Firecracker takes -// a VM snapshot. Closing connections prevents Go runtime corruption from stale -// TCP state after snapshot restore. Keep-alives are disabled so the current -// request's connection also closes after the response. -// -// To prevent Go page allocator corruption, GOMAXPROCS is set to 1 after the -// final GC. With a single P, all goroutines (including any that allocate -// between now and the VM freeze) run sequentially. This eliminates concurrent -// page allocator access, so even if the freeze lands mid-allocation, the -// in-flight operation completes atomically on restore before any GC reads -// the summary tree. GOMAXPROCS is restored on the first health check after -// restore (see postRestoreRecovery). -// -// Called by the host agent as a best-effort signal before vm.Pause(). -func (a *API) PostSnapshotPrepare(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - if a.portSubsystem != nil { - a.portSubsystem.Stop() - a.logger.Info().Msg("snapshot/prepare: port subsystem quiesced") - } - - if a.connTracker != nil { - a.connTracker.PrepareForSnapshot() - a.logger.Info().Msg("snapshot/prepare: idle connections closed, keep-alives disabled") - } - - // Send the response before the GC so HTTP buffer allocations happen - // while GOMAXPROCS is still at its normal value. - w.Header().Set("Cache-Control", "no-store") - w.WriteHeader(http.StatusNoContent) - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - - // Final GC pass after all major allocations (connection cleanup, - // response write) are complete. - runtime.GC() - runtime.GC() - debug.FreeOSMemory() - - // Reduce to a single P so any post-GC allocations (HTTP server - // connection teardown) run sequentially — no concurrent page allocator - // access that could leave the summary tree inconsistent if the VM - // freezes mid-update. - a.prevGOMAXPROCS = runtime.GOMAXPROCS(1) - - a.needsRestore.Store(true) - a.logger.Info().Msg("snapshot/prepare: GOMAXPROCS=1, ready for freeze") -} diff --git a/envd/internal/api/store.go b/envd/internal/api/store.go deleted file mode 100644 index ba4d445..0000000 --- a/envd/internal/api/store.go +++ /dev/null @@ -1,156 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package api - -import ( - "context" - "encoding/json" - "net/http" - "runtime" - "runtime/debug" - "sync" - "sync/atomic" - - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/host" - publicport "git.omukk.dev/wrenn/sandbox/envd/internal/port" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -// MMDSClient provides access to MMDS metadata. -type MMDSClient interface { - GetAccessTokenHash(ctx context.Context) (string, error) -} - -// DefaultMMDSClient is the production implementation that calls the real MMDS endpoint. -type DefaultMMDSClient struct{} - -func (c *DefaultMMDSClient) GetAccessTokenHash(ctx context.Context) (string, error) { - return host.GetAccessTokenHashFromMMDS(ctx) -} - -type API struct { - isNotFC bool - logger *zerolog.Logger - accessToken *SecureToken - defaults *execcontext.Defaults - version string - - mmdsChan chan *host.MMDSOpts - hyperloopLock sync.Mutex - mmdsClient MMDSClient - - lastSetTime *utils.AtomicMax - initLock sync.Mutex - - // rootCtx is the parent context from main(), used to restart - // long-lived goroutines after snapshot restore. - rootCtx context.Context - portSubsystem *publicport.PortSubsystem - connTracker *ServerConnTracker - - // needsRestore is set by PostSnapshotPrepare and cleared on the first - // health check or PostInit after restore. While set, GOMAXPROCS is 1 - // to prevent concurrent page allocator access during the freeze window. - needsRestore atomic.Bool - prevGOMAXPROCS int // GOMAXPROCS value before PrepareSnapshot reduced it to 1 -} - -func New(l *zerolog.Logger, defaults *execcontext.Defaults, mmdsChan chan *host.MMDSOpts, isNotFC bool, rootCtx context.Context, portSubsystem *publicport.PortSubsystem, connTracker *ServerConnTracker, version string) *API { - return &API{ - logger: l, - defaults: defaults, - mmdsChan: mmdsChan, - isNotFC: isNotFC, - mmdsClient: &DefaultMMDSClient{}, - lastSetTime: utils.NewAtomicMax(), - accessToken: &SecureToken{}, - rootCtx: rootCtx, - portSubsystem: portSubsystem, - connTracker: connTracker, - version: version, - } -} - -func (a *API) GetHealth(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - // On the first health check after snapshot restore, re-enable GC and - // clean up stale state. By this point, any goroutine that was mid- - // allocation when the VM was frozen has completed, so the page allocator - // summary tree is consistent and safe for GC to read. - if a.needsRestore.CompareAndSwap(true, false) { - a.postRestoreRecovery() - } - - a.logger.Trace().Msg("Health check") - - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Content-Type", "application/json") - - _ = json.NewEncoder(w).Encode(map[string]string{ - "version": a.version, - }) -} - -// postRestoreRecovery restores GOMAXPROCS, runs a clean GC cycle, closes -// zombie TCP connections from before the snapshot, re-enables HTTP keep-alives, -// and restarts the port subsystem. Called exactly once per restore cycle, -// guarded by a CAS on needsRestore in both GetHealth and PostInit. -func (a *API) postRestoreRecovery() { - // Restore parallelism first — any goroutine that was mid-allocation - // when the VM froze has already completed by the time a health check - // or PostInit request is being served, so the page allocator summary - // tree is consistent and safe for a full GC. - prev := a.prevGOMAXPROCS - if prev > 0 { - runtime.GOMAXPROCS(prev) - } - runtime.GC() - runtime.GC() - debug.FreeOSMemory() - a.logger.Info().Msg("restore: GOMAXPROCS restored, GC complete") - - if a.connTracker != nil { - a.connTracker.RestoreAfterSnapshot() - a.logger.Info().Msg("restore: zombie connections closed, keep-alives re-enabled") - } - - if a.portSubsystem != nil { - a.portSubsystem.Start(a.rootCtx) - a.logger.Info().Msg("restore: port subsystem restarted") - } -} - -func (a *API) GetMetrics(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - a.logger.Trace().Msg("Get metrics") - - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Content-Type", "application/json") - - metrics, err := host.GetMetrics() - if err != nil { - a.logger.Error().Err(err).Msg("Failed to get metrics") - w.WriteHeader(http.StatusInternalServerError) - - return - } - - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(metrics); err != nil { - a.logger.Error().Err(err).Msg("Failed to encode metrics") - } -} - -func (a *API) getLogger(err error) *zerolog.Event { - if err != nil { - return a.logger.Error().Err(err) //nolint:zerologlint // this is only prep - } - - return a.logger.Info() //nolint:zerologlint // this is only prep -} diff --git a/envd/internal/api/upload.go b/envd/internal/api/upload.go deleted file mode 100644 index e42e0b5..0000000 --- a/envd/internal/api/upload.go +++ /dev/null @@ -1,311 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "mime/multipart" - "net/http" - "os" - "os/user" - "path/filepath" - "strings" - "syscall" - - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -var ErrNoDiskSpace = fmt.Errorf("not enough disk space available") - -func processFile(r *http.Request, path string, part io.Reader, uid, gid int, logger zerolog.Logger) (int, error) { - logger.Debug(). - Str("path", path). - Msg("File processing") - - err := permissions.EnsureDirs(filepath.Dir(path), uid, gid) - if err != nil { - err := fmt.Errorf("error ensuring directories: %w", err) - - return http.StatusInternalServerError, err - } - - canBePreChowned := false - stat, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - errMsg := fmt.Errorf("error getting file info: %w", err) - - return http.StatusInternalServerError, errMsg - } else if err == nil { - if stat.IsDir() { - err := fmt.Errorf("path is a directory: %s", path) - - return http.StatusBadRequest, err - } - canBePreChowned = true - } - - hasBeenChowned := false - if canBePreChowned { - err = os.Chown(path, uid, gid) - if err != nil { - if !os.IsNotExist(err) { - err = fmt.Errorf("error changing file ownership: %w", err) - - return http.StatusInternalServerError, err - } - } else { - hasBeenChowned = true - } - } - - file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o666) - if err != nil { - if errors.Is(err, syscall.ENOSPC) { - err = fmt.Errorf("not enough inodes available: %w", err) - - return http.StatusInsufficientStorage, err - } - - err := fmt.Errorf("error opening file: %w", err) - - return http.StatusInternalServerError, err - } - - defer file.Close() - - if !hasBeenChowned { - err = os.Chown(path, uid, gid) - if err != nil { - err := fmt.Errorf("error changing file ownership: %w", err) - - return http.StatusInternalServerError, err - } - } - - _, err = file.ReadFrom(part) - if err != nil { - if errors.Is(err, syscall.ENOSPC) { - err = ErrNoDiskSpace - if r.ContentLength > 0 { - err = fmt.Errorf("attempted to write %d bytes: %w", r.ContentLength, err) - } - - return http.StatusInsufficientStorage, err - } - - err = fmt.Errorf("error writing file: %w", err) - - return http.StatusInternalServerError, err - } - - return http.StatusNoContent, nil -} - -func resolvePath(part *multipart.Part, paths *UploadSuccess, u *user.User, defaultPath *string, params PostFilesParams) (string, error) { - var pathToResolve string - - if params.Path != nil { - pathToResolve = *params.Path - } else { - var err error - customPart := utils.NewCustomPart(part) - pathToResolve, err = customPart.FileNameWithPath() - if err != nil { - return "", fmt.Errorf("error getting multipart custom part file name: %w", err) - } - } - - filePath, err := permissions.ExpandAndResolve(pathToResolve, u, defaultPath) - if err != nil { - return "", fmt.Errorf("error resolving path: %w", err) - } - - for _, entry := range *paths { - if entry.Path == filePath { - var alreadyUploaded []string - for _, uploadedFile := range *paths { - if uploadedFile.Path != filePath { - alreadyUploaded = append(alreadyUploaded, uploadedFile.Path) - } - } - - errMsg := fmt.Errorf("you cannot upload multiple files to the same path '%s' in one upload request, only the first specified file was uploaded", filePath) - - if len(alreadyUploaded) > 1 { - errMsg = fmt.Errorf("%w, also the following files were uploaded: %v", errMsg, strings.Join(alreadyUploaded, ", ")) - } - - return "", errMsg - } - } - - return filePath, nil -} - -func (a *API) handlePart(r *http.Request, part *multipart.Part, paths UploadSuccess, u *user.User, uid, gid int, operationID string, params PostFilesParams) (*EntryInfo, int, error) { - defer part.Close() - - if part.FormName() != "file" { - return nil, http.StatusOK, nil - } - - filePath, err := resolvePath(part, &paths, u, a.defaults.Workdir, params) - if err != nil { - return nil, http.StatusBadRequest, err - } - - logger := a.logger. - With(). - Str(string(logs.OperationIDKey), operationID). - Str("event_type", "file_processing"). - Logger() - - status, err := processFile(r, filePath, part, uid, gid, logger) - if err != nil { - return nil, status, err - } - - return &EntryInfo{ - Path: filePath, - Name: filepath.Base(filePath), - Type: File, - }, http.StatusOK, nil -} - -func (a *API) PostFiles(w http.ResponseWriter, r *http.Request, params PostFilesParams) { - // Capture original body to ensure it's always closed - originalBody := r.Body - defer originalBody.Close() - - var errorCode int - var errMsg error - - var path string - if params.Path != nil { - path = *params.Path - } - - operationID := logs.AssignOperationID() - - // signing authorization if needed - err := a.validateSigning(r, params.Signature, params.SignatureExpiration, params.Username, path, SigningWriteOperation) - if err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("error during auth validation") - jsonError(w, http.StatusUnauthorized, err) - - return - } - - username, err := execcontext.ResolveDefaultUsername(params.Username, a.defaults.User) - if err != nil { - a.logger.Error().Err(err).Str(string(logs.OperationIDKey), operationID).Msg("no user specified") - jsonError(w, http.StatusBadRequest, err) - - return - } - - defer func() { - l := a.logger. - Err(errMsg). - Str("method", r.Method+" "+r.URL.Path). - Str(string(logs.OperationIDKey), operationID). - Str("path", path). - Str("username", username) - - if errMsg != nil { - l = l.Int("error_code", errorCode) - } - - l.Msg("File write") - }() - - // Handle gzip-encoded request body - body, err := getDecompressedBody(r) - if err != nil { - errMsg = fmt.Errorf("error decompressing request body: %w", err) - errorCode = http.StatusBadRequest - jsonError(w, errorCode, errMsg) - - return - } - defer body.Close() - r.Body = body - - f, err := r.MultipartReader() - if err != nil { - errMsg = fmt.Errorf("error parsing multipart form: %w", err) - errorCode = http.StatusInternalServerError - jsonError(w, errorCode, errMsg) - - return - } - - u, err := user.Lookup(username) - if err != nil { - errMsg = fmt.Errorf("error looking up user '%s': %w", username, err) - errorCode = http.StatusUnauthorized - - jsonError(w, errorCode, errMsg) - - return - } - - uid, gid, err := permissions.GetUserIdInts(u) - if err != nil { - errMsg = fmt.Errorf("error getting user ids: %w", err) - - jsonError(w, http.StatusInternalServerError, errMsg) - - return - } - - paths := UploadSuccess{} - - for { - part, partErr := f.NextPart() - - if partErr == io.EOF { - // We're done reading the parts. - break - } else if partErr != nil { - errMsg = fmt.Errorf("error reading form: %w", partErr) - errorCode = http.StatusInternalServerError - jsonError(w, errorCode, errMsg) - - break - } - - entry, status, err := a.handlePart(r, part, paths, u, uid, gid, operationID, params) - if err != nil { - errorCode = status - errMsg = err - jsonError(w, errorCode, errMsg) - - return - } - - if entry != nil { - paths = append(paths, *entry) - } - } - - data, err := json.Marshal(paths) - if err != nil { - errMsg = fmt.Errorf("error marshaling response: %w", err) - errorCode = http.StatusInternalServerError - jsonError(w, errorCode, errMsg) - - return - } - - w.WriteHeader(http.StatusOK) - _, _ = w.Write(data) -} diff --git a/envd/internal/api/upload_test.go b/envd/internal/api/upload_test.go deleted file mode 100644 index 9a142cb..0000000 --- a/envd/internal/api/upload_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "os" - "os/exec" - "path/filepath" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestProcessFile(t *testing.T) { - t.Parallel() - uid := os.Getuid() - gid := os.Getgid() - - newRequest := func(content []byte) (*http.Request, io.Reader) { - request := &http.Request{ - ContentLength: int64(len(content)), - } - buffer := bytes.NewBuffer(content) - - return request, buffer - } - - var emptyReq http.Request - var emptyPart *bytes.Buffer - var emptyLogger zerolog.Logger - - t.Run("failed to ensure directories", func(t *testing.T) { - t.Parallel() - httpStatus, err := processFile(&emptyReq, "/proc/invalid/not-real", emptyPart, uid, gid, emptyLogger) - require.Error(t, err) - assert.Equal(t, http.StatusInternalServerError, httpStatus) - assert.ErrorContains(t, err, "error ensuring directories: ") - }) - - t.Run("attempt to replace directory with a file", func(t *testing.T) { - t.Parallel() - tempDir := t.TempDir() - - httpStatus, err := processFile(&emptyReq, tempDir, emptyPart, uid, gid, emptyLogger) - require.Error(t, err) - assert.Equal(t, http.StatusBadRequest, httpStatus, err.Error()) - assert.ErrorContains(t, err, "path is a directory: ") - }) - - t.Run("fail to create file", func(t *testing.T) { - t.Parallel() - httpStatus, err := processFile(&emptyReq, "/proc/invalid-filename", emptyPart, uid, gid, emptyLogger) - require.Error(t, err) - assert.Equal(t, http.StatusInternalServerError, httpStatus) - assert.ErrorContains(t, err, "error opening file: ") - }) - - t.Run("out of disk space", func(t *testing.T) { - t.Parallel() - // make a tiny tmpfs mount - mountSize := 1024 - tempDir := createTmpfsMount(t, mountSize) - - // create test file - firstFileSize := mountSize / 2 - tempFile1 := filepath.Join(tempDir, "test-file-1") - - // fill it up - cmd := exec.CommandContext(t.Context(), - "dd", "if=/dev/zero", "of="+tempFile1, fmt.Sprintf("bs=%d", firstFileSize), "count=1") - err := cmd.Run() - require.NoError(t, err) - - // create a new file that would fill up the - secondFileContents := make([]byte, mountSize*2) - for index := range secondFileContents { - secondFileContents[index] = 'a' - } - - // try to replace it - request, buffer := newRequest(secondFileContents) - tempFile2 := filepath.Join(tempDir, "test-file-2") - httpStatus, err := processFile(request, tempFile2, buffer, uid, gid, emptyLogger) - require.Error(t, err) - assert.Equal(t, http.StatusInsufficientStorage, httpStatus) - assert.ErrorContains(t, err, "attempted to write 2048 bytes: not enough disk space") - }) - - t.Run("happy path", func(t *testing.T) { - t.Parallel() - tempDir := t.TempDir() - tempFile := filepath.Join(tempDir, "test-file") - - content := []byte("test-file-contents") - request, buffer := newRequest(content) - - httpStatus, err := processFile(request, tempFile, buffer, uid, gid, emptyLogger) - require.NoError(t, err) - assert.Equal(t, http.StatusNoContent, httpStatus) - - data, err := os.ReadFile(tempFile) - require.NoError(t, err) - assert.Equal(t, content, data) - }) - - t.Run("overwrite file on full disk", func(t *testing.T) { - t.Parallel() - // make a tiny tmpfs mount - sizeInBytes := 1024 - tempDir := createTmpfsMount(t, 1024) - - // create test file - tempFile := filepath.Join(tempDir, "test-file") - - // fill it up - cmd := exec.CommandContext(t.Context(), "dd", "if=/dev/zero", "of="+tempFile, fmt.Sprintf("bs=%d", sizeInBytes), "count=1") - err := cmd.Run() - require.NoError(t, err) - - // try to replace it - content := []byte("test-file-contents") - request, buffer := newRequest(content) - httpStatus, err := processFile(request, tempFile, buffer, uid, gid, emptyLogger) - require.NoError(t, err) - assert.Equal(t, http.StatusNoContent, httpStatus) - }) - - t.Run("write new file on full disk", func(t *testing.T) { - t.Parallel() - // make a tiny tmpfs mount - sizeInBytes := 1024 - tempDir := createTmpfsMount(t, 1024) - - // create test file - tempFile1 := filepath.Join(tempDir, "test-file") - - // fill it up - cmd := exec.CommandContext(t.Context(), "dd", "if=/dev/zero", "of="+tempFile1, fmt.Sprintf("bs=%d", sizeInBytes), "count=1") - err := cmd.Run() - require.NoError(t, err) - - // try to write a new file - tempFile2 := filepath.Join(tempDir, "test-file-2") - content := []byte("test-file-contents") - request, buffer := newRequest(content) - httpStatus, err := processFile(request, tempFile2, buffer, uid, gid, emptyLogger) - require.ErrorContains(t, err, "not enough disk space available") - assert.Equal(t, http.StatusInsufficientStorage, httpStatus) - }) - - t.Run("write new file with no inodes available", func(t *testing.T) { - t.Parallel() - // make a tiny tmpfs mount - tempDir := createTmpfsMountWithInodes(t, 1024, 2) - - // create test file - tempFile1 := filepath.Join(tempDir, "test-file") - - // fill it up - cmd := exec.CommandContext(t.Context(), "dd", "if=/dev/zero", "of="+tempFile1, fmt.Sprintf("bs=%d", 100), "count=1") - err := cmd.Run() - require.NoError(t, err) - - // try to write a new file - tempFile2 := filepath.Join(tempDir, "test-file-2") - content := []byte("test-file-contents") - request, buffer := newRequest(content) - httpStatus, err := processFile(request, tempFile2, buffer, uid, gid, emptyLogger) - require.ErrorContains(t, err, "not enough inodes available") - assert.Equal(t, http.StatusInsufficientStorage, httpStatus) - }) - - t.Run("update sysfs or other virtual fs", func(t *testing.T) { - t.Parallel() - if os.Geteuid() != 0 { - t.Skip("skipping sysfs updates: Operation not permitted with non-root user") - } - - filePath := "/sys/fs/cgroup/user.slice/cpu.weight" - newContent := []byte("102\n") - request, buffer := newRequest(newContent) - - httpStatus, err := processFile(request, filePath, buffer, uid, gid, emptyLogger) - require.NoError(t, err) - assert.Equal(t, http.StatusNoContent, httpStatus) - - data, err := os.ReadFile(filePath) - require.NoError(t, err) - assert.Equal(t, newContent, data) - }) - - t.Run("replace file", func(t *testing.T) { - t.Parallel() - tempDir := t.TempDir() - tempFile := filepath.Join(tempDir, "test-file") - - err := os.WriteFile(tempFile, []byte("old-contents"), 0o644) - require.NoError(t, err) - - newContent := []byte("new-file-contents") - request, buffer := newRequest(newContent) - - httpStatus, err := processFile(request, tempFile, buffer, uid, gid, emptyLogger) - require.NoError(t, err) - assert.Equal(t, http.StatusNoContent, httpStatus) - - data, err := os.ReadFile(tempFile) - require.NoError(t, err) - assert.Equal(t, newContent, data) - }) -} - -func createTmpfsMount(t *testing.T, sizeInBytes int) string { - t.Helper() - - return createTmpfsMountWithInodes(t, sizeInBytes, 5) -} - -func createTmpfsMountWithInodes(t *testing.T, sizeInBytes, inodesCount int) string { - t.Helper() - - if os.Geteuid() != 0 { - t.Skip("skipping sysfs updates: Operation not permitted with non-root user") - } - - tempDir := t.TempDir() - - cmd := exec.CommandContext(t.Context(), - "mount", - "tmpfs", - tempDir, - "-t", "tmpfs", - "-o", fmt.Sprintf("size=%d,nr_inodes=%d", sizeInBytes, inodesCount)) - err := cmd.Run() - require.NoError(t, err) - t.Cleanup(func() { - ctx := context.WithoutCancel(t.Context()) - cmd := exec.CommandContext(ctx, "umount", tempDir) - err := cmd.Run() - require.NoError(t, err) - }) - - return tempDir -} diff --git a/envd/internal/execcontext/context.go b/envd/internal/execcontext/context.go deleted file mode 100644 index f150d61..0000000 --- a/envd/internal/execcontext/context.go +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package execcontext - -import ( - "errors" - - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -type Defaults struct { - EnvVars *utils.Map[string, string] - User string - Workdir *string -} - -func ResolveDefaultWorkdir(workdir string, defaultWorkdir *string) string { - if workdir != "" { - return workdir - } - - if defaultWorkdir != nil { - return *defaultWorkdir - } - - return "" -} - -func ResolveDefaultUsername(username *string, defaultUsername string) (string, error) { - if username != nil { - return *username, nil - } - - if defaultUsername != "" { - return defaultUsername, nil - } - - return "", errors.New("username not provided") -} diff --git a/envd/internal/host/metrics.go b/envd/internal/host/metrics.go deleted file mode 100644 index 3e80518..0000000 --- a/envd/internal/host/metrics.go +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package host - -import ( - "math" - "time" - - "github.com/shirou/gopsutil/v4/cpu" - "github.com/shirou/gopsutil/v4/mem" - "golang.org/x/sys/unix" -) - -type Metrics struct { - Timestamp int64 `json:"ts"` // Unix Timestamp in UTC - - CPUCount uint32 `json:"cpu_count"` // Total CPU cores - CPUUsedPercent float32 `json:"cpu_used_pct"` // Percent rounded to 2 decimal places - - // Deprecated: kept for backwards compatibility with older orchestrators. - MemTotalMiB uint64 `json:"mem_total_mib"` // Total virtual memory in MiB - - // Deprecated: kept for backwards compatibility with older orchestrators. - MemUsedMiB uint64 `json:"mem_used_mib"` // Used virtual memory in MiB - - MemTotal uint64 `json:"mem_total"` // Total virtual memory in bytes - MemUsed uint64 `json:"mem_used"` // Used virtual memory in bytes - - DiskUsed uint64 `json:"disk_used"` // Used disk space in bytes - DiskTotal uint64 `json:"disk_total"` // Total disk space in bytes -} - -func GetMetrics() (*Metrics, error) { - v, err := mem.VirtualMemory() - if err != nil { - return nil, err - } - - memUsedMiB := v.Used / 1024 / 1024 - memTotalMiB := v.Total / 1024 / 1024 - - cpuTotal, err := cpu.Counts(true) - if err != nil { - return nil, err - } - - cpuUsedPcts, err := cpu.Percent(0, false) - if err != nil { - return nil, err - } - - cpuUsedPct := cpuUsedPcts[0] - cpuUsedPctRounded := float32(cpuUsedPct) - if cpuUsedPct > 0 { - cpuUsedPctRounded = float32(math.Round(cpuUsedPct*100) / 100) - } - - diskMetrics, err := diskStats("/") - if err != nil { - return nil, err - } - - return &Metrics{ - Timestamp: time.Now().UTC().Unix(), - CPUCount: uint32(cpuTotal), - CPUUsedPercent: cpuUsedPctRounded, - MemUsedMiB: memUsedMiB, - MemTotalMiB: memTotalMiB, - MemTotal: v.Total, - MemUsed: v.Used, - DiskUsed: diskMetrics.Total - diskMetrics.Available, - DiskTotal: diskMetrics.Total, - }, nil -} - -type diskSpace struct { - Total uint64 - Available uint64 -} - -func diskStats(path string) (diskSpace, error) { - var st unix.Statfs_t - if err := unix.Statfs(path, &st); err != nil { - return diskSpace{}, err - } - - block := uint64(st.Bsize) - - // all data blocks - total := st.Blocks * block - // blocks available - available := st.Bavail * block - - return diskSpace{Total: total, Available: available}, nil -} diff --git a/envd/internal/host/mmds.go b/envd/internal/host/mmds.go deleted file mode 100644 index 99efcf2..0000000 --- a/envd/internal/host/mmds.go +++ /dev/null @@ -1,185 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package host - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "time" - - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -const ( - WrennRunDir = "/run/wrenn" // store sandbox metadata files here - - mmdsDefaultAddress = "169.254.169.254" - mmdsTokenExpiration = 60 * time.Second - - mmdsAccessTokenRequestClientTimeout = 10 * time.Second -) - -var mmdsAccessTokenClient = &http.Client{ - Timeout: mmdsAccessTokenRequestClientTimeout, - Transport: &http.Transport{ - DisableKeepAlives: true, - }, -} - -type MMDSOpts struct { - SandboxID string `json:"instanceID"` - TemplateID string `json:"envID"` - LogsCollectorAddress string `json:"address"` - AccessTokenHash string `json:"accessTokenHash"` -} - -func (opts *MMDSOpts) Update(sandboxID, templateID, collectorAddress string) { - opts.SandboxID = sandboxID - opts.TemplateID = templateID - opts.LogsCollectorAddress = collectorAddress -} - -func (opts *MMDSOpts) AddOptsToJSON(jsonLogs []byte) ([]byte, error) { - parsed := make(map[string]any) - - err := json.Unmarshal(jsonLogs, &parsed) - if err != nil { - return nil, err - } - - parsed["instanceID"] = opts.SandboxID - parsed["envID"] = opts.TemplateID - - data, err := json.Marshal(parsed) - - return data, err -} - -func getMMDSToken(ctx context.Context, client *http.Client) (string, error) { - request, err := http.NewRequestWithContext(ctx, http.MethodPut, "http://"+mmdsDefaultAddress+"/latest/api/token", &bytes.Buffer{}) - if err != nil { - return "", err - } - - request.Header["X-metadata-token-ttl-seconds"] = []string{fmt.Sprint(mmdsTokenExpiration.Seconds())} - - response, err := client.Do(request) - if err != nil { - return "", err - } - defer response.Body.Close() - - body, err := io.ReadAll(response.Body) - if err != nil { - return "", err - } - - token := string(body) - - if len(token) == 0 { - return "", fmt.Errorf("mmds token is an empty string") - } - - return token, nil -} - -func getMMDSOpts(ctx context.Context, client *http.Client, token string) (*MMDSOpts, error) { - request, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+mmdsDefaultAddress, &bytes.Buffer{}) - if err != nil { - return nil, err - } - - request.Header["X-metadata-token"] = []string{token} - request.Header["Accept"] = []string{"application/json"} - - response, err := client.Do(request) - if err != nil { - return nil, err - } - - defer response.Body.Close() - - body, err := io.ReadAll(response.Body) - if err != nil { - return nil, err - } - - var opts MMDSOpts - - err = json.Unmarshal(body, &opts) - if err != nil { - return nil, err - } - - return &opts, nil -} - -// GetAccessTokenHashFromMMDS reads the access token hash from MMDS. -// This is used to validate that /init requests come from the orchestrator. -func GetAccessTokenHashFromMMDS(ctx context.Context) (string, error) { - token, err := getMMDSToken(ctx, mmdsAccessTokenClient) - if err != nil { - return "", fmt.Errorf("failed to get MMDS token: %w", err) - } - - opts, err := getMMDSOpts(ctx, mmdsAccessTokenClient, token) - if err != nil { - return "", fmt.Errorf("failed to get MMDS opts: %w", err) - } - - return opts.AccessTokenHash, nil -} - -func PollForMMDSOpts(ctx context.Context, mmdsChan chan<- *MMDSOpts, envVars *utils.Map[string, string]) { - httpClient := &http.Client{} - defer httpClient.CloseIdleConnections() - - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - fmt.Fprintf(os.Stderr, "context cancelled while waiting for mmds opts") - - return - case <-ticker.C: - token, err := getMMDSToken(ctx, httpClient) - if err != nil { - fmt.Fprintf(os.Stderr, "error getting mmds token: %v\n", err) - - continue - } - - mmdsOpts, err := getMMDSOpts(ctx, httpClient, token) - if err != nil { - fmt.Fprintf(os.Stderr, "error getting mmds opts: %v\n", err) - - continue - } - - envVars.Store("WRENN_SANDBOX_ID", mmdsOpts.SandboxID) - envVars.Store("WRENN_TEMPLATE_ID", mmdsOpts.TemplateID) - - if err := os.WriteFile(filepath.Join(WrennRunDir, ".WRENN_SANDBOX_ID"), []byte(mmdsOpts.SandboxID), 0o666); err != nil { - fmt.Fprintf(os.Stderr, "error writing sandbox ID file: %v\n", err) - } - if err := os.WriteFile(filepath.Join(WrennRunDir, ".WRENN_TEMPLATE_ID"), []byte(mmdsOpts.TemplateID), 0o666); err != nil { - fmt.Fprintf(os.Stderr, "error writing template ID file: %v\n", err) - } - - if mmdsOpts.LogsCollectorAddress != "" { - mmdsChan <- mmdsOpts - } - - return - } - } -} diff --git a/envd/internal/logs/bufferedEvents.go b/envd/internal/logs/bufferedEvents.go deleted file mode 100644 index 24d5158..0000000 --- a/envd/internal/logs/bufferedEvents.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package logs - -import ( - "time" - - "github.com/rs/zerolog" -) - -const ( - defaultMaxBufferSize = 2 << 15 - defaultTimeout = 2 * time.Second -) - -func LogBufferedDataEvents(dataCh <-chan []byte, logger *zerolog.Logger, eventType string) { - timer := time.NewTicker(defaultTimeout) - defer timer.Stop() - - var buffer []byte - defer func() { - if len(buffer) > 0 { - logger.Info().Str(eventType, string(buffer)).Msg("Streaming process event (flush)") - } - }() - - for { - select { - case <-timer.C: - if len(buffer) > 0 { - logger.Info().Str(eventType, string(buffer)).Msg("Streaming process event") - buffer = nil - } - case data, ok := <-dataCh: - if !ok { - return - } - - buffer = append(buffer, data...) - - if len(buffer) >= defaultMaxBufferSize { - logger.Info().Str(eventType, string(buffer)).Msg("Streaming process event") - buffer = nil - - continue - } - } - } -} diff --git a/envd/internal/logs/exporter/exporter.go b/envd/internal/logs/exporter/exporter.go deleted file mode 100644 index 038ef51..0000000 --- a/envd/internal/logs/exporter/exporter.go +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package exporter - -import ( - "bytes" - "context" - "fmt" - "log" - "net/http" - "os" - "sync" - "time" - - "git.omukk.dev/wrenn/sandbox/envd/internal/host" -) - -const ExporterTimeout = 10 * time.Second - -type HTTPExporter struct { - client http.Client - logs [][]byte - isNotFC bool - mmdsOpts *host.MMDSOpts - - // Concurrency coordination - triggers chan struct{} - logLock sync.RWMutex - mmdsLock sync.RWMutex - startOnce sync.Once -} - -func NewHTTPLogsExporter(ctx context.Context, isNotFC bool, mmdsChan <-chan *host.MMDSOpts) *HTTPExporter { - exporter := &HTTPExporter{ - client: http.Client{ - Timeout: ExporterTimeout, - }, - triggers: make(chan struct{}, 1), - isNotFC: isNotFC, - startOnce: sync.Once{}, - mmdsOpts: &host.MMDSOpts{ - SandboxID: "unknown", - TemplateID: "unknown", - LogsCollectorAddress: "", - }, - } - - go exporter.listenForMMDSOptsAndStart(ctx, mmdsChan) - - return exporter -} - -func (w *HTTPExporter) sendInstanceLogs(ctx context.Context, logs []byte, address string) error { - if address == "" { - return nil - } - - request, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewBuffer(logs)) - if err != nil { - return err - } - - request.Header.Set("Content-Type", "application/json") - - response, err := w.client.Do(request) - if err != nil { - return err - } - defer response.Body.Close() - - return nil -} - -func printLog(logs []byte) { - fmt.Fprintf(os.Stdout, "%v", string(logs)) -} - -func (w *HTTPExporter) listenForMMDSOptsAndStart(ctx context.Context, mmdsChan <-chan *host.MMDSOpts) { - for { - select { - case <-ctx.Done(): - return - case mmdsOpts, ok := <-mmdsChan: - if !ok { - return - } - - w.mmdsLock.Lock() - w.mmdsOpts.Update(mmdsOpts.SandboxID, mmdsOpts.TemplateID, mmdsOpts.LogsCollectorAddress) - w.mmdsLock.Unlock() - - w.startOnce.Do(func() { - go w.start(ctx) - }) - } - } -} - -func (w *HTTPExporter) start(ctx context.Context) { - for range w.triggers { - logs := w.getAllLogs() - - if len(logs) == 0 { - continue - } - - if w.isNotFC { - for _, log := range logs { - fmt.Fprintf(os.Stdout, "%v", string(log)) - } - - continue - } - - for _, logLine := range logs { - w.mmdsLock.RLock() - logLineWithOpts, err := w.mmdsOpts.AddOptsToJSON(logLine) - w.mmdsLock.RUnlock() - if err != nil { - log.Printf("error adding instance logging options (%+v) to JSON (%+v) with logs : %v\n", w.mmdsOpts, logLine, err) - - printLog(logLine) - - continue - } - - err = w.sendInstanceLogs(ctx, logLineWithOpts, w.mmdsOpts.LogsCollectorAddress) - if err != nil { - log.Printf("error sending instance logs: %+v", err) - - printLog(logLine) - - continue - } - } - } -} - -func (w *HTTPExporter) resumeProcessing() { - select { - case w.triggers <- struct{}{}: - default: - // Exporter processing already triggered - // This is expected behavior if the exporter is already processing logs - } -} - -func (w *HTTPExporter) Write(logs []byte) (int, error) { - logsCopy := make([]byte, len(logs)) - copy(logsCopy, logs) - - go w.addLogs(logsCopy) - - return len(logs), nil -} - -func (w *HTTPExporter) getAllLogs() [][]byte { - w.logLock.Lock() - defer w.logLock.Unlock() - - logs := w.logs - w.logs = nil - - return logs -} - -func (w *HTTPExporter) addLogs(logs []byte) { - w.logLock.Lock() - defer w.logLock.Unlock() - - w.logs = append(w.logs, logs) - - w.resumeProcessing() -} diff --git a/envd/internal/logs/interceptor.go b/envd/internal/logs/interceptor.go deleted file mode 100644 index 2aa7c83..0000000 --- a/envd/internal/logs/interceptor.go +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package logs - -import ( - "context" - "fmt" - "strconv" - "strings" - "sync/atomic" - - "connectrpc.com/connect" - "github.com/rs/zerolog" -) - -type OperationID string - -const ( - OperationIDKey OperationID = "operation_id" - DefaultHTTPMethod string = "POST" -) - -var operationID = atomic.Int32{} - -func AssignOperationID() string { - id := operationID.Add(1) - - return strconv.Itoa(int(id)) -} - -func AddRequestIDToContext(ctx context.Context) context.Context { - return context.WithValue(ctx, OperationIDKey, AssignOperationID()) -} - -func formatMethod(method string) string { - parts := strings.Split(method, ".") - if len(parts) < 2 { - return method - } - - split := strings.Split(parts[1], "/") - if len(split) < 2 { - return method - } - - servicePart := split[0] - servicePart = strings.ToUpper(servicePart[:1]) + servicePart[1:] - - methodPart := split[1] - methodPart = strings.ToLower(methodPart[:1]) + methodPart[1:] - - return fmt.Sprintf("%s %s", servicePart, methodPart) -} - -func NewUnaryLogInterceptor(logger *zerolog.Logger) connect.UnaryInterceptorFunc { - interceptor := func(next connect.UnaryFunc) connect.UnaryFunc { - return connect.UnaryFunc(func( - ctx context.Context, - req connect.AnyRequest, - ) (connect.AnyResponse, error) { - ctx = AddRequestIDToContext(ctx) - - res, err := next(ctx, req) - - l := logger. - Err(err). - Str("method", DefaultHTTPMethod+" "+req.Spec().Procedure). - Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string)) - - if err != nil { - l = l.Int("error_code", int(connect.CodeOf(err))) - } - - if req != nil { - l = l.Interface("request", req.Any()) - } - - if res != nil && err == nil { - l = l.Interface("response", res.Any()) - } - - if res == nil && err == nil { - l = l.Interface("response", nil) - } - - l.Msg(formatMethod(req.Spec().Procedure)) - - return res, err - }) - } - - return connect.UnaryInterceptorFunc(interceptor) -} - -func LogServerStreamWithoutEvents[T any, R any]( - ctx context.Context, - logger *zerolog.Logger, - req *connect.Request[R], - stream *connect.ServerStream[T], - handler func(ctx context.Context, req *connect.Request[R], stream *connect.ServerStream[T]) error, -) error { - ctx = AddRequestIDToContext(ctx) - - l := logger.Debug(). - Str("method", DefaultHTTPMethod+" "+req.Spec().Procedure). - Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string)) - - if req != nil { - l = l.Interface("request", req.Any()) - } - - l.Msg(fmt.Sprintf("%s (server stream start)", formatMethod(req.Spec().Procedure))) - - err := handler(ctx, req, stream) - - logEvent := getErrDebugLogEvent(logger, err). - Str("method", DefaultHTTPMethod+" "+req.Spec().Procedure). - Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string)) - - if err != nil { - logEvent = logEvent.Int("error_code", int(connect.CodeOf(err))) - } else { - logEvent = logEvent.Interface("response", nil) - } - - logEvent.Msg(fmt.Sprintf("%s (server stream end)", formatMethod(req.Spec().Procedure))) - - return err -} - -func LogClientStreamWithoutEvents[T any, R any]( - ctx context.Context, - logger *zerolog.Logger, - stream *connect.ClientStream[T], - handler func(ctx context.Context, stream *connect.ClientStream[T]) (*connect.Response[R], error), -) (*connect.Response[R], error) { - ctx = AddRequestIDToContext(ctx) - - logger.Debug(). - Str("method", DefaultHTTPMethod+" "+stream.Spec().Procedure). - Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string)). - Msg(fmt.Sprintf("%s (client stream start)", formatMethod(stream.Spec().Procedure))) - - res, err := handler(ctx, stream) - - logEvent := getErrDebugLogEvent(logger, err). - Str("method", DefaultHTTPMethod+" "+stream.Spec().Procedure). - Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string)) - - if err != nil { - logEvent = logEvent.Int("error_code", int(connect.CodeOf(err))) - } - - if res != nil && err == nil { - logEvent = logEvent.Interface("response", res.Any()) - } - - if res == nil && err == nil { - logEvent = logEvent.Interface("response", nil) - } - - logEvent.Msg(fmt.Sprintf("%s (client stream end)", formatMethod(stream.Spec().Procedure))) - - return res, err -} - -// Return logger with error level if err is not nil, otherwise return logger with debug level -func getErrDebugLogEvent(logger *zerolog.Logger, err error) *zerolog.Event { - if err != nil { - return logger.Error().Err(err) //nolint:zerologlint // this builds an event, it is not expected to return it - } - - return logger.Debug() //nolint:zerologlint // this builds an event, it is not expected to return it -} diff --git a/envd/internal/logs/logger.go b/envd/internal/logs/logger.go deleted file mode 100644 index ff17b0b..0000000 --- a/envd/internal/logs/logger.go +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package logs - -import ( - "context" - "io" - "os" - "time" - - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/host" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs/exporter" -) - -func NewLogger(ctx context.Context, isNotFC bool, mmdsChan <-chan *host.MMDSOpts) *zerolog.Logger { - zerolog.TimestampFieldName = "timestamp" - zerolog.TimeFieldFormat = time.RFC3339Nano - - exporters := []io.Writer{} - - if isNotFC { - exporters = append(exporters, os.Stdout) - } else { - exporters = append(exporters, exporter.NewHTTPLogsExporter(ctx, isNotFC, mmdsChan), os.Stdout) - } - - l := zerolog. - New(io.MultiWriter(exporters...)). - With(). - Timestamp(). - Logger(). - Level(zerolog.DebugLevel) - - return &l -} diff --git a/envd/internal/permissions/authenticate.go b/envd/internal/permissions/authenticate.go deleted file mode 100644 index 1b799ad..0000000 --- a/envd/internal/permissions/authenticate.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package permissions - -import ( - "context" - "fmt" - "os/user" - - "connectrpc.com/authn" - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" -) - -func AuthenticateUsername(_ context.Context, req authn.Request) (any, error) { - username, _, ok := req.BasicAuth() - if !ok { - // When no username is provided, ignore the authentication method (not all endpoints require it) - // Missing user is then handled in the GetAuthUser function - return nil, nil - } - - u, err := GetUser(username) - if err != nil { - return nil, authn.Errorf("invalid username: '%s'", username) - } - - return u, nil -} - -func GetAuthUser(ctx context.Context, defaultUser string) (*user.User, error) { - u, ok := authn.GetInfo(ctx).(*user.User) - if !ok { - username, err := execcontext.ResolveDefaultUsername(nil, defaultUser) - if err != nil { - return nil, connect.NewError(connect.CodeUnauthenticated, fmt.Errorf("no user specified")) - } - - u, err := GetUser(username) - if err != nil { - return nil, authn.Errorf("invalid default user: '%s'", username) - } - - return u, nil - } - - return u, nil -} diff --git a/envd/internal/permissions/keepalive.go b/envd/internal/permissions/keepalive.go deleted file mode 100644 index e39b38b..0000000 --- a/envd/internal/permissions/keepalive.go +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package permissions - -import ( - "strconv" - "time" - - "connectrpc.com/connect" -) - -const defaultKeepAliveInterval = 90 * time.Second - -func GetKeepAliveTicker[T any](req *connect.Request[T]) (*time.Ticker, func()) { - keepAliveIntervalHeader := req.Header().Get("Keepalive-Ping-Interval") - - var interval time.Duration - - keepAliveIntervalInt, err := strconv.Atoi(keepAliveIntervalHeader) - if err != nil { - interval = defaultKeepAliveInterval - } else { - interval = time.Duration(keepAliveIntervalInt) * time.Second - } - - ticker := time.NewTicker(interval) - - return ticker, func() { - ticker.Reset(interval) - } -} diff --git a/envd/internal/permissions/path.go b/envd/internal/permissions/path.go deleted file mode 100644 index 9a15495..0000000 --- a/envd/internal/permissions/path.go +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package permissions - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "slices" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" -) - -func expand(path, homedir string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - return filepath.Join(homedir, path[1:]), nil -} - -func ExpandAndResolve(path string, user *user.User, defaultPath *string) (string, error) { - path = execcontext.ResolveDefaultWorkdir(path, defaultPath) - - path, err := expand(path, user.HomeDir) - if err != nil { - return "", fmt.Errorf("failed to expand path '%s' for user '%s': %w", path, user.Username, err) - } - - if filepath.IsAbs(path) { - return path, nil - } - - // The filepath.Abs can correctly resolve paths like /home/user/../file - path = filepath.Join(user.HomeDir, path) - - abs, err := filepath.Abs(path) - if err != nil { - return "", fmt.Errorf("failed to resolve path '%s' for user '%s' with home dir '%s': %w", path, user.Username, user.HomeDir, err) - } - - return abs, nil -} - -func getSubpaths(path string) (subpaths []string) { - for { - subpaths = append(subpaths, path) - - path = filepath.Dir(path) - if path == "/" { - break - } - } - - slices.Reverse(subpaths) - - return subpaths -} - -func EnsureDirs(path string, uid, gid int) error { - subpaths := getSubpaths(path) - for _, subpath := range subpaths { - info, err := os.Stat(subpath) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to stat directory: %w", err) - } - - if err != nil && os.IsNotExist(err) { - err = os.Mkdir(subpath, 0o755) - if err != nil { - return fmt.Errorf("failed to create directory: %w", err) - } - - err = os.Chown(subpath, uid, gid) - if err != nil { - return fmt.Errorf("failed to chown directory: %w", err) - } - - continue - } - - if !info.IsDir() { - return fmt.Errorf("path is a file: %s", subpath) - } - } - - return nil -} diff --git a/envd/internal/permissions/user.go b/envd/internal/permissions/user.go deleted file mode 100644 index e2d3ffd..0000000 --- a/envd/internal/permissions/user.go +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package permissions - -import ( - "fmt" - "os/user" - "strconv" -) - -func GetUserIdUints(u *user.User) (uid, gid uint32, err error) { - newUID, err := strconv.ParseUint(u.Uid, 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("error parsing uid '%s': %w", u.Uid, err) - } - - newGID, err := strconv.ParseUint(u.Gid, 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("error parsing gid '%s': %w", u.Gid, err) - } - - return uint32(newUID), uint32(newGID), nil -} - -func GetUserIdInts(u *user.User) (uid, gid int, err error) { - newUID, err := strconv.ParseInt(u.Uid, 10, strconv.IntSize) - if err != nil { - return 0, 0, fmt.Errorf("error parsing uid '%s': %w", u.Uid, err) - } - - newGID, err := strconv.ParseInt(u.Gid, 10, strconv.IntSize) - if err != nil { - return 0, 0, fmt.Errorf("error parsing gid '%s': %w", u.Gid, err) - } - - return int(newUID), int(newGID), nil -} - -func GetUser(username string) (u *user.User, err error) { - u, err = user.Lookup(username) - if err != nil { - return nil, fmt.Errorf("error looking up user '%s': %w", username, err) - } - - return u, nil -} diff --git a/envd/internal/port/conn.go b/envd/internal/port/conn.go deleted file mode 100644 index 8a8c032..0000000 --- a/envd/internal/port/conn.go +++ /dev/null @@ -1,165 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package port - -import ( - "bufio" - "encoding/hex" - "fmt" - "net" - "os" - "strconv" - "strings" - "syscall" -) - -// ConnStat represents a single TCP connection read from /proc/net/tcp(6). -// It contains only the fields needed by the port scanner and forwarder. -type ConnStat struct { - LocalIP string - LocalPort uint32 - Status string - Family uint32 // syscall.AF_INET or syscall.AF_INET6 - Inode uint64 // socket inode, unique per connection -} - -// tcpStates maps the hex state values from /proc/net/tcp to string names -// matching the gopsutil convention used by ScannerFilter. -var tcpStates = map[string]string{ - "01": "ESTABLISHED", - "02": "SYN_SENT", - "03": "SYN_RECV", - "04": "FIN_WAIT1", - "05": "FIN_WAIT2", - "06": "TIME_WAIT", - "07": "CLOSE", - "08": "CLOSE_WAIT", - "09": "LAST_ACK", - "0A": "LISTEN", - "0B": "CLOSING", -} - -// ReadTCPConnections reads /proc/net/tcp and /proc/net/tcp6 and returns -// all TCP connections. This avoids the /proc/{pid}/fd walk that gopsutil -// performs, which is unsafe across Firecracker snapshot/restore boundaries. -func ReadTCPConnections() ([]ConnStat, error) { - var conns []ConnStat - - tcp4, err := parseProcNetTCP("/proc/net/tcp", syscall.AF_INET) - if err != nil { - return nil, fmt.Errorf("parse /proc/net/tcp: %w", err) - } - conns = append(conns, tcp4...) - - tcp6, err := parseProcNetTCP("/proc/net/tcp6", syscall.AF_INET6) - if err != nil { - return nil, fmt.Errorf("parse /proc/net/tcp6: %w", err) - } - conns = append(conns, tcp6...) - - return conns, nil -} - -// parseProcNetTCP reads a single /proc/net/tcp or /proc/net/tcp6 file. -// -// Format (fields are whitespace-separated): -// -// sl local_address rem_address st tx_queue:rx_queue tr:tm->when retrnsmt uid timeout inode -// 0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 12345 -func parseProcNetTCP(path string, family uint32) ([]ConnStat, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var conns []ConnStat - scanner := bufio.NewScanner(f) - - // Skip header line. - scanner.Scan() - - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line == "" { - continue - } - - fields := strings.Fields(line) - if len(fields) < 10 { - continue - } - - // fields[1] = local_address (hex_ip:hex_port) - ip, port, err := parseHexAddr(fields[1], family) - if err != nil { - continue - } - - // fields[3] = state (hex) - state, ok := tcpStates[fields[3]] - if !ok { - state = "UNKNOWN" - } - - // fields[9] = inode - inode, err := strconv.ParseUint(fields[9], 10, 64) - if err != nil { - continue - } - - conns = append(conns, ConnStat{ - LocalIP: ip, - LocalPort: port, - Status: state, - Family: family, - Inode: inode, - }) - } - - return conns, scanner.Err() -} - -// parseHexAddr parses "HEXIP:HEXPORT" from /proc/net/tcp. -// IPv4 addresses are 8 hex chars (4 bytes, little-endian per 32-bit word). -// IPv6 addresses are 32 hex chars (16 bytes, little-endian per 32-bit word). -func parseHexAddr(s string, family uint32) (string, uint32, error) { - parts := strings.SplitN(s, ":", 2) - if len(parts) != 2 { - return "", 0, fmt.Errorf("invalid address: %s", s) - } - - port64, err := strconv.ParseUint(parts[1], 16, 32) - if err != nil { - return "", 0, err - } - - ipHex := parts[0] - ipBytes, err := hex.DecodeString(ipHex) - if err != nil { - return "", 0, err - } - - var ip net.IP - if family == syscall.AF_INET { - if len(ipBytes) != 4 { - return "", 0, fmt.Errorf("invalid IPv4 length: %d", len(ipBytes)) - } - // /proc/net/tcp stores IPv4 as a single little-endian 32-bit word. - ip = net.IPv4(ipBytes[3], ipBytes[2], ipBytes[1], ipBytes[0]) - } else { - if len(ipBytes) != 16 { - return "", 0, fmt.Errorf("invalid IPv6 length: %d", len(ipBytes)) - } - // /proc/net/tcp6 stores IPv6 as four little-endian 32-bit words. - ip = make(net.IP, 16) - for i := 0; i < 4; i++ { - ip[i*4+0] = ipBytes[i*4+3] - ip[i*4+1] = ipBytes[i*4+2] - ip[i*4+2] = ipBytes[i*4+1] - ip[i*4+3] = ipBytes[i*4+0] - } - } - - return ip.String(), uint32(port64), nil -} diff --git a/envd/internal/port/forward.go b/envd/internal/port/forward.go deleted file mode 100644 index cc71a41..0000000 --- a/envd/internal/port/forward.go +++ /dev/null @@ -1,240 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -// portf (port forward) periodaically scans opened TCP ports on the 127.0.0.1 (or localhost) -// and launches `socat` process for every such port in the background. -// socat forward traffic from `sourceIP`:port to the 127.0.0.1:port. - -// WARNING: portf isn't thread safe! - -package port - -import ( - "context" - "fmt" - "net" - "os/exec" - "syscall" - - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" -) - -type PortState string - -const ( - PortStateForward PortState = "FORWARD" - PortStateDelete PortState = "DELETE" -) - -var defaultGatewayIP = net.IPv4(169, 254, 0, 21) - -type PortToForward struct { - socat *exec.Cmd - // Socket inode of the listening socket (unique per connection). - inode uint64 - // family version of the ip. - family uint32 - state PortState - port uint32 -} - -type Forwarder struct { - logger *zerolog.Logger - cgroupManager cgroups.Manager - // Map of ports that are being currently forwarded. - ports map[string]*PortToForward - scannerSubscriber *ScannerSubscriber - sourceIP net.IP -} - -func NewForwarder( - logger *zerolog.Logger, - scanner *Scanner, - cgroupManager cgroups.Manager, -) *Forwarder { - scannerSub := scanner.AddSubscriber( - logger, - "port-forwarder", - // We only want to forward ports that are actively listening on localhost. - &ScannerFilter{ - IPs: []string{"127.0.0.1", "localhost", "::1"}, - State: "LISTEN", - }, - ) - - return &Forwarder{ - logger: logger, - sourceIP: defaultGatewayIP, - ports: make(map[string]*PortToForward), - scannerSubscriber: scannerSub, - cgroupManager: cgroupManager, - } -} - -func (f *Forwarder) StartForwarding(ctx context.Context) { - if f.scannerSubscriber == nil { - f.logger.Error().Msg("Cannot start forwarding because scanner subscriber is nil") - - return - } - - for { - select { - case <-ctx.Done(): - f.stopAllForwarding() - return - case procs, ok := <-f.scannerSubscriber.Messages: - if !ok { - f.stopAllForwarding() - return - } - - // Now we are going to refresh all ports that are being forwarded in the `ports` map. Maybe add new ones - // and maybe remove some. - - // Go through the ports that are currently being forwarded and set all of them - // to the `DELETE` state. We don't know yet if they will be there after refresh. - for _, v := range f.ports { - v.state = PortStateDelete - } - - // Let's refresh our map of currently forwarded ports and mark the currently opened ones with the "FORWARD" state. - // This will make sure we won't delete them later. - for _, p := range procs { - key := fmt.Sprintf("%d-%d", p.Inode, p.LocalPort) - - // We check if the opened port is in our map of forwarded ports. - val, portOk := f.ports[key] - if portOk { - // Just mark the port as being forwarded so we don't delete it. - // The actual socat process that handles forwarding should be running from the last iteration. - val.state = PortStateForward - } else { - f.logger.Debug(). - Str("ip", p.LocalIP). - Uint32("port", p.LocalPort). - Uint32("family", familyToIPVersion(p.Family)). - Str("state", p.Status). - Msg("Detected new opened port on localhost that is not forwarded") - - // The opened port wasn't in the map so we create a new PortToForward and start forwarding. - ptf := &PortToForward{ - inode: p.Inode, - port: p.LocalPort, - state: PortStateForward, - family: familyToIPVersion(p.Family), - } - f.ports[key] = ptf - f.startPortForwarding(ctx, ptf) - } - } - - // We go through the ports map one more time and stop forwarding all ports - // that stayed marked as "DELETE". - for _, v := range f.ports { - if v.state == PortStateDelete { - f.stopPortForwarding(v) - } - } - } - } -} - -func (f *Forwarder) stopAllForwarding() { - for _, p := range f.ports { - f.stopPortForwarding(p) - } - f.ports = make(map[string]*PortToForward) -} - -func (f *Forwarder) startPortForwarding(_ context.Context, p *PortToForward) { - // https://unix.stackexchange.com/questions/311492/redirect-application-listening-on-localhost-to-listening-on-external-interface - // socat -d -d TCP4-LISTEN:4000,bind=169.254.0.21,fork TCP4:localhost:4000 - // reuseaddr is used to fix the "Address already in use" error when restarting socat quickly. - // - // We use exec.Command (not CommandContext) because stopAllForwarding kills - // socat via SIGKILL to the process group. CommandContext would also call - // cmd.Wait() on context cancellation, racing with the wait goroutine below. - cmd := exec.Command( - "socat", "-d", "-d", "-d", - fmt.Sprintf("TCP4-LISTEN:%v,bind=%s,reuseaddr,fork", p.port, f.sourceIP.To4()), - fmt.Sprintf("TCP%d:localhost:%v", p.family, p.port), - ) - - cgroupFD, ok := f.cgroupManager.GetFileDescriptor(cgroups.ProcessTypeSocat) - - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - CgroupFD: cgroupFD, - UseCgroupFD: ok, - } - - f.logger.Debug(). - Str("socatCmd", cmd.String()). - Uint64("inode", p.inode). - Uint32("family", p.family). - IPAddr("sourceIP", f.sourceIP.To4()). - Uint32("port", p.port). - Msg("About to start port forwarding") - - if err := cmd.Start(); err != nil { - f.logger. - Error(). - Str("socatCmd", cmd.String()). - Err(err). - Msg("Failed to start port forwarding - failed to start socat") - - return - } - - go func() { - if err := cmd.Wait(); err != nil { - f.logger. - Debug(). - Str("socatCmd", cmd.String()). - Err(err). - Msg("Port forwarding socat process exited") - } - }() - - p.socat = cmd -} - -func (f *Forwarder) stopPortForwarding(p *PortToForward) { - if p.socat == nil { - return - } - - defer func() { p.socat = nil }() - - logger := f.logger.With(). - Str("socatCmd", p.socat.String()). - Uint64("inode", p.inode). - Uint32("family", p.family). - IPAddr("sourceIP", f.sourceIP.To4()). - Uint32("port", p.port). - Logger() - - logger.Debug().Msg("Stopping port forwarding") - - if err := syscall.Kill(-p.socat.Process.Pid, syscall.SIGKILL); err != nil { - logger.Error().Err(err).Msg("Failed to kill process group") - - return - } - - logger.Debug().Msg("Stopped port forwarding") -} - -func familyToIPVersion(family uint32) uint32 { - switch family { - case syscall.AF_INET: - return 4 - case syscall.AF_INET6: - return 6 - default: - return 0 // Unknown or unsupported family - } -} diff --git a/envd/internal/port/scan.go b/envd/internal/port/scan.go deleted file mode 100644 index 878b361..0000000 --- a/envd/internal/port/scan.go +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package port - -import ( - "context" - "sync" - "time" - - "github.com/rs/zerolog" -) - -type Scanner struct { - period time.Duration - - // Plain mutex-protected map instead of concurrent-map. The concurrent-map - // library's Items() spawns goroutines and uses a WaitGroup internally, - // which corrupts Go runtime semaphore state across Firecracker snapshot/restore. - mu sync.RWMutex - subs map[string]*ScannerSubscriber -} - -func NewScanner(period time.Duration) *Scanner { - return &Scanner{ - period: period, - subs: make(map[string]*ScannerSubscriber), - } -} - -func (s *Scanner) AddSubscriber(logger *zerolog.Logger, id string, filter *ScannerFilter) *ScannerSubscriber { - subscriber := NewScannerSubscriber(logger, id, filter) - - s.mu.Lock() - s.subs[id] = subscriber - s.mu.Unlock() - - return subscriber -} - -func (s *Scanner) Unsubscribe(sub *ScannerSubscriber) { - s.mu.Lock() - delete(s.subs, sub.ID()) - s.mu.Unlock() - - sub.Destroy() -} - -// ScanAndBroadcast starts scanning open TCP ports and broadcasts every open port to all subscribers. -// It exits when ctx is cancelled. -func (s *Scanner) ScanAndBroadcast(ctx context.Context) { - for { - // Read directly from /proc/net/tcp and /proc/net/tcp6 instead of - // using gopsutil's net.Connections(), which walks /proc/{pid}/fd - // and causes Go runtime corruption after Firecracker snapshot/restore. - conns, _ := ReadTCPConnections() - - s.mu.RLock() - for _, sub := range s.subs { - sub.Signal(ctx, conns) - } - s.mu.RUnlock() - - select { - case <-ctx.Done(): - return - case <-time.After(s.period): - } - } -} diff --git a/envd/internal/port/scanSubscriber.go b/envd/internal/port/scanSubscriber.go deleted file mode 100644 index 312f8d2..0000000 --- a/envd/internal/port/scanSubscriber.go +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package port - -import ( - "context" - - "github.com/rs/zerolog" -) - -// If we want to create a listener/subscriber pattern somewhere else we should move -// from a concrete implementation to combination of generics and interfaces. - -type ScannerSubscriber struct { - logger *zerolog.Logger - filter *ScannerFilter - Messages chan ([]ConnStat) - id string -} - -func NewScannerSubscriber(logger *zerolog.Logger, id string, filter *ScannerFilter) *ScannerSubscriber { - return &ScannerSubscriber{ - logger: logger, - id: id, - filter: filter, - Messages: make(chan []ConnStat), - } -} - -func (ss *ScannerSubscriber) ID() string { - return ss.id -} - -func (ss *ScannerSubscriber) Destroy() { - close(ss.Messages) -} - -// Signal sends the (filtered) connection list to the subscriber. It respects -// ctx cancellation so the scanner goroutine is never stuck waiting for a -// consumer that has already exited. -func (ss *ScannerSubscriber) Signal(ctx context.Context, conns []ConnStat) { - var payload []ConnStat - - if ss.filter == nil { - payload = conns - } else { - filtered := []ConnStat{} - for i := range conns { - if ss.filter.Match(&conns[i]) { - filtered = append(filtered, conns[i]) - } - } - payload = filtered - } - - select { - case ss.Messages <- payload: - case <-ctx.Done(): - } -} diff --git a/envd/internal/port/scanfilter.go b/envd/internal/port/scanfilter.go deleted file mode 100644 index f87667f..0000000 --- a/envd/internal/port/scanfilter.go +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package port - -import ( - "slices" -) - -type ScannerFilter struct { - State string - IPs []string -} - -func (sf *ScannerFilter) Match(conn *ConnStat) bool { - // Filter is an empty struct. - if sf.State == "" && len(sf.IPs) == 0 { - return false - } - - ipMatch := slices.Contains(sf.IPs, conn.LocalIP) - - if ipMatch && sf.State == conn.Status { - return true - } - - return false -} diff --git a/envd/internal/port/subsystem.go b/envd/internal/port/subsystem.go deleted file mode 100644 index e70a2db..0000000 --- a/envd/internal/port/subsystem.go +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package port - -import ( - "context" - "sync" - "time" - - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" -) - -// PortSubsystem owns the port scanner and forwarder lifecycle. -// It supports stop/restart across Firecracker snapshot/restore cycles. -type PortSubsystem struct { - logger *zerolog.Logger - cgroupManager cgroups.Manager - period time.Duration - - mu sync.Mutex - cancel context.CancelFunc - wg *sync.WaitGroup // per-cycle WaitGroup; nil when not running - running bool -} - -// NewPortSubsystem creates a new PortSubsystem. Call Start() to begin scanning. -func NewPortSubsystem(logger *zerolog.Logger, cgroupManager cgroups.Manager, period time.Duration) *PortSubsystem { - return &PortSubsystem{ - logger: logger, - cgroupManager: cgroupManager, - period: period, - } -} - -// Start creates a fresh scanner and forwarder, launching their goroutines. -// Safe to call multiple times; does nothing if already running. -func (p *PortSubsystem) Start(parentCtx context.Context) { - p.mu.Lock() - defer p.mu.Unlock() - - if p.running { - return - } - - ctx, cancel := context.WithCancel(parentCtx) - p.cancel = cancel - p.running = true - - // Allocate a fresh WaitGroup for this lifecycle so a concurrent Stop - // on the previous cycle's WaitGroup cannot interfere. - wg := &sync.WaitGroup{} - p.wg = wg - - scanner := NewScanner(p.period) - forwarder := NewForwarder(p.logger, scanner, p.cgroupManager) - - wg.Add(2) - - go func() { - defer wg.Done() - forwarder.StartForwarding(ctx) - }() - - go func() { - defer wg.Done() - scanner.ScanAndBroadcast(ctx) - }() -} - -// Stop quiesces the scanner and forwarder goroutines. -// Blocks until both goroutines have exited. Safe to call if already stopped. -// -// GC is NOT run here — it is deferred to PostSnapshotPrepare so that the -// GC happens after all allocations (connection cleanup, HTTP response) are -// complete, minimizing the window where page allocator corruption can occur. -func (p *PortSubsystem) Stop() { - p.mu.Lock() - if !p.running { - p.mu.Unlock() - return - } - cancelFn := p.cancel - wg := p.wg - p.cancel = nil - p.wg = nil - p.running = false - p.mu.Unlock() - - cancelFn() - wg.Wait() -} - -// Restart stops the subsystem (if running) and starts it again with a fresh -// scanner and forwarder. Used after snapshot restore via PostInit. -func (p *PortSubsystem) Restart(parentCtx context.Context) { - p.Stop() - p.Start(parentCtx) -} diff --git a/envd/internal/services/cgroups/cgroup2.go b/envd/internal/services/cgroups/cgroup2.go deleted file mode 100644 index b60251e..0000000 --- a/envd/internal/services/cgroups/cgroup2.go +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package cgroups - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -type Cgroup2Manager struct { - cgroupFDs map[ProcessType]int -} - -var _ Manager = (*Cgroup2Manager)(nil) - -type cgroup2Config struct { - rootPath string - processTypes map[ProcessType]Cgroup2Config -} - -type Cgroup2ManagerOption func(*cgroup2Config) - -func WithCgroup2RootSysFSPath(path string) Cgroup2ManagerOption { - return func(config *cgroup2Config) { - config.rootPath = path - } -} - -func WithCgroup2ProcessType(processType ProcessType, path string, properties map[string]string) Cgroup2ManagerOption { - return func(config *cgroup2Config) { - if config.processTypes == nil { - config.processTypes = make(map[ProcessType]Cgroup2Config) - } - config.processTypes[processType] = Cgroup2Config{Path: path, Properties: properties} - } -} - -type Cgroup2Config struct { - Path string - Properties map[string]string -} - -func NewCgroup2Manager(opts ...Cgroup2ManagerOption) (*Cgroup2Manager, error) { - config := cgroup2Config{ - rootPath: "/sys/fs/cgroup", - } - - for _, opt := range opts { - opt(&config) - } - - cgroupFDs, err := createCgroups(config) - if err != nil { - return nil, fmt.Errorf("failed to create cgroups: %w", err) - } - - return &Cgroup2Manager{cgroupFDs: cgroupFDs}, nil -} - -func createCgroups(configs cgroup2Config) (map[ProcessType]int, error) { - var ( - results = make(map[ProcessType]int) - errs []error - ) - - for procType, config := range configs.processTypes { - fullPath := filepath.Join(configs.rootPath, config.Path) - fd, err := createCgroup(fullPath, config.Properties) - if err != nil { - errs = append(errs, fmt.Errorf("failed to create %s cgroup: %w", procType, err)) - - continue - } - results[procType] = fd - } - - if len(errs) > 0 { - for procType, fd := range results { - err := unix.Close(fd) - if err != nil { - errs = append(errs, fmt.Errorf("failed to close cgroup fd for %s: %w", procType, err)) - } - } - - return nil, errors.Join(errs...) - } - - return results, nil -} - -func createCgroup(fullPath string, properties map[string]string) (int, error) { - if err := os.MkdirAll(fullPath, 0o755); err != nil { - return -1, fmt.Errorf("failed to create cgroup root: %w", err) - } - - var errs []error - for name, value := range properties { - if err := os.WriteFile(filepath.Join(fullPath, name), []byte(value), 0o644); err != nil { - errs = append(errs, fmt.Errorf("failed to write cgroup property: %w", err)) - } - } - if len(errs) > 0 { - return -1, errors.Join(errs...) - } - - return unix.Open(fullPath, unix.O_RDONLY, 0) -} - -func (c Cgroup2Manager) GetFileDescriptor(procType ProcessType) (int, bool) { - fd, ok := c.cgroupFDs[procType] - - return fd, ok -} - -func (c Cgroup2Manager) Close() error { - var errs []error - for procType, fd := range c.cgroupFDs { - if err := unix.Close(fd); err != nil { - errs = append(errs, fmt.Errorf("failed to close cgroup fd for %s: %w", procType, err)) - } - delete(c.cgroupFDs, procType) - } - - return errors.Join(errs...) -} diff --git a/envd/internal/services/cgroups/cgroup2_test.go b/envd/internal/services/cgroups/cgroup2_test.go deleted file mode 100644 index ff16787..0000000 --- a/envd/internal/services/cgroups/cgroup2_test.go +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package cgroups - -import ( - "context" - "fmt" - "math/rand" - "os" - "os/exec" - "strconv" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - oneByte = 1 - kilobyte = 1024 * oneByte - megabyte = 1024 * kilobyte -) - -func TestCgroupRoundTrip(t *testing.T) { - t.Parallel() - - if os.Geteuid() != 0 { - t.Skip("must run as root") - - return - } - - maxTimeout := time.Second * 5 - - t.Run("process does not die without cgroups", func(t *testing.T) { - t.Parallel() - - // create manager - m, err := NewCgroup2Manager() - require.NoError(t, err) - - // create new child process - cmd := startProcess(t, m, "not-a-real-one") - - // wait for child process to die - err = waitForProcess(t, cmd, maxTimeout) - - require.ErrorIs(t, err, context.DeadlineExceeded) - }) - - t.Run("process dies with cgroups", func(t *testing.T) { - t.Parallel() - - cgroupPath := createCgroupPath(t, "real-one") - - // create manager - m, err := NewCgroup2Manager( - WithCgroup2ProcessType(ProcessTypePTY, cgroupPath, map[string]string{ - "memory.max": strconv.Itoa(1 * megabyte), - }), - ) - require.NoError(t, err) - - t.Cleanup(func() { - err := m.Close() - assert.NoError(t, err) - }) - - // create new child process - cmd := startProcess(t, m, ProcessTypePTY) - - // wait for child process to die - err = waitForProcess(t, cmd, maxTimeout) - - // verify process exited correctly - var exitErr *exec.ExitError - require.ErrorAs(t, err, &exitErr) - assert.Equal(t, "signal: killed", exitErr.Error()) - assert.False(t, exitErr.Exited()) - assert.False(t, exitErr.Success()) - assert.Equal(t, -1, exitErr.ExitCode()) - - // dig a little deeper - ws, ok := exitErr.Sys().(syscall.WaitStatus) - require.True(t, ok) - assert.Equal(t, syscall.SIGKILL, ws.Signal()) - assert.True(t, ws.Signaled()) - assert.False(t, ws.Stopped()) - assert.False(t, ws.Continued()) - assert.False(t, ws.CoreDump()) - assert.False(t, ws.Exited()) - assert.Equal(t, -1, ws.ExitStatus()) - }) - - t.Run("process cannot be spawned because memory limit is too low", func(t *testing.T) { - t.Parallel() - - cgroupPath := createCgroupPath(t, "real-one") - - // create manager - m, err := NewCgroup2Manager( - WithCgroup2ProcessType(ProcessTypeSocat, cgroupPath, map[string]string{ - "memory.max": strconv.Itoa(1 * kilobyte), - }), - ) - require.NoError(t, err) - - t.Cleanup(func() { - err := m.Close() - assert.NoError(t, err) - }) - - // create new child process - cmd := startProcess(t, m, ProcessTypeSocat) - - // wait for child process to die - err = waitForProcess(t, cmd, maxTimeout) - - // verify process exited correctly - var exitErr *exec.ExitError - require.ErrorAs(t, err, &exitErr) - assert.Equal(t, "exit status 253", exitErr.Error()) - assert.True(t, exitErr.Exited()) - assert.False(t, exitErr.Success()) - assert.Equal(t, 253, exitErr.ExitCode()) - - // dig a little deeper - ws, ok := exitErr.Sys().(syscall.WaitStatus) - require.True(t, ok) - assert.Equal(t, syscall.Signal(-1), ws.Signal()) - assert.False(t, ws.Signaled()) - assert.False(t, ws.Stopped()) - assert.False(t, ws.Continued()) - assert.False(t, ws.CoreDump()) - assert.True(t, ws.Exited()) - assert.Equal(t, 253, ws.ExitStatus()) - }) -} - -func createCgroupPath(t *testing.T, s string) string { - t.Helper() - - randPart := rand.Int() - - return fmt.Sprintf("envd-test-%s-%d", s, randPart) -} - -func startProcess(t *testing.T, m *Cgroup2Manager, pt ProcessType) *exec.Cmd { - t.Helper() - - cmdName, args := "bash", []string{"-c", `sleep 1 && tail /dev/zero`} - cmd := exec.CommandContext(t.Context(), cmdName, args...) - - fd, ok := m.GetFileDescriptor(pt) - cmd.SysProcAttr = &syscall.SysProcAttr{ - UseCgroupFD: ok, - CgroupFD: fd, - } - - err := cmd.Start() - require.NoError(t, err) - - return cmd -} - -func waitForProcess(t *testing.T, cmd *exec.Cmd, timeout time.Duration) error { - t.Helper() - - done := make(chan error, 1) - - go func() { - defer close(done) - done <- cmd.Wait() - }() - - ctx, cancel := context.WithTimeout(t.Context(), timeout) - t.Cleanup(cancel) - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-done: - return err - } -} diff --git a/envd/internal/services/cgroups/iface.go b/envd/internal/services/cgroups/iface.go deleted file mode 100644 index 04bbfa0..0000000 --- a/envd/internal/services/cgroups/iface.go +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package cgroups - -type ProcessType string - -const ( - ProcessTypePTY ProcessType = "pty" - ProcessTypeUser ProcessType = "user" - ProcessTypeSocat ProcessType = "socat" -) - -type Manager interface { - GetFileDescriptor(procType ProcessType) (int, bool) - Close() error -} diff --git a/envd/internal/services/cgroups/noop.go b/envd/internal/services/cgroups/noop.go deleted file mode 100644 index 3b5f076..0000000 --- a/envd/internal/services/cgroups/noop.go +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package cgroups - -type NoopManager struct{} - -var _ Manager = (*NoopManager)(nil) - -func NewNoopManager() *NoopManager { - return &NoopManager{} -} - -func (n NoopManager) GetFileDescriptor(ProcessType) (int, bool) { - return 0, false -} - -func (n NoopManager) Close() error { - return nil -} diff --git a/envd/internal/services/filesystem/dir.go b/envd/internal/services/filesystem/dir.go deleted file mode 100644 index c3ee752..0000000 --- a/envd/internal/services/filesystem/dir.go +++ /dev/null @@ -1,186 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func (s Service) ListDir(ctx context.Context, req *connect.Request[rpc.ListDirRequest]) (*connect.Response[rpc.ListDirResponse], error) { - depth := req.Msg.GetDepth() - if depth == 0 { - depth = 1 // default depth to current directory - } - - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - requestedPath := req.Msg.GetPath() - - // Expand the path so we can return absolute paths in the response. - requestedPath, err = permissions.ExpandAndResolve(requestedPath, u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - resolvedPath, err := followSymlink(requestedPath) - if err != nil { - return nil, err - } - - err = checkIfDirectory(resolvedPath) - if err != nil { - return nil, err - } - - entries, err := walkDir(requestedPath, resolvedPath, int(depth)) - if err != nil { - return nil, err - } - - return connect.NewResponse(&rpc.ListDirResponse{ - Entries: entries, - }), nil -} - -func (s Service) MakeDir(ctx context.Context, req *connect.Request[rpc.MakeDirRequest]) (*connect.Response[rpc.MakeDirResponse], error) { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - dirPath, err := permissions.ExpandAndResolve(req.Msg.GetPath(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - stat, err := os.Stat(dirPath) - if err != nil && !os.IsNotExist(err) { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error getting file info: %w", err)) - } - - if err == nil { - if stat.IsDir() { - return nil, connect.NewError(connect.CodeAlreadyExists, fmt.Errorf("directory already exists: %s", dirPath)) - } - - return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("path already exists but it is not a directory: %s", dirPath)) - } - - uid, gid, userErr := permissions.GetUserIdInts(u) - if userErr != nil { - return nil, connect.NewError(connect.CodeInternal, userErr) - } - - userErr = permissions.EnsureDirs(dirPath, uid, gid) - if userErr != nil { - return nil, connect.NewError(connect.CodeInternal, userErr) - } - - entry, err := entryInfo(dirPath) - if err != nil { - return nil, err - } - - return connect.NewResponse(&rpc.MakeDirResponse{ - Entry: entry, - }), nil -} - -// followSymlink resolves a symbolic link to its target path. -func followSymlink(path string) (string, error) { - // Resolve symlinks - resolvedPath, err := filepath.EvalSymlinks(path) - if err != nil { - if os.IsNotExist(err) { - return "", connect.NewError(connect.CodeNotFound, fmt.Errorf("path not found: %w", err)) - } - - if strings.Contains(err.Error(), "too many links") { - return "", connect.NewError(connect.CodeFailedPrecondition, fmt.Errorf("cyclic symlink or chain >255 links at %q", path)) - } - - return "", connect.NewError(connect.CodeInternal, fmt.Errorf("error resolving symlink: %w", err)) - } - - return resolvedPath, nil -} - -// checkIfDirectory checks if the given path is a directory. -func checkIfDirectory(path string) error { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return connect.NewError(connect.CodeNotFound, fmt.Errorf("directory not found: %w", err)) - } - - return connect.NewError(connect.CodeInternal, fmt.Errorf("error getting file info: %w", err)) - } - - if !stat.IsDir() { - return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("path is not a directory: %s", path)) - } - - return nil -} - -// walkDir walks the directory tree starting from dirPath up to the specified depth (doesn't follow symlinks). -func walkDir(requestedPath string, dirPath string, depth int) (entries []*rpc.EntryInfo, err error) { - err = filepath.WalkDir(dirPath, func(path string, _ os.DirEntry, err error) error { - if err != nil { - return err - } - - // Skip the root directory itself - if path == dirPath { - return nil - } - - // Calculate current depth - relPath, err := filepath.Rel(dirPath, path) - if err != nil { - return err - } - currentDepth := len(strings.Split(relPath, string(os.PathSeparator))) - - if currentDepth > depth { - return filepath.SkipDir - } - - entryInfo, err := entryInfo(path) - if err != nil { - var connectErr *connect.Error - if errors.As(err, &connectErr) && connectErr.Code() == connect.CodeNotFound { - // Skip entries that don't exist anymore - return nil - } - - return err - } - - // Return the requested path as the base path instead of the symlink-resolved path - path = filepath.Join(requestedPath, relPath) - entryInfo.Path = path - - entries = append(entries, entryInfo) - - return nil - }) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error reading directory %s: %w", dirPath, err)) - } - - return entries, nil -} diff --git a/envd/internal/services/filesystem/dir_test.go b/envd/internal/services/filesystem/dir_test.go deleted file mode 100644 index 5dba82e..0000000 --- a/envd/internal/services/filesystem/dir_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "testing" - - "connectrpc.com/authn" - "connectrpc.com/connect" - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func TestListDir(t *testing.T) { - t.Parallel() - - // Setup temp root and user - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - - // Setup directory structure - testFolder := filepath.Join(root, "test") - require.NoError(t, os.MkdirAll(filepath.Join(testFolder, "test-dir", "sub-dir-1"), 0o755)) - require.NoError(t, os.MkdirAll(filepath.Join(testFolder, "test-dir", "sub-dir-2"), 0o755)) - filePath := filepath.Join(testFolder, "test-dir", "sub-dir-1", "file.txt") - require.NoError(t, os.WriteFile(filePath, []byte("Hello, World!"), 0o644)) - - // Service instance - svc := mockService() - - // Helper to inject user into context - injectUser := func(ctx context.Context, u *user.User) context.Context { - return authn.SetInfo(ctx, u) - } - - tests := []struct { - name string - depth uint32 - expectedPaths []string - }{ - { - name: "depth 0 lists only root directory", - depth: 0, - expectedPaths: []string{ - filepath.Join(testFolder, "test-dir"), - }, - }, - { - name: "depth 1 lists root directory", - depth: 1, - expectedPaths: []string{ - filepath.Join(testFolder, "test-dir"), - }, - }, - { - name: "depth 2 lists first level of subdirectories (in this case the root directory)", - depth: 2, - expectedPaths: []string{ - filepath.Join(testFolder, "test-dir"), - filepath.Join(testFolder, "test-dir", "sub-dir-1"), - filepath.Join(testFolder, "test-dir", "sub-dir-2"), - }, - }, - { - name: "depth 3 lists all directories and files", - depth: 3, - expectedPaths: []string{ - filepath.Join(testFolder, "test-dir"), - filepath.Join(testFolder, "test-dir", "sub-dir-1"), - filepath.Join(testFolder, "test-dir", "sub-dir-2"), - filepath.Join(testFolder, "test-dir", "sub-dir-1", "file.txt"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := injectUser(t.Context(), u) - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: testFolder, - Depth: tt.depth, - }) - resp, err := svc.ListDir(ctx, req) - require.NoError(t, err) - assert.NotEmpty(t, resp.Msg) - assert.Len(t, resp.Msg.GetEntries(), len(tt.expectedPaths)) - actualPaths := make([]string, len(resp.Msg.GetEntries())) - for i, entry := range resp.Msg.GetEntries() { - actualPaths[i] = entry.GetPath() - } - assert.ElementsMatch(t, tt.expectedPaths, actualPaths) - }) - } -} - -func TestListDirNonExistingPath(t *testing.T) { - t.Parallel() - - svc := mockService() - u, err := user.Current() - require.NoError(t, err) - ctx := authn.SetInfo(t.Context(), u) - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: "/non-existing-path", - Depth: 1, - }) - _, err = svc.ListDir(ctx, req) - require.Error(t, err) - var connectErr *connect.Error - ok := errors.As(err, &connectErr) - assert.True(t, ok, "expected error to be of type *connect.Error") - assert.Equal(t, connect.CodeNotFound, connectErr.Code()) -} - -func TestListDirRelativePath(t *testing.T) { - t.Parallel() - - // Setup temp root and user - u, err := user.Current() - require.NoError(t, err) - - // Setup directory structure - testRelativePath := fmt.Sprintf("test-%s", uuid.New()) - testFolderPath := filepath.Join(u.HomeDir, testRelativePath) - filePath := filepath.Join(testFolderPath, "file.txt") - require.NoError(t, os.MkdirAll(testFolderPath, 0o755)) - require.NoError(t, os.WriteFile(filePath, []byte("Hello, World!"), 0o644)) - - // Service instance - svc := mockService() - ctx := authn.SetInfo(t.Context(), u) - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: testRelativePath, - Depth: 1, - }) - resp, err := svc.ListDir(ctx, req) - require.NoError(t, err) - assert.NotEmpty(t, resp.Msg) - - expectedPaths := []string{ - filepath.Join(testFolderPath, "file.txt"), - } - assert.Len(t, resp.Msg.GetEntries(), len(expectedPaths)) - - actualPaths := make([]string, len(resp.Msg.GetEntries())) - for i, entry := range resp.Msg.GetEntries() { - actualPaths[i] = entry.GetPath() - } - assert.ElementsMatch(t, expectedPaths, actualPaths) -} - -func TestListDir_Symlinks(t *testing.T) { - t.Parallel() - - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - ctx := authn.SetInfo(t.Context(), u) - - symlinkRoot := filepath.Join(root, "test-symlinks") - require.NoError(t, os.MkdirAll(symlinkRoot, 0o755)) - - // 1. Prepare a real directory + file that a symlink will point to - realDir := filepath.Join(symlinkRoot, "real-dir") - require.NoError(t, os.MkdirAll(realDir, 0o755)) - filePath := filepath.Join(realDir, "file.txt") - require.NoError(t, os.WriteFile(filePath, []byte("hello via symlink"), 0o644)) - - // 2. Prepare a standalone real file (points-to-file scenario) - realFile := filepath.Join(symlinkRoot, "real-file.txt") - require.NoError(t, os.WriteFile(realFile, []byte("i am a plain file"), 0o644)) - - // 3. Create the three symlinks - linkToDir := filepath.Join(symlinkRoot, "link-dir") // → directory - linkToFile := filepath.Join(symlinkRoot, "link-file") // → file - cyclicLink := filepath.Join(symlinkRoot, "cyclic") // → itself - require.NoError(t, os.Symlink(realDir, linkToDir)) - require.NoError(t, os.Symlink(realFile, linkToFile)) - require.NoError(t, os.Symlink(cyclicLink, cyclicLink)) - - svc := mockService() - - t.Run("symlink to directory behaves like directory and the content looks like inside the directory", func(t *testing.T) { - t.Parallel() - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: linkToDir, - Depth: 1, - }) - resp, err := svc.ListDir(ctx, req) - require.NoError(t, err) - expected := []string{ - filepath.Join(linkToDir, "file.txt"), - } - actual := make([]string, len(resp.Msg.GetEntries())) - for i, e := range resp.Msg.GetEntries() { - actual[i] = e.GetPath() - } - assert.ElementsMatch(t, expected, actual) - }) - - t.Run("link to file", func(t *testing.T) { - t.Parallel() - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: linkToFile, - Depth: 1, - }) - _, err := svc.ListDir(ctx, req) - require.Error(t, err) - assert.Contains(t, err.Error(), "not a directory") - }) - - t.Run("cyclic symlink surfaces 'too many links' → invalid-argument", func(t *testing.T) { - t.Parallel() - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: cyclicLink, - }) - _, err := svc.ListDir(ctx, req) - require.Error(t, err) - var connectErr *connect.Error - ok := errors.As(err, &connectErr) - assert.True(t, ok, "expected error to be of type *connect.Error") - assert.Equal(t, connect.CodeFailedPrecondition, connectErr.Code()) - assert.Contains(t, connectErr.Error(), "cyclic symlink") - }) - - t.Run("symlink not resolved if not root", func(t *testing.T) { - t.Parallel() - - req := connect.NewRequest(&filesystem.ListDirRequest{ - Path: symlinkRoot, - Depth: 3, - }) - res, err := svc.ListDir(ctx, req) - require.NoError(t, err) - expected := []string{ - filepath.Join(symlinkRoot, "cyclic"), - filepath.Join(symlinkRoot, "link-dir"), - filepath.Join(symlinkRoot, "link-file"), - filepath.Join(symlinkRoot, "real-dir"), - filepath.Join(symlinkRoot, "real-dir", "file.txt"), - filepath.Join(symlinkRoot, "real-file.txt"), - } - actual := make([]string, len(res.Msg.GetEntries())) - for i, e := range res.Msg.GetEntries() { - actual[i] = e.GetPath() - } - assert.ElementsMatch(t, expected, actual, "symlinks should not be resolved when listing the symlink root directory") - }) -} - -// TestFollowSymlink_Success makes sure that followSymlink resolves symlinks, -// while also being robust to the /var → /private/var indirection that exists on macOS. -func TestFollowSymlink_Success(t *testing.T) { - t.Parallel() - - // Base temporary directory. On macOS this lives under /var/folders/… - // which itself is a symlink to /private/var/folders/…. - base := t.TempDir() - - // Create a real directory that we ultimately want to resolve to. - target := filepath.Join(base, "target") - require.NoError(t, os.MkdirAll(target, 0o755)) - - // Create a symlink pointing at the real directory so we can verify that - // followSymlink follows it. - link := filepath.Join(base, "link") - require.NoError(t, os.Symlink(target, link)) - - got, err := followSymlink(link) - require.NoError(t, err) - - // Canonicalise the expected path too, so that /var → /private/var (macOS) - // or any other benign symlink indirections don’t cause flaky tests. - want, err := filepath.EvalSymlinks(link) - require.NoError(t, err) - - require.Equal(t, want, got, "followSymlink should resolve and canonicalise symlinks") -} - -// TestFollowSymlink_MultiSymlinkChain verifies that followSymlink follows a chain -// of several symlinks (non‑cyclic) correctly. -func TestFollowSymlink_MultiSymlinkChain(t *testing.T) { - t.Parallel() - - base := t.TempDir() - - // Final destination directory. - target := filepath.Join(base, "target") - require.NoError(t, os.MkdirAll(target, 0o755)) - - // Build a 3‑link chain: link1 → link2 → link3 → target. - link3 := filepath.Join(base, "link3") - require.NoError(t, os.Symlink(target, link3)) - - link2 := filepath.Join(base, "link2") - require.NoError(t, os.Symlink(link3, link2)) - - link1 := filepath.Join(base, "link1") - require.NoError(t, os.Symlink(link2, link1)) - - got, err := followSymlink(link1) - require.NoError(t, err) - - want, err := filepath.EvalSymlinks(link1) - require.NoError(t, err) - - require.Equal(t, want, got, "followSymlink should resolve an arbitrary symlink chain") -} - -func TestFollowSymlink_NotFound(t *testing.T) { - t.Parallel() - - _, err := followSymlink("/definitely/does/not/exist") - require.Error(t, err) - - var cerr *connect.Error - require.ErrorAs(t, err, &cerr) - require.Equal(t, connect.CodeNotFound, cerr.Code()) -} - -func TestFollowSymlink_CyclicSymlink(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - a := filepath.Join(dir, "a") - b := filepath.Join(dir, "b") - require.NoError(t, os.MkdirAll(a, 0o755)) - require.NoError(t, os.MkdirAll(b, 0o755)) - - // Create a two‑node loop: a/loop → b/loop, b/loop → a/loop. - require.NoError(t, os.Symlink(filepath.Join(b, "loop"), filepath.Join(a, "loop"))) - require.NoError(t, os.Symlink(filepath.Join(a, "loop"), filepath.Join(b, "loop"))) - - _, err := followSymlink(filepath.Join(a, "loop")) - require.Error(t, err) - - var cerr *connect.Error - require.ErrorAs(t, err, &cerr) - require.Equal(t, connect.CodeFailedPrecondition, cerr.Code()) - require.Contains(t, cerr.Message(), "cyclic") -} - -func TestCheckIfDirectory(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - require.NoError(t, checkIfDirectory(dir)) - - file := filepath.Join(dir, "file.txt") - require.NoError(t, os.WriteFile(file, []byte("hello"), 0o644)) - - err := checkIfDirectory(file) - require.Error(t, err) - - var cerr *connect.Error - require.ErrorAs(t, err, &cerr) - require.Equal(t, connect.CodeInvalidArgument, cerr.Code()) -} - -func TestWalkDir_Depth(t *testing.T) { - t.Parallel() - - root := t.TempDir() - sub := filepath.Join(root, "sub") - subsub := filepath.Join(sub, "subsub") - require.NoError(t, os.MkdirAll(subsub, 0o755)) - - entries, err := walkDir(root, root, 1) - require.NoError(t, err) - - // Collect the names for easier assertions. - names := make([]string, 0, len(entries)) - for _, e := range entries { - names = append(names, e.GetName()) - } - - require.Contains(t, names, "sub") - require.NotContains(t, names, "subsub", "entries beyond depth should be excluded") -} - -func TestWalkDir_Error(t *testing.T) { - t.Parallel() - - _, err := walkDir("/does/not/exist", "/does/not/exist", 1) - require.Error(t, err) - - var cerr *connect.Error - require.ErrorAs(t, err, &cerr) - require.Equal(t, connect.CodeInternal, cerr.Code()) -} diff --git a/envd/internal/services/filesystem/move.go b/envd/internal/services/filesystem/move.go deleted file mode 100644 index adf5b7e..0000000 --- a/envd/internal/services/filesystem/move.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func (s Service) Move(ctx context.Context, req *connect.Request[rpc.MoveRequest]) (*connect.Response[rpc.MoveResponse], error) { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - source, err := permissions.ExpandAndResolve(req.Msg.GetSource(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - destination, err := permissions.ExpandAndResolve(req.Msg.GetDestination(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - uid, gid, userErr := permissions.GetUserIdInts(u) - if userErr != nil { - return nil, connect.NewError(connect.CodeInternal, userErr) - } - - userErr = permissions.EnsureDirs(filepath.Dir(destination), uid, gid) - if userErr != nil { - return nil, connect.NewError(connect.CodeInternal, userErr) - } - - err = os.Rename(source, destination) - if err != nil { - if os.IsNotExist(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("source file not found: %w", err)) - } - - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error renaming: %w", err)) - } - - entry, err := entryInfo(destination) - if err != nil { - return nil, err - } - - return connect.NewResponse(&rpc.MoveResponse{ - Entry: entry, - }), nil -} diff --git a/envd/internal/services/filesystem/move_test.go b/envd/internal/services/filesystem/move_test.go deleted file mode 100644 index f094e9b..0000000 --- a/envd/internal/services/filesystem/move_test.go +++ /dev/null @@ -1,366 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "testing" - - "connectrpc.com/authn" - "connectrpc.com/connect" - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func TestMove(t *testing.T) { - t.Parallel() - - // Setup temp root and user - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - - // Setup source and destination directories - sourceDir := filepath.Join(root, "source") - destDir := filepath.Join(root, "destination") - require.NoError(t, os.MkdirAll(sourceDir, 0o755)) - require.NoError(t, os.MkdirAll(destDir, 0o755)) - - // Create a test file to move - sourceFile := filepath.Join(sourceDir, "test-file.txt") - testContent := []byte("Hello, World!") - require.NoError(t, os.WriteFile(sourceFile, testContent, 0o644)) - - // Destination file path - destFile := filepath.Join(destDir, "test-file.txt") - - // Service instance - svc := mockService() - - // Call the Move function - ctx := authn.SetInfo(t.Context(), u) - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: sourceFile, - Destination: destFile, - }) - resp, err := svc.Move(ctx, req) - - // Verify the move was successful - require.NoError(t, err) - assert.NotNil(t, resp) - assert.Equal(t, destFile, resp.Msg.GetEntry().GetPath()) - - // Verify the file exists at the destination - _, err = os.Stat(destFile) - require.NoError(t, err) - - // Verify the file no longer exists at the source - _, err = os.Stat(sourceFile) - assert.True(t, os.IsNotExist(err)) - - // Verify the content of the moved file - content, err := os.ReadFile(destFile) - require.NoError(t, err) - assert.Equal(t, testContent, content) -} - -func TestMoveDirectory(t *testing.T) { - t.Parallel() - - // Setup temp root and user - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - - // Setup source and destination directories - sourceParent := filepath.Join(root, "source-parent") - destParent := filepath.Join(root, "dest-parent") - require.NoError(t, os.MkdirAll(sourceParent, 0o755)) - require.NoError(t, os.MkdirAll(destParent, 0o755)) - - // Create a test directory with files to move - sourceDir := filepath.Join(sourceParent, "test-dir") - require.NoError(t, os.MkdirAll(filepath.Join(sourceDir, "subdir"), 0o755)) - - // Create some files in the directory - file1 := filepath.Join(sourceDir, "file1.txt") - file2 := filepath.Join(sourceDir, "subdir", "file2.txt") - require.NoError(t, os.WriteFile(file1, []byte("File 1 content"), 0o644)) - require.NoError(t, os.WriteFile(file2, []byte("File 2 content"), 0o644)) - - // Destination directory path - destDir := filepath.Join(destParent, "test-dir") - - // Service instance - svc := mockService() - - // Call the Move function - ctx := authn.SetInfo(t.Context(), u) - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: sourceDir, - Destination: destDir, - }) - resp, err := svc.Move(ctx, req) - - // Verify the move was successful - require.NoError(t, err) - assert.NotNil(t, resp) - assert.Equal(t, destDir, resp.Msg.GetEntry().GetPath()) - - // Verify the directory exists at the destination - _, err = os.Stat(destDir) - require.NoError(t, err) - - // Verify the files exist at the destination - destFile1 := filepath.Join(destDir, "file1.txt") - destFile2 := filepath.Join(destDir, "subdir", "file2.txt") - _, err = os.Stat(destFile1) - require.NoError(t, err) - _, err = os.Stat(destFile2) - require.NoError(t, err) - - // Verify the directory no longer exists at the source - _, err = os.Stat(sourceDir) - assert.True(t, os.IsNotExist(err)) - - // Verify the content of the moved files - content1, err := os.ReadFile(destFile1) - require.NoError(t, err) - assert.Equal(t, []byte("File 1 content"), content1) - - content2, err := os.ReadFile(destFile2) - require.NoError(t, err) - assert.Equal(t, []byte("File 2 content"), content2) -} - -func TestMoveNonExistingFile(t *testing.T) { - t.Parallel() - - // Setup temp root and user - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - - // Setup destination directory - destDir := filepath.Join(root, "destination") - require.NoError(t, os.MkdirAll(destDir, 0o755)) - - // Non-existing source file - sourceFile := filepath.Join(root, "non-existing-file.txt") - - // Destination file path - destFile := filepath.Join(destDir, "moved-file.txt") - - // Service instance - svc := mockService() - - // Call the Move function - ctx := authn.SetInfo(t.Context(), u) - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: sourceFile, - Destination: destFile, - }) - _, err = svc.Move(ctx, req) - - // Verify the correct error is returned - require.Error(t, err) - - var connectErr *connect.Error - ok := errors.As(err, &connectErr) - assert.True(t, ok, "expected error to be of type *connect.Error") - assert.Equal(t, connect.CodeNotFound, connectErr.Code()) - assert.Contains(t, connectErr.Message(), "source file not found") -} - -func TestMoveRelativePath(t *testing.T) { - t.Parallel() - - // Setup user - u, err := user.Current() - require.NoError(t, err) - - // Setup directory structure with unique name to avoid conflicts - testRelativePath := fmt.Sprintf("test-move-%s", uuid.New()) - testFolderPath := filepath.Join(u.HomeDir, testRelativePath) - require.NoError(t, os.MkdirAll(testFolderPath, 0o755)) - - // Create a test file to move - sourceFile := filepath.Join(testFolderPath, "source-file.txt") - testContent := []byte("Hello from relative path!") - require.NoError(t, os.WriteFile(sourceFile, testContent, 0o644)) - - // Destination file path (also relative) - destRelativePath := fmt.Sprintf("test-move-dest-%s", uuid.New()) - destFolderPath := filepath.Join(u.HomeDir, destRelativePath) - require.NoError(t, os.MkdirAll(destFolderPath, 0o755)) - destFile := filepath.Join(destFolderPath, "moved-file.txt") - - // Service instance - svc := mockService() - - // Call the Move function with relative paths - ctx := authn.SetInfo(t.Context(), u) - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: filepath.Join(testRelativePath, "source-file.txt"), // Relative path - Destination: filepath.Join(destRelativePath, "moved-file.txt"), // Relative path - }) - resp, err := svc.Move(ctx, req) - - // Verify the move was successful - require.NoError(t, err) - assert.NotNil(t, resp) - assert.Equal(t, destFile, resp.Msg.GetEntry().GetPath()) - - // Verify the file exists at the destination - _, err = os.Stat(destFile) - require.NoError(t, err) - - // Verify the file no longer exists at the source - _, err = os.Stat(sourceFile) - assert.True(t, os.IsNotExist(err)) - - // Verify the content of the moved file - content, err := os.ReadFile(destFile) - require.NoError(t, err) - assert.Equal(t, testContent, content) - - // Clean up - os.RemoveAll(testFolderPath) - os.RemoveAll(destFolderPath) -} - -func TestMove_Symlinks(t *testing.T) { //nolint:tparallel // this test cannot be executed in parallel - root := t.TempDir() - u, err := user.Current() - require.NoError(t, err) - ctx := authn.SetInfo(t.Context(), u) - - // Setup source and destination directories - sourceRoot := filepath.Join(root, "source") - destRoot := filepath.Join(root, "destination") - require.NoError(t, os.MkdirAll(sourceRoot, 0o755)) - require.NoError(t, os.MkdirAll(destRoot, 0o755)) - - // 1. Prepare a real directory + file that a symlink will point to - realDir := filepath.Join(sourceRoot, "real-dir") - require.NoError(t, os.MkdirAll(realDir, 0o755)) - filePath := filepath.Join(realDir, "file.txt") - require.NoError(t, os.WriteFile(filePath, []byte("hello via symlink"), 0o644)) - - // 2. Prepare a standalone real file (points-to-file scenario) - realFile := filepath.Join(sourceRoot, "real-file.txt") - require.NoError(t, os.WriteFile(realFile, []byte("i am a plain file"), 0o644)) - - // 3. Create symlinks - linkToDir := filepath.Join(sourceRoot, "link-dir") // → directory - linkToFile := filepath.Join(sourceRoot, "link-file") // → file - require.NoError(t, os.Symlink(realDir, linkToDir)) - require.NoError(t, os.Symlink(realFile, linkToFile)) - - svc := mockService() - - t.Run("move symlink to directory", func(t *testing.T) { - t.Parallel() - destPath := filepath.Join(destRoot, "moved-link-dir") - - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: linkToDir, - Destination: destPath, - }) - resp, err := svc.Move(ctx, req) - require.NoError(t, err) - assert.Equal(t, destPath, resp.Msg.GetEntry().GetPath()) - - // Verify the symlink was moved - _, err = os.Stat(destPath) - require.NoError(t, err) - - // Verify it's still a symlink - info, err := os.Lstat(destPath) - require.NoError(t, err) - assert.NotEqual(t, 0, info.Mode()&os.ModeSymlink, "expected a symlink") - - // Verify the symlink target is still correct - target, err := os.Readlink(destPath) - require.NoError(t, err) - assert.Equal(t, realDir, target) - - // Verify the original symlink is gone - _, err = os.Stat(linkToDir) - assert.True(t, os.IsNotExist(err)) - - // Verify the real directory still exists - _, err = os.Stat(realDir) - assert.NoError(t, err) - }) - - t.Run("move symlink to file", func(t *testing.T) { //nolint:paralleltest - destPath := filepath.Join(destRoot, "moved-link-file") - - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: linkToFile, - Destination: destPath, - }) - resp, err := svc.Move(ctx, req) - require.NoError(t, err) - assert.Equal(t, destPath, resp.Msg.GetEntry().GetPath()) - - // Verify the symlink was moved - _, err = os.Stat(destPath) - require.NoError(t, err) - - // Verify it's still a symlink - info, err := os.Lstat(destPath) - require.NoError(t, err) - assert.NotEqual(t, 0, info.Mode()&os.ModeSymlink, "expected a symlink") - - // Verify the symlink target is still correct - target, err := os.Readlink(destPath) - require.NoError(t, err) - assert.Equal(t, realFile, target) - - // Verify the original symlink is gone - _, err = os.Stat(linkToFile) - assert.True(t, os.IsNotExist(err)) - - // Verify the real file still exists - _, err = os.Stat(realFile) - assert.NoError(t, err) - }) - - t.Run("move real file that is target of symlink", func(t *testing.T) { - t.Parallel() - // Create a new symlink to the real file - newLinkToFile := filepath.Join(sourceRoot, "new-link-file") - require.NoError(t, os.Symlink(realFile, newLinkToFile)) - - destPath := filepath.Join(destRoot, "moved-real-file.txt") - - req := connect.NewRequest(&filesystem.MoveRequest{ - Source: realFile, - Destination: destPath, - }) - resp, err := svc.Move(ctx, req) - require.NoError(t, err) - assert.Equal(t, destPath, resp.Msg.GetEntry().GetPath()) - - // Verify the real file was moved - _, err = os.Stat(destPath) - require.NoError(t, err) - - // Verify the original file is gone - _, err = os.Stat(realFile) - assert.True(t, os.IsNotExist(err)) - - // Verify the symlink still exists but now points to a non-existent file - _, err = os.Stat(newLinkToFile) - require.Error(t, err, "symlink should point to non-existent file") - }) -} diff --git a/envd/internal/services/filesystem/remove.go b/envd/internal/services/filesystem/remove.go deleted file mode 100644 index fd8ce62..0000000 --- a/envd/internal/services/filesystem/remove.go +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "fmt" - "os" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func (s Service) Remove(ctx context.Context, req *connect.Request[rpc.RemoveRequest]) (*connect.Response[rpc.RemoveResponse], error) { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - path, err := permissions.ExpandAndResolve(req.Msg.GetPath(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - err = os.RemoveAll(path) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error removing file or directory: %w", err)) - } - - return connect.NewResponse(&rpc.RemoveResponse{}), nil -} diff --git a/envd/internal/services/filesystem/service.go b/envd/internal/services/filesystem/service.go deleted file mode 100644 index 51f948d..0000000 --- a/envd/internal/services/filesystem/service.go +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package filesystem - -import ( - "connectrpc.com/connect" - "github.com/go-chi/chi/v5" - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - spec "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem/filesystemconnect" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -type Service struct { - logger *zerolog.Logger - watchers *utils.Map[string, *FileWatcher] - defaults *execcontext.Defaults -} - -func Handle(server *chi.Mux, l *zerolog.Logger, defaults *execcontext.Defaults) { - service := Service{ - logger: l, - watchers: utils.NewMap[string, *FileWatcher](), - defaults: defaults, - } - - interceptors := connect.WithInterceptors( - logs.NewUnaryLogInterceptor(l), - ) - - path, handler := spec.NewFilesystemHandler(service, interceptors) - - server.Mount(path, handler) -} diff --git a/envd/internal/services/filesystem/service_test.go b/envd/internal/services/filesystem/service_test.go deleted file mode 100644 index 3e6db01..0000000 --- a/envd/internal/services/filesystem/service_test.go +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -func mockService() Service { - return Service{ - defaults: &execcontext.Defaults{ - EnvVars: utils.NewMap[string, string](), - }, - } -} diff --git a/envd/internal/services/filesystem/stat.go b/envd/internal/services/filesystem/stat.go deleted file mode 100644 index d3bc4f5..0000000 --- a/envd/internal/services/filesystem/stat.go +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func (s Service) Stat(ctx context.Context, req *connect.Request[rpc.StatRequest]) (*connect.Response[rpc.StatResponse], error) { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - path, err := permissions.ExpandAndResolve(req.Msg.GetPath(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - entry, err := entryInfo(path) - if err != nil { - return nil, err - } - - return connect.NewResponse(&rpc.StatResponse{Entry: entry}), nil -} diff --git a/envd/internal/services/filesystem/stat_test.go b/envd/internal/services/filesystem/stat_test.go deleted file mode 100644 index 56d4af7..0000000 --- a/envd/internal/services/filesystem/stat_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "os" - "os/user" - "path/filepath" - "testing" - - "connectrpc.com/authn" - "connectrpc.com/connect" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" -) - -func TestStat(t *testing.T) { - t.Parallel() - - // Setup temp root and user - root := t.TempDir() - // Get the actual path to the temp directory (symlinks can cause issues) - root, err := filepath.EvalSymlinks(root) - require.NoError(t, err) - - u, err := user.Current() - require.NoError(t, err) - - group, err := user.LookupGroupId(u.Gid) - require.NoError(t, err) - - // Setup directory structure - testFolder := filepath.Join(root, "test") - err = os.MkdirAll(testFolder, 0o755) - require.NoError(t, err) - - testFile := filepath.Join(testFolder, "file.txt") - err = os.WriteFile(testFile, []byte("Hello, World!"), 0o644) - require.NoError(t, err) - - linkedFile := filepath.Join(testFolder, "linked-file.txt") - err = os.Symlink(testFile, linkedFile) - require.NoError(t, err) - - // Service instance - svc := mockService() - - // Helper to inject user into context - injectUser := func(ctx context.Context, u *user.User) context.Context { - return authn.SetInfo(ctx, u) - } - - tests := []struct { - name string - path string - }{ - { - name: "Stat file directory", - path: testFile, - }, - { - name: "Stat symlink to file", - path: linkedFile, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := injectUser(t.Context(), u) - req := connect.NewRequest(&filesystem.StatRequest{ - Path: tt.path, - }) - resp, err := svc.Stat(ctx, req) - require.NoError(t, err) - require.NotEmpty(t, resp.Msg) - require.NotNil(t, resp.Msg.GetEntry()) - assert.Equal(t, tt.path, resp.Msg.GetEntry().GetPath()) - assert.Equal(t, filesystem.FileType_FILE_TYPE_FILE, resp.Msg.GetEntry().GetType()) - assert.Equal(t, u.Username, resp.Msg.GetEntry().GetOwner()) - assert.Equal(t, group.Name, resp.Msg.GetEntry().GetGroup()) - assert.Equal(t, uint32(0o644), resp.Msg.GetEntry().GetMode()) - if tt.path == linkedFile { - require.NotNil(t, resp.Msg.GetEntry().GetSymlinkTarget()) - assert.Equal(t, testFile, resp.Msg.GetEntry().GetSymlinkTarget()) - } else { - assert.Empty(t, resp.Msg.GetEntry().GetSymlinkTarget()) - } - }) - } -} - -func TestStatMissingPathReturnsNotFound(t *testing.T) { - t.Parallel() - - u, err := user.Current() - require.NoError(t, err) - - svc := mockService() - ctx := authn.SetInfo(t.Context(), u) - - req := connect.NewRequest(&filesystem.StatRequest{ - Path: filepath.Join(t.TempDir(), "missing.txt"), - }) - - _, err = svc.Stat(ctx, req) - require.Error(t, err) - - var connectErr *connect.Error - require.ErrorAs(t, err, &connectErr) - assert.Equal(t, connect.CodeNotFound, connectErr.Code()) -} diff --git a/envd/internal/services/filesystem/utils.go b/envd/internal/services/filesystem/utils.go deleted file mode 100644 index 6e94ce0..0000000 --- a/envd/internal/services/filesystem/utils.go +++ /dev/null @@ -1,109 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "fmt" - "os" - "os/user" - "syscall" - "time" - - "connectrpc.com/connect" - "google.golang.org/protobuf/types/known/timestamppb" - - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/filesystem" -) - -// Filesystem magic numbers from Linux kernel (include/uapi/linux/magic.h) -const ( - nfsSuperMagic = 0x6969 - cifsMagic = 0xFF534D42 - smbSuperMagic = 0x517B - smb2MagicNumber = 0xFE534D42 - fuseSuperMagic = 0x65735546 -) - -// IsPathOnNetworkMount checks if the given path is on a network filesystem mount. -// Returns true if the path is on NFS, CIFS, SMB, or FUSE filesystem. -func IsPathOnNetworkMount(path string) (bool, error) { - var statfs syscall.Statfs_t - if err := syscall.Statfs(path, &statfs); err != nil { - return false, fmt.Errorf("failed to statfs %s: %w", path, err) - } - - switch statfs.Type { - case nfsSuperMagic, cifsMagic, smbSuperMagic, smb2MagicNumber, fuseSuperMagic: - return true, nil - default: - return false, nil - } -} - -func entryInfo(path string) (*rpc.EntryInfo, error) { - info, err := filesystem.GetEntryFromPath(path) - if err != nil { - if os.IsNotExist(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("file not found: %w", err)) - } - - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error getting file info: %w", err)) - } - - owner, group := getFileOwnership(info) - - return &rpc.EntryInfo{ - Name: info.Name, - Type: getEntryType(info.Type), - Path: info.Path, - Size: info.Size, - Mode: uint32(info.Mode), - Permissions: info.Permissions, - Owner: owner, - Group: group, - ModifiedTime: toTimestamp(info.ModifiedTime), - SymlinkTarget: info.SymlinkTarget, - }, nil -} - -func toTimestamp(time time.Time) *timestamppb.Timestamp { - if time.IsZero() { - return nil - } - - return timestamppb.New(time) -} - -// getFileOwnership returns the owner and group names for a file. -// If the lookup fails, it returns the numeric UID and GID as strings. -func getFileOwnership(fileInfo filesystem.EntryInfo) (owner, group string) { - // Look up username - owner = fmt.Sprintf("%d", fileInfo.UID) - if u, err := user.LookupId(owner); err == nil { - owner = u.Username - } - - // Look up group name - group = fmt.Sprintf("%d", fileInfo.GID) - if g, err := user.LookupGroupId(group); err == nil { - group = g.Name - } - - return owner, group -} - -// getEntryType determines the type of file entry based on its mode and path. -// If the file is a symlink, it follows the symlink to determine the actual type. -func getEntryType(fileType filesystem.FileType) rpc.FileType { - switch fileType { - case filesystem.FileFileType: - return rpc.FileType_FILE_TYPE_FILE - case filesystem.DirectoryFileType: - return rpc.FileType_FILE_TYPE_DIRECTORY - case filesystem.SymlinkFileType: - return rpc.FileType_FILE_TYPE_SYMLINK - default: - return rpc.FileType_FILE_TYPE_UNSPECIFIED - } -} diff --git a/envd/internal/services/filesystem/utils_test.go b/envd/internal/services/filesystem/utils_test.go deleted file mode 100644 index 0f0f9ad..0000000 --- a/envd/internal/services/filesystem/utils_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "os/exec" - osuser "os/user" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - fsmodel "git.omukk.dev/wrenn/sandbox/envd/internal/shared/filesystem" -) - -func TestIsPathOnNetworkMount(t *testing.T) { - t.Parallel() - - // Test with a regular directory (should not be on network mount) - tempDir := t.TempDir() - isNetwork, err := IsPathOnNetworkMount(tempDir) - require.NoError(t, err) - assert.False(t, isNetwork, "temp directory should not be on a network mount") -} - -func TestIsPathOnNetworkMount_FuseMount(t *testing.T) { - t.Parallel() - - // Require bindfs to be available - _, err := exec.LookPath("bindfs") - require.NoError(t, err, "bindfs must be installed for this test") - - // Require fusermount to be available (needed for unmounting) - _, err = exec.LookPath("fusermount") - require.NoError(t, err, "fusermount must be installed for this test") - - // Create source and mount directories - sourceDir := t.TempDir() - mountDir := t.TempDir() - - // Mount sourceDir onto mountDir using bindfs (FUSE) - ctx := context.Background() - cmd := exec.CommandContext(ctx, "bindfs", sourceDir, mountDir) - require.NoError(t, cmd.Run(), "failed to mount bindfs") - - // Ensure we unmount on cleanup - t.Cleanup(func() { - _ = exec.CommandContext(context.Background(), "fusermount", "-u", mountDir).Run() - }) - - // Test that the FUSE mount is detected - isNetwork, err := IsPathOnNetworkMount(mountDir) - require.NoError(t, err) - assert.True(t, isNetwork, "FUSE mount should be detected as network filesystem") - - // Test that the source directory is NOT detected as network mount - isNetworkSource, err := IsPathOnNetworkMount(sourceDir) - require.NoError(t, err) - assert.False(t, isNetworkSource, "source directory should not be detected as network filesystem") -} - -func TestGetFileOwnership_CurrentUser(t *testing.T) { - t.Parallel() - - t.Run("current user", func(t *testing.T) { - t.Parallel() - - // Get current user running the tests - cur, err := osuser.Current() - if err != nil { - t.Skipf("unable to determine current user: %v", err) - } - - // Determine expected owner/group using the same lookup logic - expectedOwner := cur.Uid - if u, err := osuser.LookupId(cur.Uid); err == nil { - expectedOwner = u.Username - } - - expectedGroup := cur.Gid - if g, err := osuser.LookupGroupId(cur.Gid); err == nil { - expectedGroup = g.Name - } - - // Parse UID/GID strings to uint32 for EntryInfo - uid64, err := strconv.ParseUint(cur.Uid, 10, 32) - require.NoError(t, err) - gid64, err := strconv.ParseUint(cur.Gid, 10, 32) - require.NoError(t, err) - - // Build a minimal EntryInfo with current UID/GID - info := fsmodel.EntryInfo{ // from shared pkg - UID: uint32(uid64), - GID: uint32(gid64), - } - - owner, group := getFileOwnership(info) - assert.Equal(t, expectedOwner, owner) - assert.Equal(t, expectedGroup, group) - }) - - t.Run("no user", func(t *testing.T) { - t.Parallel() - - // Find a UID that does not exist on this system - var unknownUIDStr string - for i := 60001; i < 70000; i++ { // search a high range typically unused - idStr := strconv.Itoa(i) - if _, err := osuser.LookupId(idStr); err != nil { - unknownUIDStr = idStr - - break - } - } - if unknownUIDStr == "" { - t.Skip("could not find a non-existent UID in the probed range") - } - - // Find a GID that does not exist on this system - var unknownGIDStr string - for i := 60001; i < 70000; i++ { // search a high range typically unused - idStr := strconv.Itoa(i) - if _, err := osuser.LookupGroupId(idStr); err != nil { - unknownGIDStr = idStr - - break - } - } - if unknownGIDStr == "" { - t.Skip("could not find a non-existent GID in the probed range") - } - - // Parse to uint32 for EntryInfo construction - uid64, err := strconv.ParseUint(unknownUIDStr, 10, 32) - require.NoError(t, err) - gid64, err := strconv.ParseUint(unknownGIDStr, 10, 32) - require.NoError(t, err) - - info := fsmodel.EntryInfo{ - UID: uint32(uid64), - GID: uint32(gid64), - } - - owner, group := getFileOwnership(info) - // Expect numeric fallbacks because lookups should fail for unknown IDs - assert.Equal(t, unknownUIDStr, owner) - assert.Equal(t, unknownGIDStr, group) - }) -} diff --git a/envd/internal/services/filesystem/watch.go b/envd/internal/services/filesystem/watch.go deleted file mode 100644 index 0ad0105..0000000 --- a/envd/internal/services/filesystem/watch.go +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "connectrpc.com/connect" - "github.com/e2b-dev/fsnotify" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -func (s Service) WatchDir(ctx context.Context, req *connect.Request[rpc.WatchDirRequest], stream *connect.ServerStream[rpc.WatchDirResponse]) error { - return logs.LogServerStreamWithoutEvents(ctx, s.logger, req, stream, s.watchHandler) -} - -func (s Service) watchHandler(ctx context.Context, req *connect.Request[rpc.WatchDirRequest], stream *connect.ServerStream[rpc.WatchDirResponse]) error { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return err - } - - watchPath, err := permissions.ExpandAndResolve(req.Msg.GetPath(), u, s.defaults.Workdir) - if err != nil { - return connect.NewError(connect.CodeInvalidArgument, err) - } - - info, err := os.Stat(watchPath) - if err != nil { - if os.IsNotExist(err) { - return connect.NewError(connect.CodeNotFound, fmt.Errorf("path %s not found: %w", watchPath, err)) - } - - return connect.NewError(connect.CodeInternal, fmt.Errorf("error statting path %s: %w", watchPath, err)) - } - - if !info.IsDir() { - return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("path %s not a directory: %w", watchPath, err)) - } - - // Check if path is on a network filesystem mount - isNetworkMount, err := IsPathOnNetworkMount(watchPath) - if err != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error checking mount status: %w", err)) - } - if isNetworkMount { - return connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("cannot watch path on network filesystem: %s", watchPath)) - } - - w, err := fsnotify.NewWatcher() - if err != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error creating watcher: %w", err)) - } - defer w.Close() - - err = w.Add(utils.FsnotifyPath(watchPath, req.Msg.GetRecursive())) - if err != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error adding path %s to watcher: %w", watchPath, err)) - } - - err = stream.Send(&rpc.WatchDirResponse{ - Event: &rpc.WatchDirResponse_Start{ - Start: &rpc.WatchDirResponse_StartEvent{}, - }, - }) - if err != nil { - return connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending start event: %w", err)) - } - - keepaliveTicker, resetKeepalive := permissions.GetKeepAliveTicker(req) - defer keepaliveTicker.Stop() - - for { - select { - case <-keepaliveTicker.C: - streamErr := stream.Send(&rpc.WatchDirResponse{ - Event: &rpc.WatchDirResponse_Keepalive{ - Keepalive: &rpc.WatchDirResponse_KeepAlive{}, - }, - }) - if streamErr != nil { - return connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending keepalive: %w", streamErr)) - } - case <-ctx.Done(): - return ctx.Err() - case chErr, ok := <-w.Errors: - if !ok { - return connect.NewError(connect.CodeInternal, fmt.Errorf("watcher error channel closed")) - } - - return connect.NewError(connect.CodeInternal, fmt.Errorf("watcher error: %w", chErr)) - case e, ok := <-w.Events: - if !ok { - return connect.NewError(connect.CodeInternal, fmt.Errorf("watcher event channel closed")) - } - - // One event can have multiple operations. - ops := []rpc.EventType{} - - if fsnotify.Create.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_CREATE) - } - - if fsnotify.Rename.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_RENAME) - } - - if fsnotify.Chmod.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_CHMOD) - } - - if fsnotify.Write.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_WRITE) - } - - if fsnotify.Remove.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_REMOVE) - } - - for _, op := range ops { - name, nameErr := filepath.Rel(watchPath, e.Name) - if nameErr != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error getting relative path: %w", nameErr)) - } - - filesystemEvent := &rpc.WatchDirResponse_Filesystem{ - Filesystem: &rpc.FilesystemEvent{ - Name: name, - Type: op, - }, - } - - event := &rpc.WatchDirResponse{ - Event: filesystemEvent, - } - - streamErr := stream.Send(event) - - s.logger. - Debug(). - Str("event_type", "filesystem_event"). - Str(string(logs.OperationIDKey), ctx.Value(logs.OperationIDKey).(string)). - Interface("filesystem_event", event). - Msg("Streaming filesystem event") - - if streamErr != nil { - return connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending filesystem event: %w", streamErr)) - } - - resetKeepalive() - } - } - } -} diff --git a/envd/internal/services/filesystem/watch_sync.go b/envd/internal/services/filesystem/watch_sync.go deleted file mode 100644 index fb5c407..0000000 --- a/envd/internal/services/filesystem/watch_sync.go +++ /dev/null @@ -1,226 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - - "connectrpc.com/connect" - "github.com/e2b-dev/fsnotify" - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/id" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -type FileWatcher struct { - watcher *fsnotify.Watcher - Events []*rpc.FilesystemEvent - cancel func() - Error error - - Lock sync.Mutex -} - -func CreateFileWatcher(ctx context.Context, watchPath string, recursive bool, operationID string, logger *zerolog.Logger) (*FileWatcher, error) { - w, err := fsnotify.NewWatcher() - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error creating watcher: %w", err)) - } - - // We don't want to cancel the context when the request is finished - ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) - - err = w.Add(utils.FsnotifyPath(watchPath, recursive)) - if err != nil { - _ = w.Close() - cancel() - - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error adding path %s to watcher: %w", watchPath, err)) - } - fw := &FileWatcher{ - watcher: w, - cancel: cancel, - Events: []*rpc.FilesystemEvent{}, - Error: nil, - } - - go func() { - for { - select { - case <-ctx.Done(): - return - case chErr, ok := <-w.Errors: - if !ok { - fw.Error = connect.NewError(connect.CodeInternal, fmt.Errorf("watcher error channel closed")) - - return - } - - fw.Error = connect.NewError(connect.CodeInternal, fmt.Errorf("watcher error: %w", chErr)) - - return - case e, ok := <-w.Events: - if !ok { - fw.Error = connect.NewError(connect.CodeInternal, fmt.Errorf("watcher event channel closed")) - - return - } - - // One event can have multiple operations. - ops := []rpc.EventType{} - - if fsnotify.Create.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_CREATE) - } - - if fsnotify.Rename.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_RENAME) - } - - if fsnotify.Chmod.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_CHMOD) - } - - if fsnotify.Write.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_WRITE) - } - - if fsnotify.Remove.Has(e.Op) { - ops = append(ops, rpc.EventType_EVENT_TYPE_REMOVE) - } - - for _, op := range ops { - name, nameErr := filepath.Rel(watchPath, e.Name) - if nameErr != nil { - fw.Error = connect.NewError(connect.CodeInternal, fmt.Errorf("error getting relative path: %w", nameErr)) - - return - } - - fw.Lock.Lock() - fw.Events = append(fw.Events, &rpc.FilesystemEvent{ - Name: name, - Type: op, - }) - fw.Lock.Unlock() - - // these are only used for logging - filesystemEvent := &rpc.WatchDirResponse_Filesystem{ - Filesystem: &rpc.FilesystemEvent{ - Name: name, - Type: op, - }, - } - event := &rpc.WatchDirResponse{ - Event: filesystemEvent, - } - - logger. - Debug(). - Str("event_type", "filesystem_event"). - Str(string(logs.OperationIDKey), operationID). - Interface("filesystem_event", event). - Msg("Streaming filesystem event") - } - } - } - }() - - return fw, nil -} - -func (fw *FileWatcher) Close() { - _ = fw.watcher.Close() - fw.cancel() -} - -func (s Service) CreateWatcher(ctx context.Context, req *connect.Request[rpc.CreateWatcherRequest]) (*connect.Response[rpc.CreateWatcherResponse], error) { - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return nil, err - } - - watchPath, err := permissions.ExpandAndResolve(req.Msg.GetPath(), u, s.defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - info, err := os.Stat(watchPath) - if err != nil { - if os.IsNotExist(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("path %s not found: %w", watchPath, err)) - } - - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error statting path %s: %w", watchPath, err)) - } - - if !info.IsDir() { - return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("path %s not a directory: %w", watchPath, err)) - } - - // Check if path is on a network filesystem mount - isNetworkMount, err := IsPathOnNetworkMount(watchPath) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error checking mount status: %w", err)) - } - if isNetworkMount { - return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("cannot watch path on network filesystem: %s", watchPath)) - } - - watcherId := "w" + id.Generate() - - w, err := CreateFileWatcher(ctx, watchPath, req.Msg.GetRecursive(), watcherId, s.logger) - if err != nil { - return nil, err - } - - s.watchers.Store(watcherId, w) - - return connect.NewResponse(&rpc.CreateWatcherResponse{ - WatcherId: watcherId, - }), nil -} - -func (s Service) GetWatcherEvents(_ context.Context, req *connect.Request[rpc.GetWatcherEventsRequest]) (*connect.Response[rpc.GetWatcherEventsResponse], error) { - watcherId := req.Msg.GetWatcherId() - - w, ok := s.watchers.Load(watcherId) - if !ok { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("watcher with id %s not found", watcherId)) - } - - if w.Error != nil { - return nil, w.Error - } - - w.Lock.Lock() - defer w.Lock.Unlock() - events := w.Events - w.Events = []*rpc.FilesystemEvent{} - - return connect.NewResponse(&rpc.GetWatcherEventsResponse{ - Events: events, - }), nil -} - -func (s Service) RemoveWatcher(_ context.Context, req *connect.Request[rpc.RemoveWatcherRequest]) (*connect.Response[rpc.RemoveWatcherResponse], error) { - watcherId := req.Msg.GetWatcherId() - - w, ok := s.watchers.Load(watcherId) - if !ok { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("watcher with id %s not found", watcherId)) - } - - w.Close() - s.watchers.Delete(watcherId) - - return connect.NewResponse(&rpc.RemoveWatcherResponse{}), nil -} diff --git a/envd/internal/services/process/connect.go b/envd/internal/services/process/connect.go deleted file mode 100644 index 6d900ef..0000000 --- a/envd/internal/services/process/connect.go +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - "errors" - "fmt" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func (s *Service) Connect(ctx context.Context, req *connect.Request[rpc.ConnectRequest], stream *connect.ServerStream[rpc.ConnectResponse]) error { - return logs.LogServerStreamWithoutEvents(ctx, s.logger, req, stream, s.handleConnect) -} - -func (s *Service) handleConnect(ctx context.Context, req *connect.Request[rpc.ConnectRequest], stream *connect.ServerStream[rpc.ConnectResponse]) error { - ctx, cancel := context.WithCancelCause(ctx) - defer cancel(nil) - - proc, err := s.getProcess(req.Msg.GetProcess()) - if err != nil { - return err - } - - exitChan := make(chan struct{}) - - data, dataCancel := proc.DataEvent.Fork() - defer dataCancel() - - end, endCancel := proc.EndEvent.Fork() - defer endCancel() - - streamErr := stream.Send(&rpc.ConnectResponse{ - Event: &rpc.ProcessEvent{ - Event: &rpc.ProcessEvent_Start{ - Start: &rpc.ProcessEvent_StartEvent{ - Pid: proc.Pid(), - }, - }, - }, - }) - if streamErr != nil { - return connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending start event: %w", streamErr)) - } - - go func() { - defer close(exitChan) - - keepaliveTicker, resetKeepalive := permissions.GetKeepAliveTicker(req) - defer keepaliveTicker.Stop() - - dataLoop: - for { - select { - case <-keepaliveTicker.C: - streamErr := stream.Send(&rpc.ConnectResponse{ - Event: &rpc.ProcessEvent{ - Event: &rpc.ProcessEvent_Keepalive{ - Keepalive: &rpc.ProcessEvent_KeepAlive{}, - }, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending keepalive: %w", streamErr))) - - return - } - case <-ctx.Done(): - cancel(ctx.Err()) - - return - case event, ok := <-data: - if !ok { - break dataLoop - } - - streamErr := stream.Send(&rpc.ConnectResponse{ - Event: &rpc.ProcessEvent{ - Event: &event, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending data event: %w", streamErr))) - - return - } - - resetKeepalive() - } - } - - select { - case <-ctx.Done(): - cancel(ctx.Err()) - - return - case event, ok := <-end: - if !ok { - cancel(connect.NewError(connect.CodeUnknown, errors.New("end event channel closed before sending end event"))) - - return - } - - streamErr := stream.Send(&rpc.ConnectResponse{ - Event: &rpc.ProcessEvent{ - Event: &event, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending end event: %w", streamErr))) - - return - } - } - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-exitChan: - return nil - } -} diff --git a/envd/internal/services/process/handler/handler.go b/envd/internal/services/process/handler/handler.go deleted file mode 100644 index 9a73103..0000000 --- a/envd/internal/services/process/handler/handler.go +++ /dev/null @@ -1,482 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package handler - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - "os/user" - "strconv" - "strings" - "sync" - "syscall" - - "connectrpc.com/connect" - "github.com/creack/pty" - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -const ( - defaultNice = 0 - defaultOomScore = 100 - outputBufferSize = 64 - stdChunkSize = 2 << 14 - ptyChunkSize = 2 << 13 -) - -type ProcessExit struct { - Error *string - Status string - Exited bool - Code int32 -} - -type Handler struct { - Config *rpc.ProcessConfig - - logger *zerolog.Logger - - Tag *string - cmd *exec.Cmd - tty *os.File - - cancel context.CancelFunc - - outCtx context.Context //nolint:containedctx // todo: refactor so this can be removed - outCancel context.CancelFunc - - stdinMu sync.Mutex - stdin io.WriteCloser - - DataEvent *MultiplexedChannel[rpc.ProcessEvent_Data] - EndEvent *MultiplexedChannel[rpc.ProcessEvent_End] -} - -// This method must be called only after the process has been started -func (p *Handler) Pid() uint32 { - return uint32(p.cmd.Process.Pid) -} - -// userCommand returns a human-readable representation of the user's original command, -// without the internal OOM/nice wrapper that is prepended to the actual exec. -func (p *Handler) userCommand() string { - return strings.Join(append([]string{p.Config.GetCmd()}, p.Config.GetArgs()...), " ") -} - -// currentNice returns the nice value of the current process. -func currentNice() int { - prio, err := syscall.Getpriority(syscall.PRIO_PROCESS, 0) - if err != nil { - return 0 - } - - // Getpriority returns 20 - nice on Linux. - return 20 - prio -} - -func New( - ctx context.Context, - user *user.User, - req *rpc.StartRequest, - logger *zerolog.Logger, - defaults *execcontext.Defaults, - cgroupManager cgroups.Manager, - cancel context.CancelFunc, -) (*Handler, error) { - // User command string for logging (without the internal wrapper details). - userCmd := strings.Join(append([]string{req.GetProcess().GetCmd()}, req.GetProcess().GetArgs()...), " ") - - // Wrap the command in a shell that sets the OOM score and nice value before exec-ing the actual command. - // This eliminates the race window where grandchildren could inherit the parent's protected OOM score (-1000) - // or high CPU priority (nice -20) before the post-start calls had a chance to correct them. - // nice(1) applies a relative adjustment, so we compute the delta from the current (inherited) nice to the target. - niceDelta := defaultNice - currentNice() - oomWrapperScript := fmt.Sprintf(`echo %d > /proc/$$/oom_score_adj && exec /usr/bin/nice -n %d "${@}"`, defaultOomScore, niceDelta) - wrapperArgs := append([]string{"-c", oomWrapperScript, "--", req.GetProcess().GetCmd()}, req.GetProcess().GetArgs()...) - cmd := exec.CommandContext(ctx, "/bin/sh", wrapperArgs...) - - uid, gid, err := permissions.GetUserIdUints(user) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, err) - } - - groups := []uint32{gid} - if gids, err := user.GroupIds(); err != nil { - logger.Warn().Err(err).Str("user", user.Username).Msg("failed to get supplementary groups") - } else { - for _, g := range gids { - if parsed, err := strconv.ParseUint(g, 10, 32); err == nil { - groups = append(groups, uint32(parsed)) - } - } - } - - cgroupFD, ok := cgroupManager.GetFileDescriptor(getProcType(req)) - - cmd.SysProcAttr = &syscall.SysProcAttr{ - UseCgroupFD: ok, - CgroupFD: cgroupFD, - Credential: &syscall.Credential{ - Uid: uid, - Gid: gid, - Groups: groups, - }, - } - - resolvedPath, err := permissions.ExpandAndResolve(req.GetProcess().GetCwd(), user, defaults.Workdir) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - // Check if the cwd resolved path exists - if _, err := os.Stat(resolvedPath); errors.Is(err, os.ErrNotExist) { - return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("cwd '%s' does not exist", resolvedPath)) - } - - cmd.Dir = resolvedPath - - var formattedVars []string - - // Take only 'PATH' variable from the current environment - // The 'PATH' should ideally be set in the environment - formattedVars = append(formattedVars, "PATH="+os.Getenv("PATH")) - formattedVars = append(formattedVars, "HOME="+user.HomeDir) - formattedVars = append(formattedVars, "USER="+user.Username) - formattedVars = append(formattedVars, "LOGNAME="+user.Username) - - // Add the environment variables from the global environment - if defaults.EnvVars != nil { - defaults.EnvVars.Range(func(key string, value string) bool { - formattedVars = append(formattedVars, key+"="+value) - - return true - }) - } - - // Only the last values of the env vars are used - this allows for overwriting defaults - for key, value := range req.GetProcess().GetEnvs() { - formattedVars = append(formattedVars, key+"="+value) - } - - cmd.Env = formattedVars - - outMultiplex := NewMultiplexedChannel[rpc.ProcessEvent_Data](outputBufferSize) - - var outWg sync.WaitGroup - - // Create a context for waiting for and cancelling output pipes. - // Cancellation of the process via timeout will propagate and cancel this context too. - outCtx, outCancel := context.WithCancel(ctx) - - h := &Handler{ - Config: req.GetProcess(), - cmd: cmd, - Tag: req.Tag, - DataEvent: outMultiplex, - cancel: cancel, - outCtx: outCtx, - outCancel: outCancel, - EndEvent: NewMultiplexedChannel[rpc.ProcessEvent_End](0), - logger: logger, - } - - if req.GetPty() != nil { - // The pty should ideally start only in the Start method, but the package does not support that and we would have to code it manually. - // The output of the pty should correctly be passed though. - tty, err := pty.StartWithSize(cmd, &pty.Winsize{ - Cols: uint16(req.GetPty().GetSize().GetCols()), - Rows: uint16(req.GetPty().GetSize().GetRows()), - }) - if err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("error starting pty with command '%s' in dir '%s' with '%d' cols and '%d' rows: %w", userCmd, cmd.Dir, req.GetPty().GetSize().GetCols(), req.GetPty().GetSize().GetRows(), err)) - } - - outWg.Go(func() { - for { - buf := make([]byte, ptyChunkSize) - - n, readErr := tty.Read(buf) - - if n > 0 { - outMultiplex.Source <- rpc.ProcessEvent_Data{ - Data: &rpc.ProcessEvent_DataEvent{ - Output: &rpc.ProcessEvent_DataEvent_Pty{ - Pty: buf[:n], - }, - }, - } - } - - if errors.Is(readErr, io.EOF) { - break - } - - if readErr != nil { - fmt.Fprintf(os.Stderr, "error reading from pty: %s\n", readErr) - - break - } - } - }) - - h.tty = tty - } else { - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error creating stdout pipe for command '%s': %w", userCmd, err)) - } - - outWg.Go(func() { - stdoutLogs := make(chan []byte, outputBufferSize) - defer close(stdoutLogs) - - stdoutLogger := logger.With().Str("event_type", "stdout").Logger() - - go logs.LogBufferedDataEvents(stdoutLogs, &stdoutLogger, "data") - - for { - buf := make([]byte, stdChunkSize) - - n, readErr := stdout.Read(buf) - - if n > 0 { - outMultiplex.Source <- rpc.ProcessEvent_Data{ - Data: &rpc.ProcessEvent_DataEvent{ - Output: &rpc.ProcessEvent_DataEvent_Stdout{ - Stdout: buf[:n], - }, - }, - } - - stdoutLogs <- buf[:n] - } - - if errors.Is(readErr, io.EOF) { - break - } - - if readErr != nil { - fmt.Fprintf(os.Stderr, "error reading from stdout: %s\n", readErr) - - break - } - } - }) - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error creating stderr pipe for command '%s': %w", userCmd, err)) - } - - outWg.Go(func() { - stderrLogs := make(chan []byte, outputBufferSize) - defer close(stderrLogs) - - stderrLogger := logger.With().Str("event_type", "stderr").Logger() - - go logs.LogBufferedDataEvents(stderrLogs, &stderrLogger, "data") - - for { - buf := make([]byte, stdChunkSize) - - n, readErr := stderr.Read(buf) - - if n > 0 { - outMultiplex.Source <- rpc.ProcessEvent_Data{ - Data: &rpc.ProcessEvent_DataEvent{ - Output: &rpc.ProcessEvent_DataEvent_Stderr{ - Stderr: buf[:n], - }, - }, - } - - stderrLogs <- buf[:n] - } - - if errors.Is(readErr, io.EOF) { - break - } - - if readErr != nil { - fmt.Fprintf(os.Stderr, "error reading from stderr: %s\n", readErr) - - break - } - } - }) - - // For backwards compatibility we still set the stdin if not explicitly disabled - // If stdin is disabled, the process will use /dev/null as stdin - if req.Stdin == nil || req.GetStdin() == true { - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error creating stdin pipe for command '%s': %w", userCmd, err)) - } - - h.stdin = stdin - } - } - - go func() { - outWg.Wait() - - close(outMultiplex.Source) - - outCancel() - }() - - return h, nil -} - -func getProcType(req *rpc.StartRequest) cgroups.ProcessType { - if req != nil && req.GetPty() != nil { - return cgroups.ProcessTypePTY - } - - return cgroups.ProcessTypeUser -} - -func (p *Handler) SendSignal(signal syscall.Signal) error { - if p.cmd.Process == nil { - return fmt.Errorf("process not started") - } - - if signal == syscall.SIGKILL || signal == syscall.SIGTERM { - p.outCancel() - } - - return p.cmd.Process.Signal(signal) -} - -func (p *Handler) ResizeTty(size *pty.Winsize) error { - if p.tty == nil { - return fmt.Errorf("tty not assigned to process") - } - - return pty.Setsize(p.tty, size) -} - -func (p *Handler) WriteStdin(data []byte) error { - if p.tty != nil { - return fmt.Errorf("tty assigned to process — input should be written to the pty, not the stdin") - } - - p.stdinMu.Lock() - defer p.stdinMu.Unlock() - - if p.stdin == nil { - return fmt.Errorf("stdin not enabled or closed") - } - - _, err := p.stdin.Write(data) - if err != nil { - return fmt.Errorf("error writing to stdin of process '%d': %w", p.cmd.Process.Pid, err) - } - - return nil -} - -// CloseStdin closes the stdin pipe to signal EOF to the process. -// Only works for non-PTY processes. -func (p *Handler) CloseStdin() error { - if p.tty != nil { - return fmt.Errorf("cannot close stdin for PTY process — send Ctrl+D (0x04) instead") - } - - p.stdinMu.Lock() - defer p.stdinMu.Unlock() - - if p.stdin == nil { - return nil - } - - err := p.stdin.Close() - // We still set the stdin to nil even on error as there are no errors, - // for which it is really safe to retry close across all distributions. - p.stdin = nil - - return err -} - -func (p *Handler) WriteTty(data []byte) error { - if p.tty == nil { - return fmt.Errorf("tty not assigned to process — input should be written to the stdin, not the tty") - } - - _, err := p.tty.Write(data) - if err != nil { - return fmt.Errorf("error writing to tty of process '%d': %w", p.cmd.Process.Pid, err) - } - - return nil -} - -func (p *Handler) Start() (uint32, error) { - // Pty is already started in the New method - if p.tty == nil { - err := p.cmd.Start() - if err != nil { - return 0, fmt.Errorf("error starting process '%s': %w", p.userCommand(), err) - } - } - - p.logger. - Info(). - Str("event_type", "process_start"). - Int("pid", p.cmd.Process.Pid). - Str("command", p.userCommand()). - Msg(fmt.Sprintf("Process with pid %d started", p.cmd.Process.Pid)) - - return uint32(p.cmd.Process.Pid), nil -} - -func (p *Handler) Wait() { - // Wait for the output pipes to be closed or cancelled. - <-p.outCtx.Done() - - err := p.cmd.Wait() - - if p.tty != nil { - p.tty.Close() - } - - var errMsg *string - - if err != nil { - msg := err.Error() - errMsg = &msg - } - - endEvent := &rpc.ProcessEvent_EndEvent{ - Error: errMsg, - ExitCode: int32(p.cmd.ProcessState.ExitCode()), - Exited: p.cmd.ProcessState.Exited(), - Status: p.cmd.ProcessState.String(), - } - - event := rpc.ProcessEvent_End{ - End: endEvent, - } - - p.EndEvent.Source <- event - - p.logger. - Info(). - Str("event_type", "process_end"). - Interface("process_result", endEvent). - Msg(fmt.Sprintf("Process with pid %d ended", p.cmd.Process.Pid)) - - // Ensure the process cancel is called to cleanup resources. - // As it is called after end event and Wait, it should not affect command execution or returned events. - p.cancel() -} diff --git a/envd/internal/services/process/handler/multiplex.go b/envd/internal/services/process/handler/multiplex.go deleted file mode 100644 index 88f0916..0000000 --- a/envd/internal/services/process/handler/multiplex.go +++ /dev/null @@ -1,79 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package handler - -import ( - "sync" - "sync/atomic" -) - -type MultiplexedChannel[T any] struct { - Source chan T - channels []chan T - mu sync.RWMutex - exited atomic.Bool -} - -func NewMultiplexedChannel[T any](buffer int) *MultiplexedChannel[T] { - c := &MultiplexedChannel[T]{ - channels: nil, - Source: make(chan T, buffer), - } - - go func() { - for v := range c.Source { - c.mu.RLock() - - for _, cons := range c.channels { - select { - case cons <- v: - default: - // Consumer not reading — skip to prevent deadlock - } - } - - c.mu.RUnlock() - } - - c.mu.Lock() - c.exited.Store(true) - for _, cons := range c.channels { - close(cons) - } - c.mu.Unlock() - }() - - return c -} - -func (m *MultiplexedChannel[T]) Fork() (chan T, func()) { - m.mu.Lock() - defer m.mu.Unlock() - - if m.exited.Load() { - ch := make(chan T) - close(ch) - return ch, func() {} - } - - consumer := make(chan T, 4096) - - m.channels = append(m.channels, consumer) - - return consumer, func() { - m.remove(consumer) - } -} - -func (m *MultiplexedChannel[T]) remove(consumer chan T) { - m.mu.Lock() - defer m.mu.Unlock() - - for i, ch := range m.channels { - if ch == consumer { - m.channels = append(m.channels[:i], m.channels[i+1:]...) - - return - } - } -} diff --git a/envd/internal/services/process/input.go b/envd/internal/services/process/input.go deleted file mode 100644 index da82d37..0000000 --- a/envd/internal/services/process/input.go +++ /dev/null @@ -1,109 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - "fmt" - - "connectrpc.com/connect" - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/process/handler" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func handleInput(ctx context.Context, process *handler.Handler, in *rpc.ProcessInput, logger *zerolog.Logger) error { - switch in.GetInput().(type) { - case *rpc.ProcessInput_Pty: - err := process.WriteTty(in.GetPty()) - if err != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error writing to tty: %w", err)) - } - - case *rpc.ProcessInput_Stdin: - err := process.WriteStdin(in.GetStdin()) - if err != nil { - return connect.NewError(connect.CodeInternal, fmt.Errorf("error writing to stdin: %w", err)) - } - - logger.Debug(). - Str("event_type", "stdin"). - Interface("stdin", in.GetStdin()). - Str(string(logs.OperationIDKey), ctx.Value(logs.OperationIDKey).(string)). - Msg("Streaming input to process") - - default: - return connect.NewError(connect.CodeUnimplemented, fmt.Errorf("invalid input type %T", in.GetInput())) - } - - return nil -} - -func (s *Service) SendInput(ctx context.Context, req *connect.Request[rpc.SendInputRequest]) (*connect.Response[rpc.SendInputResponse], error) { - proc, err := s.getProcess(req.Msg.GetProcess()) - if err != nil { - return nil, err - } - - err = handleInput(ctx, proc, req.Msg.GetInput(), s.logger) - if err != nil { - return nil, err - } - - return connect.NewResponse(&rpc.SendInputResponse{}), nil -} - -func (s *Service) StreamInput(ctx context.Context, stream *connect.ClientStream[rpc.StreamInputRequest]) (*connect.Response[rpc.StreamInputResponse], error) { - return logs.LogClientStreamWithoutEvents(ctx, s.logger, stream, s.streamInputHandler) -} - -func (s *Service) streamInputHandler(ctx context.Context, stream *connect.ClientStream[rpc.StreamInputRequest]) (*connect.Response[rpc.StreamInputResponse], error) { - var proc *handler.Handler - - for stream.Receive() { - req := stream.Msg() - - switch req.GetEvent().(type) { - case *rpc.StreamInputRequest_Start: - p, err := s.getProcess(req.GetStart().GetProcess()) - if err != nil { - return nil, err - } - - proc = p - case *rpc.StreamInputRequest_Data: - err := handleInput(ctx, proc, req.GetData().GetInput(), s.logger) - if err != nil { - return nil, err - } - case *rpc.StreamInputRequest_Keepalive: - default: - return nil, connect.NewError(connect.CodeUnimplemented, fmt.Errorf("invalid event type %T", req.GetEvent())) - } - } - - err := stream.Err() - if err != nil { - return nil, connect.NewError(connect.CodeUnknown, fmt.Errorf("error streaming input: %w", err)) - } - - return connect.NewResponse(&rpc.StreamInputResponse{}), nil -} - -func (s *Service) CloseStdin( - _ context.Context, - req *connect.Request[rpc.CloseStdinRequest], -) (*connect.Response[rpc.CloseStdinResponse], error) { - handler, err := s.getProcess(req.Msg.GetProcess()) - if err != nil { - return nil, err - } - - if err := handler.CloseStdin(); err != nil { - return nil, connect.NewError(connect.CodeUnknown, fmt.Errorf("error closing stdin: %w", err)) - } - - return connect.NewResponse(&rpc.CloseStdinResponse{}), nil -} diff --git a/envd/internal/services/process/list.go b/envd/internal/services/process/list.go deleted file mode 100644 index 3b42655..0000000 --- a/envd/internal/services/process/list.go +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/services/process/handler" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func (s *Service) List(context.Context, *connect.Request[rpc.ListRequest]) (*connect.Response[rpc.ListResponse], error) { - processes := make([]*rpc.ProcessInfo, 0) - - s.processes.Range(func(pid uint32, value *handler.Handler) bool { - processes = append(processes, &rpc.ProcessInfo{ - Pid: pid, - Tag: value.Tag, - Config: value.Config, - }) - - return true - }) - - return connect.NewResponse(&rpc.ListResponse{ - Processes: processes, - }), nil -} diff --git a/envd/internal/services/process/service.go b/envd/internal/services/process/service.go deleted file mode 100644 index 9b89521..0000000 --- a/envd/internal/services/process/service.go +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "fmt" - - "connectrpc.com/connect" - "github.com/go-chi/chi/v5" - "github.com/rs/zerolog" - - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/process/handler" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" - spec "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process/processconnect" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -type Service struct { - processes *utils.Map[uint32, *handler.Handler] - logger *zerolog.Logger - defaults *execcontext.Defaults - cgroupManager cgroups.Manager -} - -func newService(l *zerolog.Logger, defaults *execcontext.Defaults, cgroupManager cgroups.Manager) *Service { - return &Service{ - logger: l, - processes: utils.NewMap[uint32, *handler.Handler](), - defaults: defaults, - cgroupManager: cgroupManager, - } -} - -func Handle(server *chi.Mux, l *zerolog.Logger, defaults *execcontext.Defaults, cgroupManager cgroups.Manager) *Service { - service := newService(l, defaults, cgroupManager) - - interceptors := connect.WithInterceptors(logs.NewUnaryLogInterceptor(l)) - - path, h := spec.NewProcessHandler(service, interceptors) - - server.Mount(path, h) - - return service -} - -func (s *Service) getProcess(selector *rpc.ProcessSelector) (*handler.Handler, error) { - var proc *handler.Handler - - switch selector.GetSelector().(type) { - case *rpc.ProcessSelector_Pid: - p, ok := s.processes.Load(selector.GetPid()) - if !ok { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("process with pid %d not found", selector.GetPid())) - } - - proc = p - case *rpc.ProcessSelector_Tag: - tag := selector.GetTag() - - s.processes.Range(func(_ uint32, value *handler.Handler) bool { - if value.Tag == nil { - return true // no tag, keep looking - } - - if *value.Tag == tag { - proc = value - return false // found, stop iterating - } - - return true // different tag, keep looking - }) - - if proc == nil { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("process with tag %s not found", tag)) - } - - default: - return nil, connect.NewError(connect.CodeUnimplemented, fmt.Errorf("invalid input type %T", selector)) - } - - return proc, nil -} diff --git a/envd/internal/services/process/signal.go b/envd/internal/services/process/signal.go deleted file mode 100644 index 23795da..0000000 --- a/envd/internal/services/process/signal.go +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - "fmt" - "syscall" - - "connectrpc.com/connect" - - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func (s *Service) SendSignal( - _ context.Context, - req *connect.Request[rpc.SendSignalRequest], -) (*connect.Response[rpc.SendSignalResponse], error) { - handler, err := s.getProcess(req.Msg.GetProcess()) - if err != nil { - return nil, err - } - - var signal syscall.Signal - switch req.Msg.GetSignal() { - case rpc.Signal_SIGNAL_SIGKILL: - signal = syscall.SIGKILL - case rpc.Signal_SIGNAL_SIGTERM: - signal = syscall.SIGTERM - default: - return nil, connect.NewError(connect.CodeUnimplemented, fmt.Errorf("invalid signal: %s", req.Msg.GetSignal())) - } - - err = handler.SendSignal(signal) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error sending signal: %w", err)) - } - - return connect.NewResponse(&rpc.SendSignalResponse{}), nil -} diff --git a/envd/internal/services/process/start.go b/envd/internal/services/process/start.go deleted file mode 100644 index b9a61b1..0000000 --- a/envd/internal/services/process/start.go +++ /dev/null @@ -1,249 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - "errors" - "fmt" - "net/http" - "os/user" - "strconv" - "time" - - "connectrpc.com/connect" - - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/process/handler" - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func (s *Service) InitializeStartProcess(ctx context.Context, user *user.User, req *rpc.StartRequest) error { - var err error - - ctx = logs.AddRequestIDToContext(ctx) - - defer s.logger. - Err(err). - Interface("request", req). - Str(string(logs.OperationIDKey), ctx.Value(logs.OperationIDKey).(string)). - Msg("Initialized startCmd") - - handlerL := s.logger.With().Str(string(logs.OperationIDKey), ctx.Value(logs.OperationIDKey).(string)).Logger() - - startProcCtx, startProcCancel := context.WithCancel(ctx) - proc, err := handler.New(startProcCtx, user, req, &handlerL, s.defaults, s.cgroupManager, startProcCancel) - if err != nil { - return err - } - - pid, err := proc.Start() - if err != nil { - return err - } - - s.processes.Store(pid, proc) - - go func() { - defer s.processes.Delete(pid) - - proc.Wait() - }() - - return nil -} - -func (s *Service) Start(ctx context.Context, req *connect.Request[rpc.StartRequest], stream *connect.ServerStream[rpc.StartResponse]) error { - return logs.LogServerStreamWithoutEvents(ctx, s.logger, req, stream, s.handleStart) -} - -func (s *Service) handleStart(ctx context.Context, req *connect.Request[rpc.StartRequest], stream *connect.ServerStream[rpc.StartResponse]) error { - ctx, cancel := context.WithCancelCause(ctx) - defer cancel(nil) - - handlerL := s.logger.With().Str(string(logs.OperationIDKey), ctx.Value(logs.OperationIDKey).(string)).Logger() - - u, err := permissions.GetAuthUser(ctx, s.defaults.User) - if err != nil { - return err - } - - timeout, err := determineTimeoutFromHeader(stream.Conn().RequestHeader()) - if err != nil { - return connect.NewError(connect.CodeInvalidArgument, err) - } - - // Create a new context with a timeout if provided. - // We do not want the command to be killed if the request context is cancelled - procCtx, cancelProc := context.Background(), func() {} - if timeout > 0 { // zero timeout means no timeout - procCtx, cancelProc = context.WithTimeout(procCtx, timeout) - } - - proc, err := handler.New( //nolint:contextcheck // TODO: fix this later - procCtx, - u, - req.Msg, - &handlerL, - s.defaults, - s.cgroupManager, - cancelProc, - ) - if err != nil { - // Ensure the process cancel is called to cleanup resources. - cancelProc() - - return err - } - - exitChan := make(chan struct{}) - - startMultiplexer := handler.NewMultiplexedChannel[rpc.ProcessEvent_Start](0) - defer close(startMultiplexer.Source) - - start, startCancel := startMultiplexer.Fork() - defer startCancel() - - data, dataCancel := proc.DataEvent.Fork() - defer dataCancel() - - end, endCancel := proc.EndEvent.Fork() - defer endCancel() - - go func() { - defer close(exitChan) - - select { - case <-ctx.Done(): - cancel(ctx.Err()) - - return - case event, ok := <-start: - if !ok { - cancel(connect.NewError(connect.CodeUnknown, errors.New("start event channel closed before sending start event"))) - - return - } - - streamErr := stream.Send(&rpc.StartResponse{ - Event: &rpc.ProcessEvent{ - Event: &event, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending start event: %w", streamErr))) - - return - } - } - - keepaliveTicker, resetKeepalive := permissions.GetKeepAliveTicker(req) - defer keepaliveTicker.Stop() - - dataLoop: - for { - select { - case <-keepaliveTicker.C: - streamErr := stream.Send(&rpc.StartResponse{ - Event: &rpc.ProcessEvent{ - Event: &rpc.ProcessEvent_Keepalive{ - Keepalive: &rpc.ProcessEvent_KeepAlive{}, - }, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending keepalive: %w", streamErr))) - - return - } - case <-ctx.Done(): - cancel(ctx.Err()) - - return - case event, ok := <-data: - if !ok { - break dataLoop - } - - streamErr := stream.Send(&rpc.StartResponse{ - Event: &rpc.ProcessEvent{ - Event: &event, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending data event: %w", streamErr))) - - return - } - - resetKeepalive() - } - } - - select { - case <-ctx.Done(): - cancel(ctx.Err()) - - return - case event, ok := <-end: - if !ok { - cancel(connect.NewError(connect.CodeUnknown, errors.New("end event channel closed before sending end event"))) - - return - } - - streamErr := stream.Send(&rpc.StartResponse{ - Event: &rpc.ProcessEvent{ - Event: &event, - }, - }) - if streamErr != nil { - cancel(connect.NewError(connect.CodeUnknown, fmt.Errorf("error sending end event: %w", streamErr))) - - return - } - } - }() - - pid, err := proc.Start() - if err != nil { - return connect.NewError(connect.CodeInvalidArgument, err) - } - - s.processes.Store(pid, proc) - - start <- rpc.ProcessEvent_Start{ - Start: &rpc.ProcessEvent_StartEvent{ - Pid: pid, - }, - } - - go func() { - defer s.processes.Delete(pid) - - proc.Wait() - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-exitChan: - return nil - } -} - -func determineTimeoutFromHeader(header http.Header) (time.Duration, error) { - timeoutHeader := header.Get("Connect-Timeout-Ms") - - if timeoutHeader == "" { - return 0, nil - } - - timeout, err := strconv.Atoi(timeoutHeader) - if err != nil { - return 0, err - } - - return time.Duration(timeout) * time.Millisecond, nil -} diff --git a/envd/internal/services/process/update.go b/envd/internal/services/process/update.go deleted file mode 100644 index 1778f89..0000000 --- a/envd/internal/services/process/update.go +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package process - -import ( - "context" - "fmt" - - "connectrpc.com/connect" - "github.com/creack/pty" - - rpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" -) - -func (s *Service) Update(_ context.Context, req *connect.Request[rpc.UpdateRequest]) (*connect.Response[rpc.UpdateResponse], error) { - proc, err := s.getProcess(req.Msg.GetProcess()) - if err != nil { - return nil, err - } - - if req.Msg.GetPty() != nil { - err := proc.ResizeTty(&pty.Winsize{ - Rows: uint16(req.Msg.GetPty().GetSize().GetRows()), - Cols: uint16(req.Msg.GetPty().GetSize().GetCols()), - }) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("error resizing tty: %w", err)) - } - } - - return connect.NewResponse(&rpc.UpdateResponse{}), nil -} diff --git a/envd/internal/services/spec/filesystem.pb.go b/envd/internal/services/spec/filesystem.pb.go deleted file mode 100644 index 9d3e537..0000000 --- a/envd/internal/services/spec/filesystem.pb.go +++ /dev/null @@ -1,1446 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.11 -// protoc (unknown) -// source: filesystem.proto - -package spec - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type FileType int32 - -const ( - FileType_FILE_TYPE_UNSPECIFIED FileType = 0 - FileType_FILE_TYPE_FILE FileType = 1 - FileType_FILE_TYPE_DIRECTORY FileType = 2 - FileType_FILE_TYPE_SYMLINK FileType = 3 -) - -// Enum value maps for FileType. -var ( - FileType_name = map[int32]string{ - 0: "FILE_TYPE_UNSPECIFIED", - 1: "FILE_TYPE_FILE", - 2: "FILE_TYPE_DIRECTORY", - 3: "FILE_TYPE_SYMLINK", - } - FileType_value = map[string]int32{ - "FILE_TYPE_UNSPECIFIED": 0, - "FILE_TYPE_FILE": 1, - "FILE_TYPE_DIRECTORY": 2, - "FILE_TYPE_SYMLINK": 3, - } -) - -func (x FileType) Enum() *FileType { - p := new(FileType) - *p = x - return p -} - -func (x FileType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FileType) Descriptor() protoreflect.EnumDescriptor { - return file_filesystem_proto_enumTypes[0].Descriptor() -} - -func (FileType) Type() protoreflect.EnumType { - return &file_filesystem_proto_enumTypes[0] -} - -func (x FileType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FileType.Descriptor instead. -func (FileType) EnumDescriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{0} -} - -type EventType int32 - -const ( - EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 - EventType_EVENT_TYPE_CREATE EventType = 1 - EventType_EVENT_TYPE_WRITE EventType = 2 - EventType_EVENT_TYPE_REMOVE EventType = 3 - EventType_EVENT_TYPE_RENAME EventType = 4 - EventType_EVENT_TYPE_CHMOD EventType = 5 -) - -// Enum value maps for EventType. -var ( - EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNSPECIFIED", - 1: "EVENT_TYPE_CREATE", - 2: "EVENT_TYPE_WRITE", - 3: "EVENT_TYPE_REMOVE", - 4: "EVENT_TYPE_RENAME", - 5: "EVENT_TYPE_CHMOD", - } - EventType_value = map[string]int32{ - "EVENT_TYPE_UNSPECIFIED": 0, - "EVENT_TYPE_CREATE": 1, - "EVENT_TYPE_WRITE": 2, - "EVENT_TYPE_REMOVE": 3, - "EVENT_TYPE_RENAME": 4, - "EVENT_TYPE_CHMOD": 5, - } -) - -func (x EventType) Enum() *EventType { - p := new(EventType) - *p = x - return p -} - -func (x EventType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (EventType) Descriptor() protoreflect.EnumDescriptor { - return file_filesystem_proto_enumTypes[1].Descriptor() -} - -func (EventType) Type() protoreflect.EnumType { - return &file_filesystem_proto_enumTypes[1] -} - -func (x EventType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use EventType.Descriptor instead. -func (EventType) EnumDescriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{1} -} - -type MoveRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MoveRequest) Reset() { - *x = MoveRequest{} - mi := &file_filesystem_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveRequest) ProtoMessage() {} - -func (x *MoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead. -func (*MoveRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{0} -} - -func (x *MoveRequest) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *MoveRequest) GetDestination() string { - if x != nil { - return x.Destination - } - return "" -} - -type MoveResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MoveResponse) Reset() { - *x = MoveResponse{} - mi := &file_filesystem_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveResponse) ProtoMessage() {} - -func (x *MoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead. -func (*MoveResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{1} -} - -func (x *MoveResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type MakeDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MakeDirRequest) Reset() { - *x = MakeDirRequest{} - mi := &file_filesystem_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MakeDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MakeDirRequest) ProtoMessage() {} - -func (x *MakeDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MakeDirRequest.ProtoReflect.Descriptor instead. -func (*MakeDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{2} -} - -func (x *MakeDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type MakeDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MakeDirResponse) Reset() { - *x = MakeDirResponse{} - mi := &file_filesystem_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MakeDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MakeDirResponse) ProtoMessage() {} - -func (x *MakeDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MakeDirResponse.ProtoReflect.Descriptor instead. -func (*MakeDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{3} -} - -func (x *MakeDirResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type RemoveRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveRequest) Reset() { - *x = RemoveRequest{} - mi := &file_filesystem_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveRequest) ProtoMessage() {} - -func (x *RemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead. -func (*RemoveRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{4} -} - -func (x *RemoveRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type RemoveResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveResponse) Reset() { - *x = RemoveResponse{} - mi := &file_filesystem_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveResponse) ProtoMessage() {} - -func (x *RemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead. -func (*RemoveResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{5} -} - -type StatRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StatRequest) Reset() { - *x = StatRequest{} - mi := &file_filesystem_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatRequest) ProtoMessage() {} - -func (x *StatRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatRequest.ProtoReflect.Descriptor instead. -func (*StatRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{6} -} - -func (x *StatRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type StatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StatResponse) Reset() { - *x = StatResponse{} - mi := &file_filesystem_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatResponse) ProtoMessage() {} - -func (x *StatResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatResponse.ProtoReflect.Descriptor instead. -func (*StatResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{7} -} - -func (x *StatResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type EntryInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type FileType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.FileType" json:"type,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` - Permissions string `protobuf:"bytes,6,opt,name=permissions,proto3" json:"permissions,omitempty"` - Owner string `protobuf:"bytes,7,opt,name=owner,proto3" json:"owner,omitempty"` - Group string `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"` - ModifiedTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` - // If the entry is a symlink, this field contains the target of the symlink. - SymlinkTarget *string `protobuf:"bytes,10,opt,name=symlink_target,json=symlinkTarget,proto3,oneof" json:"symlink_target,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EntryInfo) Reset() { - *x = EntryInfo{} - mi := &file_filesystem_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EntryInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EntryInfo) ProtoMessage() {} - -func (x *EntryInfo) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EntryInfo.ProtoReflect.Descriptor instead. -func (*EntryInfo) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{8} -} - -func (x *EntryInfo) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *EntryInfo) GetType() FileType { - if x != nil { - return x.Type - } - return FileType_FILE_TYPE_UNSPECIFIED -} - -func (x *EntryInfo) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *EntryInfo) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *EntryInfo) GetMode() uint32 { - if x != nil { - return x.Mode - } - return 0 -} - -func (x *EntryInfo) GetPermissions() string { - if x != nil { - return x.Permissions - } - return "" -} - -func (x *EntryInfo) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -func (x *EntryInfo) GetGroup() string { - if x != nil { - return x.Group - } - return "" -} - -func (x *EntryInfo) GetModifiedTime() *timestamppb.Timestamp { - if x != nil { - return x.ModifiedTime - } - return nil -} - -func (x *EntryInfo) GetSymlinkTarget() string { - if x != nil && x.SymlinkTarget != nil { - return *x.SymlinkTarget - } - return "" -} - -type ListDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Depth uint32 `protobuf:"varint,2,opt,name=depth,proto3" json:"depth,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListDirRequest) Reset() { - *x = ListDirRequest{} - mi := &file_filesystem_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListDirRequest) ProtoMessage() {} - -func (x *ListDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListDirRequest.ProtoReflect.Descriptor instead. -func (*ListDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{9} -} - -func (x *ListDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *ListDirRequest) GetDepth() uint32 { - if x != nil { - return x.Depth - } - return 0 -} - -type ListDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entries []*EntryInfo `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListDirResponse) Reset() { - *x = ListDirResponse{} - mi := &file_filesystem_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListDirResponse) ProtoMessage() {} - -func (x *ListDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListDirResponse.ProtoReflect.Descriptor instead. -func (*ListDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{10} -} - -func (x *ListDirResponse) GetEntries() []*EntryInfo { - if x != nil { - return x.Entries - } - return nil -} - -type WatchDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirRequest) Reset() { - *x = WatchDirRequest{} - mi := &file_filesystem_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirRequest) ProtoMessage() {} - -func (x *WatchDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirRequest.ProtoReflect.Descriptor instead. -func (*WatchDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{11} -} - -func (x *WatchDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *WatchDirRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -type FilesystemEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type EventType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.EventType" json:"type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FilesystemEvent) Reset() { - *x = FilesystemEvent{} - mi := &file_filesystem_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FilesystemEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FilesystemEvent) ProtoMessage() {} - -func (x *FilesystemEvent) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FilesystemEvent.ProtoReflect.Descriptor instead. -func (*FilesystemEvent) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{12} -} - -func (x *FilesystemEvent) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FilesystemEvent) GetType() EventType { - if x != nil { - return x.Type - } - return EventType_EVENT_TYPE_UNSPECIFIED -} - -type WatchDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *WatchDirResponse_Start - // *WatchDirResponse_Filesystem - // *WatchDirResponse_Keepalive - Event isWatchDirResponse_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse) Reset() { - *x = WatchDirResponse{} - mi := &file_filesystem_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse) ProtoMessage() {} - -func (x *WatchDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse.ProtoReflect.Descriptor instead. -func (*WatchDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{13} -} - -func (x *WatchDirResponse) GetEvent() isWatchDirResponse_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *WatchDirResponse) GetStart() *WatchDirResponse_StartEvent { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Start); ok { - return x.Start - } - } - return nil -} - -func (x *WatchDirResponse) GetFilesystem() *FilesystemEvent { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Filesystem); ok { - return x.Filesystem - } - } - return nil -} - -func (x *WatchDirResponse) GetKeepalive() *WatchDirResponse_KeepAlive { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isWatchDirResponse_Event interface { - isWatchDirResponse_Event() -} - -type WatchDirResponse_Start struct { - Start *WatchDirResponse_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type WatchDirResponse_Filesystem struct { - Filesystem *FilesystemEvent `protobuf:"bytes,2,opt,name=filesystem,proto3,oneof"` -} - -type WatchDirResponse_Keepalive struct { - Keepalive *WatchDirResponse_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` -} - -func (*WatchDirResponse_Start) isWatchDirResponse_Event() {} - -func (*WatchDirResponse_Filesystem) isWatchDirResponse_Event() {} - -func (*WatchDirResponse_Keepalive) isWatchDirResponse_Event() {} - -type CreateWatcherRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateWatcherRequest) Reset() { - *x = CreateWatcherRequest{} - mi := &file_filesystem_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateWatcherRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateWatcherRequest) ProtoMessage() {} - -func (x *CreateWatcherRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateWatcherRequest.ProtoReflect.Descriptor instead. -func (*CreateWatcherRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{14} -} - -func (x *CreateWatcherRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *CreateWatcherRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -type CreateWatcherResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateWatcherResponse) Reset() { - *x = CreateWatcherResponse{} - mi := &file_filesystem_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateWatcherResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateWatcherResponse) ProtoMessage() {} - -func (x *CreateWatcherResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateWatcherResponse.ProtoReflect.Descriptor instead. -func (*CreateWatcherResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{15} -} - -func (x *CreateWatcherResponse) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type GetWatcherEventsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetWatcherEventsRequest) Reset() { - *x = GetWatcherEventsRequest{} - mi := &file_filesystem_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetWatcherEventsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetWatcherEventsRequest) ProtoMessage() {} - -func (x *GetWatcherEventsRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetWatcherEventsRequest.ProtoReflect.Descriptor instead. -func (*GetWatcherEventsRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{16} -} - -func (x *GetWatcherEventsRequest) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type GetWatcherEventsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Events []*FilesystemEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetWatcherEventsResponse) Reset() { - *x = GetWatcherEventsResponse{} - mi := &file_filesystem_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetWatcherEventsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetWatcherEventsResponse) ProtoMessage() {} - -func (x *GetWatcherEventsResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetWatcherEventsResponse.ProtoReflect.Descriptor instead. -func (*GetWatcherEventsResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{17} -} - -func (x *GetWatcherEventsResponse) GetEvents() []*FilesystemEvent { - if x != nil { - return x.Events - } - return nil -} - -type RemoveWatcherRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveWatcherRequest) Reset() { - *x = RemoveWatcherRequest{} - mi := &file_filesystem_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveWatcherRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveWatcherRequest) ProtoMessage() {} - -func (x *RemoveWatcherRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveWatcherRequest.ProtoReflect.Descriptor instead. -func (*RemoveWatcherRequest) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{18} -} - -func (x *RemoveWatcherRequest) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type RemoveWatcherResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveWatcherResponse) Reset() { - *x = RemoveWatcherResponse{} - mi := &file_filesystem_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveWatcherResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveWatcherResponse) ProtoMessage() {} - -func (x *RemoveWatcherResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveWatcherResponse.ProtoReflect.Descriptor instead. -func (*RemoveWatcherResponse) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{19} -} - -type WatchDirResponse_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse_StartEvent) Reset() { - *x = WatchDirResponse_StartEvent{} - mi := &file_filesystem_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse_StartEvent) ProtoMessage() {} - -func (x *WatchDirResponse_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse_StartEvent.ProtoReflect.Descriptor instead. -func (*WatchDirResponse_StartEvent) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{13, 0} -} - -type WatchDirResponse_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse_KeepAlive) Reset() { - *x = WatchDirResponse_KeepAlive{} - mi := &file_filesystem_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse_KeepAlive) ProtoMessage() {} - -func (x *WatchDirResponse_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse_KeepAlive.ProtoReflect.Descriptor instead. -func (*WatchDirResponse_KeepAlive) Descriptor() ([]byte, []int) { - return file_filesystem_proto_rawDescGZIP(), []int{13, 1} -} - -var File_filesystem_proto protoreflect.FileDescriptor - -const file_filesystem_proto_rawDesc = "" + - "\n" + - "\x10filesystem.proto\x12\n" + - "filesystem\x1a\x1fgoogle/protobuf/timestamp.proto\"G\n" + - "\vMoveRequest\x12\x16\n" + - "\x06source\x18\x01 \x01(\tR\x06source\x12 \n" + - "\vdestination\x18\x02 \x01(\tR\vdestination\";\n" + - "\fMoveResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"$\n" + - "\x0eMakeDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\">\n" + - "\x0fMakeDirResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"#\n" + - "\rRemoveRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\"\x10\n" + - "\x0eRemoveResponse\"!\n" + - "\vStatRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\";\n" + - "\fStatResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"\xd3\x02\n" + - "\tEntryInfo\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12(\n" + - "\x04type\x18\x02 \x01(\x0e2\x14.filesystem.FileTypeR\x04type\x12\x12\n" + - "\x04path\x18\x03 \x01(\tR\x04path\x12\x12\n" + - "\x04size\x18\x04 \x01(\x03R\x04size\x12\x12\n" + - "\x04mode\x18\x05 \x01(\rR\x04mode\x12 \n" + - "\vpermissions\x18\x06 \x01(\tR\vpermissions\x12\x14\n" + - "\x05owner\x18\a \x01(\tR\x05owner\x12\x14\n" + - "\x05group\x18\b \x01(\tR\x05group\x12?\n" + - "\rmodified_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\fmodifiedTime\x12*\n" + - "\x0esymlink_target\x18\n" + - " \x01(\tH\x00R\rsymlinkTarget\x88\x01\x01B\x11\n" + - "\x0f_symlink_target\":\n" + - "\x0eListDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x14\n" + - "\x05depth\x18\x02 \x01(\rR\x05depth\"B\n" + - "\x0fListDirResponse\x12/\n" + - "\aentries\x18\x01 \x03(\v2\x15.filesystem.EntryInfoR\aentries\"C\n" + - "\x0fWatchDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x1c\n" + - "\trecursive\x18\x02 \x01(\bR\trecursive\"P\n" + - "\x0fFilesystemEvent\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12)\n" + - "\x04type\x18\x02 \x01(\x0e2\x15.filesystem.EventTypeR\x04type\"\xfe\x01\n" + - "\x10WatchDirResponse\x12?\n" + - "\x05start\x18\x01 \x01(\v2'.filesystem.WatchDirResponse.StartEventH\x00R\x05start\x12=\n" + - "\n" + - "filesystem\x18\x02 \x01(\v2\x1b.filesystem.FilesystemEventH\x00R\n" + - "filesystem\x12F\n" + - "\tkeepalive\x18\x03 \x01(\v2&.filesystem.WatchDirResponse.KeepAliveH\x00R\tkeepalive\x1a\f\n" + - "\n" + - "StartEvent\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"H\n" + - "\x14CreateWatcherRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x1c\n" + - "\trecursive\x18\x02 \x01(\bR\trecursive\"6\n" + - "\x15CreateWatcherResponse\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"8\n" + - "\x17GetWatcherEventsRequest\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"O\n" + - "\x18GetWatcherEventsResponse\x123\n" + - "\x06events\x18\x01 \x03(\v2\x1b.filesystem.FilesystemEventR\x06events\"5\n" + - "\x14RemoveWatcherRequest\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"\x17\n" + - "\x15RemoveWatcherResponse*i\n" + - "\bFileType\x12\x19\n" + - "\x15FILE_TYPE_UNSPECIFIED\x10\x00\x12\x12\n" + - "\x0eFILE_TYPE_FILE\x10\x01\x12\x17\n" + - "\x13FILE_TYPE_DIRECTORY\x10\x02\x12\x15\n" + - "\x11FILE_TYPE_SYMLINK\x10\x03*\x98\x01\n" + - "\tEventType\x12\x1a\n" + - "\x16EVENT_TYPE_UNSPECIFIED\x10\x00\x12\x15\n" + - "\x11EVENT_TYPE_CREATE\x10\x01\x12\x14\n" + - "\x10EVENT_TYPE_WRITE\x10\x02\x12\x15\n" + - "\x11EVENT_TYPE_REMOVE\x10\x03\x12\x15\n" + - "\x11EVENT_TYPE_RENAME\x10\x04\x12\x14\n" + - "\x10EVENT_TYPE_CHMOD\x10\x052\x9f\x05\n" + - "\n" + - "Filesystem\x129\n" + - "\x04Stat\x12\x17.filesystem.StatRequest\x1a\x18.filesystem.StatResponse\x12B\n" + - "\aMakeDir\x12\x1a.filesystem.MakeDirRequest\x1a\x1b.filesystem.MakeDirResponse\x129\n" + - "\x04Move\x12\x17.filesystem.MoveRequest\x1a\x18.filesystem.MoveResponse\x12B\n" + - "\aListDir\x12\x1a.filesystem.ListDirRequest\x1a\x1b.filesystem.ListDirResponse\x12?\n" + - "\x06Remove\x12\x19.filesystem.RemoveRequest\x1a\x1a.filesystem.RemoveResponse\x12G\n" + - "\bWatchDir\x12\x1b.filesystem.WatchDirRequest\x1a\x1c.filesystem.WatchDirResponse0\x01\x12T\n" + - "\rCreateWatcher\x12 .filesystem.CreateWatcherRequest\x1a!.filesystem.CreateWatcherResponse\x12]\n" + - "\x10GetWatcherEvents\x12#.filesystem.GetWatcherEventsRequest\x1a$.filesystem.GetWatcherEventsResponse\x12T\n" + - "\rRemoveWatcher\x12 .filesystem.RemoveWatcherRequest\x1a!.filesystem.RemoveWatcherResponseB\xa2\x01\n" + - "\x0ecom.filesystemB\x0fFilesystemProtoP\x01Z7git.omukk.dev/wrenn/sandbox/envd/internal/services/spec\xa2\x02\x03FXX\xaa\x02\n" + - "Filesystem\xca\x02\n" + - "Filesystem\xe2\x02\x16Filesystem\\GPBMetadata\xea\x02\n" + - "Filesystemb\x06proto3" - -var ( - file_filesystem_proto_rawDescOnce sync.Once - file_filesystem_proto_rawDescData []byte -) - -func file_filesystem_proto_rawDescGZIP() []byte { - file_filesystem_proto_rawDescOnce.Do(func() { - file_filesystem_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_filesystem_proto_rawDesc), len(file_filesystem_proto_rawDesc))) - }) - return file_filesystem_proto_rawDescData -} - -var file_filesystem_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_filesystem_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_filesystem_proto_goTypes = []any{ - (FileType)(0), // 0: filesystem.FileType - (EventType)(0), // 1: filesystem.EventType - (*MoveRequest)(nil), // 2: filesystem.MoveRequest - (*MoveResponse)(nil), // 3: filesystem.MoveResponse - (*MakeDirRequest)(nil), // 4: filesystem.MakeDirRequest - (*MakeDirResponse)(nil), // 5: filesystem.MakeDirResponse - (*RemoveRequest)(nil), // 6: filesystem.RemoveRequest - (*RemoveResponse)(nil), // 7: filesystem.RemoveResponse - (*StatRequest)(nil), // 8: filesystem.StatRequest - (*StatResponse)(nil), // 9: filesystem.StatResponse - (*EntryInfo)(nil), // 10: filesystem.EntryInfo - (*ListDirRequest)(nil), // 11: filesystem.ListDirRequest - (*ListDirResponse)(nil), // 12: filesystem.ListDirResponse - (*WatchDirRequest)(nil), // 13: filesystem.WatchDirRequest - (*FilesystemEvent)(nil), // 14: filesystem.FilesystemEvent - (*WatchDirResponse)(nil), // 15: filesystem.WatchDirResponse - (*CreateWatcherRequest)(nil), // 16: filesystem.CreateWatcherRequest - (*CreateWatcherResponse)(nil), // 17: filesystem.CreateWatcherResponse - (*GetWatcherEventsRequest)(nil), // 18: filesystem.GetWatcherEventsRequest - (*GetWatcherEventsResponse)(nil), // 19: filesystem.GetWatcherEventsResponse - (*RemoveWatcherRequest)(nil), // 20: filesystem.RemoveWatcherRequest - (*RemoveWatcherResponse)(nil), // 21: filesystem.RemoveWatcherResponse - (*WatchDirResponse_StartEvent)(nil), // 22: filesystem.WatchDirResponse.StartEvent - (*WatchDirResponse_KeepAlive)(nil), // 23: filesystem.WatchDirResponse.KeepAlive - (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp -} -var file_filesystem_proto_depIdxs = []int32{ - 10, // 0: filesystem.MoveResponse.entry:type_name -> filesystem.EntryInfo - 10, // 1: filesystem.MakeDirResponse.entry:type_name -> filesystem.EntryInfo - 10, // 2: filesystem.StatResponse.entry:type_name -> filesystem.EntryInfo - 0, // 3: filesystem.EntryInfo.type:type_name -> filesystem.FileType - 24, // 4: filesystem.EntryInfo.modified_time:type_name -> google.protobuf.Timestamp - 10, // 5: filesystem.ListDirResponse.entries:type_name -> filesystem.EntryInfo - 1, // 6: filesystem.FilesystemEvent.type:type_name -> filesystem.EventType - 22, // 7: filesystem.WatchDirResponse.start:type_name -> filesystem.WatchDirResponse.StartEvent - 14, // 8: filesystem.WatchDirResponse.filesystem:type_name -> filesystem.FilesystemEvent - 23, // 9: filesystem.WatchDirResponse.keepalive:type_name -> filesystem.WatchDirResponse.KeepAlive - 14, // 10: filesystem.GetWatcherEventsResponse.events:type_name -> filesystem.FilesystemEvent - 8, // 11: filesystem.Filesystem.Stat:input_type -> filesystem.StatRequest - 4, // 12: filesystem.Filesystem.MakeDir:input_type -> filesystem.MakeDirRequest - 2, // 13: filesystem.Filesystem.Move:input_type -> filesystem.MoveRequest - 11, // 14: filesystem.Filesystem.ListDir:input_type -> filesystem.ListDirRequest - 6, // 15: filesystem.Filesystem.Remove:input_type -> filesystem.RemoveRequest - 13, // 16: filesystem.Filesystem.WatchDir:input_type -> filesystem.WatchDirRequest - 16, // 17: filesystem.Filesystem.CreateWatcher:input_type -> filesystem.CreateWatcherRequest - 18, // 18: filesystem.Filesystem.GetWatcherEvents:input_type -> filesystem.GetWatcherEventsRequest - 20, // 19: filesystem.Filesystem.RemoveWatcher:input_type -> filesystem.RemoveWatcherRequest - 9, // 20: filesystem.Filesystem.Stat:output_type -> filesystem.StatResponse - 5, // 21: filesystem.Filesystem.MakeDir:output_type -> filesystem.MakeDirResponse - 3, // 22: filesystem.Filesystem.Move:output_type -> filesystem.MoveResponse - 12, // 23: filesystem.Filesystem.ListDir:output_type -> filesystem.ListDirResponse - 7, // 24: filesystem.Filesystem.Remove:output_type -> filesystem.RemoveResponse - 15, // 25: filesystem.Filesystem.WatchDir:output_type -> filesystem.WatchDirResponse - 17, // 26: filesystem.Filesystem.CreateWatcher:output_type -> filesystem.CreateWatcherResponse - 19, // 27: filesystem.Filesystem.GetWatcherEvents:output_type -> filesystem.GetWatcherEventsResponse - 21, // 28: filesystem.Filesystem.RemoveWatcher:output_type -> filesystem.RemoveWatcherResponse - 20, // [20:29] is the sub-list for method output_type - 11, // [11:20] is the sub-list for method input_type - 11, // [11:11] is the sub-list for extension type_name - 11, // [11:11] is the sub-list for extension extendee - 0, // [0:11] is the sub-list for field type_name -} - -func init() { file_filesystem_proto_init() } -func file_filesystem_proto_init() { - if File_filesystem_proto != nil { - return - } - file_filesystem_proto_msgTypes[8].OneofWrappers = []any{} - file_filesystem_proto_msgTypes[13].OneofWrappers = []any{ - (*WatchDirResponse_Start)(nil), - (*WatchDirResponse_Filesystem)(nil), - (*WatchDirResponse_Keepalive)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_filesystem_proto_rawDesc), len(file_filesystem_proto_rawDesc)), - NumEnums: 2, - NumMessages: 22, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_filesystem_proto_goTypes, - DependencyIndexes: file_filesystem_proto_depIdxs, - EnumInfos: file_filesystem_proto_enumTypes, - MessageInfos: file_filesystem_proto_msgTypes, - }.Build() - File_filesystem_proto = out.File - file_filesystem_proto_goTypes = nil - file_filesystem_proto_depIdxs = nil -} diff --git a/envd/internal/services/spec/filesystem/filesystem.pb.go b/envd/internal/services/spec/filesystem/filesystem.pb.go deleted file mode 100644 index d8a03bd..0000000 --- a/envd/internal/services/spec/filesystem/filesystem.pb.go +++ /dev/null @@ -1,1444 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.11 -// protoc (unknown) -// source: filesystem/filesystem.proto - -package filesystem - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type FileType int32 - -const ( - FileType_FILE_TYPE_UNSPECIFIED FileType = 0 - FileType_FILE_TYPE_FILE FileType = 1 - FileType_FILE_TYPE_DIRECTORY FileType = 2 - FileType_FILE_TYPE_SYMLINK FileType = 3 -) - -// Enum value maps for FileType. -var ( - FileType_name = map[int32]string{ - 0: "FILE_TYPE_UNSPECIFIED", - 1: "FILE_TYPE_FILE", - 2: "FILE_TYPE_DIRECTORY", - 3: "FILE_TYPE_SYMLINK", - } - FileType_value = map[string]int32{ - "FILE_TYPE_UNSPECIFIED": 0, - "FILE_TYPE_FILE": 1, - "FILE_TYPE_DIRECTORY": 2, - "FILE_TYPE_SYMLINK": 3, - } -) - -func (x FileType) Enum() *FileType { - p := new(FileType) - *p = x - return p -} - -func (x FileType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FileType) Descriptor() protoreflect.EnumDescriptor { - return file_filesystem_filesystem_proto_enumTypes[0].Descriptor() -} - -func (FileType) Type() protoreflect.EnumType { - return &file_filesystem_filesystem_proto_enumTypes[0] -} - -func (x FileType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FileType.Descriptor instead. -func (FileType) EnumDescriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{0} -} - -type EventType int32 - -const ( - EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 - EventType_EVENT_TYPE_CREATE EventType = 1 - EventType_EVENT_TYPE_WRITE EventType = 2 - EventType_EVENT_TYPE_REMOVE EventType = 3 - EventType_EVENT_TYPE_RENAME EventType = 4 - EventType_EVENT_TYPE_CHMOD EventType = 5 -) - -// Enum value maps for EventType. -var ( - EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNSPECIFIED", - 1: "EVENT_TYPE_CREATE", - 2: "EVENT_TYPE_WRITE", - 3: "EVENT_TYPE_REMOVE", - 4: "EVENT_TYPE_RENAME", - 5: "EVENT_TYPE_CHMOD", - } - EventType_value = map[string]int32{ - "EVENT_TYPE_UNSPECIFIED": 0, - "EVENT_TYPE_CREATE": 1, - "EVENT_TYPE_WRITE": 2, - "EVENT_TYPE_REMOVE": 3, - "EVENT_TYPE_RENAME": 4, - "EVENT_TYPE_CHMOD": 5, - } -) - -func (x EventType) Enum() *EventType { - p := new(EventType) - *p = x - return p -} - -func (x EventType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (EventType) Descriptor() protoreflect.EnumDescriptor { - return file_filesystem_filesystem_proto_enumTypes[1].Descriptor() -} - -func (EventType) Type() protoreflect.EnumType { - return &file_filesystem_filesystem_proto_enumTypes[1] -} - -func (x EventType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use EventType.Descriptor instead. -func (EventType) EnumDescriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{1} -} - -type MoveRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MoveRequest) Reset() { - *x = MoveRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveRequest) ProtoMessage() {} - -func (x *MoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead. -func (*MoveRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{0} -} - -func (x *MoveRequest) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *MoveRequest) GetDestination() string { - if x != nil { - return x.Destination - } - return "" -} - -type MoveResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MoveResponse) Reset() { - *x = MoveResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MoveResponse) ProtoMessage() {} - -func (x *MoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead. -func (*MoveResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{1} -} - -func (x *MoveResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type MakeDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MakeDirRequest) Reset() { - *x = MakeDirRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MakeDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MakeDirRequest) ProtoMessage() {} - -func (x *MakeDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MakeDirRequest.ProtoReflect.Descriptor instead. -func (*MakeDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{2} -} - -func (x *MakeDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type MakeDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MakeDirResponse) Reset() { - *x = MakeDirResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MakeDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MakeDirResponse) ProtoMessage() {} - -func (x *MakeDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MakeDirResponse.ProtoReflect.Descriptor instead. -func (*MakeDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{3} -} - -func (x *MakeDirResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type RemoveRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveRequest) Reset() { - *x = RemoveRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveRequest) ProtoMessage() {} - -func (x *RemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead. -func (*RemoveRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{4} -} - -func (x *RemoveRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type RemoveResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveResponse) Reset() { - *x = RemoveResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveResponse) ProtoMessage() {} - -func (x *RemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead. -func (*RemoveResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{5} -} - -type StatRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StatRequest) Reset() { - *x = StatRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatRequest) ProtoMessage() {} - -func (x *StatRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatRequest.ProtoReflect.Descriptor instead. -func (*StatRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{6} -} - -func (x *StatRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type StatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StatResponse) Reset() { - *x = StatResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatResponse) ProtoMessage() {} - -func (x *StatResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatResponse.ProtoReflect.Descriptor instead. -func (*StatResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{7} -} - -func (x *StatResponse) GetEntry() *EntryInfo { - if x != nil { - return x.Entry - } - return nil -} - -type EntryInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type FileType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.FileType" json:"type,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` - Permissions string `protobuf:"bytes,6,opt,name=permissions,proto3" json:"permissions,omitempty"` - Owner string `protobuf:"bytes,7,opt,name=owner,proto3" json:"owner,omitempty"` - Group string `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"` - ModifiedTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` - // If the entry is a symlink, this field contains the target of the symlink. - SymlinkTarget *string `protobuf:"bytes,10,opt,name=symlink_target,json=symlinkTarget,proto3,oneof" json:"symlink_target,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EntryInfo) Reset() { - *x = EntryInfo{} - mi := &file_filesystem_filesystem_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EntryInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EntryInfo) ProtoMessage() {} - -func (x *EntryInfo) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EntryInfo.ProtoReflect.Descriptor instead. -func (*EntryInfo) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{8} -} - -func (x *EntryInfo) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *EntryInfo) GetType() FileType { - if x != nil { - return x.Type - } - return FileType_FILE_TYPE_UNSPECIFIED -} - -func (x *EntryInfo) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *EntryInfo) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *EntryInfo) GetMode() uint32 { - if x != nil { - return x.Mode - } - return 0 -} - -func (x *EntryInfo) GetPermissions() string { - if x != nil { - return x.Permissions - } - return "" -} - -func (x *EntryInfo) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -func (x *EntryInfo) GetGroup() string { - if x != nil { - return x.Group - } - return "" -} - -func (x *EntryInfo) GetModifiedTime() *timestamppb.Timestamp { - if x != nil { - return x.ModifiedTime - } - return nil -} - -func (x *EntryInfo) GetSymlinkTarget() string { - if x != nil && x.SymlinkTarget != nil { - return *x.SymlinkTarget - } - return "" -} - -type ListDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Depth uint32 `protobuf:"varint,2,opt,name=depth,proto3" json:"depth,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListDirRequest) Reset() { - *x = ListDirRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListDirRequest) ProtoMessage() {} - -func (x *ListDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListDirRequest.ProtoReflect.Descriptor instead. -func (*ListDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{9} -} - -func (x *ListDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *ListDirRequest) GetDepth() uint32 { - if x != nil { - return x.Depth - } - return 0 -} - -type ListDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entries []*EntryInfo `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListDirResponse) Reset() { - *x = ListDirResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListDirResponse) ProtoMessage() {} - -func (x *ListDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListDirResponse.ProtoReflect.Descriptor instead. -func (*ListDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{10} -} - -func (x *ListDirResponse) GetEntries() []*EntryInfo { - if x != nil { - return x.Entries - } - return nil -} - -type WatchDirRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirRequest) Reset() { - *x = WatchDirRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirRequest) ProtoMessage() {} - -func (x *WatchDirRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirRequest.ProtoReflect.Descriptor instead. -func (*WatchDirRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{11} -} - -func (x *WatchDirRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *WatchDirRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -type FilesystemEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type EventType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.EventType" json:"type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FilesystemEvent) Reset() { - *x = FilesystemEvent{} - mi := &file_filesystem_filesystem_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FilesystemEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FilesystemEvent) ProtoMessage() {} - -func (x *FilesystemEvent) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FilesystemEvent.ProtoReflect.Descriptor instead. -func (*FilesystemEvent) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{12} -} - -func (x *FilesystemEvent) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FilesystemEvent) GetType() EventType { - if x != nil { - return x.Type - } - return EventType_EVENT_TYPE_UNSPECIFIED -} - -type WatchDirResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *WatchDirResponse_Start - // *WatchDirResponse_Filesystem - // *WatchDirResponse_Keepalive - Event isWatchDirResponse_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse) Reset() { - *x = WatchDirResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse) ProtoMessage() {} - -func (x *WatchDirResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse.ProtoReflect.Descriptor instead. -func (*WatchDirResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13} -} - -func (x *WatchDirResponse) GetEvent() isWatchDirResponse_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *WatchDirResponse) GetStart() *WatchDirResponse_StartEvent { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Start); ok { - return x.Start - } - } - return nil -} - -func (x *WatchDirResponse) GetFilesystem() *FilesystemEvent { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Filesystem); ok { - return x.Filesystem - } - } - return nil -} - -func (x *WatchDirResponse) GetKeepalive() *WatchDirResponse_KeepAlive { - if x != nil { - if x, ok := x.Event.(*WatchDirResponse_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isWatchDirResponse_Event interface { - isWatchDirResponse_Event() -} - -type WatchDirResponse_Start struct { - Start *WatchDirResponse_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type WatchDirResponse_Filesystem struct { - Filesystem *FilesystemEvent `protobuf:"bytes,2,opt,name=filesystem,proto3,oneof"` -} - -type WatchDirResponse_Keepalive struct { - Keepalive *WatchDirResponse_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` -} - -func (*WatchDirResponse_Start) isWatchDirResponse_Event() {} - -func (*WatchDirResponse_Filesystem) isWatchDirResponse_Event() {} - -func (*WatchDirResponse_Keepalive) isWatchDirResponse_Event() {} - -type CreateWatcherRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateWatcherRequest) Reset() { - *x = CreateWatcherRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateWatcherRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateWatcherRequest) ProtoMessage() {} - -func (x *CreateWatcherRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateWatcherRequest.ProtoReflect.Descriptor instead. -func (*CreateWatcherRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{14} -} - -func (x *CreateWatcherRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *CreateWatcherRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -type CreateWatcherResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateWatcherResponse) Reset() { - *x = CreateWatcherResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateWatcherResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateWatcherResponse) ProtoMessage() {} - -func (x *CreateWatcherResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateWatcherResponse.ProtoReflect.Descriptor instead. -func (*CreateWatcherResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{15} -} - -func (x *CreateWatcherResponse) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type GetWatcherEventsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetWatcherEventsRequest) Reset() { - *x = GetWatcherEventsRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetWatcherEventsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetWatcherEventsRequest) ProtoMessage() {} - -func (x *GetWatcherEventsRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetWatcherEventsRequest.ProtoReflect.Descriptor instead. -func (*GetWatcherEventsRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{16} -} - -func (x *GetWatcherEventsRequest) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type GetWatcherEventsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Events []*FilesystemEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetWatcherEventsResponse) Reset() { - *x = GetWatcherEventsResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetWatcherEventsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetWatcherEventsResponse) ProtoMessage() {} - -func (x *GetWatcherEventsResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetWatcherEventsResponse.ProtoReflect.Descriptor instead. -func (*GetWatcherEventsResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{17} -} - -func (x *GetWatcherEventsResponse) GetEvents() []*FilesystemEvent { - if x != nil { - return x.Events - } - return nil -} - -type RemoveWatcherRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveWatcherRequest) Reset() { - *x = RemoveWatcherRequest{} - mi := &file_filesystem_filesystem_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveWatcherRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveWatcherRequest) ProtoMessage() {} - -func (x *RemoveWatcherRequest) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveWatcherRequest.ProtoReflect.Descriptor instead. -func (*RemoveWatcherRequest) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{18} -} - -func (x *RemoveWatcherRequest) GetWatcherId() string { - if x != nil { - return x.WatcherId - } - return "" -} - -type RemoveWatcherResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RemoveWatcherResponse) Reset() { - *x = RemoveWatcherResponse{} - mi := &file_filesystem_filesystem_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RemoveWatcherResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveWatcherResponse) ProtoMessage() {} - -func (x *RemoveWatcherResponse) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveWatcherResponse.ProtoReflect.Descriptor instead. -func (*RemoveWatcherResponse) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{19} -} - -type WatchDirResponse_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse_StartEvent) Reset() { - *x = WatchDirResponse_StartEvent{} - mi := &file_filesystem_filesystem_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse_StartEvent) ProtoMessage() {} - -func (x *WatchDirResponse_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse_StartEvent.ProtoReflect.Descriptor instead. -func (*WatchDirResponse_StartEvent) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13, 0} -} - -type WatchDirResponse_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WatchDirResponse_KeepAlive) Reset() { - *x = WatchDirResponse_KeepAlive{} - mi := &file_filesystem_filesystem_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WatchDirResponse_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WatchDirResponse_KeepAlive) ProtoMessage() {} - -func (x *WatchDirResponse_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_filesystem_filesystem_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WatchDirResponse_KeepAlive.ProtoReflect.Descriptor instead. -func (*WatchDirResponse_KeepAlive) Descriptor() ([]byte, []int) { - return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13, 1} -} - -var File_filesystem_filesystem_proto protoreflect.FileDescriptor - -const file_filesystem_filesystem_proto_rawDesc = "" + - "\n" + - "\x1bfilesystem/filesystem.proto\x12\n" + - "filesystem\x1a\x1fgoogle/protobuf/timestamp.proto\"G\n" + - "\vMoveRequest\x12\x16\n" + - "\x06source\x18\x01 \x01(\tR\x06source\x12 \n" + - "\vdestination\x18\x02 \x01(\tR\vdestination\";\n" + - "\fMoveResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"$\n" + - "\x0eMakeDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\">\n" + - "\x0fMakeDirResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"#\n" + - "\rRemoveRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\"\x10\n" + - "\x0eRemoveResponse\"!\n" + - "\vStatRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\";\n" + - "\fStatResponse\x12+\n" + - "\x05entry\x18\x01 \x01(\v2\x15.filesystem.EntryInfoR\x05entry\"\xd3\x02\n" + - "\tEntryInfo\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12(\n" + - "\x04type\x18\x02 \x01(\x0e2\x14.filesystem.FileTypeR\x04type\x12\x12\n" + - "\x04path\x18\x03 \x01(\tR\x04path\x12\x12\n" + - "\x04size\x18\x04 \x01(\x03R\x04size\x12\x12\n" + - "\x04mode\x18\x05 \x01(\rR\x04mode\x12 \n" + - "\vpermissions\x18\x06 \x01(\tR\vpermissions\x12\x14\n" + - "\x05owner\x18\a \x01(\tR\x05owner\x12\x14\n" + - "\x05group\x18\b \x01(\tR\x05group\x12?\n" + - "\rmodified_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\fmodifiedTime\x12*\n" + - "\x0esymlink_target\x18\n" + - " \x01(\tH\x00R\rsymlinkTarget\x88\x01\x01B\x11\n" + - "\x0f_symlink_target\":\n" + - "\x0eListDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x14\n" + - "\x05depth\x18\x02 \x01(\rR\x05depth\"B\n" + - "\x0fListDirResponse\x12/\n" + - "\aentries\x18\x01 \x03(\v2\x15.filesystem.EntryInfoR\aentries\"C\n" + - "\x0fWatchDirRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x1c\n" + - "\trecursive\x18\x02 \x01(\bR\trecursive\"P\n" + - "\x0fFilesystemEvent\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12)\n" + - "\x04type\x18\x02 \x01(\x0e2\x15.filesystem.EventTypeR\x04type\"\xfe\x01\n" + - "\x10WatchDirResponse\x12?\n" + - "\x05start\x18\x01 \x01(\v2'.filesystem.WatchDirResponse.StartEventH\x00R\x05start\x12=\n" + - "\n" + - "filesystem\x18\x02 \x01(\v2\x1b.filesystem.FilesystemEventH\x00R\n" + - "filesystem\x12F\n" + - "\tkeepalive\x18\x03 \x01(\v2&.filesystem.WatchDirResponse.KeepAliveH\x00R\tkeepalive\x1a\f\n" + - "\n" + - "StartEvent\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"H\n" + - "\x14CreateWatcherRequest\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x1c\n" + - "\trecursive\x18\x02 \x01(\bR\trecursive\"6\n" + - "\x15CreateWatcherResponse\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"8\n" + - "\x17GetWatcherEventsRequest\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"O\n" + - "\x18GetWatcherEventsResponse\x123\n" + - "\x06events\x18\x01 \x03(\v2\x1b.filesystem.FilesystemEventR\x06events\"5\n" + - "\x14RemoveWatcherRequest\x12\x1d\n" + - "\n" + - "watcher_id\x18\x01 \x01(\tR\twatcherId\"\x17\n" + - "\x15RemoveWatcherResponse*i\n" + - "\bFileType\x12\x19\n" + - "\x15FILE_TYPE_UNSPECIFIED\x10\x00\x12\x12\n" + - "\x0eFILE_TYPE_FILE\x10\x01\x12\x17\n" + - "\x13FILE_TYPE_DIRECTORY\x10\x02\x12\x15\n" + - "\x11FILE_TYPE_SYMLINK\x10\x03*\x98\x01\n" + - "\tEventType\x12\x1a\n" + - "\x16EVENT_TYPE_UNSPECIFIED\x10\x00\x12\x15\n" + - "\x11EVENT_TYPE_CREATE\x10\x01\x12\x14\n" + - "\x10EVENT_TYPE_WRITE\x10\x02\x12\x15\n" + - "\x11EVENT_TYPE_REMOVE\x10\x03\x12\x15\n" + - "\x11EVENT_TYPE_RENAME\x10\x04\x12\x14\n" + - "\x10EVENT_TYPE_CHMOD\x10\x052\x9f\x05\n" + - "\n" + - "Filesystem\x129\n" + - "\x04Stat\x12\x17.filesystem.StatRequest\x1a\x18.filesystem.StatResponse\x12B\n" + - "\aMakeDir\x12\x1a.filesystem.MakeDirRequest\x1a\x1b.filesystem.MakeDirResponse\x129\n" + - "\x04Move\x12\x17.filesystem.MoveRequest\x1a\x18.filesystem.MoveResponse\x12B\n" + - "\aListDir\x12\x1a.filesystem.ListDirRequest\x1a\x1b.filesystem.ListDirResponse\x12?\n" + - "\x06Remove\x12\x19.filesystem.RemoveRequest\x1a\x1a.filesystem.RemoveResponse\x12G\n" + - "\bWatchDir\x12\x1b.filesystem.WatchDirRequest\x1a\x1c.filesystem.WatchDirResponse0\x01\x12T\n" + - "\rCreateWatcher\x12 .filesystem.CreateWatcherRequest\x1a!.filesystem.CreateWatcherResponse\x12]\n" + - "\x10GetWatcherEvents\x12#.filesystem.GetWatcherEventsRequest\x1a$.filesystem.GetWatcherEventsResponse\x12T\n" + - "\rRemoveWatcher\x12 .filesystem.RemoveWatcherRequest\x1a!.filesystem.RemoveWatcherResponseB\xad\x01\n" + - "\x0ecom.filesystemB\x0fFilesystemProtoP\x01ZBgit.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem\xa2\x02\x03FXX\xaa\x02\n" + - "Filesystem\xca\x02\n" + - "Filesystem\xe2\x02\x16Filesystem\\GPBMetadata\xea\x02\n" + - "Filesystemb\x06proto3" - -var ( - file_filesystem_filesystem_proto_rawDescOnce sync.Once - file_filesystem_filesystem_proto_rawDescData []byte -) - -func file_filesystem_filesystem_proto_rawDescGZIP() []byte { - file_filesystem_filesystem_proto_rawDescOnce.Do(func() { - file_filesystem_filesystem_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_filesystem_filesystem_proto_rawDesc), len(file_filesystem_filesystem_proto_rawDesc))) - }) - return file_filesystem_filesystem_proto_rawDescData -} - -var file_filesystem_filesystem_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_filesystem_filesystem_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_filesystem_filesystem_proto_goTypes = []any{ - (FileType)(0), // 0: filesystem.FileType - (EventType)(0), // 1: filesystem.EventType - (*MoveRequest)(nil), // 2: filesystem.MoveRequest - (*MoveResponse)(nil), // 3: filesystem.MoveResponse - (*MakeDirRequest)(nil), // 4: filesystem.MakeDirRequest - (*MakeDirResponse)(nil), // 5: filesystem.MakeDirResponse - (*RemoveRequest)(nil), // 6: filesystem.RemoveRequest - (*RemoveResponse)(nil), // 7: filesystem.RemoveResponse - (*StatRequest)(nil), // 8: filesystem.StatRequest - (*StatResponse)(nil), // 9: filesystem.StatResponse - (*EntryInfo)(nil), // 10: filesystem.EntryInfo - (*ListDirRequest)(nil), // 11: filesystem.ListDirRequest - (*ListDirResponse)(nil), // 12: filesystem.ListDirResponse - (*WatchDirRequest)(nil), // 13: filesystem.WatchDirRequest - (*FilesystemEvent)(nil), // 14: filesystem.FilesystemEvent - (*WatchDirResponse)(nil), // 15: filesystem.WatchDirResponse - (*CreateWatcherRequest)(nil), // 16: filesystem.CreateWatcherRequest - (*CreateWatcherResponse)(nil), // 17: filesystem.CreateWatcherResponse - (*GetWatcherEventsRequest)(nil), // 18: filesystem.GetWatcherEventsRequest - (*GetWatcherEventsResponse)(nil), // 19: filesystem.GetWatcherEventsResponse - (*RemoveWatcherRequest)(nil), // 20: filesystem.RemoveWatcherRequest - (*RemoveWatcherResponse)(nil), // 21: filesystem.RemoveWatcherResponse - (*WatchDirResponse_StartEvent)(nil), // 22: filesystem.WatchDirResponse.StartEvent - (*WatchDirResponse_KeepAlive)(nil), // 23: filesystem.WatchDirResponse.KeepAlive - (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp -} -var file_filesystem_filesystem_proto_depIdxs = []int32{ - 10, // 0: filesystem.MoveResponse.entry:type_name -> filesystem.EntryInfo - 10, // 1: filesystem.MakeDirResponse.entry:type_name -> filesystem.EntryInfo - 10, // 2: filesystem.StatResponse.entry:type_name -> filesystem.EntryInfo - 0, // 3: filesystem.EntryInfo.type:type_name -> filesystem.FileType - 24, // 4: filesystem.EntryInfo.modified_time:type_name -> google.protobuf.Timestamp - 10, // 5: filesystem.ListDirResponse.entries:type_name -> filesystem.EntryInfo - 1, // 6: filesystem.FilesystemEvent.type:type_name -> filesystem.EventType - 22, // 7: filesystem.WatchDirResponse.start:type_name -> filesystem.WatchDirResponse.StartEvent - 14, // 8: filesystem.WatchDirResponse.filesystem:type_name -> filesystem.FilesystemEvent - 23, // 9: filesystem.WatchDirResponse.keepalive:type_name -> filesystem.WatchDirResponse.KeepAlive - 14, // 10: filesystem.GetWatcherEventsResponse.events:type_name -> filesystem.FilesystemEvent - 8, // 11: filesystem.Filesystem.Stat:input_type -> filesystem.StatRequest - 4, // 12: filesystem.Filesystem.MakeDir:input_type -> filesystem.MakeDirRequest - 2, // 13: filesystem.Filesystem.Move:input_type -> filesystem.MoveRequest - 11, // 14: filesystem.Filesystem.ListDir:input_type -> filesystem.ListDirRequest - 6, // 15: filesystem.Filesystem.Remove:input_type -> filesystem.RemoveRequest - 13, // 16: filesystem.Filesystem.WatchDir:input_type -> filesystem.WatchDirRequest - 16, // 17: filesystem.Filesystem.CreateWatcher:input_type -> filesystem.CreateWatcherRequest - 18, // 18: filesystem.Filesystem.GetWatcherEvents:input_type -> filesystem.GetWatcherEventsRequest - 20, // 19: filesystem.Filesystem.RemoveWatcher:input_type -> filesystem.RemoveWatcherRequest - 9, // 20: filesystem.Filesystem.Stat:output_type -> filesystem.StatResponse - 5, // 21: filesystem.Filesystem.MakeDir:output_type -> filesystem.MakeDirResponse - 3, // 22: filesystem.Filesystem.Move:output_type -> filesystem.MoveResponse - 12, // 23: filesystem.Filesystem.ListDir:output_type -> filesystem.ListDirResponse - 7, // 24: filesystem.Filesystem.Remove:output_type -> filesystem.RemoveResponse - 15, // 25: filesystem.Filesystem.WatchDir:output_type -> filesystem.WatchDirResponse - 17, // 26: filesystem.Filesystem.CreateWatcher:output_type -> filesystem.CreateWatcherResponse - 19, // 27: filesystem.Filesystem.GetWatcherEvents:output_type -> filesystem.GetWatcherEventsResponse - 21, // 28: filesystem.Filesystem.RemoveWatcher:output_type -> filesystem.RemoveWatcherResponse - 20, // [20:29] is the sub-list for method output_type - 11, // [11:20] is the sub-list for method input_type - 11, // [11:11] is the sub-list for extension type_name - 11, // [11:11] is the sub-list for extension extendee - 0, // [0:11] is the sub-list for field type_name -} - -func init() { file_filesystem_filesystem_proto_init() } -func file_filesystem_filesystem_proto_init() { - if File_filesystem_filesystem_proto != nil { - return - } - file_filesystem_filesystem_proto_msgTypes[8].OneofWrappers = []any{} - file_filesystem_filesystem_proto_msgTypes[13].OneofWrappers = []any{ - (*WatchDirResponse_Start)(nil), - (*WatchDirResponse_Filesystem)(nil), - (*WatchDirResponse_Keepalive)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_filesystem_filesystem_proto_rawDesc), len(file_filesystem_filesystem_proto_rawDesc)), - NumEnums: 2, - NumMessages: 22, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_filesystem_filesystem_proto_goTypes, - DependencyIndexes: file_filesystem_filesystem_proto_depIdxs, - EnumInfos: file_filesystem_filesystem_proto_enumTypes, - MessageInfos: file_filesystem_filesystem_proto_msgTypes, - }.Build() - File_filesystem_filesystem_proto = out.File - file_filesystem_filesystem_proto_goTypes = nil - file_filesystem_filesystem_proto_depIdxs = nil -} diff --git a/envd/internal/services/spec/filesystem/filesystemconnect/filesystem.connect.go b/envd/internal/services/spec/filesystem/filesystemconnect/filesystem.connect.go deleted file mode 100644 index 05893f2..0000000 --- a/envd/internal/services/spec/filesystem/filesystemconnect/filesystem.connect.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by protoc-gen-connect-go. DO NOT EDIT. -// -// Source: filesystem/filesystem.proto - -package filesystemconnect - -import ( - connect "connectrpc.com/connect" - context "context" - errors "errors" - filesystem "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/filesystem" - http "net/http" - strings "strings" -) - -// This is a compile-time assertion to ensure that this generated file and the connect package are -// compatible. If you get a compiler error that this constant is not defined, this code was -// generated with a version of connect newer than the one compiled into your binary. You can fix the -// problem by either regenerating this code with an older version of connect or updating the connect -// version compiled into your binary. -const _ = connect.IsAtLeastVersion1_13_0 - -const ( - // FilesystemName is the fully-qualified name of the Filesystem service. - FilesystemName = "filesystem.Filesystem" -) - -// These constants are the fully-qualified names of the RPCs defined in this package. They're -// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. -// -// Note that these are different from the fully-qualified method names used by -// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to -// reflection-formatted method names, remove the leading slash and convert the remaining slash to a -// period. -const ( - // FilesystemStatProcedure is the fully-qualified name of the Filesystem's Stat RPC. - FilesystemStatProcedure = "/filesystem.Filesystem/Stat" - // FilesystemMakeDirProcedure is the fully-qualified name of the Filesystem's MakeDir RPC. - FilesystemMakeDirProcedure = "/filesystem.Filesystem/MakeDir" - // FilesystemMoveProcedure is the fully-qualified name of the Filesystem's Move RPC. - FilesystemMoveProcedure = "/filesystem.Filesystem/Move" - // FilesystemListDirProcedure is the fully-qualified name of the Filesystem's ListDir RPC. - FilesystemListDirProcedure = "/filesystem.Filesystem/ListDir" - // FilesystemRemoveProcedure is the fully-qualified name of the Filesystem's Remove RPC. - FilesystemRemoveProcedure = "/filesystem.Filesystem/Remove" - // FilesystemWatchDirProcedure is the fully-qualified name of the Filesystem's WatchDir RPC. - FilesystemWatchDirProcedure = "/filesystem.Filesystem/WatchDir" - // FilesystemCreateWatcherProcedure is the fully-qualified name of the Filesystem's CreateWatcher - // RPC. - FilesystemCreateWatcherProcedure = "/filesystem.Filesystem/CreateWatcher" - // FilesystemGetWatcherEventsProcedure is the fully-qualified name of the Filesystem's - // GetWatcherEvents RPC. - FilesystemGetWatcherEventsProcedure = "/filesystem.Filesystem/GetWatcherEvents" - // FilesystemRemoveWatcherProcedure is the fully-qualified name of the Filesystem's RemoveWatcher - // RPC. - FilesystemRemoveWatcherProcedure = "/filesystem.Filesystem/RemoveWatcher" -) - -// FilesystemClient is a client for the filesystem.Filesystem service. -type FilesystemClient interface { - Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) - MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) - Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) - ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) - Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) - WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest]) (*connect.ServerStreamForClient[filesystem.WatchDirResponse], error) - // Non-streaming versions of WatchDir - CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) - GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) - RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) -} - -// NewFilesystemClient constructs a client for the filesystem.Filesystem service. By default, it -// uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends -// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or -// connect.WithGRPCWeb() options. -// -// The URL supplied here should be the base URL for the Connect or gRPC server (for example, -// http://api.acme.com or https://acme.com/grpc). -func NewFilesystemClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) FilesystemClient { - baseURL = strings.TrimRight(baseURL, "/") - filesystemMethods := filesystem.File_filesystem_filesystem_proto.Services().ByName("Filesystem").Methods() - return &filesystemClient{ - stat: connect.NewClient[filesystem.StatRequest, filesystem.StatResponse]( - httpClient, - baseURL+FilesystemStatProcedure, - connect.WithSchema(filesystemMethods.ByName("Stat")), - connect.WithClientOptions(opts...), - ), - makeDir: connect.NewClient[filesystem.MakeDirRequest, filesystem.MakeDirResponse]( - httpClient, - baseURL+FilesystemMakeDirProcedure, - connect.WithSchema(filesystemMethods.ByName("MakeDir")), - connect.WithClientOptions(opts...), - ), - move: connect.NewClient[filesystem.MoveRequest, filesystem.MoveResponse]( - httpClient, - baseURL+FilesystemMoveProcedure, - connect.WithSchema(filesystemMethods.ByName("Move")), - connect.WithClientOptions(opts...), - ), - listDir: connect.NewClient[filesystem.ListDirRequest, filesystem.ListDirResponse]( - httpClient, - baseURL+FilesystemListDirProcedure, - connect.WithSchema(filesystemMethods.ByName("ListDir")), - connect.WithClientOptions(opts...), - ), - remove: connect.NewClient[filesystem.RemoveRequest, filesystem.RemoveResponse]( - httpClient, - baseURL+FilesystemRemoveProcedure, - connect.WithSchema(filesystemMethods.ByName("Remove")), - connect.WithClientOptions(opts...), - ), - watchDir: connect.NewClient[filesystem.WatchDirRequest, filesystem.WatchDirResponse]( - httpClient, - baseURL+FilesystemWatchDirProcedure, - connect.WithSchema(filesystemMethods.ByName("WatchDir")), - connect.WithClientOptions(opts...), - ), - createWatcher: connect.NewClient[filesystem.CreateWatcherRequest, filesystem.CreateWatcherResponse]( - httpClient, - baseURL+FilesystemCreateWatcherProcedure, - connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), - connect.WithClientOptions(opts...), - ), - getWatcherEvents: connect.NewClient[filesystem.GetWatcherEventsRequest, filesystem.GetWatcherEventsResponse]( - httpClient, - baseURL+FilesystemGetWatcherEventsProcedure, - connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), - connect.WithClientOptions(opts...), - ), - removeWatcher: connect.NewClient[filesystem.RemoveWatcherRequest, filesystem.RemoveWatcherResponse]( - httpClient, - baseURL+FilesystemRemoveWatcherProcedure, - connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), - connect.WithClientOptions(opts...), - ), - } -} - -// filesystemClient implements FilesystemClient. -type filesystemClient struct { - stat *connect.Client[filesystem.StatRequest, filesystem.StatResponse] - makeDir *connect.Client[filesystem.MakeDirRequest, filesystem.MakeDirResponse] - move *connect.Client[filesystem.MoveRequest, filesystem.MoveResponse] - listDir *connect.Client[filesystem.ListDirRequest, filesystem.ListDirResponse] - remove *connect.Client[filesystem.RemoveRequest, filesystem.RemoveResponse] - watchDir *connect.Client[filesystem.WatchDirRequest, filesystem.WatchDirResponse] - createWatcher *connect.Client[filesystem.CreateWatcherRequest, filesystem.CreateWatcherResponse] - getWatcherEvents *connect.Client[filesystem.GetWatcherEventsRequest, filesystem.GetWatcherEventsResponse] - removeWatcher *connect.Client[filesystem.RemoveWatcherRequest, filesystem.RemoveWatcherResponse] -} - -// Stat calls filesystem.Filesystem.Stat. -func (c *filesystemClient) Stat(ctx context.Context, req *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) { - return c.stat.CallUnary(ctx, req) -} - -// MakeDir calls filesystem.Filesystem.MakeDir. -func (c *filesystemClient) MakeDir(ctx context.Context, req *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) { - return c.makeDir.CallUnary(ctx, req) -} - -// Move calls filesystem.Filesystem.Move. -func (c *filesystemClient) Move(ctx context.Context, req *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) { - return c.move.CallUnary(ctx, req) -} - -// ListDir calls filesystem.Filesystem.ListDir. -func (c *filesystemClient) ListDir(ctx context.Context, req *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) { - return c.listDir.CallUnary(ctx, req) -} - -// Remove calls filesystem.Filesystem.Remove. -func (c *filesystemClient) Remove(ctx context.Context, req *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) { - return c.remove.CallUnary(ctx, req) -} - -// WatchDir calls filesystem.Filesystem.WatchDir. -func (c *filesystemClient) WatchDir(ctx context.Context, req *connect.Request[filesystem.WatchDirRequest]) (*connect.ServerStreamForClient[filesystem.WatchDirResponse], error) { - return c.watchDir.CallServerStream(ctx, req) -} - -// CreateWatcher calls filesystem.Filesystem.CreateWatcher. -func (c *filesystemClient) CreateWatcher(ctx context.Context, req *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) { - return c.createWatcher.CallUnary(ctx, req) -} - -// GetWatcherEvents calls filesystem.Filesystem.GetWatcherEvents. -func (c *filesystemClient) GetWatcherEvents(ctx context.Context, req *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) { - return c.getWatcherEvents.CallUnary(ctx, req) -} - -// RemoveWatcher calls filesystem.Filesystem.RemoveWatcher. -func (c *filesystemClient) RemoveWatcher(ctx context.Context, req *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) { - return c.removeWatcher.CallUnary(ctx, req) -} - -// FilesystemHandler is an implementation of the filesystem.Filesystem service. -type FilesystemHandler interface { - Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) - MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) - Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) - ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) - Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) - WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest], *connect.ServerStream[filesystem.WatchDirResponse]) error - // Non-streaming versions of WatchDir - CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) - GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) - RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) -} - -// NewFilesystemHandler builds an HTTP handler from the service implementation. It returns the path -// on which to mount the handler and the handler itself. -// -// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf -// and JSON codecs. They also support gzip compression. -func NewFilesystemHandler(svc FilesystemHandler, opts ...connect.HandlerOption) (string, http.Handler) { - filesystemMethods := filesystem.File_filesystem_filesystem_proto.Services().ByName("Filesystem").Methods() - filesystemStatHandler := connect.NewUnaryHandler( - FilesystemStatProcedure, - svc.Stat, - connect.WithSchema(filesystemMethods.ByName("Stat")), - connect.WithHandlerOptions(opts...), - ) - filesystemMakeDirHandler := connect.NewUnaryHandler( - FilesystemMakeDirProcedure, - svc.MakeDir, - connect.WithSchema(filesystemMethods.ByName("MakeDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemMoveHandler := connect.NewUnaryHandler( - FilesystemMoveProcedure, - svc.Move, - connect.WithSchema(filesystemMethods.ByName("Move")), - connect.WithHandlerOptions(opts...), - ) - filesystemListDirHandler := connect.NewUnaryHandler( - FilesystemListDirProcedure, - svc.ListDir, - connect.WithSchema(filesystemMethods.ByName("ListDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemRemoveHandler := connect.NewUnaryHandler( - FilesystemRemoveProcedure, - svc.Remove, - connect.WithSchema(filesystemMethods.ByName("Remove")), - connect.WithHandlerOptions(opts...), - ) - filesystemWatchDirHandler := connect.NewServerStreamHandler( - FilesystemWatchDirProcedure, - svc.WatchDir, - connect.WithSchema(filesystemMethods.ByName("WatchDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemCreateWatcherHandler := connect.NewUnaryHandler( - FilesystemCreateWatcherProcedure, - svc.CreateWatcher, - connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), - connect.WithHandlerOptions(opts...), - ) - filesystemGetWatcherEventsHandler := connect.NewUnaryHandler( - FilesystemGetWatcherEventsProcedure, - svc.GetWatcherEvents, - connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), - connect.WithHandlerOptions(opts...), - ) - filesystemRemoveWatcherHandler := connect.NewUnaryHandler( - FilesystemRemoveWatcherProcedure, - svc.RemoveWatcher, - connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), - connect.WithHandlerOptions(opts...), - ) - return "/filesystem.Filesystem/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case FilesystemStatProcedure: - filesystemStatHandler.ServeHTTP(w, r) - case FilesystemMakeDirProcedure: - filesystemMakeDirHandler.ServeHTTP(w, r) - case FilesystemMoveProcedure: - filesystemMoveHandler.ServeHTTP(w, r) - case FilesystemListDirProcedure: - filesystemListDirHandler.ServeHTTP(w, r) - case FilesystemRemoveProcedure: - filesystemRemoveHandler.ServeHTTP(w, r) - case FilesystemWatchDirProcedure: - filesystemWatchDirHandler.ServeHTTP(w, r) - case FilesystemCreateWatcherProcedure: - filesystemCreateWatcherHandler.ServeHTTP(w, r) - case FilesystemGetWatcherEventsProcedure: - filesystemGetWatcherEventsHandler.ServeHTTP(w, r) - case FilesystemRemoveWatcherProcedure: - filesystemRemoveWatcherHandler.ServeHTTP(w, r) - default: - http.NotFound(w, r) - } - }) -} - -// UnimplementedFilesystemHandler returns CodeUnimplemented from all methods. -type UnimplementedFilesystemHandler struct{} - -func (UnimplementedFilesystemHandler) Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Stat is not implemented")) -} - -func (UnimplementedFilesystemHandler) MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.MakeDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Move is not implemented")) -} - -func (UnimplementedFilesystemHandler) ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.ListDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Remove is not implemented")) -} - -func (UnimplementedFilesystemHandler) WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest], *connect.ServerStream[filesystem.WatchDirResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.WatchDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.CreateWatcher is not implemented")) -} - -func (UnimplementedFilesystemHandler) GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.GetWatcherEvents is not implemented")) -} - -func (UnimplementedFilesystemHandler) RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.RemoveWatcher is not implemented")) -} diff --git a/envd/internal/services/spec/process.pb.go b/envd/internal/services/spec/process.pb.go deleted file mode 100644 index 6877dca..0000000 --- a/envd/internal/services/spec/process.pb.go +++ /dev/null @@ -1,1972 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.11 -// protoc (unknown) -// source: process.proto - -package spec - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Signal int32 - -const ( - Signal_SIGNAL_UNSPECIFIED Signal = 0 - Signal_SIGNAL_SIGTERM Signal = 15 - Signal_SIGNAL_SIGKILL Signal = 9 -) - -// Enum value maps for Signal. -var ( - Signal_name = map[int32]string{ - 0: "SIGNAL_UNSPECIFIED", - 15: "SIGNAL_SIGTERM", - 9: "SIGNAL_SIGKILL", - } - Signal_value = map[string]int32{ - "SIGNAL_UNSPECIFIED": 0, - "SIGNAL_SIGTERM": 15, - "SIGNAL_SIGKILL": 9, - } -) - -func (x Signal) Enum() *Signal { - p := new(Signal) - *p = x - return p -} - -func (x Signal) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Signal) Descriptor() protoreflect.EnumDescriptor { - return file_process_proto_enumTypes[0].Descriptor() -} - -func (Signal) Type() protoreflect.EnumType { - return &file_process_proto_enumTypes[0] -} - -func (x Signal) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Signal.Descriptor instead. -func (Signal) EnumDescriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{0} -} - -type PTY struct { - state protoimpl.MessageState `protogen:"open.v1"` - Size *PTY_Size `protobuf:"bytes,1,opt,name=size,proto3" json:"size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PTY) Reset() { - *x = PTY{} - mi := &file_process_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PTY) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PTY) ProtoMessage() {} - -func (x *PTY) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PTY.ProtoReflect.Descriptor instead. -func (*PTY) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{0} -} - -func (x *PTY) GetSize() *PTY_Size { - if x != nil { - return x.Size - } - return nil -} - -type ProcessConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cmd string `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"` - Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` - Envs map[string]string `protobuf:"bytes,3,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Cwd *string `protobuf:"bytes,4,opt,name=cwd,proto3,oneof" json:"cwd,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessConfig) Reset() { - *x = ProcessConfig{} - mi := &file_process_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessConfig) ProtoMessage() {} - -func (x *ProcessConfig) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessConfig.ProtoReflect.Descriptor instead. -func (*ProcessConfig) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{1} -} - -func (x *ProcessConfig) GetCmd() string { - if x != nil { - return x.Cmd - } - return "" -} - -func (x *ProcessConfig) GetArgs() []string { - if x != nil { - return x.Args - } - return nil -} - -func (x *ProcessConfig) GetEnvs() map[string]string { - if x != nil { - return x.Envs - } - return nil -} - -func (x *ProcessConfig) GetCwd() string { - if x != nil && x.Cwd != nil { - return *x.Cwd - } - return "" -} - -type ListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListRequest) Reset() { - *x = ListRequest{} - mi := &file_process_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRequest) ProtoMessage() {} - -func (x *ListRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. -func (*ListRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{2} -} - -type ProcessInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *ProcessConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessInfo) Reset() { - *x = ProcessInfo{} - mi := &file_process_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessInfo) ProtoMessage() {} - -func (x *ProcessInfo) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. -func (*ProcessInfo) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{3} -} - -func (x *ProcessInfo) GetConfig() *ProcessConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *ProcessInfo) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *ProcessInfo) GetTag() string { - if x != nil && x.Tag != nil { - return *x.Tag - } - return "" -} - -type ListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Processes []*ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListResponse) Reset() { - *x = ListResponse{} - mi := &file_process_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListResponse) ProtoMessage() {} - -func (x *ListResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. -func (*ListResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{4} -} - -func (x *ListResponse) GetProcesses() []*ProcessInfo { - if x != nil { - return x.Processes - } - return nil -} - -type StartRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessConfig `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` - Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` - // This is optional for backwards compatibility. - // We default to true. New SDK versions will set this to false by default. - Stdin *bool `protobuf:"varint,4,opt,name=stdin,proto3,oneof" json:"stdin,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartRequest) Reset() { - *x = StartRequest{} - mi := &file_process_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartRequest) ProtoMessage() {} - -func (x *StartRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. -func (*StartRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{5} -} - -func (x *StartRequest) GetProcess() *ProcessConfig { - if x != nil { - return x.Process - } - return nil -} - -func (x *StartRequest) GetPty() *PTY { - if x != nil { - return x.Pty - } - return nil -} - -func (x *StartRequest) GetTag() string { - if x != nil && x.Tag != nil { - return *x.Tag - } - return "" -} - -func (x *StartRequest) GetStdin() bool { - if x != nil && x.Stdin != nil { - return *x.Stdin - } - return false -} - -type UpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateRequest) Reset() { - *x = UpdateRequest{} - mi := &file_process_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateRequest) ProtoMessage() {} - -func (x *UpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. -func (*UpdateRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *UpdateRequest) GetPty() *PTY { - if x != nil { - return x.Pty - } - return nil -} - -type UpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateResponse) Reset() { - *x = UpdateResponse{} - mi := &file_process_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateResponse) ProtoMessage() {} - -func (x *UpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead. -func (*UpdateResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{7} -} - -type ProcessEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *ProcessEvent_Start - // *ProcessEvent_Data - // *ProcessEvent_End - // *ProcessEvent_Keepalive - Event isProcessEvent_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent) Reset() { - *x = ProcessEvent{} - mi := &file_process_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent) ProtoMessage() {} - -func (x *ProcessEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{8} -} - -func (x *ProcessEvent) GetEvent() isProcessEvent_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *ProcessEvent) GetStart() *ProcessEvent_StartEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Start); ok { - return x.Start - } - } - return nil -} - -func (x *ProcessEvent) GetData() *ProcessEvent_DataEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Data); ok { - return x.Data - } - } - return nil -} - -func (x *ProcessEvent) GetEnd() *ProcessEvent_EndEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_End); ok { - return x.End - } - } - return nil -} - -func (x *ProcessEvent) GetKeepalive() *ProcessEvent_KeepAlive { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isProcessEvent_Event interface { - isProcessEvent_Event() -} - -type ProcessEvent_Start struct { - Start *ProcessEvent_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type ProcessEvent_Data struct { - Data *ProcessEvent_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -type ProcessEvent_End struct { - End *ProcessEvent_EndEvent `protobuf:"bytes,3,opt,name=end,proto3,oneof"` -} - -type ProcessEvent_Keepalive struct { - Keepalive *ProcessEvent_KeepAlive `protobuf:"bytes,4,opt,name=keepalive,proto3,oneof"` -} - -func (*ProcessEvent_Start) isProcessEvent_Event() {} - -func (*ProcessEvent_Data) isProcessEvent_Event() {} - -func (*ProcessEvent_End) isProcessEvent_Event() {} - -func (*ProcessEvent_Keepalive) isProcessEvent_Event() {} - -type StartResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartResponse) Reset() { - *x = StartResponse{} - mi := &file_process_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResponse) ProtoMessage() {} - -func (x *StartResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. -func (*StartResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{9} -} - -func (x *StartResponse) GetEvent() *ProcessEvent { - if x != nil { - return x.Event - } - return nil -} - -type ConnectResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConnectResponse) Reset() { - *x = ConnectResponse{} - mi := &file_process_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConnectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnectResponse) ProtoMessage() {} - -func (x *ConnectResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnectResponse.ProtoReflect.Descriptor instead. -func (*ConnectResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{10} -} - -func (x *ConnectResponse) GetEvent() *ProcessEvent { - if x != nil { - return x.Event - } - return nil -} - -type SendInputRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendInputRequest) Reset() { - *x = SendInputRequest{} - mi := &file_process_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendInputRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendInputRequest) ProtoMessage() {} - -func (x *SendInputRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendInputRequest.ProtoReflect.Descriptor instead. -func (*SendInputRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{11} -} - -func (x *SendInputRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *SendInputRequest) GetInput() *ProcessInput { - if x != nil { - return x.Input - } - return nil -} - -type SendInputResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendInputResponse) Reset() { - *x = SendInputResponse{} - mi := &file_process_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendInputResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendInputResponse) ProtoMessage() {} - -func (x *SendInputResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendInputResponse.ProtoReflect.Descriptor instead. -func (*SendInputResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{12} -} - -type ProcessInput struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Input: - // - // *ProcessInput_Stdin - // *ProcessInput_Pty - Input isProcessInput_Input `protobuf_oneof:"input"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessInput) Reset() { - *x = ProcessInput{} - mi := &file_process_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessInput) ProtoMessage() {} - -func (x *ProcessInput) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessInput.ProtoReflect.Descriptor instead. -func (*ProcessInput) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{13} -} - -func (x *ProcessInput) GetInput() isProcessInput_Input { - if x != nil { - return x.Input - } - return nil -} - -func (x *ProcessInput) GetStdin() []byte { - if x != nil { - if x, ok := x.Input.(*ProcessInput_Stdin); ok { - return x.Stdin - } - } - return nil -} - -func (x *ProcessInput) GetPty() []byte { - if x != nil { - if x, ok := x.Input.(*ProcessInput_Pty); ok { - return x.Pty - } - } - return nil -} - -type isProcessInput_Input interface { - isProcessInput_Input() -} - -type ProcessInput_Stdin struct { - Stdin []byte `protobuf:"bytes,1,opt,name=stdin,proto3,oneof"` -} - -type ProcessInput_Pty struct { - Pty []byte `protobuf:"bytes,2,opt,name=pty,proto3,oneof"` -} - -func (*ProcessInput_Stdin) isProcessInput_Input() {} - -func (*ProcessInput_Pty) isProcessInput_Input() {} - -type StreamInputRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *StreamInputRequest_Start - // *StreamInputRequest_Data - // *StreamInputRequest_Keepalive - Event isStreamInputRequest_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest) Reset() { - *x = StreamInputRequest{} - mi := &file_process_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest) ProtoMessage() {} - -func (x *StreamInputRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest.ProtoReflect.Descriptor instead. -func (*StreamInputRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{14} -} - -func (x *StreamInputRequest) GetEvent() isStreamInputRequest_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *StreamInputRequest) GetStart() *StreamInputRequest_StartEvent { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Start); ok { - return x.Start - } - } - return nil -} - -func (x *StreamInputRequest) GetData() *StreamInputRequest_DataEvent { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Data); ok { - return x.Data - } - } - return nil -} - -func (x *StreamInputRequest) GetKeepalive() *StreamInputRequest_KeepAlive { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isStreamInputRequest_Event interface { - isStreamInputRequest_Event() -} - -type StreamInputRequest_Start struct { - Start *StreamInputRequest_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type StreamInputRequest_Data struct { - Data *StreamInputRequest_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -type StreamInputRequest_Keepalive struct { - Keepalive *StreamInputRequest_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` -} - -func (*StreamInputRequest_Start) isStreamInputRequest_Event() {} - -func (*StreamInputRequest_Data) isStreamInputRequest_Event() {} - -func (*StreamInputRequest_Keepalive) isStreamInputRequest_Event() {} - -type StreamInputResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputResponse) Reset() { - *x = StreamInputResponse{} - mi := &file_process_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputResponse) ProtoMessage() {} - -func (x *StreamInputResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputResponse.ProtoReflect.Descriptor instead. -func (*StreamInputResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{15} -} - -type SendSignalRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Signal Signal `protobuf:"varint,2,opt,name=signal,proto3,enum=process.Signal" json:"signal,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendSignalRequest) Reset() { - *x = SendSignalRequest{} - mi := &file_process_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendSignalRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendSignalRequest) ProtoMessage() {} - -func (x *SendSignalRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendSignalRequest.ProtoReflect.Descriptor instead. -func (*SendSignalRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{16} -} - -func (x *SendSignalRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *SendSignalRequest) GetSignal() Signal { - if x != nil { - return x.Signal - } - return Signal_SIGNAL_UNSPECIFIED -} - -type SendSignalResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendSignalResponse) Reset() { - *x = SendSignalResponse{} - mi := &file_process_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendSignalResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendSignalResponse) ProtoMessage() {} - -func (x *SendSignalResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendSignalResponse.ProtoReflect.Descriptor instead. -func (*SendSignalResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{17} -} - -type CloseStdinRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseStdinRequest) Reset() { - *x = CloseStdinRequest{} - mi := &file_process_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseStdinRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseStdinRequest) ProtoMessage() {} - -func (x *CloseStdinRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseStdinRequest.ProtoReflect.Descriptor instead. -func (*CloseStdinRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{18} -} - -func (x *CloseStdinRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type CloseStdinResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseStdinResponse) Reset() { - *x = CloseStdinResponse{} - mi := &file_process_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseStdinResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseStdinResponse) ProtoMessage() {} - -func (x *CloseStdinResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseStdinResponse.ProtoReflect.Descriptor instead. -func (*CloseStdinResponse) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{19} -} - -type ConnectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConnectRequest) Reset() { - *x = ConnectRequest{} - mi := &file_process_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConnectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnectRequest) ProtoMessage() {} - -func (x *ConnectRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnectRequest.ProtoReflect.Descriptor instead. -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{20} -} - -func (x *ConnectRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type ProcessSelector struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Selector: - // - // *ProcessSelector_Pid - // *ProcessSelector_Tag - Selector isProcessSelector_Selector `protobuf_oneof:"selector"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessSelector) Reset() { - *x = ProcessSelector{} - mi := &file_process_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessSelector) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessSelector) ProtoMessage() {} - -func (x *ProcessSelector) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessSelector.ProtoReflect.Descriptor instead. -func (*ProcessSelector) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{21} -} - -func (x *ProcessSelector) GetSelector() isProcessSelector_Selector { - if x != nil { - return x.Selector - } - return nil -} - -func (x *ProcessSelector) GetPid() uint32 { - if x != nil { - if x, ok := x.Selector.(*ProcessSelector_Pid); ok { - return x.Pid - } - } - return 0 -} - -func (x *ProcessSelector) GetTag() string { - if x != nil { - if x, ok := x.Selector.(*ProcessSelector_Tag); ok { - return x.Tag - } - } - return "" -} - -type isProcessSelector_Selector interface { - isProcessSelector_Selector() -} - -type ProcessSelector_Pid struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3,oneof"` -} - -type ProcessSelector_Tag struct { - Tag string `protobuf:"bytes,2,opt,name=tag,proto3,oneof"` -} - -func (*ProcessSelector_Pid) isProcessSelector_Selector() {} - -func (*ProcessSelector_Tag) isProcessSelector_Selector() {} - -type PTY_Size struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cols uint32 `protobuf:"varint,1,opt,name=cols,proto3" json:"cols,omitempty"` - Rows uint32 `protobuf:"varint,2,opt,name=rows,proto3" json:"rows,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PTY_Size) Reset() { - *x = PTY_Size{} - mi := &file_process_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PTY_Size) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PTY_Size) ProtoMessage() {} - -func (x *PTY_Size) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PTY_Size.ProtoReflect.Descriptor instead. -func (*PTY_Size) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *PTY_Size) GetCols() uint32 { - if x != nil { - return x.Cols - } - return 0 -} - -func (x *PTY_Size) GetRows() uint32 { - if x != nil { - return x.Rows - } - return 0 -} - -type ProcessEvent_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_StartEvent) Reset() { - *x = ProcessEvent_StartEvent{} - mi := &file_process_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_StartEvent) ProtoMessage() {} - -func (x *ProcessEvent_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_StartEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_StartEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{8, 0} -} - -func (x *ProcessEvent_StartEvent) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -type ProcessEvent_DataEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Output: - // - // *ProcessEvent_DataEvent_Stdout - // *ProcessEvent_DataEvent_Stderr - // *ProcessEvent_DataEvent_Pty - Output isProcessEvent_DataEvent_Output `protobuf_oneof:"output"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_DataEvent) Reset() { - *x = ProcessEvent_DataEvent{} - mi := &file_process_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_DataEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_DataEvent) ProtoMessage() {} - -func (x *ProcessEvent_DataEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_DataEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_DataEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{8, 1} -} - -func (x *ProcessEvent_DataEvent) GetOutput() isProcessEvent_DataEvent_Output { - if x != nil { - return x.Output - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetStdout() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Stdout); ok { - return x.Stdout - } - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetStderr() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Stderr); ok { - return x.Stderr - } - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetPty() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Pty); ok { - return x.Pty - } - } - return nil -} - -type isProcessEvent_DataEvent_Output interface { - isProcessEvent_DataEvent_Output() -} - -type ProcessEvent_DataEvent_Stdout struct { - Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3,oneof"` -} - -type ProcessEvent_DataEvent_Stderr struct { - Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3,oneof"` -} - -type ProcessEvent_DataEvent_Pty struct { - Pty []byte `protobuf:"bytes,3,opt,name=pty,proto3,oneof"` -} - -func (*ProcessEvent_DataEvent_Stdout) isProcessEvent_DataEvent_Output() {} - -func (*ProcessEvent_DataEvent_Stderr) isProcessEvent_DataEvent_Output() {} - -func (*ProcessEvent_DataEvent_Pty) isProcessEvent_DataEvent_Output() {} - -type ProcessEvent_EndEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - ExitCode int32 `protobuf:"zigzag32,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` - Exited bool `protobuf:"varint,2,opt,name=exited,proto3" json:"exited,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - Error *string `protobuf:"bytes,4,opt,name=error,proto3,oneof" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_EndEvent) Reset() { - *x = ProcessEvent_EndEvent{} - mi := &file_process_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_EndEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_EndEvent) ProtoMessage() {} - -func (x *ProcessEvent_EndEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_EndEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_EndEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{8, 2} -} - -func (x *ProcessEvent_EndEvent) GetExitCode() int32 { - if x != nil { - return x.ExitCode - } - return 0 -} - -func (x *ProcessEvent_EndEvent) GetExited() bool { - if x != nil { - return x.Exited - } - return false -} - -func (x *ProcessEvent_EndEvent) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *ProcessEvent_EndEvent) GetError() string { - if x != nil && x.Error != nil { - return *x.Error - } - return "" -} - -type ProcessEvent_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_KeepAlive) Reset() { - *x = ProcessEvent_KeepAlive{} - mi := &file_process_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_KeepAlive) ProtoMessage() {} - -func (x *ProcessEvent_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_KeepAlive.ProtoReflect.Descriptor instead. -func (*ProcessEvent_KeepAlive) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{8, 3} -} - -type StreamInputRequest_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_StartEvent) Reset() { - *x = StreamInputRequest_StartEvent{} - mi := &file_process_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_StartEvent) ProtoMessage() {} - -func (x *StreamInputRequest_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_StartEvent.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_StartEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{14, 0} -} - -func (x *StreamInputRequest_StartEvent) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type StreamInputRequest_DataEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_DataEvent) Reset() { - *x = StreamInputRequest_DataEvent{} - mi := &file_process_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_DataEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_DataEvent) ProtoMessage() {} - -func (x *StreamInputRequest_DataEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_DataEvent.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_DataEvent) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{14, 1} -} - -func (x *StreamInputRequest_DataEvent) GetInput() *ProcessInput { - if x != nil { - return x.Input - } - return nil -} - -type StreamInputRequest_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_KeepAlive) Reset() { - *x = StreamInputRequest_KeepAlive{} - mi := &file_process_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_KeepAlive) ProtoMessage() {} - -func (x *StreamInputRequest_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_process_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_KeepAlive.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_KeepAlive) Descriptor() ([]byte, []int) { - return file_process_proto_rawDescGZIP(), []int{14, 2} -} - -var File_process_proto protoreflect.FileDescriptor - -const file_process_proto_rawDesc = "" + - "\n" + - "\rprocess.proto\x12\aprocess\"\\\n" + - "\x03PTY\x12%\n" + - "\x04size\x18\x01 \x01(\v2\x11.process.PTY.SizeR\x04size\x1a.\n" + - "\x04Size\x12\x12\n" + - "\x04cols\x18\x01 \x01(\rR\x04cols\x12\x12\n" + - "\x04rows\x18\x02 \x01(\rR\x04rows\"\xc3\x01\n" + - "\rProcessConfig\x12\x10\n" + - "\x03cmd\x18\x01 \x01(\tR\x03cmd\x12\x12\n" + - "\x04args\x18\x02 \x03(\tR\x04args\x124\n" + - "\x04envs\x18\x03 \x03(\v2 .process.ProcessConfig.EnvsEntryR\x04envs\x12\x15\n" + - "\x03cwd\x18\x04 \x01(\tH\x00R\x03cwd\x88\x01\x01\x1a7\n" + - "\tEnvsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\x06\n" + - "\x04_cwd\"\r\n" + - "\vListRequest\"n\n" + - "\vProcessInfo\x12.\n" + - "\x06config\x18\x01 \x01(\v2\x16.process.ProcessConfigR\x06config\x12\x10\n" + - "\x03pid\x18\x02 \x01(\rR\x03pid\x12\x15\n" + - "\x03tag\x18\x03 \x01(\tH\x00R\x03tag\x88\x01\x01B\x06\n" + - "\x04_tag\"B\n" + - "\fListResponse\x122\n" + - "\tprocesses\x18\x01 \x03(\v2\x14.process.ProcessInfoR\tprocesses\"\xb1\x01\n" + - "\fStartRequest\x120\n" + - "\aprocess\x18\x01 \x01(\v2\x16.process.ProcessConfigR\aprocess\x12#\n" + - "\x03pty\x18\x02 \x01(\v2\f.process.PTYH\x00R\x03pty\x88\x01\x01\x12\x15\n" + - "\x03tag\x18\x03 \x01(\tH\x01R\x03tag\x88\x01\x01\x12\x19\n" + - "\x05stdin\x18\x04 \x01(\bH\x02R\x05stdin\x88\x01\x01B\x06\n" + - "\x04_ptyB\x06\n" + - "\x04_tagB\b\n" + - "\x06_stdin\"p\n" + - "\rUpdateRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12#\n" + - "\x03pty\x18\x02 \x01(\v2\f.process.PTYH\x00R\x03pty\x88\x01\x01B\x06\n" + - "\x04_pty\"\x10\n" + - "\x0eUpdateResponse\"\x87\x04\n" + - "\fProcessEvent\x128\n" + - "\x05start\x18\x01 \x01(\v2 .process.ProcessEvent.StartEventH\x00R\x05start\x125\n" + - "\x04data\x18\x02 \x01(\v2\x1f.process.ProcessEvent.DataEventH\x00R\x04data\x122\n" + - "\x03end\x18\x03 \x01(\v2\x1e.process.ProcessEvent.EndEventH\x00R\x03end\x12?\n" + - "\tkeepalive\x18\x04 \x01(\v2\x1f.process.ProcessEvent.KeepAliveH\x00R\tkeepalive\x1a\x1e\n" + - "\n" + - "StartEvent\x12\x10\n" + - "\x03pid\x18\x01 \x01(\rR\x03pid\x1a]\n" + - "\tDataEvent\x12\x18\n" + - "\x06stdout\x18\x01 \x01(\fH\x00R\x06stdout\x12\x18\n" + - "\x06stderr\x18\x02 \x01(\fH\x00R\x06stderr\x12\x12\n" + - "\x03pty\x18\x03 \x01(\fH\x00R\x03ptyB\b\n" + - "\x06output\x1a|\n" + - "\bEndEvent\x12\x1b\n" + - "\texit_code\x18\x01 \x01(\x11R\bexitCode\x12\x16\n" + - "\x06exited\x18\x02 \x01(\bR\x06exited\x12\x16\n" + - "\x06status\x18\x03 \x01(\tR\x06status\x12\x19\n" + - "\x05error\x18\x04 \x01(\tH\x00R\x05error\x88\x01\x01B\b\n" + - "\x06_error\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"<\n" + - "\rStartResponse\x12+\n" + - "\x05event\x18\x01 \x01(\v2\x15.process.ProcessEventR\x05event\">\n" + - "\x0fConnectResponse\x12+\n" + - "\x05event\x18\x01 \x01(\v2\x15.process.ProcessEventR\x05event\"s\n" + - "\x10SendInputRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12+\n" + - "\x05input\x18\x02 \x01(\v2\x15.process.ProcessInputR\x05input\"\x13\n" + - "\x11SendInputResponse\"C\n" + - "\fProcessInput\x12\x16\n" + - "\x05stdin\x18\x01 \x01(\fH\x00R\x05stdin\x12\x12\n" + - "\x03pty\x18\x02 \x01(\fH\x00R\x03ptyB\a\n" + - "\x05input\"\xea\x02\n" + - "\x12StreamInputRequest\x12>\n" + - "\x05start\x18\x01 \x01(\v2&.process.StreamInputRequest.StartEventH\x00R\x05start\x12;\n" + - "\x04data\x18\x02 \x01(\v2%.process.StreamInputRequest.DataEventH\x00R\x04data\x12E\n" + - "\tkeepalive\x18\x03 \x01(\v2%.process.StreamInputRequest.KeepAliveH\x00R\tkeepalive\x1a@\n" + - "\n" + - "StartEvent\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x1a8\n" + - "\tDataEvent\x12+\n" + - "\x05input\x18\x02 \x01(\v2\x15.process.ProcessInputR\x05input\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"\x15\n" + - "\x13StreamInputResponse\"p\n" + - "\x11SendSignalRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12'\n" + - "\x06signal\x18\x02 \x01(\x0e2\x0f.process.SignalR\x06signal\"\x14\n" + - "\x12SendSignalResponse\"G\n" + - "\x11CloseStdinRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\"\x14\n" + - "\x12CloseStdinResponse\"D\n" + - "\x0eConnectRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\"E\n" + - "\x0fProcessSelector\x12\x12\n" + - "\x03pid\x18\x01 \x01(\rH\x00R\x03pid\x12\x12\n" + - "\x03tag\x18\x02 \x01(\tH\x00R\x03tagB\n" + - "\n" + - "\bselector*H\n" + - "\x06Signal\x12\x16\n" + - "\x12SIGNAL_UNSPECIFIED\x10\x00\x12\x12\n" + - "\x0eSIGNAL_SIGTERM\x10\x0f\x12\x12\n" + - "\x0eSIGNAL_SIGKILL\x10\t2\x91\x04\n" + - "\aProcess\x123\n" + - "\x04List\x12\x14.process.ListRequest\x1a\x15.process.ListResponse\x12>\n" + - "\aConnect\x12\x17.process.ConnectRequest\x1a\x18.process.ConnectResponse0\x01\x128\n" + - "\x05Start\x12\x15.process.StartRequest\x1a\x16.process.StartResponse0\x01\x129\n" + - "\x06Update\x12\x16.process.UpdateRequest\x1a\x17.process.UpdateResponse\x12J\n" + - "\vStreamInput\x12\x1b.process.StreamInputRequest\x1a\x1c.process.StreamInputResponse(\x01\x12B\n" + - "\tSendInput\x12\x19.process.SendInputRequest\x1a\x1a.process.SendInputResponse\x12E\n" + - "\n" + - "SendSignal\x12\x1a.process.SendSignalRequest\x1a\x1b.process.SendSignalResponse\x12E\n" + - "\n" + - "CloseStdin\x12\x1a.process.CloseStdinRequest\x1a\x1b.process.CloseStdinResponseB\x90\x01\n" + - "\vcom.processB\fProcessProtoP\x01Z7git.omukk.dev/wrenn/sandbox/envd/internal/services/spec\xa2\x02\x03PXX\xaa\x02\aProcess\xca\x02\aProcess\xe2\x02\x13Process\\GPBMetadata\xea\x02\aProcessb\x06proto3" - -var ( - file_process_proto_rawDescOnce sync.Once - file_process_proto_rawDescData []byte -) - -func file_process_proto_rawDescGZIP() []byte { - file_process_proto_rawDescOnce.Do(func() { - file_process_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_process_proto_rawDesc), len(file_process_proto_rawDesc))) - }) - return file_process_proto_rawDescData -} - -var file_process_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_process_proto_msgTypes = make([]protoimpl.MessageInfo, 31) -var file_process_proto_goTypes = []any{ - (Signal)(0), // 0: process.Signal - (*PTY)(nil), // 1: process.PTY - (*ProcessConfig)(nil), // 2: process.ProcessConfig - (*ListRequest)(nil), // 3: process.ListRequest - (*ProcessInfo)(nil), // 4: process.ProcessInfo - (*ListResponse)(nil), // 5: process.ListResponse - (*StartRequest)(nil), // 6: process.StartRequest - (*UpdateRequest)(nil), // 7: process.UpdateRequest - (*UpdateResponse)(nil), // 8: process.UpdateResponse - (*ProcessEvent)(nil), // 9: process.ProcessEvent - (*StartResponse)(nil), // 10: process.StartResponse - (*ConnectResponse)(nil), // 11: process.ConnectResponse - (*SendInputRequest)(nil), // 12: process.SendInputRequest - (*SendInputResponse)(nil), // 13: process.SendInputResponse - (*ProcessInput)(nil), // 14: process.ProcessInput - (*StreamInputRequest)(nil), // 15: process.StreamInputRequest - (*StreamInputResponse)(nil), // 16: process.StreamInputResponse - (*SendSignalRequest)(nil), // 17: process.SendSignalRequest - (*SendSignalResponse)(nil), // 18: process.SendSignalResponse - (*CloseStdinRequest)(nil), // 19: process.CloseStdinRequest - (*CloseStdinResponse)(nil), // 20: process.CloseStdinResponse - (*ConnectRequest)(nil), // 21: process.ConnectRequest - (*ProcessSelector)(nil), // 22: process.ProcessSelector - (*PTY_Size)(nil), // 23: process.PTY.Size - nil, // 24: process.ProcessConfig.EnvsEntry - (*ProcessEvent_StartEvent)(nil), // 25: process.ProcessEvent.StartEvent - (*ProcessEvent_DataEvent)(nil), // 26: process.ProcessEvent.DataEvent - (*ProcessEvent_EndEvent)(nil), // 27: process.ProcessEvent.EndEvent - (*ProcessEvent_KeepAlive)(nil), // 28: process.ProcessEvent.KeepAlive - (*StreamInputRequest_StartEvent)(nil), // 29: process.StreamInputRequest.StartEvent - (*StreamInputRequest_DataEvent)(nil), // 30: process.StreamInputRequest.DataEvent - (*StreamInputRequest_KeepAlive)(nil), // 31: process.StreamInputRequest.KeepAlive -} -var file_process_proto_depIdxs = []int32{ - 23, // 0: process.PTY.size:type_name -> process.PTY.Size - 24, // 1: process.ProcessConfig.envs:type_name -> process.ProcessConfig.EnvsEntry - 2, // 2: process.ProcessInfo.config:type_name -> process.ProcessConfig - 4, // 3: process.ListResponse.processes:type_name -> process.ProcessInfo - 2, // 4: process.StartRequest.process:type_name -> process.ProcessConfig - 1, // 5: process.StartRequest.pty:type_name -> process.PTY - 22, // 6: process.UpdateRequest.process:type_name -> process.ProcessSelector - 1, // 7: process.UpdateRequest.pty:type_name -> process.PTY - 25, // 8: process.ProcessEvent.start:type_name -> process.ProcessEvent.StartEvent - 26, // 9: process.ProcessEvent.data:type_name -> process.ProcessEvent.DataEvent - 27, // 10: process.ProcessEvent.end:type_name -> process.ProcessEvent.EndEvent - 28, // 11: process.ProcessEvent.keepalive:type_name -> process.ProcessEvent.KeepAlive - 9, // 12: process.StartResponse.event:type_name -> process.ProcessEvent - 9, // 13: process.ConnectResponse.event:type_name -> process.ProcessEvent - 22, // 14: process.SendInputRequest.process:type_name -> process.ProcessSelector - 14, // 15: process.SendInputRequest.input:type_name -> process.ProcessInput - 29, // 16: process.StreamInputRequest.start:type_name -> process.StreamInputRequest.StartEvent - 30, // 17: process.StreamInputRequest.data:type_name -> process.StreamInputRequest.DataEvent - 31, // 18: process.StreamInputRequest.keepalive:type_name -> process.StreamInputRequest.KeepAlive - 22, // 19: process.SendSignalRequest.process:type_name -> process.ProcessSelector - 0, // 20: process.SendSignalRequest.signal:type_name -> process.Signal - 22, // 21: process.CloseStdinRequest.process:type_name -> process.ProcessSelector - 22, // 22: process.ConnectRequest.process:type_name -> process.ProcessSelector - 22, // 23: process.StreamInputRequest.StartEvent.process:type_name -> process.ProcessSelector - 14, // 24: process.StreamInputRequest.DataEvent.input:type_name -> process.ProcessInput - 3, // 25: process.Process.List:input_type -> process.ListRequest - 21, // 26: process.Process.Connect:input_type -> process.ConnectRequest - 6, // 27: process.Process.Start:input_type -> process.StartRequest - 7, // 28: process.Process.Update:input_type -> process.UpdateRequest - 15, // 29: process.Process.StreamInput:input_type -> process.StreamInputRequest - 12, // 30: process.Process.SendInput:input_type -> process.SendInputRequest - 17, // 31: process.Process.SendSignal:input_type -> process.SendSignalRequest - 19, // 32: process.Process.CloseStdin:input_type -> process.CloseStdinRequest - 5, // 33: process.Process.List:output_type -> process.ListResponse - 11, // 34: process.Process.Connect:output_type -> process.ConnectResponse - 10, // 35: process.Process.Start:output_type -> process.StartResponse - 8, // 36: process.Process.Update:output_type -> process.UpdateResponse - 16, // 37: process.Process.StreamInput:output_type -> process.StreamInputResponse - 13, // 38: process.Process.SendInput:output_type -> process.SendInputResponse - 18, // 39: process.Process.SendSignal:output_type -> process.SendSignalResponse - 20, // 40: process.Process.CloseStdin:output_type -> process.CloseStdinResponse - 33, // [33:41] is the sub-list for method output_type - 25, // [25:33] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_process_proto_init() } -func file_process_proto_init() { - if File_process_proto != nil { - return - } - file_process_proto_msgTypes[1].OneofWrappers = []any{} - file_process_proto_msgTypes[3].OneofWrappers = []any{} - file_process_proto_msgTypes[5].OneofWrappers = []any{} - file_process_proto_msgTypes[6].OneofWrappers = []any{} - file_process_proto_msgTypes[8].OneofWrappers = []any{ - (*ProcessEvent_Start)(nil), - (*ProcessEvent_Data)(nil), - (*ProcessEvent_End)(nil), - (*ProcessEvent_Keepalive)(nil), - } - file_process_proto_msgTypes[13].OneofWrappers = []any{ - (*ProcessInput_Stdin)(nil), - (*ProcessInput_Pty)(nil), - } - file_process_proto_msgTypes[14].OneofWrappers = []any{ - (*StreamInputRequest_Start)(nil), - (*StreamInputRequest_Data)(nil), - (*StreamInputRequest_Keepalive)(nil), - } - file_process_proto_msgTypes[21].OneofWrappers = []any{ - (*ProcessSelector_Pid)(nil), - (*ProcessSelector_Tag)(nil), - } - file_process_proto_msgTypes[25].OneofWrappers = []any{ - (*ProcessEvent_DataEvent_Stdout)(nil), - (*ProcessEvent_DataEvent_Stderr)(nil), - (*ProcessEvent_DataEvent_Pty)(nil), - } - file_process_proto_msgTypes[26].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_process_proto_rawDesc), len(file_process_proto_rawDesc)), - NumEnums: 1, - NumMessages: 31, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_process_proto_goTypes, - DependencyIndexes: file_process_proto_depIdxs, - EnumInfos: file_process_proto_enumTypes, - MessageInfos: file_process_proto_msgTypes, - }.Build() - File_process_proto = out.File - file_process_proto_goTypes = nil - file_process_proto_depIdxs = nil -} diff --git a/envd/internal/services/spec/process/process.pb.go b/envd/internal/services/spec/process/process.pb.go deleted file mode 100644 index c7c0ee0..0000000 --- a/envd/internal/services/spec/process/process.pb.go +++ /dev/null @@ -1,1970 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.11 -// protoc (unknown) -// source: process/process.proto - -package process - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Signal int32 - -const ( - Signal_SIGNAL_UNSPECIFIED Signal = 0 - Signal_SIGNAL_SIGTERM Signal = 15 - Signal_SIGNAL_SIGKILL Signal = 9 -) - -// Enum value maps for Signal. -var ( - Signal_name = map[int32]string{ - 0: "SIGNAL_UNSPECIFIED", - 15: "SIGNAL_SIGTERM", - 9: "SIGNAL_SIGKILL", - } - Signal_value = map[string]int32{ - "SIGNAL_UNSPECIFIED": 0, - "SIGNAL_SIGTERM": 15, - "SIGNAL_SIGKILL": 9, - } -) - -func (x Signal) Enum() *Signal { - p := new(Signal) - *p = x - return p -} - -func (x Signal) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Signal) Descriptor() protoreflect.EnumDescriptor { - return file_process_process_proto_enumTypes[0].Descriptor() -} - -func (Signal) Type() protoreflect.EnumType { - return &file_process_process_proto_enumTypes[0] -} - -func (x Signal) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Signal.Descriptor instead. -func (Signal) EnumDescriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{0} -} - -type PTY struct { - state protoimpl.MessageState `protogen:"open.v1"` - Size *PTY_Size `protobuf:"bytes,1,opt,name=size,proto3" json:"size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PTY) Reset() { - *x = PTY{} - mi := &file_process_process_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PTY) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PTY) ProtoMessage() {} - -func (x *PTY) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PTY.ProtoReflect.Descriptor instead. -func (*PTY) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{0} -} - -func (x *PTY) GetSize() *PTY_Size { - if x != nil { - return x.Size - } - return nil -} - -type ProcessConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cmd string `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"` - Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` - Envs map[string]string `protobuf:"bytes,3,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Cwd *string `protobuf:"bytes,4,opt,name=cwd,proto3,oneof" json:"cwd,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessConfig) Reset() { - *x = ProcessConfig{} - mi := &file_process_process_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessConfig) ProtoMessage() {} - -func (x *ProcessConfig) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessConfig.ProtoReflect.Descriptor instead. -func (*ProcessConfig) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{1} -} - -func (x *ProcessConfig) GetCmd() string { - if x != nil { - return x.Cmd - } - return "" -} - -func (x *ProcessConfig) GetArgs() []string { - if x != nil { - return x.Args - } - return nil -} - -func (x *ProcessConfig) GetEnvs() map[string]string { - if x != nil { - return x.Envs - } - return nil -} - -func (x *ProcessConfig) GetCwd() string { - if x != nil && x.Cwd != nil { - return *x.Cwd - } - return "" -} - -type ListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListRequest) Reset() { - *x = ListRequest{} - mi := &file_process_process_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRequest) ProtoMessage() {} - -func (x *ListRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. -func (*ListRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{2} -} - -type ProcessInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *ProcessConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessInfo) Reset() { - *x = ProcessInfo{} - mi := &file_process_process_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessInfo) ProtoMessage() {} - -func (x *ProcessInfo) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. -func (*ProcessInfo) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{3} -} - -func (x *ProcessInfo) GetConfig() *ProcessConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *ProcessInfo) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -func (x *ProcessInfo) GetTag() string { - if x != nil && x.Tag != nil { - return *x.Tag - } - return "" -} - -type ListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Processes []*ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListResponse) Reset() { - *x = ListResponse{} - mi := &file_process_process_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListResponse) ProtoMessage() {} - -func (x *ListResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. -func (*ListResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{4} -} - -func (x *ListResponse) GetProcesses() []*ProcessInfo { - if x != nil { - return x.Processes - } - return nil -} - -type StartRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessConfig `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` - Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` - // This is optional for backwards compatibility. - // We default to true. New SDK versions will set this to false by default. - Stdin *bool `protobuf:"varint,4,opt,name=stdin,proto3,oneof" json:"stdin,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartRequest) Reset() { - *x = StartRequest{} - mi := &file_process_process_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartRequest) ProtoMessage() {} - -func (x *StartRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. -func (*StartRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{5} -} - -func (x *StartRequest) GetProcess() *ProcessConfig { - if x != nil { - return x.Process - } - return nil -} - -func (x *StartRequest) GetPty() *PTY { - if x != nil { - return x.Pty - } - return nil -} - -func (x *StartRequest) GetTag() string { - if x != nil && x.Tag != nil { - return *x.Tag - } - return "" -} - -func (x *StartRequest) GetStdin() bool { - if x != nil && x.Stdin != nil { - return *x.Stdin - } - return false -} - -type UpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateRequest) Reset() { - *x = UpdateRequest{} - mi := &file_process_process_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateRequest) ProtoMessage() {} - -func (x *UpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. -func (*UpdateRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *UpdateRequest) GetPty() *PTY { - if x != nil { - return x.Pty - } - return nil -} - -type UpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateResponse) Reset() { - *x = UpdateResponse{} - mi := &file_process_process_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateResponse) ProtoMessage() {} - -func (x *UpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead. -func (*UpdateResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{7} -} - -type ProcessEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *ProcessEvent_Start - // *ProcessEvent_Data - // *ProcessEvent_End - // *ProcessEvent_Keepalive - Event isProcessEvent_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent) Reset() { - *x = ProcessEvent{} - mi := &file_process_process_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent) ProtoMessage() {} - -func (x *ProcessEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{8} -} - -func (x *ProcessEvent) GetEvent() isProcessEvent_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *ProcessEvent) GetStart() *ProcessEvent_StartEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Start); ok { - return x.Start - } - } - return nil -} - -func (x *ProcessEvent) GetData() *ProcessEvent_DataEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Data); ok { - return x.Data - } - } - return nil -} - -func (x *ProcessEvent) GetEnd() *ProcessEvent_EndEvent { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_End); ok { - return x.End - } - } - return nil -} - -func (x *ProcessEvent) GetKeepalive() *ProcessEvent_KeepAlive { - if x != nil { - if x, ok := x.Event.(*ProcessEvent_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isProcessEvent_Event interface { - isProcessEvent_Event() -} - -type ProcessEvent_Start struct { - Start *ProcessEvent_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type ProcessEvent_Data struct { - Data *ProcessEvent_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -type ProcessEvent_End struct { - End *ProcessEvent_EndEvent `protobuf:"bytes,3,opt,name=end,proto3,oneof"` -} - -type ProcessEvent_Keepalive struct { - Keepalive *ProcessEvent_KeepAlive `protobuf:"bytes,4,opt,name=keepalive,proto3,oneof"` -} - -func (*ProcessEvent_Start) isProcessEvent_Event() {} - -func (*ProcessEvent_Data) isProcessEvent_Event() {} - -func (*ProcessEvent_End) isProcessEvent_Event() {} - -func (*ProcessEvent_Keepalive) isProcessEvent_Event() {} - -type StartResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartResponse) Reset() { - *x = StartResponse{} - mi := &file_process_process_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResponse) ProtoMessage() {} - -func (x *StartResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. -func (*StartResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{9} -} - -func (x *StartResponse) GetEvent() *ProcessEvent { - if x != nil { - return x.Event - } - return nil -} - -type ConnectResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConnectResponse) Reset() { - *x = ConnectResponse{} - mi := &file_process_process_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConnectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnectResponse) ProtoMessage() {} - -func (x *ConnectResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnectResponse.ProtoReflect.Descriptor instead. -func (*ConnectResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{10} -} - -func (x *ConnectResponse) GetEvent() *ProcessEvent { - if x != nil { - return x.Event - } - return nil -} - -type SendInputRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendInputRequest) Reset() { - *x = SendInputRequest{} - mi := &file_process_process_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendInputRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendInputRequest) ProtoMessage() {} - -func (x *SendInputRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendInputRequest.ProtoReflect.Descriptor instead. -func (*SendInputRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{11} -} - -func (x *SendInputRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *SendInputRequest) GetInput() *ProcessInput { - if x != nil { - return x.Input - } - return nil -} - -type SendInputResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendInputResponse) Reset() { - *x = SendInputResponse{} - mi := &file_process_process_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendInputResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendInputResponse) ProtoMessage() {} - -func (x *SendInputResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendInputResponse.ProtoReflect.Descriptor instead. -func (*SendInputResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{12} -} - -type ProcessInput struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Input: - // - // *ProcessInput_Stdin - // *ProcessInput_Pty - Input isProcessInput_Input `protobuf_oneof:"input"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessInput) Reset() { - *x = ProcessInput{} - mi := &file_process_process_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessInput) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessInput) ProtoMessage() {} - -func (x *ProcessInput) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessInput.ProtoReflect.Descriptor instead. -func (*ProcessInput) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{13} -} - -func (x *ProcessInput) GetInput() isProcessInput_Input { - if x != nil { - return x.Input - } - return nil -} - -func (x *ProcessInput) GetStdin() []byte { - if x != nil { - if x, ok := x.Input.(*ProcessInput_Stdin); ok { - return x.Stdin - } - } - return nil -} - -func (x *ProcessInput) GetPty() []byte { - if x != nil { - if x, ok := x.Input.(*ProcessInput_Pty); ok { - return x.Pty - } - } - return nil -} - -type isProcessInput_Input interface { - isProcessInput_Input() -} - -type ProcessInput_Stdin struct { - Stdin []byte `protobuf:"bytes,1,opt,name=stdin,proto3,oneof"` -} - -type ProcessInput_Pty struct { - Pty []byte `protobuf:"bytes,2,opt,name=pty,proto3,oneof"` -} - -func (*ProcessInput_Stdin) isProcessInput_Input() {} - -func (*ProcessInput_Pty) isProcessInput_Input() {} - -type StreamInputRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Event: - // - // *StreamInputRequest_Start - // *StreamInputRequest_Data - // *StreamInputRequest_Keepalive - Event isStreamInputRequest_Event `protobuf_oneof:"event"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest) Reset() { - *x = StreamInputRequest{} - mi := &file_process_process_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest) ProtoMessage() {} - -func (x *StreamInputRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest.ProtoReflect.Descriptor instead. -func (*StreamInputRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{14} -} - -func (x *StreamInputRequest) GetEvent() isStreamInputRequest_Event { - if x != nil { - return x.Event - } - return nil -} - -func (x *StreamInputRequest) GetStart() *StreamInputRequest_StartEvent { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Start); ok { - return x.Start - } - } - return nil -} - -func (x *StreamInputRequest) GetData() *StreamInputRequest_DataEvent { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Data); ok { - return x.Data - } - } - return nil -} - -func (x *StreamInputRequest) GetKeepalive() *StreamInputRequest_KeepAlive { - if x != nil { - if x, ok := x.Event.(*StreamInputRequest_Keepalive); ok { - return x.Keepalive - } - } - return nil -} - -type isStreamInputRequest_Event interface { - isStreamInputRequest_Event() -} - -type StreamInputRequest_Start struct { - Start *StreamInputRequest_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` -} - -type StreamInputRequest_Data struct { - Data *StreamInputRequest_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -type StreamInputRequest_Keepalive struct { - Keepalive *StreamInputRequest_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` -} - -func (*StreamInputRequest_Start) isStreamInputRequest_Event() {} - -func (*StreamInputRequest_Data) isStreamInputRequest_Event() {} - -func (*StreamInputRequest_Keepalive) isStreamInputRequest_Event() {} - -type StreamInputResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputResponse) Reset() { - *x = StreamInputResponse{} - mi := &file_process_process_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputResponse) ProtoMessage() {} - -func (x *StreamInputResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputResponse.ProtoReflect.Descriptor instead. -func (*StreamInputResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{15} -} - -type SendSignalRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Signal Signal `protobuf:"varint,2,opt,name=signal,proto3,enum=process.Signal" json:"signal,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendSignalRequest) Reset() { - *x = SendSignalRequest{} - mi := &file_process_process_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendSignalRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendSignalRequest) ProtoMessage() {} - -func (x *SendSignalRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendSignalRequest.ProtoReflect.Descriptor instead. -func (*SendSignalRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{16} -} - -func (x *SendSignalRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -func (x *SendSignalRequest) GetSignal() Signal { - if x != nil { - return x.Signal - } - return Signal_SIGNAL_UNSPECIFIED -} - -type SendSignalResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SendSignalResponse) Reset() { - *x = SendSignalResponse{} - mi := &file_process_process_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SendSignalResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SendSignalResponse) ProtoMessage() {} - -func (x *SendSignalResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SendSignalResponse.ProtoReflect.Descriptor instead. -func (*SendSignalResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{17} -} - -type CloseStdinRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseStdinRequest) Reset() { - *x = CloseStdinRequest{} - mi := &file_process_process_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseStdinRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseStdinRequest) ProtoMessage() {} - -func (x *CloseStdinRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseStdinRequest.ProtoReflect.Descriptor instead. -func (*CloseStdinRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{18} -} - -func (x *CloseStdinRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type CloseStdinResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseStdinResponse) Reset() { - *x = CloseStdinResponse{} - mi := &file_process_process_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseStdinResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseStdinResponse) ProtoMessage() {} - -func (x *CloseStdinResponse) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseStdinResponse.ProtoReflect.Descriptor instead. -func (*CloseStdinResponse) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{19} -} - -type ConnectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConnectRequest) Reset() { - *x = ConnectRequest{} - mi := &file_process_process_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConnectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnectRequest) ProtoMessage() {} - -func (x *ConnectRequest) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnectRequest.ProtoReflect.Descriptor instead. -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{20} -} - -func (x *ConnectRequest) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type ProcessSelector struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Selector: - // - // *ProcessSelector_Pid - // *ProcessSelector_Tag - Selector isProcessSelector_Selector `protobuf_oneof:"selector"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessSelector) Reset() { - *x = ProcessSelector{} - mi := &file_process_process_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessSelector) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessSelector) ProtoMessage() {} - -func (x *ProcessSelector) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessSelector.ProtoReflect.Descriptor instead. -func (*ProcessSelector) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{21} -} - -func (x *ProcessSelector) GetSelector() isProcessSelector_Selector { - if x != nil { - return x.Selector - } - return nil -} - -func (x *ProcessSelector) GetPid() uint32 { - if x != nil { - if x, ok := x.Selector.(*ProcessSelector_Pid); ok { - return x.Pid - } - } - return 0 -} - -func (x *ProcessSelector) GetTag() string { - if x != nil { - if x, ok := x.Selector.(*ProcessSelector_Tag); ok { - return x.Tag - } - } - return "" -} - -type isProcessSelector_Selector interface { - isProcessSelector_Selector() -} - -type ProcessSelector_Pid struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3,oneof"` -} - -type ProcessSelector_Tag struct { - Tag string `protobuf:"bytes,2,opt,name=tag,proto3,oneof"` -} - -func (*ProcessSelector_Pid) isProcessSelector_Selector() {} - -func (*ProcessSelector_Tag) isProcessSelector_Selector() {} - -type PTY_Size struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cols uint32 `protobuf:"varint,1,opt,name=cols,proto3" json:"cols,omitempty"` - Rows uint32 `protobuf:"varint,2,opt,name=rows,proto3" json:"rows,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PTY_Size) Reset() { - *x = PTY_Size{} - mi := &file_process_process_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PTY_Size) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PTY_Size) ProtoMessage() {} - -func (x *PTY_Size) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PTY_Size.ProtoReflect.Descriptor instead. -func (*PTY_Size) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *PTY_Size) GetCols() uint32 { - if x != nil { - return x.Cols - } - return 0 -} - -func (x *PTY_Size) GetRows() uint32 { - if x != nil { - return x.Rows - } - return 0 -} - -type ProcessEvent_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_StartEvent) Reset() { - *x = ProcessEvent_StartEvent{} - mi := &file_process_process_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_StartEvent) ProtoMessage() {} - -func (x *ProcessEvent_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_StartEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_StartEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{8, 0} -} - -func (x *ProcessEvent_StartEvent) GetPid() uint32 { - if x != nil { - return x.Pid - } - return 0 -} - -type ProcessEvent_DataEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Output: - // - // *ProcessEvent_DataEvent_Stdout - // *ProcessEvent_DataEvent_Stderr - // *ProcessEvent_DataEvent_Pty - Output isProcessEvent_DataEvent_Output `protobuf_oneof:"output"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_DataEvent) Reset() { - *x = ProcessEvent_DataEvent{} - mi := &file_process_process_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_DataEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_DataEvent) ProtoMessage() {} - -func (x *ProcessEvent_DataEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_DataEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_DataEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{8, 1} -} - -func (x *ProcessEvent_DataEvent) GetOutput() isProcessEvent_DataEvent_Output { - if x != nil { - return x.Output - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetStdout() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Stdout); ok { - return x.Stdout - } - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetStderr() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Stderr); ok { - return x.Stderr - } - } - return nil -} - -func (x *ProcessEvent_DataEvent) GetPty() []byte { - if x != nil { - if x, ok := x.Output.(*ProcessEvent_DataEvent_Pty); ok { - return x.Pty - } - } - return nil -} - -type isProcessEvent_DataEvent_Output interface { - isProcessEvent_DataEvent_Output() -} - -type ProcessEvent_DataEvent_Stdout struct { - Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3,oneof"` -} - -type ProcessEvent_DataEvent_Stderr struct { - Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3,oneof"` -} - -type ProcessEvent_DataEvent_Pty struct { - Pty []byte `protobuf:"bytes,3,opt,name=pty,proto3,oneof"` -} - -func (*ProcessEvent_DataEvent_Stdout) isProcessEvent_DataEvent_Output() {} - -func (*ProcessEvent_DataEvent_Stderr) isProcessEvent_DataEvent_Output() {} - -func (*ProcessEvent_DataEvent_Pty) isProcessEvent_DataEvent_Output() {} - -type ProcessEvent_EndEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - ExitCode int32 `protobuf:"zigzag32,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` - Exited bool `protobuf:"varint,2,opt,name=exited,proto3" json:"exited,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - Error *string `protobuf:"bytes,4,opt,name=error,proto3,oneof" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_EndEvent) Reset() { - *x = ProcessEvent_EndEvent{} - mi := &file_process_process_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_EndEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_EndEvent) ProtoMessage() {} - -func (x *ProcessEvent_EndEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_EndEvent.ProtoReflect.Descriptor instead. -func (*ProcessEvent_EndEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{8, 2} -} - -func (x *ProcessEvent_EndEvent) GetExitCode() int32 { - if x != nil { - return x.ExitCode - } - return 0 -} - -func (x *ProcessEvent_EndEvent) GetExited() bool { - if x != nil { - return x.Exited - } - return false -} - -func (x *ProcessEvent_EndEvent) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *ProcessEvent_EndEvent) GetError() string { - if x != nil && x.Error != nil { - return *x.Error - } - return "" -} - -type ProcessEvent_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ProcessEvent_KeepAlive) Reset() { - *x = ProcessEvent_KeepAlive{} - mi := &file_process_process_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ProcessEvent_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProcessEvent_KeepAlive) ProtoMessage() {} - -func (x *ProcessEvent_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProcessEvent_KeepAlive.ProtoReflect.Descriptor instead. -func (*ProcessEvent_KeepAlive) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{8, 3} -} - -type StreamInputRequest_StartEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_StartEvent) Reset() { - *x = StreamInputRequest_StartEvent{} - mi := &file_process_process_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_StartEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_StartEvent) ProtoMessage() {} - -func (x *StreamInputRequest_StartEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_StartEvent.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_StartEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{14, 0} -} - -func (x *StreamInputRequest_StartEvent) GetProcess() *ProcessSelector { - if x != nil { - return x.Process - } - return nil -} - -type StreamInputRequest_DataEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_DataEvent) Reset() { - *x = StreamInputRequest_DataEvent{} - mi := &file_process_process_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_DataEvent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_DataEvent) ProtoMessage() {} - -func (x *StreamInputRequest_DataEvent) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_DataEvent.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_DataEvent) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{14, 1} -} - -func (x *StreamInputRequest_DataEvent) GetInput() *ProcessInput { - if x != nil { - return x.Input - } - return nil -} - -type StreamInputRequest_KeepAlive struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StreamInputRequest_KeepAlive) Reset() { - *x = StreamInputRequest_KeepAlive{} - mi := &file_process_process_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StreamInputRequest_KeepAlive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamInputRequest_KeepAlive) ProtoMessage() {} - -func (x *StreamInputRequest_KeepAlive) ProtoReflect() protoreflect.Message { - mi := &file_process_process_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamInputRequest_KeepAlive.ProtoReflect.Descriptor instead. -func (*StreamInputRequest_KeepAlive) Descriptor() ([]byte, []int) { - return file_process_process_proto_rawDescGZIP(), []int{14, 2} -} - -var File_process_process_proto protoreflect.FileDescriptor - -const file_process_process_proto_rawDesc = "" + - "\n" + - "\x15process/process.proto\x12\aprocess\"\\\n" + - "\x03PTY\x12%\n" + - "\x04size\x18\x01 \x01(\v2\x11.process.PTY.SizeR\x04size\x1a.\n" + - "\x04Size\x12\x12\n" + - "\x04cols\x18\x01 \x01(\rR\x04cols\x12\x12\n" + - "\x04rows\x18\x02 \x01(\rR\x04rows\"\xc3\x01\n" + - "\rProcessConfig\x12\x10\n" + - "\x03cmd\x18\x01 \x01(\tR\x03cmd\x12\x12\n" + - "\x04args\x18\x02 \x03(\tR\x04args\x124\n" + - "\x04envs\x18\x03 \x03(\v2 .process.ProcessConfig.EnvsEntryR\x04envs\x12\x15\n" + - "\x03cwd\x18\x04 \x01(\tH\x00R\x03cwd\x88\x01\x01\x1a7\n" + - "\tEnvsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\x06\n" + - "\x04_cwd\"\r\n" + - "\vListRequest\"n\n" + - "\vProcessInfo\x12.\n" + - "\x06config\x18\x01 \x01(\v2\x16.process.ProcessConfigR\x06config\x12\x10\n" + - "\x03pid\x18\x02 \x01(\rR\x03pid\x12\x15\n" + - "\x03tag\x18\x03 \x01(\tH\x00R\x03tag\x88\x01\x01B\x06\n" + - "\x04_tag\"B\n" + - "\fListResponse\x122\n" + - "\tprocesses\x18\x01 \x03(\v2\x14.process.ProcessInfoR\tprocesses\"\xb1\x01\n" + - "\fStartRequest\x120\n" + - "\aprocess\x18\x01 \x01(\v2\x16.process.ProcessConfigR\aprocess\x12#\n" + - "\x03pty\x18\x02 \x01(\v2\f.process.PTYH\x00R\x03pty\x88\x01\x01\x12\x15\n" + - "\x03tag\x18\x03 \x01(\tH\x01R\x03tag\x88\x01\x01\x12\x19\n" + - "\x05stdin\x18\x04 \x01(\bH\x02R\x05stdin\x88\x01\x01B\x06\n" + - "\x04_ptyB\x06\n" + - "\x04_tagB\b\n" + - "\x06_stdin\"p\n" + - "\rUpdateRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12#\n" + - "\x03pty\x18\x02 \x01(\v2\f.process.PTYH\x00R\x03pty\x88\x01\x01B\x06\n" + - "\x04_pty\"\x10\n" + - "\x0eUpdateResponse\"\x87\x04\n" + - "\fProcessEvent\x128\n" + - "\x05start\x18\x01 \x01(\v2 .process.ProcessEvent.StartEventH\x00R\x05start\x125\n" + - "\x04data\x18\x02 \x01(\v2\x1f.process.ProcessEvent.DataEventH\x00R\x04data\x122\n" + - "\x03end\x18\x03 \x01(\v2\x1e.process.ProcessEvent.EndEventH\x00R\x03end\x12?\n" + - "\tkeepalive\x18\x04 \x01(\v2\x1f.process.ProcessEvent.KeepAliveH\x00R\tkeepalive\x1a\x1e\n" + - "\n" + - "StartEvent\x12\x10\n" + - "\x03pid\x18\x01 \x01(\rR\x03pid\x1a]\n" + - "\tDataEvent\x12\x18\n" + - "\x06stdout\x18\x01 \x01(\fH\x00R\x06stdout\x12\x18\n" + - "\x06stderr\x18\x02 \x01(\fH\x00R\x06stderr\x12\x12\n" + - "\x03pty\x18\x03 \x01(\fH\x00R\x03ptyB\b\n" + - "\x06output\x1a|\n" + - "\bEndEvent\x12\x1b\n" + - "\texit_code\x18\x01 \x01(\x11R\bexitCode\x12\x16\n" + - "\x06exited\x18\x02 \x01(\bR\x06exited\x12\x16\n" + - "\x06status\x18\x03 \x01(\tR\x06status\x12\x19\n" + - "\x05error\x18\x04 \x01(\tH\x00R\x05error\x88\x01\x01B\b\n" + - "\x06_error\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"<\n" + - "\rStartResponse\x12+\n" + - "\x05event\x18\x01 \x01(\v2\x15.process.ProcessEventR\x05event\">\n" + - "\x0fConnectResponse\x12+\n" + - "\x05event\x18\x01 \x01(\v2\x15.process.ProcessEventR\x05event\"s\n" + - "\x10SendInputRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12+\n" + - "\x05input\x18\x02 \x01(\v2\x15.process.ProcessInputR\x05input\"\x13\n" + - "\x11SendInputResponse\"C\n" + - "\fProcessInput\x12\x16\n" + - "\x05stdin\x18\x01 \x01(\fH\x00R\x05stdin\x12\x12\n" + - "\x03pty\x18\x02 \x01(\fH\x00R\x03ptyB\a\n" + - "\x05input\"\xea\x02\n" + - "\x12StreamInputRequest\x12>\n" + - "\x05start\x18\x01 \x01(\v2&.process.StreamInputRequest.StartEventH\x00R\x05start\x12;\n" + - "\x04data\x18\x02 \x01(\v2%.process.StreamInputRequest.DataEventH\x00R\x04data\x12E\n" + - "\tkeepalive\x18\x03 \x01(\v2%.process.StreamInputRequest.KeepAliveH\x00R\tkeepalive\x1a@\n" + - "\n" + - "StartEvent\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x1a8\n" + - "\tDataEvent\x12+\n" + - "\x05input\x18\x02 \x01(\v2\x15.process.ProcessInputR\x05input\x1a\v\n" + - "\tKeepAliveB\a\n" + - "\x05event\"\x15\n" + - "\x13StreamInputResponse\"p\n" + - "\x11SendSignalRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\x12'\n" + - "\x06signal\x18\x02 \x01(\x0e2\x0f.process.SignalR\x06signal\"\x14\n" + - "\x12SendSignalResponse\"G\n" + - "\x11CloseStdinRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\"\x14\n" + - "\x12CloseStdinResponse\"D\n" + - "\x0eConnectRequest\x122\n" + - "\aprocess\x18\x01 \x01(\v2\x18.process.ProcessSelectorR\aprocess\"E\n" + - "\x0fProcessSelector\x12\x12\n" + - "\x03pid\x18\x01 \x01(\rH\x00R\x03pid\x12\x12\n" + - "\x03tag\x18\x02 \x01(\tH\x00R\x03tagB\n" + - "\n" + - "\bselector*H\n" + - "\x06Signal\x12\x16\n" + - "\x12SIGNAL_UNSPECIFIED\x10\x00\x12\x12\n" + - "\x0eSIGNAL_SIGTERM\x10\x0f\x12\x12\n" + - "\x0eSIGNAL_SIGKILL\x10\t2\x91\x04\n" + - "\aProcess\x123\n" + - "\x04List\x12\x14.process.ListRequest\x1a\x15.process.ListResponse\x12>\n" + - "\aConnect\x12\x17.process.ConnectRequest\x1a\x18.process.ConnectResponse0\x01\x128\n" + - "\x05Start\x12\x15.process.StartRequest\x1a\x16.process.StartResponse0\x01\x129\n" + - "\x06Update\x12\x16.process.UpdateRequest\x1a\x17.process.UpdateResponse\x12J\n" + - "\vStreamInput\x12\x1b.process.StreamInputRequest\x1a\x1c.process.StreamInputResponse(\x01\x12B\n" + - "\tSendInput\x12\x19.process.SendInputRequest\x1a\x1a.process.SendInputResponse\x12E\n" + - "\n" + - "SendSignal\x12\x1a.process.SendSignalRequest\x1a\x1b.process.SendSignalResponse\x12E\n" + - "\n" + - "CloseStdin\x12\x1a.process.CloseStdinRequest\x1a\x1b.process.CloseStdinResponseB\x98\x01\n" + - "\vcom.processB\fProcessProtoP\x01Z?git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process\xa2\x02\x03PXX\xaa\x02\aProcess\xca\x02\aProcess\xe2\x02\x13Process\\GPBMetadata\xea\x02\aProcessb\x06proto3" - -var ( - file_process_process_proto_rawDescOnce sync.Once - file_process_process_proto_rawDescData []byte -) - -func file_process_process_proto_rawDescGZIP() []byte { - file_process_process_proto_rawDescOnce.Do(func() { - file_process_process_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_process_process_proto_rawDesc), len(file_process_process_proto_rawDesc))) - }) - return file_process_process_proto_rawDescData -} - -var file_process_process_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_process_process_proto_msgTypes = make([]protoimpl.MessageInfo, 31) -var file_process_process_proto_goTypes = []any{ - (Signal)(0), // 0: process.Signal - (*PTY)(nil), // 1: process.PTY - (*ProcessConfig)(nil), // 2: process.ProcessConfig - (*ListRequest)(nil), // 3: process.ListRequest - (*ProcessInfo)(nil), // 4: process.ProcessInfo - (*ListResponse)(nil), // 5: process.ListResponse - (*StartRequest)(nil), // 6: process.StartRequest - (*UpdateRequest)(nil), // 7: process.UpdateRequest - (*UpdateResponse)(nil), // 8: process.UpdateResponse - (*ProcessEvent)(nil), // 9: process.ProcessEvent - (*StartResponse)(nil), // 10: process.StartResponse - (*ConnectResponse)(nil), // 11: process.ConnectResponse - (*SendInputRequest)(nil), // 12: process.SendInputRequest - (*SendInputResponse)(nil), // 13: process.SendInputResponse - (*ProcessInput)(nil), // 14: process.ProcessInput - (*StreamInputRequest)(nil), // 15: process.StreamInputRequest - (*StreamInputResponse)(nil), // 16: process.StreamInputResponse - (*SendSignalRequest)(nil), // 17: process.SendSignalRequest - (*SendSignalResponse)(nil), // 18: process.SendSignalResponse - (*CloseStdinRequest)(nil), // 19: process.CloseStdinRequest - (*CloseStdinResponse)(nil), // 20: process.CloseStdinResponse - (*ConnectRequest)(nil), // 21: process.ConnectRequest - (*ProcessSelector)(nil), // 22: process.ProcessSelector - (*PTY_Size)(nil), // 23: process.PTY.Size - nil, // 24: process.ProcessConfig.EnvsEntry - (*ProcessEvent_StartEvent)(nil), // 25: process.ProcessEvent.StartEvent - (*ProcessEvent_DataEvent)(nil), // 26: process.ProcessEvent.DataEvent - (*ProcessEvent_EndEvent)(nil), // 27: process.ProcessEvent.EndEvent - (*ProcessEvent_KeepAlive)(nil), // 28: process.ProcessEvent.KeepAlive - (*StreamInputRequest_StartEvent)(nil), // 29: process.StreamInputRequest.StartEvent - (*StreamInputRequest_DataEvent)(nil), // 30: process.StreamInputRequest.DataEvent - (*StreamInputRequest_KeepAlive)(nil), // 31: process.StreamInputRequest.KeepAlive -} -var file_process_process_proto_depIdxs = []int32{ - 23, // 0: process.PTY.size:type_name -> process.PTY.Size - 24, // 1: process.ProcessConfig.envs:type_name -> process.ProcessConfig.EnvsEntry - 2, // 2: process.ProcessInfo.config:type_name -> process.ProcessConfig - 4, // 3: process.ListResponse.processes:type_name -> process.ProcessInfo - 2, // 4: process.StartRequest.process:type_name -> process.ProcessConfig - 1, // 5: process.StartRequest.pty:type_name -> process.PTY - 22, // 6: process.UpdateRequest.process:type_name -> process.ProcessSelector - 1, // 7: process.UpdateRequest.pty:type_name -> process.PTY - 25, // 8: process.ProcessEvent.start:type_name -> process.ProcessEvent.StartEvent - 26, // 9: process.ProcessEvent.data:type_name -> process.ProcessEvent.DataEvent - 27, // 10: process.ProcessEvent.end:type_name -> process.ProcessEvent.EndEvent - 28, // 11: process.ProcessEvent.keepalive:type_name -> process.ProcessEvent.KeepAlive - 9, // 12: process.StartResponse.event:type_name -> process.ProcessEvent - 9, // 13: process.ConnectResponse.event:type_name -> process.ProcessEvent - 22, // 14: process.SendInputRequest.process:type_name -> process.ProcessSelector - 14, // 15: process.SendInputRequest.input:type_name -> process.ProcessInput - 29, // 16: process.StreamInputRequest.start:type_name -> process.StreamInputRequest.StartEvent - 30, // 17: process.StreamInputRequest.data:type_name -> process.StreamInputRequest.DataEvent - 31, // 18: process.StreamInputRequest.keepalive:type_name -> process.StreamInputRequest.KeepAlive - 22, // 19: process.SendSignalRequest.process:type_name -> process.ProcessSelector - 0, // 20: process.SendSignalRequest.signal:type_name -> process.Signal - 22, // 21: process.CloseStdinRequest.process:type_name -> process.ProcessSelector - 22, // 22: process.ConnectRequest.process:type_name -> process.ProcessSelector - 22, // 23: process.StreamInputRequest.StartEvent.process:type_name -> process.ProcessSelector - 14, // 24: process.StreamInputRequest.DataEvent.input:type_name -> process.ProcessInput - 3, // 25: process.Process.List:input_type -> process.ListRequest - 21, // 26: process.Process.Connect:input_type -> process.ConnectRequest - 6, // 27: process.Process.Start:input_type -> process.StartRequest - 7, // 28: process.Process.Update:input_type -> process.UpdateRequest - 15, // 29: process.Process.StreamInput:input_type -> process.StreamInputRequest - 12, // 30: process.Process.SendInput:input_type -> process.SendInputRequest - 17, // 31: process.Process.SendSignal:input_type -> process.SendSignalRequest - 19, // 32: process.Process.CloseStdin:input_type -> process.CloseStdinRequest - 5, // 33: process.Process.List:output_type -> process.ListResponse - 11, // 34: process.Process.Connect:output_type -> process.ConnectResponse - 10, // 35: process.Process.Start:output_type -> process.StartResponse - 8, // 36: process.Process.Update:output_type -> process.UpdateResponse - 16, // 37: process.Process.StreamInput:output_type -> process.StreamInputResponse - 13, // 38: process.Process.SendInput:output_type -> process.SendInputResponse - 18, // 39: process.Process.SendSignal:output_type -> process.SendSignalResponse - 20, // 40: process.Process.CloseStdin:output_type -> process.CloseStdinResponse - 33, // [33:41] is the sub-list for method output_type - 25, // [25:33] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_process_process_proto_init() } -func file_process_process_proto_init() { - if File_process_process_proto != nil { - return - } - file_process_process_proto_msgTypes[1].OneofWrappers = []any{} - file_process_process_proto_msgTypes[3].OneofWrappers = []any{} - file_process_process_proto_msgTypes[5].OneofWrappers = []any{} - file_process_process_proto_msgTypes[6].OneofWrappers = []any{} - file_process_process_proto_msgTypes[8].OneofWrappers = []any{ - (*ProcessEvent_Start)(nil), - (*ProcessEvent_Data)(nil), - (*ProcessEvent_End)(nil), - (*ProcessEvent_Keepalive)(nil), - } - file_process_process_proto_msgTypes[13].OneofWrappers = []any{ - (*ProcessInput_Stdin)(nil), - (*ProcessInput_Pty)(nil), - } - file_process_process_proto_msgTypes[14].OneofWrappers = []any{ - (*StreamInputRequest_Start)(nil), - (*StreamInputRequest_Data)(nil), - (*StreamInputRequest_Keepalive)(nil), - } - file_process_process_proto_msgTypes[21].OneofWrappers = []any{ - (*ProcessSelector_Pid)(nil), - (*ProcessSelector_Tag)(nil), - } - file_process_process_proto_msgTypes[25].OneofWrappers = []any{ - (*ProcessEvent_DataEvent_Stdout)(nil), - (*ProcessEvent_DataEvent_Stderr)(nil), - (*ProcessEvent_DataEvent_Pty)(nil), - } - file_process_process_proto_msgTypes[26].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_process_process_proto_rawDesc), len(file_process_process_proto_rawDesc)), - NumEnums: 1, - NumMessages: 31, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_process_process_proto_goTypes, - DependencyIndexes: file_process_process_proto_depIdxs, - EnumInfos: file_process_process_proto_enumTypes, - MessageInfos: file_process_process_proto_msgTypes, - }.Build() - File_process_process_proto = out.File - file_process_process_proto_goTypes = nil - file_process_process_proto_depIdxs = nil -} diff --git a/envd/internal/services/spec/process/processconnect/process.connect.go b/envd/internal/services/spec/process/processconnect/process.connect.go deleted file mode 100644 index 7a4f3f8..0000000 --- a/envd/internal/services/spec/process/processconnect/process.connect.go +++ /dev/null @@ -1,310 +0,0 @@ -// Code generated by protoc-gen-connect-go. DO NOT EDIT. -// -// Source: process/process.proto - -package processconnect - -import ( - connect "connectrpc.com/connect" - context "context" - errors "errors" - process "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" - http "net/http" - strings "strings" -) - -// This is a compile-time assertion to ensure that this generated file and the connect package are -// compatible. If you get a compiler error that this constant is not defined, this code was -// generated with a version of connect newer than the one compiled into your binary. You can fix the -// problem by either regenerating this code with an older version of connect or updating the connect -// version compiled into your binary. -const _ = connect.IsAtLeastVersion1_13_0 - -const ( - // ProcessName is the fully-qualified name of the Process service. - ProcessName = "process.Process" -) - -// These constants are the fully-qualified names of the RPCs defined in this package. They're -// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. -// -// Note that these are different from the fully-qualified method names used by -// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to -// reflection-formatted method names, remove the leading slash and convert the remaining slash to a -// period. -const ( - // ProcessListProcedure is the fully-qualified name of the Process's List RPC. - ProcessListProcedure = "/process.Process/List" - // ProcessConnectProcedure is the fully-qualified name of the Process's Connect RPC. - ProcessConnectProcedure = "/process.Process/Connect" - // ProcessStartProcedure is the fully-qualified name of the Process's Start RPC. - ProcessStartProcedure = "/process.Process/Start" - // ProcessUpdateProcedure is the fully-qualified name of the Process's Update RPC. - ProcessUpdateProcedure = "/process.Process/Update" - // ProcessStreamInputProcedure is the fully-qualified name of the Process's StreamInput RPC. - ProcessStreamInputProcedure = "/process.Process/StreamInput" - // ProcessSendInputProcedure is the fully-qualified name of the Process's SendInput RPC. - ProcessSendInputProcedure = "/process.Process/SendInput" - // ProcessSendSignalProcedure is the fully-qualified name of the Process's SendSignal RPC. - ProcessSendSignalProcedure = "/process.Process/SendSignal" - // ProcessCloseStdinProcedure is the fully-qualified name of the Process's CloseStdin RPC. - ProcessCloseStdinProcedure = "/process.Process/CloseStdin" -) - -// ProcessClient is a client for the process.Process service. -type ProcessClient interface { - List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) - Connect(context.Context, *connect.Request[process.ConnectRequest]) (*connect.ServerStreamForClient[process.ConnectResponse], error) - Start(context.Context, *connect.Request[process.StartRequest]) (*connect.ServerStreamForClient[process.StartResponse], error) - Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) - // Client input stream ensures ordering of messages - StreamInput(context.Context) *connect.ClientStreamForClient[process.StreamInputRequest, process.StreamInputResponse] - SendInput(context.Context, *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) - SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) - // Close stdin to signal EOF to the process. - // Only works for non-PTY processes. For PTY, send Ctrl+D (0x04) instead. - CloseStdin(context.Context, *connect.Request[process.CloseStdinRequest]) (*connect.Response[process.CloseStdinResponse], error) -} - -// NewProcessClient constructs a client for the process.Process service. By default, it uses the -// Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends -// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or -// connect.WithGRPCWeb() options. -// -// The URL supplied here should be the base URL for the Connect or gRPC server (for example, -// http://api.acme.com or https://acme.com/grpc). -func NewProcessClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) ProcessClient { - baseURL = strings.TrimRight(baseURL, "/") - processMethods := process.File_process_process_proto.Services().ByName("Process").Methods() - return &processClient{ - list: connect.NewClient[process.ListRequest, process.ListResponse]( - httpClient, - baseURL+ProcessListProcedure, - connect.WithSchema(processMethods.ByName("List")), - connect.WithClientOptions(opts...), - ), - connect: connect.NewClient[process.ConnectRequest, process.ConnectResponse]( - httpClient, - baseURL+ProcessConnectProcedure, - connect.WithSchema(processMethods.ByName("Connect")), - connect.WithClientOptions(opts...), - ), - start: connect.NewClient[process.StartRequest, process.StartResponse]( - httpClient, - baseURL+ProcessStartProcedure, - connect.WithSchema(processMethods.ByName("Start")), - connect.WithClientOptions(opts...), - ), - update: connect.NewClient[process.UpdateRequest, process.UpdateResponse]( - httpClient, - baseURL+ProcessUpdateProcedure, - connect.WithSchema(processMethods.ByName("Update")), - connect.WithClientOptions(opts...), - ), - streamInput: connect.NewClient[process.StreamInputRequest, process.StreamInputResponse]( - httpClient, - baseURL+ProcessStreamInputProcedure, - connect.WithSchema(processMethods.ByName("StreamInput")), - connect.WithClientOptions(opts...), - ), - sendInput: connect.NewClient[process.SendInputRequest, process.SendInputResponse]( - httpClient, - baseURL+ProcessSendInputProcedure, - connect.WithSchema(processMethods.ByName("SendInput")), - connect.WithClientOptions(opts...), - ), - sendSignal: connect.NewClient[process.SendSignalRequest, process.SendSignalResponse]( - httpClient, - baseURL+ProcessSendSignalProcedure, - connect.WithSchema(processMethods.ByName("SendSignal")), - connect.WithClientOptions(opts...), - ), - closeStdin: connect.NewClient[process.CloseStdinRequest, process.CloseStdinResponse]( - httpClient, - baseURL+ProcessCloseStdinProcedure, - connect.WithSchema(processMethods.ByName("CloseStdin")), - connect.WithClientOptions(opts...), - ), - } -} - -// processClient implements ProcessClient. -type processClient struct { - list *connect.Client[process.ListRequest, process.ListResponse] - connect *connect.Client[process.ConnectRequest, process.ConnectResponse] - start *connect.Client[process.StartRequest, process.StartResponse] - update *connect.Client[process.UpdateRequest, process.UpdateResponse] - streamInput *connect.Client[process.StreamInputRequest, process.StreamInputResponse] - sendInput *connect.Client[process.SendInputRequest, process.SendInputResponse] - sendSignal *connect.Client[process.SendSignalRequest, process.SendSignalResponse] - closeStdin *connect.Client[process.CloseStdinRequest, process.CloseStdinResponse] -} - -// List calls process.Process.List. -func (c *processClient) List(ctx context.Context, req *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) { - return c.list.CallUnary(ctx, req) -} - -// Connect calls process.Process.Connect. -func (c *processClient) Connect(ctx context.Context, req *connect.Request[process.ConnectRequest]) (*connect.ServerStreamForClient[process.ConnectResponse], error) { - return c.connect.CallServerStream(ctx, req) -} - -// Start calls process.Process.Start. -func (c *processClient) Start(ctx context.Context, req *connect.Request[process.StartRequest]) (*connect.ServerStreamForClient[process.StartResponse], error) { - return c.start.CallServerStream(ctx, req) -} - -// Update calls process.Process.Update. -func (c *processClient) Update(ctx context.Context, req *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) { - return c.update.CallUnary(ctx, req) -} - -// StreamInput calls process.Process.StreamInput. -func (c *processClient) StreamInput(ctx context.Context) *connect.ClientStreamForClient[process.StreamInputRequest, process.StreamInputResponse] { - return c.streamInput.CallClientStream(ctx) -} - -// SendInput calls process.Process.SendInput. -func (c *processClient) SendInput(ctx context.Context, req *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) { - return c.sendInput.CallUnary(ctx, req) -} - -// SendSignal calls process.Process.SendSignal. -func (c *processClient) SendSignal(ctx context.Context, req *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) { - return c.sendSignal.CallUnary(ctx, req) -} - -// CloseStdin calls process.Process.CloseStdin. -func (c *processClient) CloseStdin(ctx context.Context, req *connect.Request[process.CloseStdinRequest]) (*connect.Response[process.CloseStdinResponse], error) { - return c.closeStdin.CallUnary(ctx, req) -} - -// ProcessHandler is an implementation of the process.Process service. -type ProcessHandler interface { - List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) - Connect(context.Context, *connect.Request[process.ConnectRequest], *connect.ServerStream[process.ConnectResponse]) error - Start(context.Context, *connect.Request[process.StartRequest], *connect.ServerStream[process.StartResponse]) error - Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) - // Client input stream ensures ordering of messages - StreamInput(context.Context, *connect.ClientStream[process.StreamInputRequest]) (*connect.Response[process.StreamInputResponse], error) - SendInput(context.Context, *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) - SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) - // Close stdin to signal EOF to the process. - // Only works for non-PTY processes. For PTY, send Ctrl+D (0x04) instead. - CloseStdin(context.Context, *connect.Request[process.CloseStdinRequest]) (*connect.Response[process.CloseStdinResponse], error) -} - -// NewProcessHandler builds an HTTP handler from the service implementation. It returns the path on -// which to mount the handler and the handler itself. -// -// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf -// and JSON codecs. They also support gzip compression. -func NewProcessHandler(svc ProcessHandler, opts ...connect.HandlerOption) (string, http.Handler) { - processMethods := process.File_process_process_proto.Services().ByName("Process").Methods() - processListHandler := connect.NewUnaryHandler( - ProcessListProcedure, - svc.List, - connect.WithSchema(processMethods.ByName("List")), - connect.WithHandlerOptions(opts...), - ) - processConnectHandler := connect.NewServerStreamHandler( - ProcessConnectProcedure, - svc.Connect, - connect.WithSchema(processMethods.ByName("Connect")), - connect.WithHandlerOptions(opts...), - ) - processStartHandler := connect.NewServerStreamHandler( - ProcessStartProcedure, - svc.Start, - connect.WithSchema(processMethods.ByName("Start")), - connect.WithHandlerOptions(opts...), - ) - processUpdateHandler := connect.NewUnaryHandler( - ProcessUpdateProcedure, - svc.Update, - connect.WithSchema(processMethods.ByName("Update")), - connect.WithHandlerOptions(opts...), - ) - processStreamInputHandler := connect.NewClientStreamHandler( - ProcessStreamInputProcedure, - svc.StreamInput, - connect.WithSchema(processMethods.ByName("StreamInput")), - connect.WithHandlerOptions(opts...), - ) - processSendInputHandler := connect.NewUnaryHandler( - ProcessSendInputProcedure, - svc.SendInput, - connect.WithSchema(processMethods.ByName("SendInput")), - connect.WithHandlerOptions(opts...), - ) - processSendSignalHandler := connect.NewUnaryHandler( - ProcessSendSignalProcedure, - svc.SendSignal, - connect.WithSchema(processMethods.ByName("SendSignal")), - connect.WithHandlerOptions(opts...), - ) - processCloseStdinHandler := connect.NewUnaryHandler( - ProcessCloseStdinProcedure, - svc.CloseStdin, - connect.WithSchema(processMethods.ByName("CloseStdin")), - connect.WithHandlerOptions(opts...), - ) - return "/process.Process/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case ProcessListProcedure: - processListHandler.ServeHTTP(w, r) - case ProcessConnectProcedure: - processConnectHandler.ServeHTTP(w, r) - case ProcessStartProcedure: - processStartHandler.ServeHTTP(w, r) - case ProcessUpdateProcedure: - processUpdateHandler.ServeHTTP(w, r) - case ProcessStreamInputProcedure: - processStreamInputHandler.ServeHTTP(w, r) - case ProcessSendInputProcedure: - processSendInputHandler.ServeHTTP(w, r) - case ProcessSendSignalProcedure: - processSendSignalHandler.ServeHTTP(w, r) - case ProcessCloseStdinProcedure: - processCloseStdinHandler.ServeHTTP(w, r) - default: - http.NotFound(w, r) - } - }) -} - -// UnimplementedProcessHandler returns CodeUnimplemented from all methods. -type UnimplementedProcessHandler struct{} - -func (UnimplementedProcessHandler) List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.List is not implemented")) -} - -func (UnimplementedProcessHandler) Connect(context.Context, *connect.Request[process.ConnectRequest], *connect.ServerStream[process.ConnectResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Connect is not implemented")) -} - -func (UnimplementedProcessHandler) Start(context.Context, *connect.Request[process.StartRequest], *connect.ServerStream[process.StartResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Start is not implemented")) -} - -func (UnimplementedProcessHandler) Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Update is not implemented")) -} - -func (UnimplementedProcessHandler) StreamInput(context.Context, *connect.ClientStream[process.StreamInputRequest]) (*connect.Response[process.StreamInputResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.StreamInput is not implemented")) -} - -func (UnimplementedProcessHandler) SendInput(context.Context, *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendInput is not implemented")) -} - -func (UnimplementedProcessHandler) SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendSignal is not implemented")) -} - -func (UnimplementedProcessHandler) CloseStdin(context.Context, *connect.Request[process.CloseStdinRequest]) (*connect.Response[process.CloseStdinResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.CloseStdin is not implemented")) -} diff --git a/envd/internal/services/spec/specconnect/filesystem.connect.go b/envd/internal/services/spec/specconnect/filesystem.connect.go deleted file mode 100644 index b06df5f..0000000 --- a/envd/internal/services/spec/specconnect/filesystem.connect.go +++ /dev/null @@ -1,339 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-connect-go. DO NOT EDIT. -// -// Source: filesystem.proto - -package specconnect - -import ( - connect "connectrpc.com/connect" - context "context" - errors "errors" - spec "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec" - http "net/http" - strings "strings" -) - -// This is a compile-time assertion to ensure that this generated file and the connect package are -// compatible. If you get a compiler error that this constant is not defined, this code was -// generated with a version of connect newer than the one compiled into your binary. You can fix the -// problem by either regenerating this code with an older version of connect or updating the connect -// version compiled into your binary. -const _ = connect.IsAtLeastVersion1_13_0 - -const ( - // FilesystemName is the fully-qualified name of the Filesystem service. - FilesystemName = "filesystem.Filesystem" -) - -// These constants are the fully-qualified names of the RPCs defined in this package. They're -// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. -// -// Note that these are different from the fully-qualified method names used by -// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to -// reflection-formatted method names, remove the leading slash and convert the remaining slash to a -// period. -const ( - // FilesystemStatProcedure is the fully-qualified name of the Filesystem's Stat RPC. - FilesystemStatProcedure = "/filesystem.Filesystem/Stat" - // FilesystemMakeDirProcedure is the fully-qualified name of the Filesystem's MakeDir RPC. - FilesystemMakeDirProcedure = "/filesystem.Filesystem/MakeDir" - // FilesystemMoveProcedure is the fully-qualified name of the Filesystem's Move RPC. - FilesystemMoveProcedure = "/filesystem.Filesystem/Move" - // FilesystemListDirProcedure is the fully-qualified name of the Filesystem's ListDir RPC. - FilesystemListDirProcedure = "/filesystem.Filesystem/ListDir" - // FilesystemRemoveProcedure is the fully-qualified name of the Filesystem's Remove RPC. - FilesystemRemoveProcedure = "/filesystem.Filesystem/Remove" - // FilesystemWatchDirProcedure is the fully-qualified name of the Filesystem's WatchDir RPC. - FilesystemWatchDirProcedure = "/filesystem.Filesystem/WatchDir" - // FilesystemCreateWatcherProcedure is the fully-qualified name of the Filesystem's CreateWatcher - // RPC. - FilesystemCreateWatcherProcedure = "/filesystem.Filesystem/CreateWatcher" - // FilesystemGetWatcherEventsProcedure is the fully-qualified name of the Filesystem's - // GetWatcherEvents RPC. - FilesystemGetWatcherEventsProcedure = "/filesystem.Filesystem/GetWatcherEvents" - // FilesystemRemoveWatcherProcedure is the fully-qualified name of the Filesystem's RemoveWatcher - // RPC. - FilesystemRemoveWatcherProcedure = "/filesystem.Filesystem/RemoveWatcher" -) - -// FilesystemClient is a client for the filesystem.Filesystem service. -type FilesystemClient interface { - Stat(context.Context, *connect.Request[spec.StatRequest]) (*connect.Response[spec.StatResponse], error) - MakeDir(context.Context, *connect.Request[spec.MakeDirRequest]) (*connect.Response[spec.MakeDirResponse], error) - Move(context.Context, *connect.Request[spec.MoveRequest]) (*connect.Response[spec.MoveResponse], error) - ListDir(context.Context, *connect.Request[spec.ListDirRequest]) (*connect.Response[spec.ListDirResponse], error) - Remove(context.Context, *connect.Request[spec.RemoveRequest]) (*connect.Response[spec.RemoveResponse], error) - WatchDir(context.Context, *connect.Request[spec.WatchDirRequest]) (*connect.ServerStreamForClient[spec.WatchDirResponse], error) - // Non-streaming versions of WatchDir - CreateWatcher(context.Context, *connect.Request[spec.CreateWatcherRequest]) (*connect.Response[spec.CreateWatcherResponse], error) - GetWatcherEvents(context.Context, *connect.Request[spec.GetWatcherEventsRequest]) (*connect.Response[spec.GetWatcherEventsResponse], error) - RemoveWatcher(context.Context, *connect.Request[spec.RemoveWatcherRequest]) (*connect.Response[spec.RemoveWatcherResponse], error) -} - -// NewFilesystemClient constructs a client for the filesystem.Filesystem service. By default, it -// uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends -// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or -// connect.WithGRPCWeb() options. -// -// The URL supplied here should be the base URL for the Connect or gRPC server (for example, -// http://api.acme.com or https://acme.com/grpc). -func NewFilesystemClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) FilesystemClient { - baseURL = strings.TrimRight(baseURL, "/") - filesystemMethods := spec.File_filesystem_proto.Services().ByName("Filesystem").Methods() - return &filesystemClient{ - stat: connect.NewClient[spec.StatRequest, spec.StatResponse]( - httpClient, - baseURL+FilesystemStatProcedure, - connect.WithSchema(filesystemMethods.ByName("Stat")), - connect.WithClientOptions(opts...), - ), - makeDir: connect.NewClient[spec.MakeDirRequest, spec.MakeDirResponse]( - httpClient, - baseURL+FilesystemMakeDirProcedure, - connect.WithSchema(filesystemMethods.ByName("MakeDir")), - connect.WithClientOptions(opts...), - ), - move: connect.NewClient[spec.MoveRequest, spec.MoveResponse]( - httpClient, - baseURL+FilesystemMoveProcedure, - connect.WithSchema(filesystemMethods.ByName("Move")), - connect.WithClientOptions(opts...), - ), - listDir: connect.NewClient[spec.ListDirRequest, spec.ListDirResponse]( - httpClient, - baseURL+FilesystemListDirProcedure, - connect.WithSchema(filesystemMethods.ByName("ListDir")), - connect.WithClientOptions(opts...), - ), - remove: connect.NewClient[spec.RemoveRequest, spec.RemoveResponse]( - httpClient, - baseURL+FilesystemRemoveProcedure, - connect.WithSchema(filesystemMethods.ByName("Remove")), - connect.WithClientOptions(opts...), - ), - watchDir: connect.NewClient[spec.WatchDirRequest, spec.WatchDirResponse]( - httpClient, - baseURL+FilesystemWatchDirProcedure, - connect.WithSchema(filesystemMethods.ByName("WatchDir")), - connect.WithClientOptions(opts...), - ), - createWatcher: connect.NewClient[spec.CreateWatcherRequest, spec.CreateWatcherResponse]( - httpClient, - baseURL+FilesystemCreateWatcherProcedure, - connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), - connect.WithClientOptions(opts...), - ), - getWatcherEvents: connect.NewClient[spec.GetWatcherEventsRequest, spec.GetWatcherEventsResponse]( - httpClient, - baseURL+FilesystemGetWatcherEventsProcedure, - connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), - connect.WithClientOptions(opts...), - ), - removeWatcher: connect.NewClient[spec.RemoveWatcherRequest, spec.RemoveWatcherResponse]( - httpClient, - baseURL+FilesystemRemoveWatcherProcedure, - connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), - connect.WithClientOptions(opts...), - ), - } -} - -// filesystemClient implements FilesystemClient. -type filesystemClient struct { - stat *connect.Client[spec.StatRequest, spec.StatResponse] - makeDir *connect.Client[spec.MakeDirRequest, spec.MakeDirResponse] - move *connect.Client[spec.MoveRequest, spec.MoveResponse] - listDir *connect.Client[spec.ListDirRequest, spec.ListDirResponse] - remove *connect.Client[spec.RemoveRequest, spec.RemoveResponse] - watchDir *connect.Client[spec.WatchDirRequest, spec.WatchDirResponse] - createWatcher *connect.Client[spec.CreateWatcherRequest, spec.CreateWatcherResponse] - getWatcherEvents *connect.Client[spec.GetWatcherEventsRequest, spec.GetWatcherEventsResponse] - removeWatcher *connect.Client[spec.RemoveWatcherRequest, spec.RemoveWatcherResponse] -} - -// Stat calls filesystem.Filesystem.Stat. -func (c *filesystemClient) Stat(ctx context.Context, req *connect.Request[spec.StatRequest]) (*connect.Response[spec.StatResponse], error) { - return c.stat.CallUnary(ctx, req) -} - -// MakeDir calls filesystem.Filesystem.MakeDir. -func (c *filesystemClient) MakeDir(ctx context.Context, req *connect.Request[spec.MakeDirRequest]) (*connect.Response[spec.MakeDirResponse], error) { - return c.makeDir.CallUnary(ctx, req) -} - -// Move calls filesystem.Filesystem.Move. -func (c *filesystemClient) Move(ctx context.Context, req *connect.Request[spec.MoveRequest]) (*connect.Response[spec.MoveResponse], error) { - return c.move.CallUnary(ctx, req) -} - -// ListDir calls filesystem.Filesystem.ListDir. -func (c *filesystemClient) ListDir(ctx context.Context, req *connect.Request[spec.ListDirRequest]) (*connect.Response[spec.ListDirResponse], error) { - return c.listDir.CallUnary(ctx, req) -} - -// Remove calls filesystem.Filesystem.Remove. -func (c *filesystemClient) Remove(ctx context.Context, req *connect.Request[spec.RemoveRequest]) (*connect.Response[spec.RemoveResponse], error) { - return c.remove.CallUnary(ctx, req) -} - -// WatchDir calls filesystem.Filesystem.WatchDir. -func (c *filesystemClient) WatchDir(ctx context.Context, req *connect.Request[spec.WatchDirRequest]) (*connect.ServerStreamForClient[spec.WatchDirResponse], error) { - return c.watchDir.CallServerStream(ctx, req) -} - -// CreateWatcher calls filesystem.Filesystem.CreateWatcher. -func (c *filesystemClient) CreateWatcher(ctx context.Context, req *connect.Request[spec.CreateWatcherRequest]) (*connect.Response[spec.CreateWatcherResponse], error) { - return c.createWatcher.CallUnary(ctx, req) -} - -// GetWatcherEvents calls filesystem.Filesystem.GetWatcherEvents. -func (c *filesystemClient) GetWatcherEvents(ctx context.Context, req *connect.Request[spec.GetWatcherEventsRequest]) (*connect.Response[spec.GetWatcherEventsResponse], error) { - return c.getWatcherEvents.CallUnary(ctx, req) -} - -// RemoveWatcher calls filesystem.Filesystem.RemoveWatcher. -func (c *filesystemClient) RemoveWatcher(ctx context.Context, req *connect.Request[spec.RemoveWatcherRequest]) (*connect.Response[spec.RemoveWatcherResponse], error) { - return c.removeWatcher.CallUnary(ctx, req) -} - -// FilesystemHandler is an implementation of the filesystem.Filesystem service. -type FilesystemHandler interface { - Stat(context.Context, *connect.Request[spec.StatRequest]) (*connect.Response[spec.StatResponse], error) - MakeDir(context.Context, *connect.Request[spec.MakeDirRequest]) (*connect.Response[spec.MakeDirResponse], error) - Move(context.Context, *connect.Request[spec.MoveRequest]) (*connect.Response[spec.MoveResponse], error) - ListDir(context.Context, *connect.Request[spec.ListDirRequest]) (*connect.Response[spec.ListDirResponse], error) - Remove(context.Context, *connect.Request[spec.RemoveRequest]) (*connect.Response[spec.RemoveResponse], error) - WatchDir(context.Context, *connect.Request[spec.WatchDirRequest], *connect.ServerStream[spec.WatchDirResponse]) error - // Non-streaming versions of WatchDir - CreateWatcher(context.Context, *connect.Request[spec.CreateWatcherRequest]) (*connect.Response[spec.CreateWatcherResponse], error) - GetWatcherEvents(context.Context, *connect.Request[spec.GetWatcherEventsRequest]) (*connect.Response[spec.GetWatcherEventsResponse], error) - RemoveWatcher(context.Context, *connect.Request[spec.RemoveWatcherRequest]) (*connect.Response[spec.RemoveWatcherResponse], error) -} - -// NewFilesystemHandler builds an HTTP handler from the service implementation. It returns the path -// on which to mount the handler and the handler itself. -// -// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf -// and JSON codecs. They also support gzip compression. -func NewFilesystemHandler(svc FilesystemHandler, opts ...connect.HandlerOption) (string, http.Handler) { - filesystemMethods := spec.File_filesystem_proto.Services().ByName("Filesystem").Methods() - filesystemStatHandler := connect.NewUnaryHandler( - FilesystemStatProcedure, - svc.Stat, - connect.WithSchema(filesystemMethods.ByName("Stat")), - connect.WithHandlerOptions(opts...), - ) - filesystemMakeDirHandler := connect.NewUnaryHandler( - FilesystemMakeDirProcedure, - svc.MakeDir, - connect.WithSchema(filesystemMethods.ByName("MakeDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemMoveHandler := connect.NewUnaryHandler( - FilesystemMoveProcedure, - svc.Move, - connect.WithSchema(filesystemMethods.ByName("Move")), - connect.WithHandlerOptions(opts...), - ) - filesystemListDirHandler := connect.NewUnaryHandler( - FilesystemListDirProcedure, - svc.ListDir, - connect.WithSchema(filesystemMethods.ByName("ListDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemRemoveHandler := connect.NewUnaryHandler( - FilesystemRemoveProcedure, - svc.Remove, - connect.WithSchema(filesystemMethods.ByName("Remove")), - connect.WithHandlerOptions(opts...), - ) - filesystemWatchDirHandler := connect.NewServerStreamHandler( - FilesystemWatchDirProcedure, - svc.WatchDir, - connect.WithSchema(filesystemMethods.ByName("WatchDir")), - connect.WithHandlerOptions(opts...), - ) - filesystemCreateWatcherHandler := connect.NewUnaryHandler( - FilesystemCreateWatcherProcedure, - svc.CreateWatcher, - connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), - connect.WithHandlerOptions(opts...), - ) - filesystemGetWatcherEventsHandler := connect.NewUnaryHandler( - FilesystemGetWatcherEventsProcedure, - svc.GetWatcherEvents, - connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), - connect.WithHandlerOptions(opts...), - ) - filesystemRemoveWatcherHandler := connect.NewUnaryHandler( - FilesystemRemoveWatcherProcedure, - svc.RemoveWatcher, - connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), - connect.WithHandlerOptions(opts...), - ) - return "/filesystem.Filesystem/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case FilesystemStatProcedure: - filesystemStatHandler.ServeHTTP(w, r) - case FilesystemMakeDirProcedure: - filesystemMakeDirHandler.ServeHTTP(w, r) - case FilesystemMoveProcedure: - filesystemMoveHandler.ServeHTTP(w, r) - case FilesystemListDirProcedure: - filesystemListDirHandler.ServeHTTP(w, r) - case FilesystemRemoveProcedure: - filesystemRemoveHandler.ServeHTTP(w, r) - case FilesystemWatchDirProcedure: - filesystemWatchDirHandler.ServeHTTP(w, r) - case FilesystemCreateWatcherProcedure: - filesystemCreateWatcherHandler.ServeHTTP(w, r) - case FilesystemGetWatcherEventsProcedure: - filesystemGetWatcherEventsHandler.ServeHTTP(w, r) - case FilesystemRemoveWatcherProcedure: - filesystemRemoveWatcherHandler.ServeHTTP(w, r) - default: - http.NotFound(w, r) - } - }) -} - -// UnimplementedFilesystemHandler returns CodeUnimplemented from all methods. -type UnimplementedFilesystemHandler struct{} - -func (UnimplementedFilesystemHandler) Stat(context.Context, *connect.Request[spec.StatRequest]) (*connect.Response[spec.StatResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Stat is not implemented")) -} - -func (UnimplementedFilesystemHandler) MakeDir(context.Context, *connect.Request[spec.MakeDirRequest]) (*connect.Response[spec.MakeDirResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.MakeDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) Move(context.Context, *connect.Request[spec.MoveRequest]) (*connect.Response[spec.MoveResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Move is not implemented")) -} - -func (UnimplementedFilesystemHandler) ListDir(context.Context, *connect.Request[spec.ListDirRequest]) (*connect.Response[spec.ListDirResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.ListDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) Remove(context.Context, *connect.Request[spec.RemoveRequest]) (*connect.Response[spec.RemoveResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Remove is not implemented")) -} - -func (UnimplementedFilesystemHandler) WatchDir(context.Context, *connect.Request[spec.WatchDirRequest], *connect.ServerStream[spec.WatchDirResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.WatchDir is not implemented")) -} - -func (UnimplementedFilesystemHandler) CreateWatcher(context.Context, *connect.Request[spec.CreateWatcherRequest]) (*connect.Response[spec.CreateWatcherResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.CreateWatcher is not implemented")) -} - -func (UnimplementedFilesystemHandler) GetWatcherEvents(context.Context, *connect.Request[spec.GetWatcherEventsRequest]) (*connect.Response[spec.GetWatcherEventsResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.GetWatcherEvents is not implemented")) -} - -func (UnimplementedFilesystemHandler) RemoveWatcher(context.Context, *connect.Request[spec.RemoveWatcherRequest]) (*connect.Response[spec.RemoveWatcherResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.RemoveWatcher is not implemented")) -} diff --git a/envd/internal/services/spec/specconnect/process.connect.go b/envd/internal/services/spec/specconnect/process.connect.go deleted file mode 100644 index 57f49d5..0000000 --- a/envd/internal/services/spec/specconnect/process.connect.go +++ /dev/null @@ -1,312 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-connect-go. DO NOT EDIT. -// -// Source: process.proto - -package specconnect - -import ( - connect "connectrpc.com/connect" - context "context" - errors "errors" - spec "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec" - http "net/http" - strings "strings" -) - -// This is a compile-time assertion to ensure that this generated file and the connect package are -// compatible. If you get a compiler error that this constant is not defined, this code was -// generated with a version of connect newer than the one compiled into your binary. You can fix the -// problem by either regenerating this code with an older version of connect or updating the connect -// version compiled into your binary. -const _ = connect.IsAtLeastVersion1_13_0 - -const ( - // ProcessName is the fully-qualified name of the Process service. - ProcessName = "process.Process" -) - -// These constants are the fully-qualified names of the RPCs defined in this package. They're -// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. -// -// Note that these are different from the fully-qualified method names used by -// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to -// reflection-formatted method names, remove the leading slash and convert the remaining slash to a -// period. -const ( - // ProcessListProcedure is the fully-qualified name of the Process's List RPC. - ProcessListProcedure = "/process.Process/List" - // ProcessConnectProcedure is the fully-qualified name of the Process's Connect RPC. - ProcessConnectProcedure = "/process.Process/Connect" - // ProcessStartProcedure is the fully-qualified name of the Process's Start RPC. - ProcessStartProcedure = "/process.Process/Start" - // ProcessUpdateProcedure is the fully-qualified name of the Process's Update RPC. - ProcessUpdateProcedure = "/process.Process/Update" - // ProcessStreamInputProcedure is the fully-qualified name of the Process's StreamInput RPC. - ProcessStreamInputProcedure = "/process.Process/StreamInput" - // ProcessSendInputProcedure is the fully-qualified name of the Process's SendInput RPC. - ProcessSendInputProcedure = "/process.Process/SendInput" - // ProcessSendSignalProcedure is the fully-qualified name of the Process's SendSignal RPC. - ProcessSendSignalProcedure = "/process.Process/SendSignal" - // ProcessCloseStdinProcedure is the fully-qualified name of the Process's CloseStdin RPC. - ProcessCloseStdinProcedure = "/process.Process/CloseStdin" -) - -// ProcessClient is a client for the process.Process service. -type ProcessClient interface { - List(context.Context, *connect.Request[spec.ListRequest]) (*connect.Response[spec.ListResponse], error) - Connect(context.Context, *connect.Request[spec.ConnectRequest]) (*connect.ServerStreamForClient[spec.ConnectResponse], error) - Start(context.Context, *connect.Request[spec.StartRequest]) (*connect.ServerStreamForClient[spec.StartResponse], error) - Update(context.Context, *connect.Request[spec.UpdateRequest]) (*connect.Response[spec.UpdateResponse], error) - // Client input stream ensures ordering of messages - StreamInput(context.Context) *connect.ClientStreamForClient[spec.StreamInputRequest, spec.StreamInputResponse] - SendInput(context.Context, *connect.Request[spec.SendInputRequest]) (*connect.Response[spec.SendInputResponse], error) - SendSignal(context.Context, *connect.Request[spec.SendSignalRequest]) (*connect.Response[spec.SendSignalResponse], error) - // Close stdin to signal EOF to the process. - // Only works for non-PTY processes. For PTY, send Ctrl+D (0x04) instead. - CloseStdin(context.Context, *connect.Request[spec.CloseStdinRequest]) (*connect.Response[spec.CloseStdinResponse], error) -} - -// NewProcessClient constructs a client for the process.Process service. By default, it uses the -// Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends -// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or -// connect.WithGRPCWeb() options. -// -// The URL supplied here should be the base URL for the Connect or gRPC server (for example, -// http://api.acme.com or https://acme.com/grpc). -func NewProcessClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) ProcessClient { - baseURL = strings.TrimRight(baseURL, "/") - processMethods := spec.File_process_proto.Services().ByName("Process").Methods() - return &processClient{ - list: connect.NewClient[spec.ListRequest, spec.ListResponse]( - httpClient, - baseURL+ProcessListProcedure, - connect.WithSchema(processMethods.ByName("List")), - connect.WithClientOptions(opts...), - ), - connect: connect.NewClient[spec.ConnectRequest, spec.ConnectResponse]( - httpClient, - baseURL+ProcessConnectProcedure, - connect.WithSchema(processMethods.ByName("Connect")), - connect.WithClientOptions(opts...), - ), - start: connect.NewClient[spec.StartRequest, spec.StartResponse]( - httpClient, - baseURL+ProcessStartProcedure, - connect.WithSchema(processMethods.ByName("Start")), - connect.WithClientOptions(opts...), - ), - update: connect.NewClient[spec.UpdateRequest, spec.UpdateResponse]( - httpClient, - baseURL+ProcessUpdateProcedure, - connect.WithSchema(processMethods.ByName("Update")), - connect.WithClientOptions(opts...), - ), - streamInput: connect.NewClient[spec.StreamInputRequest, spec.StreamInputResponse]( - httpClient, - baseURL+ProcessStreamInputProcedure, - connect.WithSchema(processMethods.ByName("StreamInput")), - connect.WithClientOptions(opts...), - ), - sendInput: connect.NewClient[spec.SendInputRequest, spec.SendInputResponse]( - httpClient, - baseURL+ProcessSendInputProcedure, - connect.WithSchema(processMethods.ByName("SendInput")), - connect.WithClientOptions(opts...), - ), - sendSignal: connect.NewClient[spec.SendSignalRequest, spec.SendSignalResponse]( - httpClient, - baseURL+ProcessSendSignalProcedure, - connect.WithSchema(processMethods.ByName("SendSignal")), - connect.WithClientOptions(opts...), - ), - closeStdin: connect.NewClient[spec.CloseStdinRequest, spec.CloseStdinResponse]( - httpClient, - baseURL+ProcessCloseStdinProcedure, - connect.WithSchema(processMethods.ByName("CloseStdin")), - connect.WithClientOptions(opts...), - ), - } -} - -// processClient implements ProcessClient. -type processClient struct { - list *connect.Client[spec.ListRequest, spec.ListResponse] - connect *connect.Client[spec.ConnectRequest, spec.ConnectResponse] - start *connect.Client[spec.StartRequest, spec.StartResponse] - update *connect.Client[spec.UpdateRequest, spec.UpdateResponse] - streamInput *connect.Client[spec.StreamInputRequest, spec.StreamInputResponse] - sendInput *connect.Client[spec.SendInputRequest, spec.SendInputResponse] - sendSignal *connect.Client[spec.SendSignalRequest, spec.SendSignalResponse] - closeStdin *connect.Client[spec.CloseStdinRequest, spec.CloseStdinResponse] -} - -// List calls process.Process.List. -func (c *processClient) List(ctx context.Context, req *connect.Request[spec.ListRequest]) (*connect.Response[spec.ListResponse], error) { - return c.list.CallUnary(ctx, req) -} - -// Connect calls process.Process.Connect. -func (c *processClient) Connect(ctx context.Context, req *connect.Request[spec.ConnectRequest]) (*connect.ServerStreamForClient[spec.ConnectResponse], error) { - return c.connect.CallServerStream(ctx, req) -} - -// Start calls process.Process.Start. -func (c *processClient) Start(ctx context.Context, req *connect.Request[spec.StartRequest]) (*connect.ServerStreamForClient[spec.StartResponse], error) { - return c.start.CallServerStream(ctx, req) -} - -// Update calls process.Process.Update. -func (c *processClient) Update(ctx context.Context, req *connect.Request[spec.UpdateRequest]) (*connect.Response[spec.UpdateResponse], error) { - return c.update.CallUnary(ctx, req) -} - -// StreamInput calls process.Process.StreamInput. -func (c *processClient) StreamInput(ctx context.Context) *connect.ClientStreamForClient[spec.StreamInputRequest, spec.StreamInputResponse] { - return c.streamInput.CallClientStream(ctx) -} - -// SendInput calls process.Process.SendInput. -func (c *processClient) SendInput(ctx context.Context, req *connect.Request[spec.SendInputRequest]) (*connect.Response[spec.SendInputResponse], error) { - return c.sendInput.CallUnary(ctx, req) -} - -// SendSignal calls process.Process.SendSignal. -func (c *processClient) SendSignal(ctx context.Context, req *connect.Request[spec.SendSignalRequest]) (*connect.Response[spec.SendSignalResponse], error) { - return c.sendSignal.CallUnary(ctx, req) -} - -// CloseStdin calls process.Process.CloseStdin. -func (c *processClient) CloseStdin(ctx context.Context, req *connect.Request[spec.CloseStdinRequest]) (*connect.Response[spec.CloseStdinResponse], error) { - return c.closeStdin.CallUnary(ctx, req) -} - -// ProcessHandler is an implementation of the process.Process service. -type ProcessHandler interface { - List(context.Context, *connect.Request[spec.ListRequest]) (*connect.Response[spec.ListResponse], error) - Connect(context.Context, *connect.Request[spec.ConnectRequest], *connect.ServerStream[spec.ConnectResponse]) error - Start(context.Context, *connect.Request[spec.StartRequest], *connect.ServerStream[spec.StartResponse]) error - Update(context.Context, *connect.Request[spec.UpdateRequest]) (*connect.Response[spec.UpdateResponse], error) - // Client input stream ensures ordering of messages - StreamInput(context.Context, *connect.ClientStream[spec.StreamInputRequest]) (*connect.Response[spec.StreamInputResponse], error) - SendInput(context.Context, *connect.Request[spec.SendInputRequest]) (*connect.Response[spec.SendInputResponse], error) - SendSignal(context.Context, *connect.Request[spec.SendSignalRequest]) (*connect.Response[spec.SendSignalResponse], error) - // Close stdin to signal EOF to the process. - // Only works for non-PTY processes. For PTY, send Ctrl+D (0x04) instead. - CloseStdin(context.Context, *connect.Request[spec.CloseStdinRequest]) (*connect.Response[spec.CloseStdinResponse], error) -} - -// NewProcessHandler builds an HTTP handler from the service implementation. It returns the path on -// which to mount the handler and the handler itself. -// -// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf -// and JSON codecs. They also support gzip compression. -func NewProcessHandler(svc ProcessHandler, opts ...connect.HandlerOption) (string, http.Handler) { - processMethods := spec.File_process_proto.Services().ByName("Process").Methods() - processListHandler := connect.NewUnaryHandler( - ProcessListProcedure, - svc.List, - connect.WithSchema(processMethods.ByName("List")), - connect.WithHandlerOptions(opts...), - ) - processConnectHandler := connect.NewServerStreamHandler( - ProcessConnectProcedure, - svc.Connect, - connect.WithSchema(processMethods.ByName("Connect")), - connect.WithHandlerOptions(opts...), - ) - processStartHandler := connect.NewServerStreamHandler( - ProcessStartProcedure, - svc.Start, - connect.WithSchema(processMethods.ByName("Start")), - connect.WithHandlerOptions(opts...), - ) - processUpdateHandler := connect.NewUnaryHandler( - ProcessUpdateProcedure, - svc.Update, - connect.WithSchema(processMethods.ByName("Update")), - connect.WithHandlerOptions(opts...), - ) - processStreamInputHandler := connect.NewClientStreamHandler( - ProcessStreamInputProcedure, - svc.StreamInput, - connect.WithSchema(processMethods.ByName("StreamInput")), - connect.WithHandlerOptions(opts...), - ) - processSendInputHandler := connect.NewUnaryHandler( - ProcessSendInputProcedure, - svc.SendInput, - connect.WithSchema(processMethods.ByName("SendInput")), - connect.WithHandlerOptions(opts...), - ) - processSendSignalHandler := connect.NewUnaryHandler( - ProcessSendSignalProcedure, - svc.SendSignal, - connect.WithSchema(processMethods.ByName("SendSignal")), - connect.WithHandlerOptions(opts...), - ) - processCloseStdinHandler := connect.NewUnaryHandler( - ProcessCloseStdinProcedure, - svc.CloseStdin, - connect.WithSchema(processMethods.ByName("CloseStdin")), - connect.WithHandlerOptions(opts...), - ) - return "/process.Process/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case ProcessListProcedure: - processListHandler.ServeHTTP(w, r) - case ProcessConnectProcedure: - processConnectHandler.ServeHTTP(w, r) - case ProcessStartProcedure: - processStartHandler.ServeHTTP(w, r) - case ProcessUpdateProcedure: - processUpdateHandler.ServeHTTP(w, r) - case ProcessStreamInputProcedure: - processStreamInputHandler.ServeHTTP(w, r) - case ProcessSendInputProcedure: - processSendInputHandler.ServeHTTP(w, r) - case ProcessSendSignalProcedure: - processSendSignalHandler.ServeHTTP(w, r) - case ProcessCloseStdinProcedure: - processCloseStdinHandler.ServeHTTP(w, r) - default: - http.NotFound(w, r) - } - }) -} - -// UnimplementedProcessHandler returns CodeUnimplemented from all methods. -type UnimplementedProcessHandler struct{} - -func (UnimplementedProcessHandler) List(context.Context, *connect.Request[spec.ListRequest]) (*connect.Response[spec.ListResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.List is not implemented")) -} - -func (UnimplementedProcessHandler) Connect(context.Context, *connect.Request[spec.ConnectRequest], *connect.ServerStream[spec.ConnectResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Connect is not implemented")) -} - -func (UnimplementedProcessHandler) Start(context.Context, *connect.Request[spec.StartRequest], *connect.ServerStream[spec.StartResponse]) error { - return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Start is not implemented")) -} - -func (UnimplementedProcessHandler) Update(context.Context, *connect.Request[spec.UpdateRequest]) (*connect.Response[spec.UpdateResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Update is not implemented")) -} - -func (UnimplementedProcessHandler) StreamInput(context.Context, *connect.ClientStream[spec.StreamInputRequest]) (*connect.Response[spec.StreamInputResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.StreamInput is not implemented")) -} - -func (UnimplementedProcessHandler) SendInput(context.Context, *connect.Request[spec.SendInputRequest]) (*connect.Response[spec.SendInputResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendInput is not implemented")) -} - -func (UnimplementedProcessHandler) SendSignal(context.Context, *connect.Request[spec.SendSignalRequest]) (*connect.Response[spec.SendSignalResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendSignal is not implemented")) -} - -func (UnimplementedProcessHandler) CloseStdin(context.Context, *connect.Request[spec.CloseStdinRequest]) (*connect.Response[spec.CloseStdinResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.CloseStdin is not implemented")) -} diff --git a/envd/internal/shared/filesystem/entry.go b/envd/internal/shared/filesystem/entry.go deleted file mode 100644 index fb1cc2d..0000000 --- a/envd/internal/shared/filesystem/entry.go +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "os" - "path/filepath" - "syscall" - "time" -) - -func GetEntryFromPath(path string) (EntryInfo, error) { - fileInfo, err := os.Lstat(path) - if err != nil { - return EntryInfo{}, err - } - - return GetEntryInfo(path, fileInfo), nil -} - -func GetEntryInfo(path string, fileInfo os.FileInfo) EntryInfo { - fileMode := fileInfo.Mode() - - var symlinkTarget *string - if fileMode&os.ModeSymlink != 0 { - // If we can't resolve the symlink target, we won't set the target - target := followSymlink(path) - symlinkTarget = &target - } - - var entryType FileType - var mode os.FileMode - - if symlinkTarget == nil { - entryType = getEntryType(fileMode) - mode = fileMode.Perm() - } else { - // If it's a symlink, we need to determine the type of the target - targetInfo, err := os.Stat(*symlinkTarget) - if err != nil { - entryType = UnknownFileType - } else { - entryType = getEntryType(targetInfo.Mode()) - mode = targetInfo.Mode().Perm() - } - } - - entry := EntryInfo{ - Name: fileInfo.Name(), - Path: path, - Type: entryType, - Size: fileInfo.Size(), - Mode: mode, - Permissions: fileMode.String(), - ModifiedTime: fileInfo.ModTime(), - SymlinkTarget: symlinkTarget, - } - - if base := getBase(fileInfo.Sys()); base != nil { - entry.AccessedTime = toTimestamp(base.Atim) - entry.CreatedTime = toTimestamp(base.Ctim) - entry.ModifiedTime = toTimestamp(base.Mtim) - entry.UID = base.Uid - entry.GID = base.Gid - } else if !fileInfo.ModTime().IsZero() { - entry.ModifiedTime = fileInfo.ModTime() - } - - return entry -} - -// getEntryType determines the type of file entry based on its mode and path. -// If the file is a symlink, it follows the symlink to determine the actual type. -func getEntryType(mode os.FileMode) FileType { - switch { - case mode.IsRegular(): - return FileFileType - case mode.IsDir(): - return DirectoryFileType - case mode&os.ModeSymlink == os.ModeSymlink: - return SymlinkFileType - default: - return UnknownFileType - } -} - -// followSymlink resolves a symbolic link to its target path. -func followSymlink(path string) string { - // Resolve symlinks - resolvedPath, err := filepath.EvalSymlinks(path) - if err != nil { - return path - } - - return resolvedPath -} - -func toTimestamp(spec syscall.Timespec) time.Time { - if spec.Sec == 0 && spec.Nsec == 0 { - return time.Time{} - } - - return time.Unix(spec.Sec, spec.Nsec) -} - -func getBase(sys any) *syscall.Stat_t { - st, _ := sys.(*syscall.Stat_t) - - return st -} diff --git a/envd/internal/shared/filesystem/entry_test.go b/envd/internal/shared/filesystem/entry_test.go deleted file mode 100644 index 537d6e0..0000000 --- a/envd/internal/shared/filesystem/entry_test.go +++ /dev/null @@ -1,266 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "os" - "os/user" - "path/filepath" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetEntryType(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - - // Create test files - regularFile := filepath.Join(tempDir, "regular.txt") - require.NoError(t, os.WriteFile(regularFile, []byte("test content"), 0o644)) - - testDir := filepath.Join(tempDir, "testdir") - require.NoError(t, os.MkdirAll(testDir, 0o755)) - - symlink := filepath.Join(tempDir, "symlink") - require.NoError(t, os.Symlink(regularFile, symlink)) - - tests := []struct { - name string - path string - expected FileType - }{ - { - name: "regular file", - path: regularFile, - expected: FileFileType, - }, - { - name: "directory", - path: testDir, - expected: DirectoryFileType, - }, - { - name: "symlink to file", - path: symlink, - expected: SymlinkFileType, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - info, err := os.Lstat(tt.path) - require.NoError(t, err) - - result := getEntryType(info.Mode()) - assert.Equal(t, tt.expected, result) - }) - } -} - -func TestEntryInfoFromFileInfo_SymlinkChain(t *testing.T) { - t.Parallel() - - // Base temporary directory. On macOS this lives under /var/folders/… - // which itself is a symlink to /private/var/folders/…. - tempDir := t.TempDir() - - // Create final target - target := filepath.Join(tempDir, "target") - require.NoError(t, os.MkdirAll(target, 0o755)) - - // Create a chain: link1 → link2 → target - link2 := filepath.Join(tempDir, "link2") - require.NoError(t, os.Symlink(target, link2)) - - link1 := filepath.Join(tempDir, "link1") - require.NoError(t, os.Symlink(link2, link1)) - - // run the test - result, err := GetEntryFromPath(link1) - require.NoError(t, err) - - // verify the results - assert.Equal(t, "link1", result.Name) - assert.Equal(t, link1, result.Path) - assert.Equal(t, DirectoryFileType, result.Type) // Should resolve to final target type - assert.Contains(t, result.Permissions, "L") - - // Canonicalize the expected target path to handle macOS symlink indirections - expectedTarget, err := filepath.EvalSymlinks(link1) - require.NoError(t, err) - assert.Equal(t, expectedTarget, *result.SymlinkTarget) -} - -func TestEntryInfoFromFileInfo_DifferentPermissions(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - - testCases := []struct { - name string - permissions os.FileMode - expectedMode os.FileMode - expectedString string - }{ - {"read-only", 0o444, 0o444, "-r--r--r--"}, - {"executable", 0o755, 0o755, "-rwxr-xr-x"}, - {"write-only", 0o200, 0o200, "--w-------"}, - {"no permissions", 0o000, 0o000, "----------"}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - testFile := filepath.Join(tempDir, tc.name+".txt") - require.NoError(t, os.WriteFile(testFile, []byte("test"), tc.permissions)) - - result, err := GetEntryFromPath(testFile) - require.NoError(t, err) - assert.Equal(t, tc.expectedMode, result.Mode) - assert.Equal(t, tc.expectedString, result.Permissions) - }) - } -} - -func TestEntryInfoFromFileInfo_EmptyFile(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - emptyFile := filepath.Join(tempDir, "empty.txt") - require.NoError(t, os.WriteFile(emptyFile, []byte{}, 0o600)) - - result, err := GetEntryFromPath(emptyFile) - require.NoError(t, err) - - assert.Equal(t, "empty.txt", result.Name) - assert.Equal(t, int64(0), result.Size) - assert.Equal(t, os.FileMode(0o600), result.Mode) - assert.Equal(t, FileFileType, result.Type) -} - -func TestEntryInfoFromFileInfo_CyclicSymlink(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - - // Create cyclic symlink - cyclicSymlink := filepath.Join(tempDir, "cyclic") - require.NoError(t, os.Symlink(cyclicSymlink, cyclicSymlink)) - - result, err := GetEntryFromPath(cyclicSymlink) - require.NoError(t, err) - - assert.Equal(t, "cyclic", result.Name) - assert.Equal(t, cyclicSymlink, result.Path) - assert.Equal(t, UnknownFileType, result.Type) - assert.Contains(t, result.Permissions, "L") -} - -func TestEntryInfoFromFileInfo_BrokenSymlink(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - - // Create broken symlink - brokenSymlink := filepath.Join(tempDir, "broken") - require.NoError(t, os.Symlink("/nonexistent", brokenSymlink)) - - result, err := GetEntryFromPath(brokenSymlink) - require.NoError(t, err) - - assert.Equal(t, "broken", result.Name) - assert.Equal(t, brokenSymlink, result.Path) - assert.Equal(t, UnknownFileType, result.Type) - assert.Contains(t, result.Permissions, "L") - // SymlinkTarget might be empty if followSymlink fails -} - -func TestEntryInfoFromFileInfo(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - - // Create a regular file with known content and permissions - testFile := filepath.Join(tempDir, "test.txt") - testContent := []byte("Hello, World!") - require.NoError(t, os.WriteFile(testFile, testContent, 0o644)) - - // Get current user for ownership comparison - currentUser, err := user.Current() - require.NoError(t, err) - - result, err := GetEntryFromPath(testFile) - require.NoError(t, err) - - // Basic assertions - assert.Equal(t, "test.txt", result.Name) - assert.Equal(t, testFile, result.Path) - assert.Equal(t, int64(len(testContent)), result.Size) - assert.Equal(t, FileFileType, result.Type) - assert.Equal(t, os.FileMode(0o644), result.Mode) - assert.Contains(t, result.Permissions, "-rw-r--r--") - assert.Equal(t, currentUser.Uid, strconv.Itoa(int(result.UID))) - assert.Equal(t, currentUser.Gid, strconv.Itoa(int(result.GID))) - assert.NotNil(t, result.ModifiedTime) - assert.Empty(t, result.SymlinkTarget) - - // Check that modified time is reasonable (within last minute) - modTime := result.ModifiedTime - assert.WithinDuration(t, time.Now(), modTime, time.Minute) -} - -func TestEntryInfoFromFileInfo_Directory(t *testing.T) { - t.Parallel() - - tempDir := t.TempDir() - testDir := filepath.Join(tempDir, "testdir") - require.NoError(t, os.MkdirAll(testDir, 0o755)) - - result, err := GetEntryFromPath(testDir) - require.NoError(t, err) - - assert.Equal(t, "testdir", result.Name) - assert.Equal(t, testDir, result.Path) - assert.Equal(t, DirectoryFileType, result.Type) - assert.Equal(t, os.FileMode(0o755), result.Mode) - assert.Equal(t, "drwxr-xr-x", result.Permissions) - assert.Empty(t, result.SymlinkTarget) -} - -func TestEntryInfoFromFileInfo_Symlink(t *testing.T) { - t.Parallel() - - // Base temporary directory. On macOS this lives under /var/folders/… - // which itself is a symlink to /private/var/folders/…. - tempDir := t.TempDir() - - // Create target file - targetFile := filepath.Join(tempDir, "target.txt") - require.NoError(t, os.WriteFile(targetFile, []byte("target content"), 0o644)) - - // Create symlink - symlinkPath := filepath.Join(tempDir, "symlink") - require.NoError(t, os.Symlink(targetFile, symlinkPath)) - - // Use Lstat to get symlink info (not the target) - result, err := GetEntryFromPath(symlinkPath) - require.NoError(t, err) - - assert.Equal(t, "symlink", result.Name) - assert.Equal(t, symlinkPath, result.Path) - assert.Equal(t, FileFileType, result.Type) // Should resolve to target type - assert.Contains(t, result.Permissions, "L") // Should show as symlink in permissions - - // Canonicalize the expected target path to handle macOS /var → /private/var symlink - expectedTarget, err := filepath.EvalSymlinks(symlinkPath) - require.NoError(t, err) - assert.Equal(t, expectedTarget, *result.SymlinkTarget) -} diff --git a/envd/internal/shared/filesystem/model.go b/envd/internal/shared/filesystem/model.go deleted file mode 100644 index 6024cce..0000000 --- a/envd/internal/shared/filesystem/model.go +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package filesystem - -import ( - "os" - "time" -) - -type EntryInfo struct { - Name string - Type FileType - Path string - Size int64 - Mode os.FileMode - Permissions string - UID uint32 - GID uint32 - AccessedTime time.Time - CreatedTime time.Time - ModifiedTime time.Time - SymlinkTarget *string -} - -type FileType int32 - -const ( - UnknownFileType FileType = 0 - FileFileType FileType = 1 - DirectoryFileType FileType = 2 - SymlinkFileType FileType = 3 -) diff --git a/envd/internal/shared/id/id.go b/envd/internal/shared/id/id.go deleted file mode 100644 index 45f68fe..0000000 --- a/envd/internal/shared/id/id.go +++ /dev/null @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package id - -import ( - "errors" - "fmt" - "maps" - "regexp" - "slices" - "strings" - - "github.com/dchest/uniuri" - "github.com/google/uuid" -) - -var ( - caseInsensitiveAlphabet = []byte("abcdefghijklmnopqrstuvwxyz1234567890") - identifierRegex = regexp.MustCompile(`^[a-z0-9-_]+$`) - tagRegex = regexp.MustCompile(`^[a-z0-9-_.]+$`) - sandboxIDRegex = regexp.MustCompile(`^[a-z0-9]+$`) -) - -const ( - DefaultTag = "default" - TagSeparator = ":" - NamespaceSeparator = "/" -) - -func Generate() string { - return uniuri.NewLenChars(uniuri.UUIDLen, caseInsensitiveAlphabet) -} - -// ValidateSandboxID checks that a sandbox ID contains only lowercase alphanumeric characters. -func ValidateSandboxID(sandboxID string) error { - if !sandboxIDRegex.MatchString(sandboxID) { - return fmt.Errorf("invalid sandbox ID: %q", sandboxID) - } - - return nil -} - -func cleanAndValidate(value, name string, re *regexp.Regexp) (string, error) { - cleaned := strings.ToLower(strings.TrimSpace(value)) - if !re.MatchString(cleaned) { - return "", fmt.Errorf("invalid %s: %s", name, value) - } - - return cleaned, nil -} - -func validateTag(tag string) (string, error) { - cleanedTag, err := cleanAndValidate(tag, "tag", tagRegex) - if err != nil { - return "", err - } - - // Prevent tags from being a UUID - _, err = uuid.Parse(cleanedTag) - if err == nil { - return "", errors.New("tag cannot be a UUID") - } - - return cleanedTag, nil -} - -func ValidateAndDeduplicateTags(tags []string) ([]string, error) { - seen := make(map[string]struct{}) - - for _, tag := range tags { - cleanedTag, err := validateTag(tag) - if err != nil { - return nil, fmt.Errorf("invalid tag '%s': %w", tag, err) - } - - seen[cleanedTag] = struct{}{} - } - - return slices.Collect(maps.Keys(seen)), nil -} - -// SplitIdentifier splits "namespace/alias" into its parts. -// Returns nil namespace for bare aliases, pointer for explicit namespace. -func SplitIdentifier(identifier string) (namespace *string, alias string) { - before, after, found := strings.Cut(identifier, NamespaceSeparator) - if !found { - return nil, before - } - - return &before, after -} - -// ParseName parses and validates "namespace/alias:tag" or "alias:tag". -// Returns the cleaned identifier (namespace/alias or alias) and optional tag. -// All components are validated and normalized (lowercase, trimmed). -func ParseName(input string) (identifier string, tag *string, err error) { - input = strings.TrimSpace(input) - - // Extract raw parts - identifierPart, tagPart, hasTag := strings.Cut(input, TagSeparator) - namespacePart, aliasPart := SplitIdentifier(identifierPart) - - // Validate tag - if hasTag { - validated, err := cleanAndValidate(tagPart, "tag", tagRegex) - if err != nil { - return "", nil, err - } - if !strings.EqualFold(validated, DefaultTag) { - tag = &validated - } - } - - // Validate namespace - if namespacePart != nil { - validated, err := cleanAndValidate(*namespacePart, "namespace", identifierRegex) - if err != nil { - return "", nil, err - } - namespacePart = &validated - } - - // Validate alias - aliasPart, err = cleanAndValidate(aliasPart, "template ID", identifierRegex) - if err != nil { - return "", nil, err - } - - // Build identifier - if namespacePart != nil { - identifier = WithNamespace(*namespacePart, aliasPart) - } else { - identifier = aliasPart - } - - return identifier, tag, nil -} - -// WithTag returns the identifier with the given tag appended (e.g. "templateID:tag"). -func WithTag(identifier, tag string) string { - return identifier + TagSeparator + tag -} - -// WithNamespace returns identifier with the given namespace prefix. -func WithNamespace(namespace, alias string) string { - return namespace + NamespaceSeparator + alias -} - -// ExtractAlias returns just the alias portion from an identifier (namespace/alias or alias). -func ExtractAlias(identifier string) string { - _, alias := SplitIdentifier(identifier) - - return alias -} - -// ValidateNamespaceMatchesTeam checks if an explicit namespace in the identifier matches the team's slug. -// Returns an error if the namespace doesn't match. -// If the identifier has no explicit namespace, returns nil (valid). -func ValidateNamespaceMatchesTeam(identifier, teamSlug string) error { - namespace, _ := SplitIdentifier(identifier) - if namespace != nil && *namespace != teamSlug { - return fmt.Errorf("namespace '%s' must match your team '%s'", *namespace, teamSlug) - } - - return nil -} diff --git a/envd/internal/shared/id/id_test.go b/envd/internal/shared/id/id_test.go deleted file mode 100644 index 38de0a7..0000000 --- a/envd/internal/shared/id/id_test.go +++ /dev/null @@ -1,382 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package id - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "git.omukk.dev/wrenn/sandbox/envd/internal/shared/utils" -) - -func TestParseName(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input string - wantIdentifier string - wantTag *string - wantErr bool - }{ - { - name: "bare alias only", - input: "my-template", - wantIdentifier: "my-template", - wantTag: nil, - }, - { - name: "alias with tag", - input: "my-template:v1", - wantIdentifier: "my-template", - wantTag: utils.ToPtr("v1"), - }, - { - name: "namespace and alias", - input: "acme/my-template", - wantIdentifier: "acme/my-template", - wantTag: nil, - }, - { - name: "namespace, alias and tag", - input: "acme/my-template:v1", - wantIdentifier: "acme/my-template", - wantTag: utils.ToPtr("v1"), - }, - { - name: "namespace with hyphens", - input: "my-team/my-template:prod", - wantIdentifier: "my-team/my-template", - wantTag: utils.ToPtr("prod"), - }, - { - name: "default tag normalized to nil", - input: "my-template:default", - wantIdentifier: "my-template", - wantTag: nil, - }, - { - name: "uppercase converted to lowercase", - input: "MyTemplate:Prod", - wantIdentifier: "mytemplate", - wantTag: utils.ToPtr("prod"), - }, - { - name: "whitespace trimmed", - input: " my-template : v1 ", - wantIdentifier: "my-template", - wantTag: utils.ToPtr("v1"), - }, - { - name: "invalid - empty namespace", - input: "/my-template", - wantErr: true, - }, - { - name: "invalid - empty tag after colon", - input: "my-template:", - wantErr: true, - }, - { - name: "invalid - special characters in alias", - input: "my template!", - wantErr: true, - }, - { - name: "invalid - special characters in namespace", - input: "my team!/my-template", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - gotIdentifier, gotTag, err := ParseName(tt.input) - - if tt.wantErr { - require.Error(t, err, "Expected ParseName() to return error, got") - - return - } - - require.NoError(t, err, "Expected ParseName() not to return error, got: %v", err) - assert.Equal(t, tt.wantIdentifier, gotIdentifier, "ParseName() identifier = %v, want %v", gotIdentifier, tt.wantIdentifier) - assert.Equal(t, tt.wantTag, gotTag, "ParseName() tag = %v, want %v", utils.Sprintp(gotTag), utils.Sprintp(tt.wantTag)) - }) - } -} - -func TestWithNamespace(t *testing.T) { - t.Parallel() - - got := WithNamespace("acme", "my-template") - want := "acme/my-template" - assert.Equal(t, want, got, "WithNamespace() = %q, want %q", got, want) -} - -func TestSplitIdentifier(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - identifier string - wantNamespace *string - wantAlias string - }{ - { - name: "bare alias", - identifier: "my-template", - wantNamespace: nil, - wantAlias: "my-template", - }, - { - name: "with namespace", - identifier: "acme/my-template", - wantNamespace: ptrStr("acme"), - wantAlias: "my-template", - }, - { - name: "empty namespace prefix", - identifier: "/my-template", - wantNamespace: ptrStr(""), - wantAlias: "my-template", - }, - { - name: "multiple slashes - only first split", - identifier: "a/b/c", - wantNamespace: ptrStr("a"), - wantAlias: "b/c", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - gotNamespace, gotAlias := SplitIdentifier(tt.identifier) - - if tt.wantNamespace == nil { - assert.Nil(t, gotNamespace) - } else { - require.NotNil(t, gotNamespace) - assert.Equal(t, *tt.wantNamespace, *gotNamespace) - } - - assert.Equal(t, tt.wantAlias, gotAlias) - }) - } -} - -func ptrStr(s string) *string { - return &s -} - -func TestValidateAndDeduplicateTags(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - tags []string - want []string - wantErr bool - }{ - { - name: "single valid tag", - tags: []string{"v1"}, - want: []string{"v1"}, - wantErr: false, - }, - { - name: "multiple unique tags", - tags: []string{"v1", "prod", "latest"}, - want: []string{"v1", "prod", "latest"}, - wantErr: false, - }, - { - name: "duplicate tags deduplicated", - tags: []string{"v1", "V1", "v1"}, - want: []string{"v1"}, - wantErr: false, - }, - { - name: "tags with dots and underscores", - tags: []string{"v1.0", "v1_1"}, - want: []string{"v1.0", "v1_1"}, - wantErr: false, - }, - { - name: "invalid - UUID tag rejected", - tags: []string{"550e8400-e29b-41d4-a716-446655440000"}, - wantErr: true, - }, - { - name: "invalid - special characters", - tags: []string{"v1!", "v2@"}, - wantErr: true, - }, - { - name: "empty list returns empty", - tags: []string{}, - want: []string{}, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := ValidateAndDeduplicateTags(tt.tags) - - if tt.wantErr { - require.Error(t, err) - - return - } - - require.NoError(t, err) - assert.ElementsMatch(t, tt.want, got) - }) - } -} - -func TestValidateSandboxID(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input string - wantErr bool - }{ - { - name: "canonical sandbox ID", - input: "i1a2b3c4d5e6f7g8h9j0k", - wantErr: false, - }, - { - name: "short alphanumeric", - input: "abc123", - wantErr: false, - }, - { - name: "all digits", - input: "1234567890", - wantErr: false, - }, - { - name: "all lowercase letters", - input: "abcdefghijklmnopqrst", - wantErr: false, - }, - { - name: "invalid - empty", - input: "", - wantErr: true, - }, - { - name: "invalid - contains colon (Redis separator)", - input: "abc:def", - wantErr: true, - }, - { - name: "invalid - contains open brace (Redis hash slot)", - input: "abc{def", - wantErr: true, - }, - { - name: "invalid - contains close brace (Redis hash slot)", - input: "abc}def", - wantErr: true, - }, - { - name: "invalid - contains newline", - input: "abc\ndef", - wantErr: true, - }, - { - name: "invalid - contains space", - input: "abc def", - wantErr: true, - }, - { - name: "invalid - contains hyphen", - input: "abc-def", - wantErr: true, - }, - { - name: "invalid - contains uppercase", - input: "abcDEF", - wantErr: true, - }, - { - name: "invalid - contains slash", - input: "abc/def", - wantErr: true, - }, - { - name: "invalid - contains null byte", - input: "abc\x00def", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - err := ValidateSandboxID(tt.input) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestValidateNamespaceMatchesTeam(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - identifier string - teamSlug string - wantErr bool - }{ - { - name: "bare alias - no namespace", - identifier: "my-template", - teamSlug: "acme", - wantErr: false, - }, - { - name: "matching namespace", - identifier: "acme/my-template", - teamSlug: "acme", - wantErr: false, - }, - { - name: "mismatched namespace", - identifier: "other-team/my-template", - teamSlug: "acme", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - err := ValidateNamespaceMatchesTeam(tt.identifier, tt.teamSlug) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} diff --git a/envd/internal/shared/keys/constants.go b/envd/internal/shared/keys/constants.go deleted file mode 100644 index 72e8d68..0000000 --- a/envd/internal/shared/keys/constants.go +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package keys - -const ( - ApiKeyPrefix = "wrn_" - AccessTokenPrefix = "sk_wrn_" -) diff --git a/envd/internal/shared/keys/hashing.go b/envd/internal/shared/keys/hashing.go deleted file mode 100644 index 4826637..0000000 --- a/envd/internal/shared/keys/hashing.go +++ /dev/null @@ -1,7 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -type Hasher interface { - Hash(key []byte) string -} diff --git a/envd/internal/shared/keys/hmac_sha256.go b/envd/internal/shared/keys/hmac_sha256.go deleted file mode 100644 index de7e04c..0000000 --- a/envd/internal/shared/keys/hmac_sha256.go +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" -) - -type HMACSha256Hashing struct { - key []byte -} - -func NewHMACSHA256Hashing(key []byte) *HMACSha256Hashing { - return &HMACSha256Hashing{key: key} -} - -func (h *HMACSha256Hashing) Hash(content []byte) (string, error) { - mac := hmac.New(sha256.New, h.key) - _, err := mac.Write(content) - if err != nil { - return "", err - } - - return hex.EncodeToString(mac.Sum(nil)), nil -} diff --git a/envd/internal/shared/keys/hmac_sha256_test.go b/envd/internal/shared/keys/hmac_sha256_test.go deleted file mode 100644 index 22bd49e..0000000 --- a/envd/internal/shared/keys/hmac_sha256_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestHMACSha256Hashing_ValidHash(t *testing.T) { - t.Parallel() - key := []byte("test-key") - hasher := NewHMACSHA256Hashing(key) - content := []byte("hello world") - expectedHash := "18c4b268f0bbf8471eda56af3e70b1d4613d734dc538b4940b59931c412a1591" - actualHash, err := hasher.Hash(content) - require.NoError(t, err) - - if actualHash != expectedHash { - t.Errorf("expected %s, got %s", expectedHash, actualHash) - } -} - -func TestHMACSha256Hashing_EmptyContent(t *testing.T) { - t.Parallel() - key := []byte("test-key") - hasher := NewHMACSHA256Hashing(key) - content := []byte("") - expectedHash := "2711cc23e9ab1b8a9bc0fe991238da92671624a9ebdaf1c1abec06e7e9a14f9b" - actualHash, err := hasher.Hash(content) - require.NoError(t, err) - - if actualHash != expectedHash { - t.Errorf("expected %s, got %s", expectedHash, actualHash) - } -} - -func TestHMACSha256Hashing_DifferentKey(t *testing.T) { - t.Parallel() - key := []byte("test-key") - hasher := NewHMACSHA256Hashing(key) - differentKeyHasher := NewHMACSHA256Hashing([]byte("different-key")) - content := []byte("hello world") - - hashWithOriginalKey, err := hasher.Hash(content) - require.NoError(t, err) - - hashWithDifferentKey, err := differentKeyHasher.Hash(content) - require.NoError(t, err) - - if hashWithOriginalKey == hashWithDifferentKey { - t.Errorf("hashes with different keys should not match") - } -} - -func TestHMACSha256Hashing_IdenticalResult(t *testing.T) { - t.Parallel() - key := []byte("placeholder-hashing-key") - content := []byte("test content for hashing") - - mac := hmac.New(sha256.New, key) - mac.Write(content) - expectedResult := hex.EncodeToString(mac.Sum(nil)) - - hasher := NewHMACSHA256Hashing(key) - actualResult, err := hasher.Hash(content) - require.NoError(t, err) - - if actualResult != expectedResult { - t.Errorf("expected %s, got %s", expectedResult, actualResult) - } -} diff --git a/envd/internal/shared/keys/key.go b/envd/internal/shared/keys/key.go deleted file mode 100644 index c73c830..0000000 --- a/envd/internal/shared/keys/key.go +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "strings" -) - -const ( - identifierValueSuffixLength = 4 - identifierValuePrefixLength = 2 - - keyLength = 20 -) - -var hasher Hasher = NewSHA256Hashing() - -type Key struct { - PrefixedRawValue string - HashedValue string - Masked MaskedIdentifier -} - -type MaskedIdentifier struct { - Prefix string - ValueLength int - MaskedValuePrefix string - MaskedValueSuffix string -} - -// MaskKey returns identifier masking properties in accordance to the OpenAPI response spec -func MaskKey(prefix, value string) (MaskedIdentifier, error) { - valueLength := len(value) - - suffixOffset := valueLength - identifierValueSuffixLength - prefixOffset := identifierValuePrefixLength - - if suffixOffset < 0 { - return MaskedIdentifier{}, fmt.Errorf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength) - } - - if suffixOffset == 0 { - return MaskedIdentifier{}, fmt.Errorf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength) - } - - // cap prefixOffset by suffixOffset to prevent overlap with the suffix. - if prefixOffset > suffixOffset { - prefixOffset = suffixOffset - } - - maskPrefix := value[:prefixOffset] - maskSuffix := value[suffixOffset:] - - maskedIdentifierProperties := MaskedIdentifier{ - Prefix: prefix, - ValueLength: valueLength, - MaskedValuePrefix: maskPrefix, - MaskedValueSuffix: maskSuffix, - } - - return maskedIdentifierProperties, nil -} - -func GenerateKey(prefix string) (Key, error) { - keyBytes := make([]byte, keyLength) - - _, err := rand.Read(keyBytes) - if err != nil { - return Key{}, err - } - - generatedIdentifier := hex.EncodeToString(keyBytes) - - mask, err := MaskKey(prefix, generatedIdentifier) - if err != nil { - return Key{}, err - } - - return Key{ - PrefixedRawValue: prefix + generatedIdentifier, - HashedValue: hasher.Hash(keyBytes), - Masked: mask, - }, nil -} - -func VerifyKey(prefix string, key string) (string, error) { - if !strings.HasPrefix(key, prefix) { - return "", fmt.Errorf("invalid key prefix") - } - - keyValue := key[len(prefix):] - keyBytes, err := hex.DecodeString(keyValue) - if err != nil { - return "", fmt.Errorf("invalid key") - } - - return hasher.Hash(keyBytes), nil -} diff --git a/envd/internal/shared/keys/key_test.go b/envd/internal/shared/keys/key_test.go deleted file mode 100644 index 50dcfb2..0000000 --- a/envd/internal/shared/keys/key_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "fmt" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMaskKey(t *testing.T) { - t.Parallel() - t.Run("succeeds: value longer than suffix length", func(t *testing.T) { - t.Parallel() - masked, err := MaskKey("test_", "1234567890") - require.NoError(t, err) - assert.Equal(t, "test_", masked.Prefix) - assert.Equal(t, "12", masked.MaskedValuePrefix) - assert.Equal(t, "7890", masked.MaskedValueSuffix) - }) - - t.Run("succeeds: empty prefix, value longer than suffix length", func(t *testing.T) { - t.Parallel() - masked, err := MaskKey("", "1234567890") - require.NoError(t, err) - assert.Empty(t, masked.Prefix) - assert.Equal(t, "12", masked.MaskedValuePrefix) - assert.Equal(t, "7890", masked.MaskedValueSuffix) - }) - - t.Run("error: value length less than suffix length", func(t *testing.T) { - t.Parallel() - _, err := MaskKey("test", "123") - require.Error(t, err) - assert.EqualError(t, err, fmt.Sprintf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength)) - }) - - t.Run("error: value length equals suffix length", func(t *testing.T) { - t.Parallel() - _, err := MaskKey("test", "1234") - require.Error(t, err) - assert.EqualError(t, err, fmt.Sprintf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength)) - }) -} - -func TestGenerateKey(t *testing.T) { - t.Parallel() - keyLength := 40 - - t.Run("succeeds", func(t *testing.T) { - t.Parallel() - key, err := GenerateKey("test_") - require.NoError(t, err) - assert.Regexp(t, "^test_.*", key.PrefixedRawValue) - assert.Equal(t, "test_", key.Masked.Prefix) - assert.Equal(t, keyLength, key.Masked.ValueLength) - assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValuePrefixLength)+"}$", key.Masked.MaskedValuePrefix) - assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValueSuffixLength)+"}$", key.Masked.MaskedValueSuffix) - assert.Regexp(t, "^\\$sha256\\$.*", key.HashedValue) - }) - - t.Run("no prefix", func(t *testing.T) { - t.Parallel() - key, err := GenerateKey("") - require.NoError(t, err) - assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(keyLength)+"}$", key.PrefixedRawValue) - assert.Empty(t, key.Masked.Prefix) - assert.Equal(t, keyLength, key.Masked.ValueLength) - assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValuePrefixLength)+"}$", key.Masked.MaskedValuePrefix) - assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValueSuffixLength)+"}$", key.Masked.MaskedValueSuffix) - assert.Regexp(t, "^\\$sha256\\$.*", key.HashedValue) - }) -} - -func TestGetMaskedIdentifierProperties(t *testing.T) { - t.Parallel() - type testCase struct { - name string - prefix string - value string - expectedResult MaskedIdentifier - expectedErrString string - } - - testCases := []testCase{ - // --- ERROR CASES (value's length <= identifierValueSuffixLength) --- - { - name: "error: value length < suffix length (3 vs 4)", - prefix: "pk_", - value: "abc", - expectedResult: MaskedIdentifier{}, - expectedErrString: fmt.Sprintf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength), - }, - { - name: "error: value length == suffix length (4 vs 4)", - prefix: "sk_", - value: "abcd", - expectedResult: MaskedIdentifier{}, - expectedErrString: fmt.Sprintf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength), - }, - { - name: "error: value length < suffix length (0 vs 4, empty value)", - prefix: "err_", - value: "", - expectedResult: MaskedIdentifier{}, - expectedErrString: fmt.Sprintf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength), - }, - - // --- SUCCESS CASES (value's length > identifierValueSuffixLength) --- - { - name: "success: value long (10), prefix val len fully used", - prefix: "pk_", - value: "abcdefghij", - expectedResult: MaskedIdentifier{ - Prefix: "pk_", - ValueLength: 10, - MaskedValuePrefix: "ab", - MaskedValueSuffix: "ghij", - }, - }, - { - name: "success: value medium (5), prefix val len truncated by overlap", - prefix: "", - value: "abcde", - expectedResult: MaskedIdentifier{ - Prefix: "", - ValueLength: 5, - MaskedValuePrefix: "a", - MaskedValueSuffix: "bcde", - }, - }, - { - name: "success: value medium (6), prefix val len fits exactly", - prefix: "pk_", - value: "abcdef", - expectedResult: MaskedIdentifier{ - Prefix: "pk_", - ValueLength: 6, - MaskedValuePrefix: "ab", - MaskedValueSuffix: "cdef", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - result, err := MaskKey(tc.prefix, tc.value) - - if tc.expectedErrString != "" { - require.EqualError(t, err, tc.expectedErrString) - assert.Equal(t, tc.expectedResult, result) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedResult, result) - } - }) - } -} diff --git a/envd/internal/shared/keys/sha256.go b/envd/internal/shared/keys/sha256.go deleted file mode 100644 index 879bb10..0000000 --- a/envd/internal/shared/keys/sha256.go +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "crypto/sha256" - "encoding/base64" - "fmt" -) - -type Sha256Hashing struct{} - -func NewSHA256Hashing() *Sha256Hashing { - return &Sha256Hashing{} -} - -func (h *Sha256Hashing) Hash(key []byte) string { - hashBytes := sha256.Sum256(key) - - hash64 := base64.RawStdEncoding.EncodeToString(hashBytes[:]) - - return fmt.Sprintf( - "$sha256$%s", - hash64, - ) -} - -func (h *Sha256Hashing) HashWithoutPrefix(key []byte) string { - hashBytes := sha256.Sum256(key) - - return base64.RawStdEncoding.EncodeToString(hashBytes[:]) -} diff --git a/envd/internal/shared/keys/sha256_test.go b/envd/internal/shared/keys/sha256_test.go deleted file mode 100644 index 9722fbe..0000000 --- a/envd/internal/shared/keys/sha256_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSHA256Hashing(t *testing.T) { - t.Parallel() - hasher := NewSHA256Hashing() - - hashed := hasher.Hash([]byte("test")) - assert.Regexp(t, "^\\$sha256\\$.*", hashed) -} diff --git a/envd/internal/shared/keys/sha512.go b/envd/internal/shared/keys/sha512.go deleted file mode 100644 index 3bc3039..0000000 --- a/envd/internal/shared/keys/sha512.go +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "crypto/sha512" - "encoding/hex" -) - -// HashAccessToken computes the SHA-512 hash of an access token. -func HashAccessToken(token string) string { - h := sha512.Sum512([]byte(token)) - - return hex.EncodeToString(h[:]) -} - -// HashAccessTokenBytes computes the SHA-512 hash of an access token from bytes. -func HashAccessTokenBytes(token []byte) string { - h := sha512.Sum512(token) - - return hex.EncodeToString(h[:]) -} diff --git a/envd/internal/shared/smap/smap.go b/envd/internal/shared/smap/smap.go deleted file mode 100644 index fc1f816..0000000 --- a/envd/internal/shared/smap/smap.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package smap - -import ( - cmap "github.com/orcaman/concurrent-map/v2" -) - -type Map[V any] struct { - m cmap.ConcurrentMap[string, V] -} - -func New[V any]() *Map[V] { - return &Map[V]{ - m: cmap.New[V](), - } -} - -func (m *Map[V]) Remove(key string) { - m.m.Remove(key) -} - -func (m *Map[V]) Get(key string) (V, bool) { - return m.m.Get(key) -} - -func (m *Map[V]) Insert(key string, value V) { - m.m.Set(key, value) -} - -func (m *Map[V]) Upsert(key string, value V, cb cmap.UpsertCb[V]) V { - return m.m.Upsert(key, value, cb) -} - -func (m *Map[V]) InsertIfAbsent(key string, value V) bool { - return m.m.SetIfAbsent(key, value) -} - -func (m *Map[V]) Items() map[string]V { - return m.m.Items() -} - -func (m *Map[V]) RemoveCb(key string, cb func(key string, v V, exists bool) bool) bool { - return m.m.RemoveCb(key, cb) -} - -func (m *Map[V]) Count() int { - return m.m.Count() -} diff --git a/envd/internal/shared/utils/ptr.go b/envd/internal/shared/utils/ptr.go deleted file mode 100644 index 08c4090..0000000 --- a/envd/internal/shared/utils/ptr.go +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import "fmt" - -func ToPtr[T any](v T) *T { - return &v -} - -func FromPtr[T any](s *T) T { - if s == nil { - var zero T - - return zero - } - - return *s -} - -func Sprintp[T any](s *T) string { - if s == nil { - return "" - } - - return fmt.Sprintf("%v", *s) -} - -func DerefOrDefault[T any](s *T, defaultValue T) T { - if s == nil { - return defaultValue - } - - return *s -} - -func CastPtr[S any, T any](s *S, castFunc func(S) T) *T { - if s == nil { - return nil - } - - t := castFunc(*s) - - return &t -} diff --git a/envd/internal/utils/atomic.go b/envd/internal/utils/atomic.go deleted file mode 100644 index 6daa190..0000000 --- a/envd/internal/utils/atomic.go +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "sync" -) - -type AtomicMax struct { - val int64 - mu sync.Mutex -} - -func NewAtomicMax() *AtomicMax { - return &AtomicMax{} -} - -func (a *AtomicMax) SetToGreater(newValue int64) bool { - a.mu.Lock() - defer a.mu.Unlock() - - if a.val > newValue { - return false - } - - a.val = newValue - - return true -} diff --git a/envd/internal/utils/atomic_test.go b/envd/internal/utils/atomic_test.go deleted file mode 100644 index 5a01ddd..0000000 --- a/envd/internal/utils/atomic_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestAtomicMax_NewAtomicMax(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - require.NotNil(t, am) - require.Equal(t, int64(0), am.val) -} - -func TestAtomicMax_SetToGreater_InitialValue(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - - // Should succeed when newValue > current - assert.True(t, am.SetToGreater(10)) - assert.Equal(t, int64(10), am.val) -} - -func TestAtomicMax_SetToGreater_EqualValue(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - am.val = 10 - - // Should succeed when newValue > current - assert.True(t, am.SetToGreater(20)) - assert.Equal(t, int64(20), am.val) -} - -func TestAtomicMax_SetToGreater_GreaterValue(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - am.val = 10 - - // Should fail when newValue < current, keeping the max value - assert.False(t, am.SetToGreater(5)) - assert.Equal(t, int64(10), am.val) -} - -func TestAtomicMax_SetToGreater_NegativeValues(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - am.val = -5 - - assert.True(t, am.SetToGreater(-2)) - assert.Equal(t, int64(-2), am.val) -} - -func TestAtomicMax_SetToGreater_Concurrent(t *testing.T) { - t.Parallel() - am := NewAtomicMax() - var wg sync.WaitGroup - - // Run 100 goroutines trying to update the value concurrently - numGoroutines := 100 - wg.Add(numGoroutines) - - for i := range numGoroutines { - go func(val int64) { - defer wg.Done() - am.SetToGreater(val) - }(int64(i)) - } - - wg.Wait() - - // The final value should be 99 (the maximum value) - assert.Equal(t, int64(99), am.val) -} diff --git a/envd/internal/utils/map.go b/envd/internal/utils/map.go deleted file mode 100644 index b1e522b..0000000 --- a/envd/internal/utils/map.go +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import "sync" - -type Map[K comparable, V any] struct { - m sync.Map -} - -func NewMap[K comparable, V any]() *Map[K, V] { - return &Map[K, V]{ - m: sync.Map{}, - } -} - -func (m *Map[K, V]) Delete(key K) { - m.m.Delete(key) -} - -func (m *Map[K, V]) Load(key K) (value V, ok bool) { - v, ok := m.m.Load(key) - if !ok { - return value, ok - } - - return v.(V), ok -} - -func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { - v, loaded := m.m.LoadAndDelete(key) - if !loaded { - return value, loaded - } - - return v.(V), loaded -} - -func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { - a, loaded := m.m.LoadOrStore(key, value) - - return a.(V), loaded -} - -func (m *Map[K, V]) Range(f func(key K, value V) bool) { - m.m.Range(func(key, value any) bool { - return f(key.(K), value.(V)) - }) -} - -func (m *Map[K, V]) Store(key K, value V) { - m.m.Store(key, value) -} diff --git a/envd/internal/utils/multipart.go b/envd/internal/utils/multipart.go deleted file mode 100644 index 64dce42..0000000 --- a/envd/internal/utils/multipart.go +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "errors" - "mime" - "mime/multipart" -) - -// CustomPart is a wrapper around multipart.Part that overloads the FileName method -type CustomPart struct { - *multipart.Part -} - -// FileNameWithPath returns the filename parameter of the Part's Content-Disposition header. -// This method borrows from the original FileName method implementation but returns the full -// filename without using `filepath.Base`. -func (p *CustomPart) FileNameWithPath() (string, error) { - dispositionParams, err := p.parseContentDisposition() - if err != nil { - return "", err - } - filename, ok := dispositionParams["filename"] - if !ok { - return "", errors.New("filename not found in Content-Disposition header") - } - - return filename, nil -} - -func (p *CustomPart) parseContentDisposition() (map[string]string, error) { - v := p.Header.Get("Content-Disposition") - _, dispositionParams, err := mime.ParseMediaType(v) - if err != nil { - return nil, err - } - - return dispositionParams, nil -} - -// NewCustomPart creates a new CustomPart from a multipart.Part -func NewCustomPart(part *multipart.Part) *CustomPart { - return &CustomPart{Part: part} -} diff --git a/envd/internal/utils/rfsnotify.go b/envd/internal/utils/rfsnotify.go deleted file mode 100644 index 68918f6..0000000 --- a/envd/internal/utils/rfsnotify.go +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import "path/filepath" - -// FsnotifyPath creates an optionally recursive path for fsnotify/fsnotify internal implementation -func FsnotifyPath(path string, recursive bool) string { - if recursive { - return filepath.Join(path, "...") - } - - return path -} diff --git a/envd/main.go b/envd/main.go deleted file mode 100644 index 3acd2c6..0000000 --- a/envd/main.go +++ /dev/null @@ -1,294 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Modifications by M/S Omukk - -package main - -import ( - "context" - "flag" - "fmt" - "log" - "net/http" - "os" - "path/filepath" - "strconv" - "time" - - "connectrpc.com/authn" - connectcors "connectrpc.com/cors" - "github.com/go-chi/chi/v5" - "github.com/rs/cors" - - "git.omukk.dev/wrenn/sandbox/envd/internal/api" - "git.omukk.dev/wrenn/sandbox/envd/internal/execcontext" - "git.omukk.dev/wrenn/sandbox/envd/internal/host" - "git.omukk.dev/wrenn/sandbox/envd/internal/logs" - "git.omukk.dev/wrenn/sandbox/envd/internal/permissions" - publicport "git.omukk.dev/wrenn/sandbox/envd/internal/port" - "git.omukk.dev/wrenn/sandbox/envd/internal/services/cgroups" - filesystemRpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/filesystem" - processRpc "git.omukk.dev/wrenn/sandbox/envd/internal/services/process" - processSpec "git.omukk.dev/wrenn/sandbox/envd/internal/services/spec/process" - "git.omukk.dev/wrenn/sandbox/envd/internal/utils" -) - -const ( - // Downstream timeout should be greater than upstream (in orchestrator proxy). - idleTimeout = 640 * time.Second - maxAge = 2 * time.Hour - - defaultPort = 49983 - - portScannerInterval = 1000 * time.Millisecond - - // This is the default user used in the container if not specified otherwise. - // It should be always overridden by the user in /init when building the template. - defaultUser = "root" - - kilobyte = 1024 - megabyte = 1024 * kilobyte -) - -var ( - Version = "0.1.0" - - commitSHA string - - isNotFC bool - port int64 - - versionFlag bool - commitFlag bool - startCmdFlag string - cgroupRoot string -) - -func parseFlags() { - flag.BoolVar( - &isNotFC, - "isnotfc", - false, - "isNotFCmode prints all logs to stdout", - ) - - flag.BoolVar( - &versionFlag, - "version", - false, - "print envd version", - ) - - flag.BoolVar( - &commitFlag, - "commit", - false, - "print envd source commit", - ) - - flag.Int64Var( - &port, - "port", - defaultPort, - "a port on which the daemon should run", - ) - - flag.StringVar( - &startCmdFlag, - "cmd", - "", - "a command to run on the daemon start", - ) - - flag.StringVar( - &cgroupRoot, - "cgroup-root", - "/sys/fs/cgroup", - "cgroup root directory", - ) - - flag.Parse() -} - -func withCORS(h http.Handler) http.Handler { - middleware := cors.New(cors.Options{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{ - http.MethodHead, - http.MethodGet, - http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete, - }, - AllowedHeaders: []string{"*"}, - ExposedHeaders: append( - connectcors.ExposedHeaders(), - "Location", - "Cache-Control", - "X-Content-Type-Options", - ), - MaxAge: int(maxAge.Seconds()), - }) - - return middleware.Handler(h) -} - -func main() { - parseFlags() - - if versionFlag { - fmt.Printf("%s\n", Version) - - return - } - - if commitFlag { - fmt.Printf("%s\n", commitSHA) - - return - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - if err := os.MkdirAll(host.WrennRunDir, 0o755); err != nil { - fmt.Fprintf(os.Stderr, "error creating wrenn run directory: %v\n", err) - } - - defaults := &execcontext.Defaults{ - User: defaultUser, - EnvVars: utils.NewMap[string, string](), - } - isFCBoolStr := strconv.FormatBool(!isNotFC) - defaults.EnvVars.Store("WRENN_SANDBOX", isFCBoolStr) - if err := os.WriteFile(filepath.Join(host.WrennRunDir, ".WRENN_SANDBOX"), []byte(isFCBoolStr), 0o444); err != nil { - fmt.Fprintf(os.Stderr, "error writing sandbox file: %v\n", err) - } - - mmdsChan := make(chan *host.MMDSOpts, 1) - defer close(mmdsChan) - if !isNotFC { - go host.PollForMMDSOpts(ctx, mmdsChan, defaults.EnvVars) - } - - l := logs.NewLogger(ctx, isNotFC, mmdsChan) - - m := chi.NewRouter() - - envLogger := l.With().Str("logger", "envd").Logger() - fsLogger := l.With().Str("logger", "filesystem").Logger() - filesystemRpc.Handle(m, &fsLogger, defaults) - - cgroupManager := createCgroupManager() - defer func() { - err := cgroupManager.Close() - if err != nil { - fmt.Fprintf(os.Stderr, "failed to close cgroup manager: %v\n", err) - } - }() - - processLogger := l.With().Str("logger", "process").Logger() - processService := processRpc.Handle(m, &processLogger, defaults, cgroupManager) - - // Port scanner and forwarder are managed by PortSubsystem, which - // supports stop/restart across Firecracker snapshot/restore cycles. - portLogger := l.With().Str("logger", "port-forwarder").Logger() - portSubsystem := publicport.NewPortSubsystem(&portLogger, cgroupManager, portScannerInterval) - portSubsystem.Start(ctx) - defer portSubsystem.Stop() - - connTracker := api.NewServerConnTracker() - - service := api.New(&envLogger, defaults, mmdsChan, isNotFC, ctx, portSubsystem, connTracker, Version) - handler := api.HandlerFromMux(service, m) - middleware := authn.NewMiddleware(permissions.AuthenticateUsername) - - s := &http.Server{ - Handler: withCORS( - service.WithAuthorization( - middleware.Wrap(handler), - ), - ), - Addr: fmt.Sprintf("0.0.0.0:%d", port), - // We remove the timeouts as the connection is terminated by closing of the sandbox and keepalive close. - ReadTimeout: 0, - WriteTimeout: 0, - IdleTimeout: idleTimeout, - ConnState: connTracker.Track, - } - connTracker.SetServer(s) - - // TODO: Not used anymore in template build, replaced by direct envd command call. - if startCmdFlag != "" { - tag := "startCmd" - cwd := "/home/user" - user, err := permissions.GetUser("root") - if err != nil { - log.Fatalf("error getting user: %v", err) //nolint:gocritic // probably fine to bail if we're done? - } - - if err = processService.InitializeStartProcess(ctx, user, &processSpec.StartRequest{ - Tag: &tag, - Process: &processSpec.ProcessConfig{ - Envs: make(map[string]string), - Cmd: "/bin/bash", - Args: []string{"-l", "-c", startCmdFlag}, - Cwd: &cwd, - }, - }); err != nil { - log.Fatalf("error starting process: %v", err) - } - } - - err := s.ListenAndServe() - if err != nil { - log.Fatalf("error starting server: %v", err) - } -} - -func createCgroupManager() (m cgroups.Manager) { - defer func() { - if m == nil { - fmt.Fprintf(os.Stderr, "falling back to no-op cgroup manager\n") - m = cgroups.NewNoopManager() - } - }() - - metrics, err := host.GetMetrics() - if err != nil { - fmt.Fprintf(os.Stderr, "failed to calculate host metrics: %v\n", err) - - return nil - } - - // try to keep 1/8 of the memory free, but no more than 128 MB - maxMemoryReserved := uint64(float64(metrics.MemTotal) * .125) - maxMemoryReserved = min(maxMemoryReserved, uint64(128)*megabyte) - - opts := []cgroups.Cgroup2ManagerOption{ - cgroups.WithCgroup2ProcessType(cgroups.ProcessTypePTY, "ptys", map[string]string{ - "cpu.weight": "200", // gets much preferred cpu access, to help keep these real time - }), - cgroups.WithCgroup2ProcessType(cgroups.ProcessTypeSocat, "socats", map[string]string{ - "cpu.weight": "150", // gets slightly preferred cpu access - "memory.min": fmt.Sprintf("%d", 5*megabyte), - "memory.low": fmt.Sprintf("%d", 8*megabyte), - }), - cgroups.WithCgroup2ProcessType(cgroups.ProcessTypeUser, "user", map[string]string{ - "memory.high": fmt.Sprintf("%d", metrics.MemTotal-maxMemoryReserved), - "cpu.weight": "50", // less than envd, and less than core processes that default to 100 - }), - } - if cgroupRoot != "" { - opts = append(opts, cgroups.WithCgroup2RootSysFSPath(cgroupRoot)) - } - - mgr, err := cgroups.NewCgroup2Manager(opts...) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create cgroup2 manager: %v\n", err) - - return nil - } - - return mgr -} diff --git a/envd/spec/buf.gen.yaml b/envd/spec/buf.gen.yaml deleted file mode 100644 index 51282c2..0000000 --- a/envd/spec/buf.gen.yaml +++ /dev/null @@ -1,15 +0,0 @@ -version: v2 -plugins: - - protoc_builtin: go - out: ../internal/services/spec - opt: paths=source_relative - - local: protoc-gen-connect-go - out: ../internal/services/spec - opt: paths=source_relative -inputs: - - directory: ../../proto/envd -managed: - enabled: true - override: - - file_option: go_package_prefix - value: git.omukk.dev/wrenn/sandbox/envd/internal/services/spec diff --git a/envd/spec/envd.yaml b/envd/spec/envd.yaml deleted file mode 100644 index 5160ab7..0000000 --- a/envd/spec/envd.yaml +++ /dev/null @@ -1,313 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Modifications by M/S Omukk - -openapi: 3.0.0 -info: - title: envd - version: 0.1.1 - description: API for managing files' content and controlling envd - -tags: - - name: files - -paths: - /health: - get: - summary: Check the health of the service - responses: - "204": - description: The service is healthy - - /metrics: - get: - summary: Get the stats of the service - security: - - AccessTokenAuth: [] - - {} - responses: - "200": - description: The resource usage metrics of the service - content: - application/json: - schema: - $ref: "#/components/schemas/Metrics" - - /init: - post: - summary: Set initial vars, ensure the time and metadata is synced with the host - security: - - AccessTokenAuth: [] - - {} - requestBody: - content: - application/json: - schema: - type: object - properties: - volumeMounts: - type: array - items: - $ref: "#/components/schemas/VolumeMount" - hyperloopIP: - type: string - description: IP address of the hyperloop server to connect to - envVars: - $ref: "#/components/schemas/EnvVars" - accessToken: - type: string - description: Access token for secure access to envd service - x-go-type: SecureToken - timestamp: - type: string - format: date-time - description: The current timestamp in RFC3339 format - defaultUser: - type: string - description: The default user to use for operations - defaultWorkdir: - type: string - description: The default working directory to use for operations - responses: - "204": - description: Env vars set, the time and metadata is synced with the host - - /snapshot/prepare: - post: - summary: Quiesce continuous goroutines before Firecracker snapshot - responses: - "204": - description: Goroutines quiesced, safe to snapshot - - /envs: - get: - summary: Get the environment variables - security: - - AccessTokenAuth: [] - - {} - responses: - "200": - description: Environment variables - content: - application/json: - schema: - $ref: "#/components/schemas/EnvVars" - - /files: - get: - summary: Download a file - tags: [files] - security: - - AccessTokenAuth: [] - - {} - parameters: - - $ref: "#/components/parameters/FilePath" - - $ref: "#/components/parameters/User" - - $ref: "#/components/parameters/Signature" - - $ref: "#/components/parameters/SignatureExpiration" - responses: - "200": - $ref: "#/components/responses/DownloadSuccess" - "401": - $ref: "#/components/responses/InvalidUser" - "400": - $ref: "#/components/responses/InvalidPath" - "404": - $ref: "#/components/responses/FileNotFound" - "500": - $ref: "#/components/responses/InternalServerError" - post: - summary: Upload a file and ensure the parent directories exist. If the file exists, it will be overwritten. - tags: [files] - security: - - AccessTokenAuth: [] - - {} - parameters: - - $ref: "#/components/parameters/FilePath" - - $ref: "#/components/parameters/User" - - $ref: "#/components/parameters/Signature" - - $ref: "#/components/parameters/SignatureExpiration" - requestBody: - $ref: "#/components/requestBodies/File" - responses: - "200": - $ref: "#/components/responses/UploadSuccess" - "400": - $ref: "#/components/responses/InvalidPath" - "401": - $ref: "#/components/responses/InvalidUser" - "500": - $ref: "#/components/responses/InternalServerError" - "507": - $ref: "#/components/responses/NotEnoughDiskSpace" - -components: - securitySchemes: - AccessTokenAuth: - type: apiKey - in: header - name: X-Access-Token - - parameters: - FilePath: - name: path - in: query - required: false - description: Path to the file, URL encoded. Can be relative to user's home directory. - schema: - type: string - User: - name: username - in: query - required: false - description: User used for setting the owner, or resolving relative paths. - schema: - type: string - Signature: - name: signature - in: query - required: false - description: Signature used for file access permission verification. - schema: - type: string - SignatureExpiration: - name: signature_expiration - in: query - required: false - description: Signature expiration used for defining the expiration time of the signature. - schema: - type: integer - - requestBodies: - File: - required: true - content: - multipart/form-data: - schema: - type: object - properties: - file: - type: string - format: binary - - responses: - UploadSuccess: - description: The file was uploaded successfully. - content: - application/json: - schema: - type: array - items: - $ref: "#/components/schemas/EntryInfo" - - DownloadSuccess: - description: Entire file downloaded successfully. - content: - application/octet-stream: - schema: - type: string - format: binary - description: The file content - InvalidPath: - description: Invalid path - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - InternalServerError: - description: Internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - FileNotFound: - description: File not found - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - InvalidUser: - description: Invalid user - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - NotEnoughDiskSpace: - description: Not enough disk space - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - - schemas: - Error: - required: - - message - - code - properties: - message: - type: string - description: Error message - code: - type: integer - description: Error code - EntryInfo: - required: - - path - - name - - type - properties: - path: - type: string - description: Path to the file - name: - type: string - description: Name of the file - type: - type: string - description: Type of the file - enum: - - file - EnvVars: - type: object - description: Environment variables to set - additionalProperties: - type: string - Metrics: - type: object - description: Resource usage metrics - properties: - ts: - type: integer - format: int64 - description: Unix timestamp in UTC for current sandbox time - cpu_count: - type: integer - description: Number of CPU cores - cpu_used_pct: - type: number - format: float - description: CPU usage percentage - mem_total: - type: integer - description: Total virtual memory in bytes - mem_used: - type: integer - description: Used virtual memory in bytes - disk_used: - type: integer - description: Used disk space in bytes - disk_total: - type: integer - description: Total disk space in bytes - VolumeMount: - type: object - description: Volume - additionalProperties: false - properties: - nfs_target: - type: string - path: - type: string - required: - - nfs_target - - path diff --git a/envd/spec/generate.go b/envd/spec/generate.go deleted file mode 100644 index 60bb0d1..0000000 --- a/envd/spec/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package spec - -//go:generate buf generate diff --git a/internal/envdclient/client.go b/internal/envdclient/client.go index aed0349..2229093 100644 --- a/internal/envdclient/client.go +++ b/internal/envdclient/client.go @@ -250,7 +250,7 @@ func (c *Client) WriteFile(ctx context.Context, path string, content []byte) err respBody, _ := io.ReadAll(resp.Body) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + if resp.StatusCode != http.StatusOK { return fmt.Errorf("write file %s: status %d: %s", path, resp.StatusCode, string(respBody)) } @@ -292,10 +292,9 @@ func (c *Client) ReadFile(ctx context.Context, path string) ([]byte, error) { return data, nil } -// PrepareSnapshot calls envd's POST /snapshot/prepare endpoint, which quiesces -// continuous goroutines (port scanner, forwarder) and forces a GC cycle before -// Firecracker takes a VM snapshot. This ensures the Go runtime's page allocator -// is in a consistent state when vCPUs are frozen. +// PrepareSnapshot calls envd's POST /snapshot/prepare endpoint, which stops +// the port scanner/forwarder and marks active connections for post-restore +// cleanup before Firecracker freezes vCPUs. // // Best-effort: the caller should log a warning on error but not abort the pause. func (c *Client) PrepareSnapshot(ctx context.Context) error { diff --git a/internal/envdclient/health.go b/internal/envdclient/health.go index 4837051..b2e96c0 100644 --- a/internal/envdclient/health.go +++ b/internal/envdclient/health.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io" "log/slog" "net/http" "time" @@ -46,20 +45,15 @@ func (c *Client) FetchVersion(ctx context.Context) (string, error) { } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("health check returned %d", resp.StatusCode) } - body, err := io.ReadAll(resp.Body) - if err != nil || len(body) == 0 { - return "", nil // envd may not support version reporting yet - } - var data struct { Version string `json:"version"` } - if err := json.Unmarshal(body, &data); err != nil { - return "", nil // non-JSON response, old envd + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return "", fmt.Errorf("decode version response: %w", err) } return data.Version, nil @@ -78,7 +72,7 @@ func (c *Client) healthCheck(ctx context.Context) error { } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + if resp.StatusCode != http.StatusOK { return fmt.Errorf("health check returned %d", resp.StatusCode) } diff --git a/internal/sandbox/manager.go b/internal/sandbox/manager.go index 371d95b..3f295e1 100644 --- a/internal/sandbox/manager.go +++ b/internal/sandbox/manager.go @@ -95,11 +95,9 @@ type snapshotParent struct { } // maxDiffGenerations caps how many incremental diff generations we chain -// before falling back to a Full snapshot to collapse the chain. Firecracker -// snapshot/restore of a Go process (envd) accumulates runtime memory state -// drift; empirically, ~10 diff-based cycles corrupt the Go page allocator. -// A Full snapshot resets the generation counter and produces a clean base, -// preventing the crash. +// before falling back to a Full snapshot to collapse the chain. Long diff +// chains increase restore latency and snapshot directory size; a periodic +// Full snapshot resets the counter and produces a clean base. const maxDiffGenerations = 8 // buildMetadata constructs the metadata map with version information. @@ -382,8 +380,7 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { m.stopSampler(sb) // Step 0: Drain in-flight proxy connections before freezing vCPUs. - // This prevents Go runtime corruption inside the guest caused by stale - // TCP state from connections that were alive when the VM was snapshotted. + // Stale TCP state from mid-flight connections causes issues on restore. sb.connTracker.Drain(2 * time.Second) slog.Debug("pause: proxy connections drained", "id", sandboxID) @@ -393,10 +390,8 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { sb.client.CloseIdleConnections() slog.Debug("pause: envd client idle connections closed", "id", sandboxID) - // Step 0c: Signal envd to quiesce continuous goroutines (port scanner, - // forwarder), close idle HTTP connections, and run GC before freezing - // vCPUs. This prevents Go runtime page allocator corruption ("bad - // summary data") on snapshot restore. The 3s timeout also gives time + // Step 0c: Signal envd to quiesce (stop port scanner/forwarder, mark + // connections for post-restore cleanup). The 3s timeout also gives time // for the FINs from Step 0b to be processed by the guest kernel. // Best-effort: a failure is logged but does not abort the pause. func() { @@ -405,7 +400,7 @@ func (m *Manager) Pause(ctx context.Context, sandboxID string) error { if err := sb.client.PrepareSnapshot(prepCtx); err != nil { slog.Warn("pause: pre-snapshot quiesce failed (best-effort)", "id", sandboxID, "error", err) } else { - slog.Debug("pause: envd goroutines quiesced", "id", sandboxID) + slog.Debug("pause: envd quiesced", "id", sandboxID) } }() From f328113a2aa6026c94fc5f0f9b526ecb655aa2bc Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 03:32:03 +0600 Subject: [PATCH 07/10] rename guest hostname from "sandbox" to "capsule" Terminal prompt inside VMs now shows root@capsule instead of root@sandbox, aligning with user-facing "capsule" terminology. --- images/wrenn-init.sh | 4 ++-- internal/vm/config.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/images/wrenn-init.sh b/images/wrenn-init.sh index 8a9e22e..b05a08b 100644 --- a/images/wrenn-init.sh +++ b/images/wrenn-init.sh @@ -18,8 +18,8 @@ mount -t cgroup2 cgroup2 /sys/fs/cgroup 2>/dev/null || true echo "+cpu +memory +io" > /sys/fs/cgroup/cgroup.subtree_control 2>/dev/null || true # Set hostname and make it resolvable (sudo requires this). -hostname sandbox -echo "127.0.0.1 sandbox" >> /etc/hosts +hostname capsule +echo "127.0.0.1 capsule" >> /etc/hosts # Configure networking if the kernel ip= boot arg did not already set it up. if ! ip addr show eth0 2>/dev/null | grep -q "169.254.0.21"; then diff --git a/internal/vm/config.go b/internal/vm/config.go index 0c1f258..ea229b6 100644 --- a/internal/vm/config.go +++ b/internal/vm/config.go @@ -90,7 +90,7 @@ func (c *VMConfig) applyDefaults() { // kernelArgs builds the kernel command line for the VM. func (c *VMConfig) kernelArgs() string { // ip= format: :::::: - ipArg := fmt.Sprintf("ip=%s::%s:%s:sandbox:eth0:off", + ipArg := fmt.Sprintf("ip=%s::%s:%s:capsule:eth0:off", c.GuestIP, c.GatewayIP, c.NetMask, ) From bbcde17d494252cff9877c2dde854219c08d2094 Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 03:32:41 +0600 Subject: [PATCH 08/10] Updated static link check for envd --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 41b9251..5ac341e 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ build-agent: build-envd: cd envd-rs && ENVD_COMMIT=$(COMMIT) cargo build --release --target x86_64-unknown-linux-musl @cp envd-rs/target/x86_64-unknown-linux-musl/release/envd $(BIN_DIR)/envd - @file $(BIN_DIR)/envd | grep -q "statically linked" || \ + @file $(BIN_DIR)/envd | grep -q "static-pie linked" || \ (echo "ERROR: envd is not statically linked!" && exit 1) # ═══════════════════════════════════════════════════ From 31456fd169aed17434040cd82f97fe8719520a17 Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 04:28:10 +0600 Subject: [PATCH 09/10] fix: resolve PTY failure, MMDS file writes, and metrics instability in envd-rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three bugs fixed: 1. PTY connections failed because home directory was hardcoded as /home/{username} instead of reading from /etc/passwd. For root, this produced /home/root/ which doesn't exist — CWD validation rejected every PTY Start request without explicit cwd. Fixed all 6 locations to use user.dir from nix::unistd::User. 2. MMDS polling silently failed to parse metadata because the logs_collector_address field lacked #[serde(default)]. The host agent only sends instanceID + envID — missing "address" field caused every deserialize attempt to fail, so .WRENN_SANDBOX_ID and .WRENN_TEMPLATE_ID were never written. Also added error logging and create_dir_all before file writes. 3. Metrics CPU values were non-deterministic because a fresh sysinfo::System was created per request with a 100ms sleep between reads. Replaced with a background thread that samples CPU at fixed 1-second intervals via a persistent System instance, matching gopsutil's internal caching behavior. Metrics endpoint now reads cached atomic values — no blocking, consistent window. Also: close master PTY fd in child pre_exec, add process.Start request logging, bump version to 0.2.0. --- envd-rs/Cargo.lock | 2 +- envd-rs/Cargo.toml | 2 +- envd-rs/src/host/mmds.rs | 13 +++++-- envd-rs/src/http/files.rs | 4 +-- envd-rs/src/http/metrics.rs | 26 ++++---------- envd-rs/src/main.rs | 2 +- envd-rs/src/rpc/filesystem_service.rs | 2 +- envd-rs/src/rpc/process_handler.rs | 4 ++- envd-rs/src/rpc/process_service.rs | 13 ++++++- envd-rs/src/state.rs | 51 +++++++++++++++++++++++++-- 10 files changed, 85 insertions(+), 34 deletions(-) diff --git a/envd-rs/Cargo.lock b/envd-rs/Cargo.lock index ecafb78..2e173d6 100644 --- a/envd-rs/Cargo.lock +++ b/envd-rs/Cargo.lock @@ -514,7 +514,7 @@ dependencies = [ [[package]] name = "envd" -version = "0.1.2" +version = "0.2.0" dependencies = [ "async-stream", "axum", diff --git a/envd-rs/Cargo.toml b/envd-rs/Cargo.toml index eea979a..55947e3 100644 --- a/envd-rs/Cargo.toml +++ b/envd-rs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "envd" -version = "0.1.2" +version = "0.2.0" edition = "2024" rust-version = "1.88" diff --git a/envd-rs/src/host/mmds.rs b/envd-rs/src/host/mmds.rs index ff74201..e2bf5bb 100644 --- a/envd-rs/src/host/mmds.rs +++ b/envd-rs/src/host/mmds.rs @@ -13,7 +13,7 @@ pub struct MMDSOpts { pub sandbox_id: String, #[serde(rename = "envID")] pub template_id: String, - #[serde(rename = "address")] + #[serde(rename = "address", default)] pub logs_collector_address: String, #[serde(rename = "accessTokenHash", default)] pub access_token_hash: String, @@ -103,8 +103,15 @@ pub async fn poll_for_opts( env_vars.insert("WRENN_TEMPLATE_ID".into(), opts.template_id.clone()); let run_dir = std::path::Path::new(WRENN_RUN_DIR); - let _ = std::fs::write(run_dir.join(".WRENN_SANDBOX_ID"), &opts.sandbox_id); - let _ = std::fs::write(run_dir.join(".WRENN_TEMPLATE_ID"), &opts.template_id); + if let Err(e) = std::fs::create_dir_all(run_dir) { + tracing::error!(error = %e, "mmds: failed to create run dir"); + } + if let Err(e) = std::fs::write(run_dir.join(".WRENN_SANDBOX_ID"), &opts.sandbox_id) { + tracing::error!(error = %e, "mmds: failed to write .WRENN_SANDBOX_ID"); + } + if let Err(e) = std::fs::write(run_dir.join(".WRENN_TEMPLATE_ID"), &opts.template_id) { + tracing::error!(error = %e, "mmds: failed to write .WRENN_TEMPLATE_ID"); + } return Some(opts); } diff --git a/envd-rs/src/http/files.rs b/envd-rs/src/http/files.rs index df9206f..dfe1e54 100644 --- a/envd-rs/src/http/files.rs +++ b/envd-rs/src/http/files.rs @@ -95,7 +95,7 @@ pub async fn get_files( Err(e) => return json_error(StatusCode::UNAUTHORIZED, &e), }; - let home_dir = format!("/home/{}", user.name); + let home_dir = user.dir.to_string_lossy().to_string(); let resolved = match expand_and_resolve(path_str, &home_dir, state.defaults.workdir.as_deref()) { Ok(p) => p, @@ -246,7 +246,7 @@ pub async fn post_files( Err(e) => return json_error(StatusCode::UNAUTHORIZED, &e), }; - let home_dir = format!("/home/{}", user.name); + let home_dir = user.dir.to_string_lossy().to_string(); let uid = user.uid; let gid = user.gid; diff --git a/envd-rs/src/http/metrics.rs b/envd-rs/src/http/metrics.rs index b63dbda..da13452 100644 --- a/envd-rs/src/http/metrics.rs +++ b/envd-rs/src/http/metrics.rs @@ -22,10 +22,10 @@ pub struct Metrics { disk_total: u64, } -pub async fn get_metrics(State(_state): State>) -> impl IntoResponse { +pub async fn get_metrics(State(state): State>) -> impl IntoResponse { tracing::trace!("get metrics"); - match collect_metrics() { + match collect_metrics(&state) { Ok(m) => ( StatusCode::OK, [(header::CACHE_CONTROL, "no-store")], @@ -39,26 +39,12 @@ pub async fn get_metrics(State(_state): State>) -> impl IntoRespon } } -fn collect_metrics() -> Result { - use sysinfo::System; +fn collect_metrics(state: &AppState) -> Result { + let cpu_count = state.cpu_count(); + let cpu_used_pct_rounded = state.cpu_used_pct(); - let mut sys = System::new(); + let mut sys = sysinfo::System::new(); sys.refresh_memory(); - sys.refresh_cpu_all(); - - // sysinfo needs a small delay for accurate CPU — first call returns 0. - // In a real daemon this would be cached; for now, report instantaneous. - std::thread::sleep(std::time::Duration::from_millis(100)); - sys.refresh_cpu_all(); - - let cpu_count = sys.cpus().len() as u32; - let cpu_used_pct = sys.global_cpu_usage(); - let cpu_used_pct_rounded = if cpu_used_pct > 0.0 { - (cpu_used_pct * 100.0).round() / 100.0 - } else { - 0.0 - }; - let mem_total = sys.total_memory(); let mem_used = sys.used_memory(); let mem_total_mib = mem_total / 1024 / 1024; diff --git a/envd-rs/src/main.rs b/envd-rs/src/main.rs index 760cb93..587fc1a 100644 --- a/envd-rs/src/main.rs +++ b/envd-rs/src/main.rs @@ -196,7 +196,7 @@ fn spawn_initial_command(cmd: &str, state: &AppState) { } }; - let home = format!("/home/{}", user.name); + let home = user.dir.to_string_lossy().to_string(); let cwd = state .defaults .workdir diff --git a/envd-rs/src/rpc/filesystem_service.rs b/envd-rs/src/rpc/filesystem_service.rs index 8cf2b2c..1c73e93 100644 --- a/envd-rs/src/rpc/filesystem_service.rs +++ b/envd-rs/src/rpc/filesystem_service.rs @@ -36,7 +36,7 @@ impl FilesystemServiceImpl { ConnectError::new(ErrorCode::Unauthenticated, format!("invalid user: {e}")) })?; - let home_dir = format!("/home/{}", user.name); + let home_dir = user.dir.to_string_lossy().to_string(); let default_workdir = self.state.defaults.workdir.as_deref(); expand_and_resolve(path, &home_dir, default_workdir) diff --git a/envd-rs/src/rpc/process_handler.rs b/envd-rs/src/rpc/process_handler.rs index cf0287c..296c075 100644 --- a/envd-rs/src/rpc/process_handler.rs +++ b/envd-rs/src/rpc/process_handler.rs @@ -141,7 +141,7 @@ pub fn spawn_process( ) -> Result, ConnectError> { let mut env: Vec<(String, String)> = Vec::new(); env.push(("PATH".into(), std::env::var("PATH").unwrap_or_default())); - let home = format!("/home/{}", user.name); + let home = user.dir.to_string_lossy().to_string(); env.push(("HOME".into(), home)); env.push(("USER".into(), user.name.clone())); env.push(("LOGNAME".into(), user.name.clone())); @@ -206,7 +206,9 @@ pub fn spawn_process( unsafe { use std::os::unix::io::AsRawFd; let slave_raw = slave_fd.as_raw_fd(); + let master_raw = master_fd.as_raw_fd(); command.pre_exec(move || { + libc::close(master_raw); nix::unistd::setsid() .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; libc::ioctl(slave_raw, libc::TIOCSCTTY, 0); diff --git a/envd-rs/src/rpc/process_service.rs b/envd-rs/src/rpc/process_service.rs index c69c646..92738b5 100644 --- a/envd-rs/src/rpc/process_service.rs +++ b/envd-rs/src/rpc/process_service.rs @@ -83,7 +83,7 @@ impl ProcessServiceImpl { .map(|(k, v)| (k.to_string(), v.to_string())) .collect(); - let home_dir = format!("/home/{}", user.name); + let home_dir = user.dir.to_string_lossy().to_string(); let cwd_str: &str = proc_config.cwd.unwrap_or(""); let cwd = expand_and_resolve(cwd_str, &home_dir, self.state.defaults.workdir.as_deref()) .map_err(|e| ConnectError::new(ErrorCode::InvalidArgument, e))?; @@ -105,6 +105,17 @@ impl ProcessServiceImpl { let enable_stdin = request.stdin.unwrap_or(true); let tag = request.tag.map(|s| s.to_string()); + tracing::info!( + cmd = cmd, + has_pty = pty_opts.is_some(), + pty_size = ?pty_opts, + tag = ?tag, + stdin = enable_stdin, + cwd = effective_cwd, + user = %username, + "process.Start request" + ); + let handle = process_handler::spawn_process( cmd, &args, diff --git a/envd-rs/src/state.rs b/envd-rs/src/state.rs index d54ea38..aa1f4a2 100644 --- a/envd-rs/src/state.rs +++ b/envd-rs/src/state.rs @@ -1,4 +1,4 @@ -use std::sync::atomic::AtomicBool; +use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use std::sync::Arc; use crate::auth::token::SecureToken; @@ -17,6 +17,8 @@ pub struct AppState { pub access_token: SecureToken, pub conn_tracker: ConnTracker, pub port_subsystem: Option>, + pub cpu_used_pct: AtomicU32, + pub cpu_count: AtomicU32, } impl AppState { @@ -27,7 +29,7 @@ impl AppState { is_fc: bool, port_subsystem: Option>, ) -> Arc { - Arc::new(Self { + let state = Arc::new(Self { defaults, version, commit, @@ -37,6 +39,49 @@ impl AppState { access_token: SecureToken::new(), conn_tracker: ConnTracker::new(), port_subsystem, - }) + cpu_used_pct: AtomicU32::new(0), + cpu_count: AtomicU32::new(0), + }); + + let state_clone = Arc::clone(&state); + std::thread::spawn(move || { + cpu_sampler(state_clone); + }); + + state + } + + pub fn cpu_used_pct(&self) -> f32 { + f32::from_bits(self.cpu_used_pct.load(Ordering::Relaxed)) + } + + pub fn cpu_count(&self) -> u32 { + self.cpu_count.load(Ordering::Relaxed) + } +} + +fn cpu_sampler(state: Arc) { + use sysinfo::System; + + let mut sys = System::new(); + sys.refresh_cpu_all(); + + loop { + std::thread::sleep(std::time::Duration::from_secs(1)); + sys.refresh_cpu_all(); + + let pct = sys.global_cpu_usage(); + let rounded = if pct > 0.0 { + (pct * 100.0).round() / 100.0 + } else { + 0.0 + }; + + state + .cpu_used_pct + .store(rounded.to_bits(), Ordering::Relaxed); + state + .cpu_count + .store(sys.cpus().len() as u32, Ordering::Relaxed); } } From ef5f22386324924669ee96ad5e44c040c1eb334e Mon Sep 17 00:00:00 2001 From: pptx704 Date: Sun, 3 May 2026 04:47:10 +0600 Subject: [PATCH 10/10] fix: improve error feedback for terminal disconnects and host unavailability Show "[session disconnected]" in terminal when PTY websocket closes cleanly. Map scheduler and agent unavailability errors to 503 with user-friendly message instead of leaking internal details. --- frontend/src/lib/components/TerminalTab.svelte | 1 + internal/api/middleware.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/frontend/src/lib/components/TerminalTab.svelte b/frontend/src/lib/components/TerminalTab.svelte index e7eadf4..f1e9637 100644 --- a/frontend/src/lib/components/TerminalTab.svelte +++ b/frontend/src/lib/components/TerminalTab.svelte @@ -332,6 +332,7 @@ if (s.state === 'connected') { updateSession(id, { state: 'disconnected' }); + int.term.write('\r\n\x1b[38;2;107;104;98m[session disconnected]\x1b[0m\r\n'); } }; diff --git a/internal/api/middleware.go b/internal/api/middleware.go index b1c9f00..6e80136 100644 --- a/internal/api/middleware.go +++ b/internal/api/middleware.go @@ -54,6 +54,8 @@ func agentErrToHTTP(err error) (int, string, string) { return http.StatusConflict, "conflict", err.Error() case connect.CodePermissionDenied: return http.StatusForbidden, "forbidden", err.Error() + case connect.CodeUnavailable: + return http.StatusServiceUnavailable, "no_hosts_available", "no servers available — try again later" case connect.CodeUnimplemented: return http.StatusNotImplemented, "agent_error", err.Error() default: @@ -108,6 +110,9 @@ func serviceErrToHTTP(err error) (int, string, string) { return http.StatusForbidden, "forbidden", "forbidden" case strings.Contains(msg, "invalid or expired"): return http.StatusUnauthorized, "unauthorized", "invalid or expired credentials" + case strings.Contains(msg, "no online") && strings.Contains(msg, "hosts available"), + strings.Contains(msg, "no host has sufficient resources"): + return http.StatusServiceUnavailable, "no_hosts_available", "no servers available — try again later" case strings.Contains(msg, "invalid"): return http.StatusBadRequest, "invalid_request", "invalid request" default: