Port envd from e2b with internalized shared packages and Connect RPC

- Copy envd source from e2b-dev/infra, internalize shared dependencies
  into envd/internal/shared/ (keys, filesystem, id, smap, utils)
- Switch from gRPC to Connect RPC for all envd services
- Update module paths to git.omukk.dev/wrenn/{sandbox,sandbox/envd}
- Add proto specs (process, filesystem) with buf-based code generation
- Implement full envd: process exec, filesystem ops, port forwarding,
  cgroup management, MMDS integration, and HTTP API
- Update main module dependencies (firecracker SDK, pgx, goose, etc.)
- Remove placeholder .gitkeep files replaced by real implementations
This commit is contained in:
2026-03-09 21:03:19 +06:00
parent bd78cc068c
commit a3898d68fb
99 changed files with 17185 additions and 24 deletions

View File

@ -0,0 +1,47 @@
package logs
import (
"time"
"github.com/rs/zerolog"
)
const (
	// defaultMaxBufferSize (2<<15 = 64 KiB) caps how much streamed data is
	// accumulated before a log entry is forced out.
	defaultMaxBufferSize = 2 << 15
	// defaultTimeout is the tick interval at which buffered data is flushed
	// even if the size cap has not been reached.
	defaultTimeout = 2 * time.Second
)
// LogBufferedDataEvents drains dataCh, accumulating received bytes and
// emitting them through logger under the given eventType field. An entry is
// written whenever the accumulated data reaches defaultMaxBufferSize or the
// periodic ticker fires; any remainder is flushed when dataCh closes.
func LogBufferedDataEvents(dataCh <-chan []byte, logger *zerolog.Logger, eventType string) {
	ticker := time.NewTicker(defaultTimeout)
	defer ticker.Stop()

	var pending []byte

	// Final flush for whatever is left when the channel closes.
	defer func() {
		if len(pending) > 0 {
			logger.Info().Str(eventType, string(pending)).Msg("Streaming process event (flush)")
		}
	}()

	emit := func() {
		logger.Info().Str(eventType, string(pending)).Msg("Streaming process event")
		pending = nil
	}

	for {
		select {
		case <-ticker.C:
			if len(pending) > 0 {
				emit()
			}
		case chunk, open := <-dataCh:
			if !open {
				return
			}
			pending = append(pending, chunk...)
			if len(pending) >= defaultMaxBufferSize {
				emit()
			}
		}
	}
}

View File

@ -0,0 +1,172 @@
package exporter
import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"sync"
	"time"

	"git.omukk.dev/wrenn/sandbox/envd/internal/host"
)
// ExporterTimeout bounds each HTTP POST to the logs collector.
const ExporterTimeout = 10 * time.Second

// HTTPExporter is an io.Writer log sink that queues written lines and ships
// them to an HTTP logs collector whose address is learned via MMDS.
type HTTPExporter struct {
	client http.Client // HTTP client with ExporterTimeout applied
	logs   [][]byte    // queued log lines awaiting export; guarded by logLock
	// isNotFC: when true (not running under Firecracker) logs are printed to
	// stdout instead of being sent to the collector.
	isNotFC  bool
	mmdsOpts *host.MMDSOpts // collector metadata; guarded by mmdsLock
	// Concurrency coordination
	triggers  chan struct{} // buffered(1) wakeup signal for the export loop
	logLock   sync.RWMutex
	mmdsLock  sync.RWMutex
	startOnce sync.Once // ensures the export loop is started at most once
}
// NewHTTPLogsExporter constructs an HTTPExporter pre-populated with
// placeholder MMDS metadata and spawns the goroutine that waits for real
// metadata on mmdsChan before starting the export loop.
func NewHTTPLogsExporter(ctx context.Context, isNotFC bool, mmdsChan <-chan *host.MMDSOpts) *HTTPExporter {
	e := &HTTPExporter{
		client:   http.Client{Timeout: ExporterTimeout},
		triggers: make(chan struct{}, 1),
		isNotFC:  isNotFC,
		// Placeholders until the first MMDS update arrives; an empty collector
		// address disables remote export.
		mmdsOpts: &host.MMDSOpts{
			SandboxID:            "unknown",
			TemplateID:           "unknown",
			LogsCollectorAddress: "",
		},
	}

	go e.listenForMMDSOptsAndStart(ctx, mmdsChan)

	return e
}
// sendInstanceLogs POSTs a single JSON log line to the collector at address.
// An empty address means exporting is disabled and is treated as success.
// Returns an error for transport failures and for non-2xx collector replies,
// so the caller can fall back to printing the line locally.
func (w *HTTPExporter) sendInstanceLogs(ctx context.Context, logs []byte, address string) error {
	if address == "" {
		return nil
	}
	request, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewBuffer(logs))
	if err != nil {
		return err
	}
	request.Header.Set("Content-Type", "application/json")
	response, err := w.client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// Drain the body so the transport can reuse the underlying connection.
	_, _ = io.Copy(io.Discard, response.Body)
	// The original ignored the status entirely; surface collector-side
	// failures instead of silently dropping the line.
	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return fmt.Errorf("log collector returned status %d", response.StatusCode)
	}
	return nil
}
// printLog writes a raw log line to stdout as a local fallback when the
// collector cannot be reached or JSON enrichment fails.
func printLog(logs []byte) {
	fmt.Fprint(os.Stdout, string(logs))
}
// listenForMMDSOptsAndStart consumes MMDS metadata updates until ctx is
// cancelled or mmdsChan closes, copying each update into w.mmdsOpts under the
// lock. The export loop is launched exactly once, after the first update.
func (w *HTTPExporter) listenForMMDSOptsAndStart(ctx context.Context, mmdsChan <-chan *host.MMDSOpts) {
	for {
		var (
			opts *host.MMDSOpts
			open bool
		)
		select {
		case <-ctx.Done():
			return
		case opts, open = <-mmdsChan:
			if !open {
				return
			}
		}

		w.mmdsLock.Lock()
		w.mmdsOpts.Update(opts.SandboxID, opts.TemplateID, opts.LogsCollectorAddress)
		w.mmdsLock.Unlock()

		w.startOnce.Do(func() {
			go w.start(ctx)
		})
	}
}
// start is the export loop: each trigger drains the queued log lines and
// ships them to the collector (or prints them when not under Firecracker).
// Fixes in this revision:
//   - the loop now selects on ctx.Done(); the original ranged over the
//     never-closed triggers channel and leaked the goroutine forever.
//   - the collector address is captured under mmdsLock; the original read
//     w.mmdsOpts.LogsCollectorAddress unlocked, racing with Update.
func (w *HTTPExporter) start(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-w.triggers:
		}

		logs := w.getAllLogs()
		if len(logs) == 0 {
			continue
		}

		// Outside Firecracker there is no MMDS/collector; print locally.
		if w.isNotFC {
			for _, line := range logs {
				printLog(line)
			}
			continue
		}

		for _, logLine := range logs {
			w.mmdsLock.RLock()
			logLineWithOpts, err := w.mmdsOpts.AddOptsToJSON(logLine)
			address := w.mmdsOpts.LogsCollectorAddress
			w.mmdsLock.RUnlock()
			if err != nil {
				log.Printf("error adding instance logging options (%+v) to JSON (%+v) with logs : %v\n", w.mmdsOpts, logLine, err)
				printLog(logLine)
				continue
			}
			if err := w.sendInstanceLogs(ctx, logLineWithOpts, address); err != nil {
				log.Printf("error sending instance logs: %+v", err)
				printLog(logLine)
				continue
			}
		}
	}
}
// resumeProcessing nudges the export loop. The buffered(1) channel coalesces
// signals: if a trigger is already pending the send is dropped, which is fine
// because the loop drains every queued line per wakeup.
func (w *HTTPExporter) resumeProcessing() {
	select {
	case w.triggers <- struct{}{}:
	default:
		// A trigger is already pending; the loop will pick up our logs too.
	}
}
// Write implements io.Writer so the exporter can be used as a zerolog sink.
// The data is copied (the caller may reuse its buffer) and enqueued
// synchronously: the original spawned a goroutine per call, which could
// reorder log lines when writes raced each other. Appending under the mutex
// is cheap, so enqueue inline and keep lines in write order.
// Always reports the full length written and a nil error.
func (w *HTTPExporter) Write(logs []byte) (int, error) {
	logsCopy := make([]byte, len(logs))
	copy(logsCopy, logs)

	w.addLogs(logsCopy)

	return len(logs), nil
}
// getAllLogs atomically takes ownership of every queued log line, leaving the
// queue empty for subsequent writers.
func (w *HTTPExporter) getAllLogs() [][]byte {
	w.logLock.Lock()
	defer w.logLock.Unlock()

	queued := w.logs
	w.logs = nil

	return queued
}
// addLogs appends one log line to the queue and wakes the export loop.
// resumeProcessing is non-blocking, so calling it while holding the lock
// cannot deadlock.
func (w *HTTPExporter) addLogs(logs []byte) {
	w.logLock.Lock()
	defer w.logLock.Unlock()

	w.logs = append(w.logs, logs)
	w.resumeProcessing()
}

View File

@ -0,0 +1,172 @@
package logs
import (
"context"
"fmt"
"strconv"
"strings"
"sync/atomic"
"connectrpc.com/connect"
"github.com/rs/zerolog"
)
// OperationID is the context-key type under which a request's operation ID is
// stored.
type OperationID string

const (
	// OperationIDKey is the context key carrying the per-request operation ID.
	OperationIDKey OperationID = "operation_id"
	// DefaultHTTPMethod is the HTTP verb Connect uses for every RPC.
	DefaultHTTPMethod string = "POST"
)

// operationID is a process-wide monotonically increasing request counter.
var operationID atomic.Int32

// AssignOperationID returns the next operation ID as a decimal string.
func AssignOperationID() string {
	return strconv.FormatInt(int64(operationID.Add(1)), 10)
}

// AddRequestIDToContext stamps a freshly assigned operation ID onto ctx under
// OperationIDKey.
func AddRequestIDToContext(ctx context.Context) context.Context {
	return context.WithValue(ctx, OperationIDKey, AssignOperationID())
}
// formatMethod converts a Connect procedure path such as
// "/process.Process/Start" into a short human-readable label like
// "Process start": the service segment is upper-cased at its first letter and
// the method segment lower-cased at its first letter.
// Inputs that do not match the expected "<pkg>.<Service>/<Method>" shape are
// returned unchanged. The original sliced servicePart[:1]/methodPart[:1]
// without checking for empty segments, which panics on inputs like "a.//x";
// an explicit guard now covers that case.
func formatMethod(method string) string {
	parts := strings.Split(method, ".")
	if len(parts) < 2 {
		return method
	}
	split := strings.Split(parts[1], "/")
	if len(split) < 2 {
		return method
	}
	servicePart, methodPart := split[0], split[1]
	if servicePart == "" || methodPart == "" {
		// Malformed procedure path; return it verbatim rather than panic.
		return method
	}
	servicePart = strings.ToUpper(servicePart[:1]) + servicePart[1:]
	methodPart = strings.ToLower(methodPart[:1]) + methodPart[1:]
	return fmt.Sprintf("%s %s", servicePart, methodPart)
}
// NewUnaryLogInterceptor returns a Connect interceptor that assigns every
// unary RPC an operation ID and logs the request/response pair once the call
// completes. The log level follows the outcome: zerolog's Err() selects error
// level when err != nil and info level otherwise.
func NewUnaryLogInterceptor(logger *zerolog.Logger) connect.UnaryInterceptorFunc {
	interceptor := func(next connect.UnaryFunc) connect.UnaryFunc {
		return connect.UnaryFunc(func(
			ctx context.Context,
			req connect.AnyRequest,
		) (connect.AnyResponse, error) {
			ctx = AddRequestIDToContext(ctx)

			res, err := next(ctx, req)

			l := logger.
				Err(err).
				Str("method", DefaultHTTPMethod+" "+req.Spec().Procedure).
				Str(string(OperationIDKey), ctx.Value(OperationIDKey).(string))
			if err != nil {
				l = l.Int("error_code", int(connect.CodeOf(err)))
			}
			// NOTE(review): the original guarded `req != nil` here, but
			// req.Spec() above already dereferences req, so the guard was
			// dead code and has been removed.
			l = l.Interface("request", req.Any())
			if err == nil {
				if res != nil {
					l = l.Interface("response", res.Any())
				} else {
					l = l.Interface("response", nil)
				}
			}
			l.Msg(formatMethod(req.Spec().Procedure))

			return res, err
		})
	}

	return connect.UnaryInterceptorFunc(interceptor)
}
// LogServerStreamWithoutEvents wraps a Connect server-stream handler with
// start/end debug logging, without logging the individual stream events.
// Errors from the handler are logged at error level with their Connect code
// and returned unchanged.
func LogServerStreamWithoutEvents[T any, R any](
	ctx context.Context,
	logger *zerolog.Logger,
	req *connect.Request[R],
	stream *connect.ServerStream[T],
	handler func(ctx context.Context, req *connect.Request[R], stream *connect.ServerStream[T]) error,
) error {
	ctx = AddRequestIDToContext(ctx)

	method := DefaultHTTPMethod + " " + req.Spec().Procedure
	operation := ctx.Value(OperationIDKey).(string)
	label := formatMethod(req.Spec().Procedure)

	// NOTE(review): the original checked `req != nil` after already calling
	// req.Spec(); a nil req would have panicked first, so the dead guard has
	// been removed.
	logger.Debug().
		Str("method", method).
		Str(string(OperationIDKey), operation).
		Interface("request", req.Any()).
		Msg(fmt.Sprintf("%s (server stream start)", label))

	err := handler(ctx, req, stream)

	logEvent := getErrDebugLogEvent(logger, err).
		Str("method", method).
		Str(string(OperationIDKey), operation)
	if err != nil {
		logEvent = logEvent.Int("error_code", int(connect.CodeOf(err)))
	} else {
		logEvent = logEvent.Interface("response", nil)
	}
	logEvent.Msg(fmt.Sprintf("%s (server stream end)", label))

	return err
}
// LogClientStreamWithoutEvents wraps a Connect client-stream handler with
// start/end debug logging, without logging the individual stream events.
// Errors from the handler are logged at error level with their Connect code;
// the handler's response and error are returned unchanged.
func LogClientStreamWithoutEvents[T any, R any](
	ctx context.Context,
	logger *zerolog.Logger,
	stream *connect.ClientStream[T],
	handler func(ctx context.Context, stream *connect.ClientStream[T]) (*connect.Response[R], error),
) (*connect.Response[R], error) {
	ctx = AddRequestIDToContext(ctx)

	method := DefaultHTTPMethod + " " + stream.Spec().Procedure
	operation := ctx.Value(OperationIDKey).(string)
	label := formatMethod(stream.Spec().Procedure)

	logger.Debug().
		Str("method", method).
		Str(string(OperationIDKey), operation).
		Msg(fmt.Sprintf("%s (client stream start)", label))

	res, err := handler(ctx, stream)

	end := getErrDebugLogEvent(logger, err).
		Str("method", method).
		Str(string(OperationIDKey), operation)
	switch {
	case err != nil:
		end = end.Int("error_code", int(connect.CodeOf(err)))
	case res != nil:
		end = end.Interface("response", res.Any())
	default:
		end = end.Interface("response", nil)
	}
	end.Msg(fmt.Sprintf("%s (client stream end)", label))

	return res, err
}
// getErrDebugLogEvent selects the level for an operation's end log entry:
// error level (with the error attached) when err is non-nil, debug otherwise.
func getErrDebugLogEvent(logger *zerolog.Logger, err error) *zerolog.Event {
	if err == nil {
		return logger.Debug() //nolint:zerologlint // event is finished by the caller
	}
	return logger.Error().Err(err) //nolint:zerologlint // event is finished by the caller
}

View File

@ -0,0 +1,35 @@
package logs
import (
"context"
"io"
"os"
"time"
"github.com/rs/zerolog"
"git.omukk.dev/wrenn/sandbox/envd/internal/host"
"git.omukk.dev/wrenn/sandbox/envd/internal/logs/exporter"
)
// NewLogger builds the process-wide zerolog logger at debug level. Outside
// Firecracker (isNotFC) it writes to stdout only; inside Firecracker it
// additionally ships each line to the HTTP log exporter, which is fed
// collector metadata via mmdsChan.
// Note: mutates zerolog's package-level timestamp settings as a side effect.
func NewLogger(ctx context.Context, isNotFC bool, mmdsChan <-chan *host.MMDSOpts) *zerolog.Logger {
	zerolog.TimestampFieldName = "timestamp"
	zerolog.TimeFieldFormat = time.RFC3339Nano

	sinks := []io.Writer{}
	if isNotFC {
		sinks = append(sinks, os.Stdout)
	} else {
		sinks = append(sinks, exporter.NewHTTPLogsExporter(ctx, isNotFC, mmdsChan), os.Stdout)
	}

	logger := zerolog.
		New(io.MultiWriter(sinks...)).
		With().
		Timestamp().
		Logger().
		Level(zerolog.DebugLevel)

	return &logger
}