1
0
forked from wrenn/wrenn

COPY multi-source support, configurable rootfs size, build fixes

- COPY now supports multiple sources: COPY a.txt b.txt /dest/
  Last argument is always destination (matches Dockerfile semantics).
- COPY resolves relative destinations against current WORKDIR.
- WRENN_DEFAULT_ROOTFS_SIZE env var (e.g. 5G, 2Gi, 1000M, 512Mi)
  controls template rootfs expansion. Used both at agent startup
  (EnsureImageSizes) and after FlattenRootfs (shrink then re-expand).
- Pre-build now sets WORKDIR /home/wrenn-user after USER switch.
- Extracted archive files get chmod a+rX for readability.
- Path traversal validation on COPY sources.
This commit is contained in:
2026-04-12 03:39:17 +06:00
parent 46c43b95c2
commit 25b5258841
8 changed files with 110 additions and 29 deletions

View File

@@ -12,6 +12,7 @@ WRENN_HOST_LISTEN_ADDR=:50051
WRENN_DIR=/var/lib/wrenn WRENN_DIR=/var/lib/wrenn
WRENN_HOST_INTERFACE=eth0 WRENN_HOST_INTERFACE=eth0
WRENN_CP_URL=http://localhost:8080 WRENN_CP_URL=http://localhost:8080
WRENN_DEFAULT_ROOTFS_SIZE=5Gi
# Lago (billing — external service) # Lago (billing — external service)
LAGO_API_URL=http://localhost:3000 LAGO_API_URL=http://localhost:3000

View File

@@ -63,15 +63,28 @@ func main() {
os.Exit(1) os.Exit(1)
} }
// Expand base images to the standard disk size (sparse, no extra physical // Parse default rootfs size from env (e.g. "5G", "2Gi", "1000M").
defaultRootfsSizeMB := sandbox.DefaultDiskSizeMB
if sizeStr := os.Getenv("WRENN_DEFAULT_ROOTFS_SIZE"); sizeStr != "" {
parsed, err := sandbox.ParseSizeToMB(sizeStr)
if err != nil {
slog.Error("invalid WRENN_DEFAULT_ROOTFS_SIZE", "value", sizeStr, "error", err)
os.Exit(1)
}
defaultRootfsSizeMB = parsed
slog.Info("using custom rootfs size", "size_mb", defaultRootfsSizeMB)
}
// Expand base images to the configured disk size (sparse, no extra physical
// disk). This ensures dm-snapshot sandboxes see the full size from boot. // disk). This ensures dm-snapshot sandboxes see the full size from boot.
if err := sandbox.EnsureImageSizes(rootDir, sandbox.DefaultDiskSizeMB); err != nil { if err := sandbox.EnsureImageSizes(rootDir, defaultRootfsSizeMB); err != nil {
slog.Error("failed to expand base images", "error", err) slog.Error("failed to expand base images", "error", err)
os.Exit(1) os.Exit(1)
} }
cfg := sandbox.Config{ cfg := sandbox.Config{
WrennDir: rootDir, WrennDir: rootDir,
DefaultRootfsSizeMB: defaultRootfsSizeMB,
} }
mgr := sandbox.New(cfg) mgr := sandbox.New(cfg)

View File

@@ -208,25 +208,33 @@ func execCopy(
bctx *ExecContext, bctx *ExecContext,
execFn ExecFunc, execFn ExecFunc,
) (BuildLogEntry, bool) { ) (BuildLogEntry, bool) {
// Validate source path: must be relative and not escape the archive directory. // Validate all source paths: must be relative and not escape the archive directory.
cleaned := path.Clean(st.Src) var srcPaths []string
if strings.HasPrefix(cleaned, "..") || strings.HasPrefix(cleaned, "/") { for _, s := range st.Srcs {
return BuildLogEntry{ cleaned := path.Clean(s)
Step: step, if strings.HasPrefix(cleaned, "..") || strings.HasPrefix(cleaned, "/") {
Phase: phase, return BuildLogEntry{
Cmd: st.Raw, Step: step,
Stderr: "COPY source must be a relative path within the archive", Phase: phase,
}, false Cmd: st.Raw,
Stderr: fmt.Sprintf("COPY source must be a relative path within the archive: %q", s),
}, false
}
srcPaths = append(srcPaths, shellescape(BuildFilesDir+"/"+cleaned))
} }
src := BuildFilesDir + "/" + cleaned
dst := st.Dst dst := st.Dst
// Resolve relative destination against the current WORKDIR.
if dst != "" && dst[0] != '/' && bctx.WorkDir != "" {
dst = bctx.WorkDir + "/" + dst
}
owner := "root" owner := "root"
if bctx.User != "" { if bctx.User != "" {
owner = bctx.User owner = bctx.User
} }
script := fmt.Sprintf( script := fmt.Sprintf(
"cp -r %s %s && chown -R %s:%s %s", "cp -r %s %s && chown -R %s:%s %s",
shellescape(src), shellescape(dst), shellescape(owner), shellescape(owner), shellescape(dst), strings.Join(srcPaths, " "), shellescape(dst), shellescape(owner), shellescape(owner), shellescape(dst),
) )
entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 60*time.Second, execFn, script) entry := execRawShell(ctx, st.Raw, sandboxID, phase, step, 60*time.Second, execFn, script)

View File

@@ -27,7 +27,7 @@ type Step struct {
Key string // KindENV: variable name; KindUSER: username Key string // KindENV: variable name; KindUSER: username
Value string // KindENV: variable value Value string // KindENV: variable value
Path string // KindWORKDIR: directory path Path string // KindWORKDIR: directory path
Src string // KindCOPY: source path (relative to build archive) Srcs []string // KindCOPY: source paths (relative to build archive)
Dst string // KindCOPY: destination path inside sandbox Dst string // KindCOPY: destination path inside sandbox
} }
@@ -148,12 +148,14 @@ func parseUSER(raw, username string) (Step, error) {
func parseCOPY(raw, rest string) (Step, error) { func parseCOPY(raw, rest string) (Step, error) {
if rest == "" { if rest == "" {
return Step{}, fmt.Errorf("COPY requires <src> <dst>: %q", raw) return Step{}, fmt.Errorf("COPY requires <src>... <dst>: %q", raw)
} }
src, dst, found := strings.Cut(rest, " ") parts := strings.Fields(rest)
dst = strings.TrimSpace(dst) if len(parts) < 2 {
if !found || dst == "" { return Step{}, fmt.Errorf("COPY requires <src>... <dst>: %q", raw)
return Step{}, fmt.Errorf("COPY requires <src> <dst>: %q", raw)
} }
return Step{Kind: KindCOPY, Raw: raw, Src: src, Dst: dst}, nil // Last argument is the destination, everything before is sources.
dst := parts[len(parts)-1]
srcs := parts[:len(parts)-1]
return Step{Kind: KindCOPY, Raw: raw, Srcs: srcs, Dst: dst}, nil
} }

View File

@@ -1,6 +1,7 @@
package recipe package recipe
import ( import (
"reflect"
"testing" "testing"
"time" "time"
) )
@@ -131,7 +132,12 @@ func TestParseStep(t *testing.T) {
{ {
name: "COPY basic", name: "COPY basic",
input: "COPY config.yaml /etc/app/config.yaml", input: "COPY config.yaml /etc/app/config.yaml",
want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml", Src: "config.yaml", Dst: "/etc/app/config.yaml"}, want: Step{Kind: KindCOPY, Raw: "COPY config.yaml /etc/app/config.yaml", Srcs: []string{"config.yaml"}, Dst: "/etc/app/config.yaml"},
},
{
name: "COPY multiple sources",
input: "COPY a.txt b.txt /dest/",
want: Step{Kind: KindCOPY, Raw: "COPY a.txt b.txt /dest/", Srcs: []string{"a.txt", "b.txt"}, Dst: "/dest/"},
}, },
{ {
name: "COPY missing dst", name: "COPY missing dst",
@@ -169,7 +175,7 @@ func TestParseStep(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("ParseStep(%q) unexpected error: %v", tc.input, err) t.Fatalf("ParseStep(%q) unexpected error: %v", tc.input, err)
} }
if got != tc.want { if !reflect.DeepEqual(got, tc.want) {
t.Errorf("ParseStep(%q)\n got %+v\n want %+v", tc.input, got, tc.want) t.Errorf("ParseStep(%q)\n got %+v\n want %+v", tc.input, got, tc.want)
} }
}) })

View File

@@ -6,6 +6,8 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strconv"
"strings"
"git.omukk.dev/wrenn/wrenn/internal/id" "git.omukk.dev/wrenn/wrenn/internal/id"
"git.omukk.dev/wrenn/wrenn/internal/layout" "git.omukk.dev/wrenn/wrenn/internal/layout"
@@ -66,6 +68,42 @@ func EnsureImageSizes(wrennDir string, targetMB int) error {
return nil return nil
} }
// ParseSizeToMB parses a human-readable size string into megabytes.
// Supported suffixes (case-insensitive): G, Gi, M, Mi. Note that both G
// and Gi are treated as binary (1 G == 1 Gi == 1024 MB), and a bare
// number means megabytes.
// Examples: "5G" → 5120, "2Gi" → 2048, "1000M" → 1000, "512Mi" → 512.
// Returns an error for an empty string, an unparsable number, an unknown
// suffix, or a value that rounds down to less than 1 MB (a zero target
// size is almost certainly a configuration mistake — better to fail fast
// here than to pass 0 to downstream image-sizing code).
func ParseSizeToMB(s string) (int, error) {
	s = strings.TrimSpace(s)
	if s == "" {
		return 0, fmt.Errorf("empty size string")
	}
	// Find where the numeric part ends (digits and an optional decimal point).
	i := 0
	for i < len(s) && (s[i] == '.' || (s[i] >= '0' && s[i] <= '9')) {
		i++
	}
	if i == 0 {
		return 0, fmt.Errorf("invalid size %q: no numeric value", s)
	}
	num, err := strconv.ParseFloat(s[:i], 64)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %w", s, err)
	}
	var mb float64
	suffix := strings.TrimSpace(s[i:])
	switch {
	case strings.EqualFold(suffix, "G"), strings.EqualFold(suffix, "Gi"):
		mb = num * 1024
	case suffix == "", strings.EqualFold(suffix, "M"), strings.EqualFold(suffix, "Mi"):
		mb = num
	default:
		return 0, fmt.Errorf("invalid size %q: unknown suffix %q (use G, Gi, M, or Mi)", s, suffix)
	}
	// Reject degenerate sizes: truncation to int must leave at least 1 MB.
	if int(mb) <= 0 {
		return 0, fmt.Errorf("invalid size %q: must be at least 1M", s)
	}
	return int(mb), nil
}
// expandImage expands a single rootfs image if it is smaller than targetBytes. // expandImage expands a single rootfs image if it is smaller than targetBytes.
func expandImage(rootfs string, targetBytes int64, targetMB int) error { func expandImage(rootfs string, targetBytes int64, targetMB int) error {
info, err := os.Stat(rootfs) info, err := os.Stat(rootfs)

View File

@@ -28,8 +28,9 @@ import (
// Config holds the paths and defaults for the sandbox manager. // Config holds the paths and defaults for the sandbox manager.
type Config struct { type Config struct {
WrennDir string // root directory (e.g. /var/lib/wrenn); all sub-paths derived via layout package WrennDir string // root directory (e.g. /var/lib/wrenn); all sub-paths derived via layout package
EnvdTimeout time.Duration EnvdTimeout time.Duration
DefaultRootfsSizeMB int // target size for template rootfs images; 0 → DefaultDiskSizeMB
} }
// Manager orchestrates sandbox lifecycle: VM, network, filesystem, envd. // Manager orchestrates sandbox lifecycle: VM, network, filesystem, envd.
@@ -924,8 +925,8 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t
// Clean up dm device and loop device now that flatten is complete. // Clean up dm device and loop device now that flatten is complete.
m.cleanupDM(sb) m.cleanupDM(sb)
// Shrink the flattened image to its minimum size so stored templates are // Shrink the flattened image to its minimum size, then re-expand to the
// compact. EnsureImageSizes will re-expand them on the next agent startup. // configured default rootfs size so sandboxes see the full disk from boot.
if out, err := exec.Command("e2fsck", "-fy", outputPath).CombinedOutput(); err != nil { if out, err := exec.Command("e2fsck", "-fy", outputPath).CombinedOutput(); err != nil {
if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() > 1 { if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() > 1 {
slog.Warn("e2fsck before shrink failed (non-fatal)", "output", string(out), "error", err) slog.Warn("e2fsck before shrink failed (non-fatal)", "output", string(out), "error", err)
@@ -935,6 +936,15 @@ func (m *Manager) FlattenRootfs(ctx context.Context, sandboxID string, teamID, t
slog.Warn("resize2fs -M failed (non-fatal)", "output", string(out), "error", err) slog.Warn("resize2fs -M failed (non-fatal)", "output", string(out), "error", err)
} }
// Re-expand to default rootfs size.
targetMB := m.cfg.DefaultRootfsSizeMB
if targetMB <= 0 {
targetMB = DefaultDiskSizeMB
}
if err := expandImage(outputPath, int64(targetMB)*1024*1024, targetMB); err != nil {
slog.Warn("failed to expand template to default size (non-fatal)", "error", err)
}
sizeBytes, err := snapshot.DirSize(flattenDstDir, "") sizeBytes, err := snapshot.DirSize(flattenDstDir, "")
if err != nil { if err != nil {
slog.Warn("failed to calculate template size", "error", err) slog.Warn("failed to calculate template size", "error", err)

View File

@@ -31,6 +31,7 @@ const (
var preBuildCmds = []string{ var preBuildCmds = []string{
"RUN apt update", "RUN apt update",
"USER wrenn-user", "USER wrenn-user",
"WORKDIR /home/wrenn-user",
} }
// postBuildCmds run after the user recipe to clean up caches and reduce image size. // postBuildCmds run after the user recipe to clean up caches and reduce image size.
@@ -725,11 +726,13 @@ func (s *BuildService) uploadAndExtractArchive(
return fmt.Errorf("write archive: %w", err) return fmt.Errorf("write archive: %w", err)
} }
// Extract. // Extract and ensure files are readable.
fullCmd := extractCmd + " && chmod -R a+rX /tmp/build-files"
resp, err := agent.Exec(ctx, connect.NewRequest(&pb.ExecRequest{ resp, err := agent.Exec(ctx, connect.NewRequest(&pb.ExecRequest{
SandboxId: sandboxID, SandboxId: sandboxID,
Cmd: "/bin/sh", Cmd: "/bin/sh",
Args: []string{"-c", extractCmd}, Args: []string{"-c", fullCmd},
TimeoutSec: 120, TimeoutSec: 120,
})) }))
if err != nil { if err != nil {