forked from wrenn/wrenn
Replace one-shot clock_settime with chrony for continuous guest time sync
Switch from the envd /init endpoint pushing host time via syscall to chronyd reading the KVM PTP hardware clock (/dev/ptp0) continuously. This fixes clock drift between init calls and handles snapshot resume gracefully.

Changes:
- Add clocksource=kvm-clock kernel boot arg
- Start chronyd in wrenn-init.sh before tini (PHC /dev/ptp0, makestep 1.0 -1)
- Remove clock_settime logic from envd SetData and shouldSetSystemTime
- Remove client.Init() clock sync calls from sandbox manager (3 sites)
- Remove Init() method from envdclient (no longer needed)
- Simplify rootfs scripts: socat/chrony now come from apt in the container image, only envd/wrenn-init/tini are injected by build scripts
This commit is contained in:
@@ -203,16 +203,6 @@ func (m *Manager) Create(ctx context.Context, sandboxID, template string, vcpus,
|
||||
return nil, fmt.Errorf("wait for envd: %w", err)
|
||||
}
|
||||
|
||||
// Sync guest clock in background. Non-fatal — sandbox is usable before this completes.
|
||||
// Run in a goroutine so Init latency doesn't block the RPC response back to the control plane.
|
||||
go func() {
|
||||
initCtx, initCancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer initCancel()
|
||||
if err := client.Init(initCtx); err != nil {
|
||||
slog.Warn("envd init (clock sync) failed", "sandbox", sandboxID, "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
now := time.Now()
|
||||
sb := &sandboxState{
|
||||
Sandbox: models.Sandbox{
|
||||
@@ -636,16 +626,6 @@ func (m *Manager) Resume(ctx context.Context, sandboxID string, timeoutSec int)
|
||||
return nil, fmt.Errorf("wait for envd: %w", err)
|
||||
}
|
||||
|
||||
// Sync guest clock in background. Non-fatal — sandbox is usable before this completes.
|
||||
// Run in a goroutine so Init latency doesn't block the RPC response back to the control plane.
|
||||
go func() {
|
||||
initCtx, initCancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer initCancel()
|
||||
if err := client.Init(initCtx); err != nil {
|
||||
slog.Warn("envd init (clock sync) failed", "sandbox", sandboxID, "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
now := time.Now()
|
||||
sb := &sandboxState{
|
||||
Sandbox: models.Sandbox{
|
||||
@@ -957,16 +937,6 @@ func (m *Manager) createFromSnapshot(ctx context.Context, sandboxID, snapshotNam
|
||||
return nil, fmt.Errorf("wait for envd: %w", err)
|
||||
}
|
||||
|
||||
// Sync guest clock in background. Non-fatal — sandbox is usable before this completes.
|
||||
// Run in a goroutine so Init latency doesn't block the RPC response back to the control plane.
|
||||
go func() {
|
||||
initCtx, initCancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer initCancel()
|
||||
if err := client.Init(initCtx); err != nil {
|
||||
slog.Warn("envd init (clock sync) failed", "sandbox", sandboxID, "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
now := time.Now()
|
||||
sb := &sandboxState{
|
||||
Sandbox: models.Sandbox{
|
||||
|
||||
Reference in New Issue
Block a user