1
0
forked from wrenn/wrenn

Push GetSandboxMetricPoints time filter into SQL

The query was fetching all rows for a (sandbox_id, tier) pair and
filtering by timestamp in Go. For repeatedly paused sandboxes the
24h tier can accumulate up to 30 days of data, so a 6h range request
could over-fetch by up to 120x (30 days / 6 hours).

Add AND ts >= $3 to the query so Postgres filters on the primary key
(sandbox_id, tier, ts) directly. Drop the redundant Go-side loop.
This commit is contained in:
2026-03-25 21:53:19 +06:00
parent 6eacf0f735
commit 27ff828e60
3 changed files with 12 additions and 13 deletions

View File

@ -35,7 +35,7 @@ ON CONFLICT (sandbox_id, tier, ts) DO NOTHING;
-- name: GetSandboxMetricPoints :many -- name: GetSandboxMetricPoints :many
SELECT ts, cpu_pct, mem_bytes, disk_bytes SELECT ts, cpu_pct, mem_bytes, disk_bytes
FROM sandbox_metric_points FROM sandbox_metric_points
WHERE sandbox_id = $1 AND tier = $2 WHERE sandbox_id = $1 AND tier = $2 AND ts >= $3
ORDER BY ts ASC; ORDER BY ts ASC;
-- name: DeleteSandboxMetricPoints :exec -- name: DeleteSandboxMetricPoints :exec

View File

@ -123,22 +123,20 @@ func (h *sandboxMetricsHandler) getFromDB(ctx context.Context, w http.ResponseWr
rows, err := h.db.GetSandboxMetricPoints(ctx, db.GetSandboxMetricPointsParams{ rows, err := h.db.GetSandboxMetricPoints(ctx, db.GetSandboxMetricPointsParams{
SandboxID: sandboxID, SandboxID: sandboxID,
Tier: mapping.tier, Tier: mapping.tier,
Ts: time.Now().Add(-mapping.cutoff).Unix(),
}) })
if err != nil { if err != nil {
writeError(w, http.StatusInternalServerError, "internal_error", "failed to read metrics") writeError(w, http.StatusInternalServerError, "internal_error", "failed to read metrics")
return return
} }
threshold := time.Now().Add(-mapping.cutoff).Unix() points := make([]metricPointResponse, len(rows))
var points []metricPointResponse for i, row := range rows {
for _, row := range rows { points[i] = metricPointResponse{
if row.Ts >= threshold { TimestampUnix: row.Ts,
points = append(points, metricPointResponse{ CPUPct: row.CpuPct,
TimestampUnix: row.Ts, MemBytes: row.MemBytes,
CPUPct: row.CpuPct, DiskBytes: row.DiskBytes,
MemBytes: row.MemBytes,
DiskBytes: row.DiskBytes,
})
} }
} }

View File

@ -86,13 +86,14 @@ func (q *Queries) GetPeakMetrics(ctx context.Context, teamID string) (GetPeakMet
const getSandboxMetricPoints = `-- name: GetSandboxMetricPoints :many const getSandboxMetricPoints = `-- name: GetSandboxMetricPoints :many
SELECT ts, cpu_pct, mem_bytes, disk_bytes SELECT ts, cpu_pct, mem_bytes, disk_bytes
FROM sandbox_metric_points FROM sandbox_metric_points
WHERE sandbox_id = $1 AND tier = $2 WHERE sandbox_id = $1 AND tier = $2 AND ts >= $3
ORDER BY ts ASC ORDER BY ts ASC
` `
type GetSandboxMetricPointsParams struct { type GetSandboxMetricPointsParams struct {
SandboxID string `json:"sandbox_id"` SandboxID string `json:"sandbox_id"`
Tier string `json:"tier"` Tier string `json:"tier"`
Ts int64 `json:"ts"`
} }
type GetSandboxMetricPointsRow struct { type GetSandboxMetricPointsRow struct {
@ -103,7 +104,7 @@ type GetSandboxMetricPointsRow struct {
} }
func (q *Queries) GetSandboxMetricPoints(ctx context.Context, arg GetSandboxMetricPointsParams) ([]GetSandboxMetricPointsRow, error) { func (q *Queries) GetSandboxMetricPoints(ctx context.Context, arg GetSandboxMetricPointsParams) ([]GetSandboxMetricPointsRow, error) {
rows, err := q.db.Query(ctx, getSandboxMetricPoints, arg.SandboxID, arg.Tier) rows, err := q.db.Query(ctx, getSandboxMetricPoints, arg.SandboxID, arg.Tier, arg.Ts)
if err != nil { if err != nil {
return nil, err return nil, err
} }