forked from wrenn/wrenn
Push GetSandboxMetricPoints time filter into SQL
The query was fetching all rows for a (sandbox_id, tier) pair and filtering by timestamp in Go. For repeatedly-paused sandboxes the 24h tier can accumulate up to 30 days of data, causing up to 120x over-fetching for a 6h range request. Add `AND ts >= $3` to the query so Postgres filters on the primary key (sandbox_id, tier, ts) directly. Drop the now-redundant Go-side timestamp filter, keeping only the loop that maps rows to response points.
This commit is contained in:
@ -123,22 +123,20 @@ func (h *sandboxMetricsHandler) getFromDB(ctx context.Context, w http.ResponseWr
|
||||
rows, err := h.db.GetSandboxMetricPoints(ctx, db.GetSandboxMetricPointsParams{
|
||||
SandboxID: sandboxID,
|
||||
Tier: mapping.tier,
|
||||
Ts: time.Now().Add(-mapping.cutoff).Unix(),
|
||||
})
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "internal_error", "failed to read metrics")
|
||||
return
|
||||
}
|
||||
|
||||
threshold := time.Now().Add(-mapping.cutoff).Unix()
|
||||
var points []metricPointResponse
|
||||
for _, row := range rows {
|
||||
if row.Ts >= threshold {
|
||||
points = append(points, metricPointResponse{
|
||||
TimestampUnix: row.Ts,
|
||||
CPUPct: row.CpuPct,
|
||||
MemBytes: row.MemBytes,
|
||||
DiskBytes: row.DiskBytes,
|
||||
})
|
||||
points := make([]metricPointResponse, len(rows))
|
||||
for i, row := range rows {
|
||||
points[i] = metricPointResponse{
|
||||
TimestampUnix: row.Ts,
|
||||
CPUPct: row.CpuPct,
|
||||
MemBytes: row.MemBytes,
|
||||
DiskBytes: row.DiskBytes,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user