// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.30.0
// source: metrics.sql

package db

import (
	"context"

	"github.com/jackc/pgx/v5/pgtype"
)
const computeDailyUsageForDay = `-- name: ComputeDailyUsageForDay :one
|
|
SELECT
|
|
COALESCE(SUM(vcpus_reserved * 10.0 / 60.0), 0)::NUMERIC(18,4) AS cpu_minutes,
|
|
COALESCE(SUM(memory_mb_reserved * 10.0 / 60.0), 0)::NUMERIC(18,4) AS ram_mb_minutes
|
|
FROM sandbox_metrics_snapshots
|
|
WHERE team_id = $1
|
|
AND sampled_at >= $2
|
|
AND sampled_at < $3
|
|
`
|
|
|
|
type ComputeDailyUsageForDayParams struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
SampledAt pgtype.Timestamptz `json:"sampled_at"`
|
|
SampledAt_2 pgtype.Timestamptz `json:"sampled_at_2"`
|
|
}
|
|
|
|
type ComputeDailyUsageForDayRow struct {
|
|
CpuMinutes pgtype.Numeric `json:"cpu_minutes"`
|
|
RamMbMinutes pgtype.Numeric `json:"ram_mb_minutes"`
|
|
}
|
|
|
|
func (q *Queries) ComputeDailyUsageForDay(ctx context.Context, arg ComputeDailyUsageForDayParams) (ComputeDailyUsageForDayRow, error) {
|
|
row := q.db.QueryRow(ctx, computeDailyUsageForDay, arg.TeamID, arg.SampledAt, arg.SampledAt_2)
|
|
var i ComputeDailyUsageForDayRow
|
|
err := row.Scan(&i.CpuMinutes, &i.RamMbMinutes)
|
|
return i, err
|
|
}
|
|
|
|
const deleteDailyUsageByTeam = `-- name: DeleteDailyUsageByTeam :exec
|
|
DELETE FROM daily_usage WHERE team_id = $1
|
|
`
|
|
|
|
func (q *Queries) DeleteDailyUsageByTeam(ctx context.Context, teamID pgtype.UUID) error {
|
|
_, err := q.db.Exec(ctx, deleteDailyUsageByTeam, teamID)
|
|
return err
|
|
}
|
|
|
|
const deleteMetricPointsByTeam = `-- name: DeleteMetricPointsByTeam :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE sandbox_id IN (SELECT id FROM sandboxes WHERE team_id = $1)
|
|
`
|
|
|
|
func (q *Queries) DeleteMetricPointsByTeam(ctx context.Context, teamID pgtype.UUID) error {
|
|
_, err := q.db.Exec(ctx, deleteMetricPointsByTeam, teamID)
|
|
return err
|
|
}
|
|
|
|
const deleteMetricsSnapshotsByTeam = `-- name: DeleteMetricsSnapshotsByTeam :exec
|
|
DELETE FROM sandbox_metrics_snapshots WHERE team_id = $1
|
|
`
|
|
|
|
func (q *Queries) DeleteMetricsSnapshotsByTeam(ctx context.Context, teamID pgtype.UUID) error {
|
|
_, err := q.db.Exec(ctx, deleteMetricsSnapshotsByTeam, teamID)
|
|
return err
|
|
}
|
|
|
|
const deleteSandboxMetricPoints = `-- name: DeleteSandboxMetricPoints :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1
|
|
`
|
|
|
|
func (q *Queries) DeleteSandboxMetricPoints(ctx context.Context, sandboxID pgtype.UUID) error {
|
|
_, err := q.db.Exec(ctx, deleteSandboxMetricPoints, sandboxID)
|
|
return err
|
|
}
|
|
|
|
const deleteSandboxMetricPointsByTier = `-- name: DeleteSandboxMetricPointsByTier :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1 AND tier = $2
|
|
`
|
|
|
|
type DeleteSandboxMetricPointsByTierParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
}
|
|
|
|
func (q *Queries) DeleteSandboxMetricPointsByTier(ctx context.Context, arg DeleteSandboxMetricPointsByTierParams) error {
|
|
_, err := q.db.Exec(ctx, deleteSandboxMetricPointsByTier, arg.SandboxID, arg.Tier)
|
|
return err
|
|
}
|
|
|
|
const getDailyUsage = `-- name: GetDailyUsage :many
|
|
SELECT day, cpu_minutes, ram_mb_minutes
|
|
FROM daily_usage
|
|
WHERE team_id = $1
|
|
AND day >= $2
|
|
AND day <= $3
|
|
ORDER BY day ASC
|
|
`
|
|
|
|
type GetDailyUsageParams struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
Day pgtype.Date `json:"day"`
|
|
Day_2 pgtype.Date `json:"day_2"`
|
|
}
|
|
|
|
type GetDailyUsageRow struct {
|
|
Day pgtype.Date `json:"day"`
|
|
CpuMinutes pgtype.Numeric `json:"cpu_minutes"`
|
|
RamMbMinutes pgtype.Numeric `json:"ram_mb_minutes"`
|
|
}
|
|
|
|
func (q *Queries) GetDailyUsage(ctx context.Context, arg GetDailyUsageParams) ([]GetDailyUsageRow, error) {
|
|
rows, err := q.db.Query(ctx, getDailyUsage, arg.TeamID, arg.Day, arg.Day_2)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetDailyUsageRow
|
|
for rows.Next() {
|
|
var i GetDailyUsageRow
|
|
if err := rows.Scan(&i.Day, &i.CpuMinutes, &i.RamMbMinutes); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getLiveMetrics = `-- name: GetLiveMetrics :one
|
|
SELECT
|
|
(COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count,
|
|
(COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved,
|
|
(COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0)
|
|
+ COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved
|
|
FROM sandboxes
|
|
WHERE team_id = $1
|
|
`
|
|
|
|
type GetLiveMetricsRow struct {
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
// Reads directly from sandboxes for accurate real-time current values.
|
|
// CPU reserved = running + starting only (paused VMs release CPU).
|
|
// RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling).
|
|
func (q *Queries) GetLiveMetrics(ctx context.Context, teamID pgtype.UUID) (GetLiveMetricsRow, error) {
|
|
row := q.db.QueryRow(ctx, getLiveMetrics, teamID)
|
|
var i GetLiveMetricsRow
|
|
err := row.Scan(&i.RunningCount, &i.VcpusReserved, &i.MemoryMbReserved)
|
|
return i, err
|
|
}
|
|
|
|
const getPeakMetrics = `-- name: GetPeakMetrics :one
|
|
SELECT
|
|
COALESCE(MAX(running_count), 0)::INTEGER AS peak_running_count,
|
|
COALESCE(MAX(vcpus_reserved), 0)::INTEGER AS peak_vcpus,
|
|
COALESCE(MAX(memory_mb_reserved), 0)::INTEGER AS peak_memory_mb
|
|
FROM sandbox_metrics_snapshots
|
|
WHERE team_id = $1
|
|
AND sampled_at > NOW() - INTERVAL '30 days'
|
|
`
|
|
|
|
type GetPeakMetricsRow struct {
|
|
PeakRunningCount int32 `json:"peak_running_count"`
|
|
PeakVcpus int32 `json:"peak_vcpus"`
|
|
PeakMemoryMb int32 `json:"peak_memory_mb"`
|
|
}
|
|
|
|
func (q *Queries) GetPeakMetrics(ctx context.Context, teamID pgtype.UUID) (GetPeakMetricsRow, error) {
|
|
row := q.db.QueryRow(ctx, getPeakMetrics, teamID)
|
|
var i GetPeakMetricsRow
|
|
err := row.Scan(&i.PeakRunningCount, &i.PeakVcpus, &i.PeakMemoryMb)
|
|
return i, err
|
|
}
|
|
|
|
const getSandboxMetricPoints = `-- name: GetSandboxMetricPoints :many
|
|
SELECT ts, cpu_pct, mem_bytes, disk_bytes
|
|
FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1 AND tier = $2 AND ts >= $3
|
|
ORDER BY ts ASC
|
|
`
|
|
|
|
type GetSandboxMetricPointsParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
Ts int64 `json:"ts"`
|
|
}
|
|
|
|
type GetSandboxMetricPointsRow struct {
|
|
Ts int64 `json:"ts"`
|
|
CpuPct float64 `json:"cpu_pct"`
|
|
MemBytes int64 `json:"mem_bytes"`
|
|
DiskBytes int64 `json:"disk_bytes"`
|
|
}
|
|
|
|
func (q *Queries) GetSandboxMetricPoints(ctx context.Context, arg GetSandboxMetricPointsParams) ([]GetSandboxMetricPointsRow, error) {
|
|
rows, err := q.db.Query(ctx, getSandboxMetricPoints, arg.SandboxID, arg.Tier, arg.Ts)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetSandboxMetricPointsRow
|
|
for rows.Next() {
|
|
var i GetSandboxMetricPointsRow
|
|
if err := rows.Scan(
|
|
&i.Ts,
|
|
&i.CpuPct,
|
|
&i.MemBytes,
|
|
&i.DiskBytes,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getTeamsWithSnapshots = `-- name: GetTeamsWithSnapshots :many
|
|
SELECT DISTINCT team_id
|
|
FROM sandbox_metrics_snapshots
|
|
WHERE sampled_at > NOW() - INTERVAL '93 days'
|
|
`
|
|
|
|
func (q *Queries) GetTeamsWithSnapshots(ctx context.Context) ([]pgtype.UUID, error) {
|
|
rows, err := q.db.Query(ctx, getTeamsWithSnapshots)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []pgtype.UUID
|
|
for rows.Next() {
|
|
var team_id pgtype.UUID
|
|
if err := rows.Scan(&team_id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, team_id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertMetricsSnapshot = `-- name: InsertMetricsSnapshot :exec
|
|
INSERT INTO sandbox_metrics_snapshots (team_id, running_count, vcpus_reserved, memory_mb_reserved)
|
|
VALUES ($1, $2, $3, $4)
|
|
`
|
|
|
|
type InsertMetricsSnapshotParams struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
func (q *Queries) InsertMetricsSnapshot(ctx context.Context, arg InsertMetricsSnapshotParams) error {
|
|
_, err := q.db.Exec(ctx, insertMetricsSnapshot,
|
|
arg.TeamID,
|
|
arg.RunningCount,
|
|
arg.VcpusReserved,
|
|
arg.MemoryMbReserved,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const insertSandboxMetricPoint = `-- name: InsertSandboxMetricPoint :exec
|
|
INSERT INTO sandbox_metric_points (sandbox_id, tier, ts, cpu_pct, mem_bytes, disk_bytes)
|
|
VALUES ($1, $2, $3, $4, $5, $6)
|
|
ON CONFLICT (sandbox_id, tier, ts) DO NOTHING
|
|
`
|
|
|
|
type InsertSandboxMetricPointParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
Ts int64 `json:"ts"`
|
|
CpuPct float64 `json:"cpu_pct"`
|
|
MemBytes int64 `json:"mem_bytes"`
|
|
DiskBytes int64 `json:"disk_bytes"`
|
|
}
|
|
|
|
func (q *Queries) InsertSandboxMetricPoint(ctx context.Context, arg InsertSandboxMetricPointParams) error {
|
|
_, err := q.db.Exec(ctx, insertSandboxMetricPoint,
|
|
arg.SandboxID,
|
|
arg.Tier,
|
|
arg.Ts,
|
|
arg.CpuPct,
|
|
arg.MemBytes,
|
|
arg.DiskBytes,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const pruneOldMetrics = `-- name: PruneOldMetrics :exec
|
|
DELETE FROM sandbox_metrics_snapshots
|
|
WHERE sampled_at < NOW() - INTERVAL '60 days'
|
|
`
|
|
|
|
func (q *Queries) PruneOldMetrics(ctx context.Context) error {
|
|
_, err := q.db.Exec(ctx, pruneOldMetrics)
|
|
return err
|
|
}
|
|
|
|
const pruneSandboxMetricPoints = `-- name: PruneSandboxMetricPoints :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE ts < EXTRACT(EPOCH FROM NOW() - INTERVAL '30 days')::BIGINT
|
|
`
|
|
|
|
// Remove metric points older than 30 days for destroyed sandboxes.
|
|
func (q *Queries) PruneSandboxMetricPoints(ctx context.Context) error {
|
|
_, err := q.db.Exec(ctx, pruneSandboxMetricPoints)
|
|
return err
|
|
}
|
|
|
|
const sampleSandboxMetrics = `-- name: SampleSandboxMetrics :many
|
|
SELECT
|
|
team_id,
|
|
(COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count,
|
|
(COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved,
|
|
(COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0)
|
|
+ COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved
|
|
FROM sandboxes
|
|
GROUP BY team_id
|
|
`
|
|
|
|
type SampleSandboxMetricsRow struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
// Aggregates per-team resource usage from the live sandboxes table.
|
|
// Groups by all teams that have any sandbox row (including stopped) so that
|
|
// zero-value snapshots are recorded when all capsules are stopped, keeping the
|
|
// time-series charts continuous rather than trailing off into empty space.
|
|
// CPU reserved = running + starting only (paused VMs release CPU).
|
|
// RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling).
|
|
func (q *Queries) SampleSandboxMetrics(ctx context.Context) ([]SampleSandboxMetricsRow, error) {
|
|
rows, err := q.db.Query(ctx, sampleSandboxMetrics)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []SampleSandboxMetricsRow
|
|
for rows.Next() {
|
|
var i SampleSandboxMetricsRow
|
|
if err := rows.Scan(
|
|
&i.TeamID,
|
|
&i.RunningCount,
|
|
&i.VcpusReserved,
|
|
&i.MemoryMbReserved,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const upsertDailyUsage = `-- name: UpsertDailyUsage :exec
|
|
INSERT INTO daily_usage (team_id, day, cpu_minutes, ram_mb_minutes)
|
|
VALUES ($1, $2, $3, $4)
|
|
ON CONFLICT (team_id, day) DO UPDATE
|
|
SET cpu_minutes = EXCLUDED.cpu_minutes,
|
|
ram_mb_minutes = EXCLUDED.ram_mb_minutes
|
|
`
|
|
|
|
type UpsertDailyUsageParams struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
Day pgtype.Date `json:"day"`
|
|
CpuMinutes pgtype.Numeric `json:"cpu_minutes"`
|
|
RamMbMinutes pgtype.Numeric `json:"ram_mb_minutes"`
|
|
}
|
|
|
|
func (q *Queries) UpsertDailyUsage(ctx context.Context, arg UpsertDailyUsageParams) error {
|
|
_, err := q.db.Exec(ctx, upsertDailyUsage,
|
|
arg.TeamID,
|
|
arg.Day,
|
|
arg.CpuMinutes,
|
|
arg.RamMbMinutes,
|
|
)
|
|
return err
|
|
}
|