forked from wrenn/wrenn
Consolidate 16 migrations into one with UUID columns for all entity
IDs. TEXT is kept only for polymorphic fields (audit_logs.actor_id,
resource_id) and template names. The id package now generates UUIDs
via google/uuid, with Format*/Parse* helpers for the prefixed wire
format (sb-{uuid}, usr-{uuid}, etc.). Auth context, services, and
handlers pass pgtype.UUID internally; conversion to/from prefixed
strings happens at API and RPC boundaries. Adds PlatformTeamID
(all-zeros UUID) for shared resources.
251 lines
8.0 KiB
Go
// Code generated by sqlc. DO NOT EDIT.
|
|
// versions:
|
|
// sqlc v1.30.0
|
|
// source: metrics.sql
|
|
|
|
package db
|
|
|
|
import (
|
|
"context"
|
|
|
|
"github.com/jackc/pgx/v5/pgtype"
|
|
)
|
|
|
|
const deleteSandboxMetricPoints = `-- name: DeleteSandboxMetricPoints :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1
|
|
`
|
|
|
|
func (q *Queries) DeleteSandboxMetricPoints(ctx context.Context, sandboxID pgtype.UUID) error {
|
|
_, err := q.db.Exec(ctx, deleteSandboxMetricPoints, sandboxID)
|
|
return err
|
|
}
|
|
|
|
const deleteSandboxMetricPointsByTier = `-- name: DeleteSandboxMetricPointsByTier :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1 AND tier = $2
|
|
`
|
|
|
|
type DeleteSandboxMetricPointsByTierParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
}
|
|
|
|
func (q *Queries) DeleteSandboxMetricPointsByTier(ctx context.Context, arg DeleteSandboxMetricPointsByTierParams) error {
|
|
_, err := q.db.Exec(ctx, deleteSandboxMetricPointsByTier, arg.SandboxID, arg.Tier)
|
|
return err
|
|
}
|
|
|
|
const getLiveMetrics = `-- name: GetLiveMetrics :one
|
|
SELECT
|
|
(COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count,
|
|
(COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved,
|
|
(COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0)
|
|
+ COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved
|
|
FROM sandboxes
|
|
WHERE team_id = $1
|
|
`
|
|
|
|
type GetLiveMetricsRow struct {
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
// Reads directly from sandboxes for accurate real-time current values.
|
|
// CPU reserved = running + starting only (paused VMs release CPU).
|
|
// RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling).
|
|
func (q *Queries) GetLiveMetrics(ctx context.Context, teamID pgtype.UUID) (GetLiveMetricsRow, error) {
|
|
row := q.db.QueryRow(ctx, getLiveMetrics, teamID)
|
|
var i GetLiveMetricsRow
|
|
err := row.Scan(&i.RunningCount, &i.VcpusReserved, &i.MemoryMbReserved)
|
|
return i, err
|
|
}
|
|
|
|
const getPeakMetrics = `-- name: GetPeakMetrics :one
|
|
SELECT
|
|
COALESCE(MAX(running_count), 0)::INTEGER AS peak_running_count,
|
|
COALESCE(MAX(vcpus_reserved), 0)::INTEGER AS peak_vcpus,
|
|
COALESCE(MAX(memory_mb_reserved), 0)::INTEGER AS peak_memory_mb
|
|
FROM sandbox_metrics_snapshots
|
|
WHERE team_id = $1
|
|
AND sampled_at > NOW() - INTERVAL '30 days'
|
|
`
|
|
|
|
type GetPeakMetricsRow struct {
|
|
PeakRunningCount int32 `json:"peak_running_count"`
|
|
PeakVcpus int32 `json:"peak_vcpus"`
|
|
PeakMemoryMb int32 `json:"peak_memory_mb"`
|
|
}
|
|
|
|
func (q *Queries) GetPeakMetrics(ctx context.Context, teamID pgtype.UUID) (GetPeakMetricsRow, error) {
|
|
row := q.db.QueryRow(ctx, getPeakMetrics, teamID)
|
|
var i GetPeakMetricsRow
|
|
err := row.Scan(&i.PeakRunningCount, &i.PeakVcpus, &i.PeakMemoryMb)
|
|
return i, err
|
|
}
|
|
|
|
const getSandboxMetricPoints = `-- name: GetSandboxMetricPoints :many
|
|
SELECT ts, cpu_pct, mem_bytes, disk_bytes
|
|
FROM sandbox_metric_points
|
|
WHERE sandbox_id = $1 AND tier = $2 AND ts >= $3
|
|
ORDER BY ts ASC
|
|
`
|
|
|
|
type GetSandboxMetricPointsParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
Ts int64 `json:"ts"`
|
|
}
|
|
|
|
type GetSandboxMetricPointsRow struct {
|
|
Ts int64 `json:"ts"`
|
|
CpuPct float64 `json:"cpu_pct"`
|
|
MemBytes int64 `json:"mem_bytes"`
|
|
DiskBytes int64 `json:"disk_bytes"`
|
|
}
|
|
|
|
func (q *Queries) GetSandboxMetricPoints(ctx context.Context, arg GetSandboxMetricPointsParams) ([]GetSandboxMetricPointsRow, error) {
|
|
rows, err := q.db.Query(ctx, getSandboxMetricPoints, arg.SandboxID, arg.Tier, arg.Ts)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetSandboxMetricPointsRow
|
|
for rows.Next() {
|
|
var i GetSandboxMetricPointsRow
|
|
if err := rows.Scan(
|
|
&i.Ts,
|
|
&i.CpuPct,
|
|
&i.MemBytes,
|
|
&i.DiskBytes,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertMetricsSnapshot = `-- name: InsertMetricsSnapshot :exec
|
|
INSERT INTO sandbox_metrics_snapshots (team_id, running_count, vcpus_reserved, memory_mb_reserved)
|
|
VALUES ($1, $2, $3, $4)
|
|
`
|
|
|
|
type InsertMetricsSnapshotParams struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
func (q *Queries) InsertMetricsSnapshot(ctx context.Context, arg InsertMetricsSnapshotParams) error {
|
|
_, err := q.db.Exec(ctx, insertMetricsSnapshot,
|
|
arg.TeamID,
|
|
arg.RunningCount,
|
|
arg.VcpusReserved,
|
|
arg.MemoryMbReserved,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const insertSandboxMetricPoint = `-- name: InsertSandboxMetricPoint :exec
|
|
INSERT INTO sandbox_metric_points (sandbox_id, tier, ts, cpu_pct, mem_bytes, disk_bytes)
|
|
VALUES ($1, $2, $3, $4, $5, $6)
|
|
ON CONFLICT (sandbox_id, tier, ts) DO NOTHING
|
|
`
|
|
|
|
type InsertSandboxMetricPointParams struct {
|
|
SandboxID pgtype.UUID `json:"sandbox_id"`
|
|
Tier string `json:"tier"`
|
|
Ts int64 `json:"ts"`
|
|
CpuPct float64 `json:"cpu_pct"`
|
|
MemBytes int64 `json:"mem_bytes"`
|
|
DiskBytes int64 `json:"disk_bytes"`
|
|
}
|
|
|
|
func (q *Queries) InsertSandboxMetricPoint(ctx context.Context, arg InsertSandboxMetricPointParams) error {
|
|
_, err := q.db.Exec(ctx, insertSandboxMetricPoint,
|
|
arg.SandboxID,
|
|
arg.Tier,
|
|
arg.Ts,
|
|
arg.CpuPct,
|
|
arg.MemBytes,
|
|
arg.DiskBytes,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const pruneOldMetrics = `-- name: PruneOldMetrics :exec
|
|
DELETE FROM sandbox_metrics_snapshots
|
|
WHERE sampled_at < NOW() - INTERVAL '60 days'
|
|
`
|
|
|
|
func (q *Queries) PruneOldMetrics(ctx context.Context) error {
|
|
_, err := q.db.Exec(ctx, pruneOldMetrics)
|
|
return err
|
|
}
|
|
|
|
const pruneSandboxMetricPoints = `-- name: PruneSandboxMetricPoints :exec
|
|
DELETE FROM sandbox_metric_points
|
|
WHERE ts < EXTRACT(EPOCH FROM NOW() - INTERVAL '30 days')::BIGINT
|
|
`
|
|
|
|
// Remove metric points older than 30 days for destroyed sandboxes.
|
|
func (q *Queries) PruneSandboxMetricPoints(ctx context.Context) error {
|
|
_, err := q.db.Exec(ctx, pruneSandboxMetricPoints)
|
|
return err
|
|
}
|
|
|
|
const sampleSandboxMetrics = `-- name: SampleSandboxMetrics :many
|
|
SELECT
|
|
team_id,
|
|
(COUNT(*) FILTER (WHERE status IN ('running', 'starting')))::INTEGER AS running_count,
|
|
(COALESCE(SUM(vcpus) FILTER (WHERE status IN ('running', 'starting')), 0))::INTEGER AS vcpus_reserved,
|
|
(COALESCE(SUM(memory_mb) FILTER (WHERE status IN ('running', 'starting')), 0)
|
|
+ COALESCE(SUM(CEIL(memory_mb::NUMERIC / 2)) FILTER (WHERE status = 'paused'), 0))::INTEGER AS memory_mb_reserved
|
|
FROM sandboxes
|
|
GROUP BY team_id
|
|
`
|
|
|
|
type SampleSandboxMetricsRow struct {
|
|
TeamID pgtype.UUID `json:"team_id"`
|
|
RunningCount int32 `json:"running_count"`
|
|
VcpusReserved int32 `json:"vcpus_reserved"`
|
|
MemoryMbReserved int32 `json:"memory_mb_reserved"`
|
|
}
|
|
|
|
// Aggregates per-team resource usage from the live sandboxes table.
|
|
// Groups by all teams that have any sandbox row (including stopped) so that
|
|
// zero-value snapshots are recorded when all capsules are stopped, keeping the
|
|
// time-series charts continuous rather than trailing off into empty space.
|
|
// CPU reserved = running + starting only (paused VMs release CPU).
|
|
// RAM reserved = running + starting + sum(ceil(each_paused/2)) (per-VM ceiling).
|
|
func (q *Queries) SampleSandboxMetrics(ctx context.Context) ([]SampleSandboxMetricsRow, error) {
|
|
rows, err := q.db.Query(ctx, sampleSandboxMetrics)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []SampleSandboxMetricsRow
|
|
for rows.Next() {
|
|
var i SampleSandboxMetricsRow
|
|
if err := rows.Scan(
|
|
&i.TeamID,
|
|
&i.RunningCount,
|
|
&i.VcpusReserved,
|
|
&i.MemoryMbReserved,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|