forked from wrenn/wrenn
v0.1.6 (#45)
## What's New? Performance updates for large capsules, admin panel enhancements, and bug fixes ### Envd - Fixed a bug with sandbox metrics calculation - Page cache drop and balloon inflation to reduce memfile snapshot size - Updated RPC timeout logic for better control - Added tests ### Admin Panel - Add/Remove platform admin - Updated template deletion logic for fine-grained permissions ### Others - Minor frontend visual improvements - Minor bugfixes - Version bump Co-authored-by: Tasnim Kabir Sadik <tksadik92@gmail.com> Reviewed-on: wrenn/wrenn#45 Co-authored-by: pptx704 <rafeed@omukk.dev> Co-committed-by: pptx704 <rafeed@omukk.dev>
This commit is contained in:
@ -145,3 +145,192 @@ pub fn parse_content_encoding<B>(r: &Request<B>) -> Result<&'static str, String>
|
||||
|
||||
Err(format!("unsupported Content-Encoding: {header}, supported: {SUPPORTED_ENCODINGS:?}"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use axum::http::Request;

    /// Build a request carrying a single `name: value` header.
    ///
    /// Shared by the two header-specific helpers below so the builder
    /// chain lives in exactly one place.
    fn req_with_header(name: &str, value: &str) -> Request<()> {
        Request::builder()
            .header(name, value)
            .body(())
            .unwrap()
    }

    /// Request with only an `Accept-Encoding` header set to `v`.
    fn req_with_accept(v: &str) -> Request<()> {
        req_with_header("accept-encoding", v)
    }

    /// Request with only a `Content-Encoding` header set to `v`.
    fn req_with_content(v: &str) -> Request<()> {
        req_with_header("content-encoding", v)
    }

    /// Request with no headers at all (exercises the default paths).
    fn req_no_headers() -> Request<()> {
        Request::builder().body(()).unwrap()
    }

    // --- parse_encoding_with_quality ---

    #[test]
    fn encoding_quality_default_1() {
        // Per RFC 9110, a coding with no q-parameter defaults to q=1.
        let eq = parse_encoding_with_quality("gzip");
        assert_eq!(eq.encoding, "gzip");
        assert_eq!(eq.quality, 1.0);
    }

    #[test]
    fn encoding_quality_explicit() {
        let eq = parse_encoding_with_quality("gzip;q=0.8");
        assert_eq!(eq.encoding, "gzip");
        assert_eq!(eq.quality, 0.8);
    }

    #[test]
    fn encoding_quality_case_insensitive() {
        // Both the coding name and the `q` key are case-insensitive.
        let eq = parse_encoding_with_quality("GZIP;Q=0.5");
        assert_eq!(eq.encoding, "gzip");
        assert_eq!(eq.quality, 0.5);
    }

    #[test]
    fn encoding_quality_zero() {
        // q=0 means "not acceptable" and must be preserved as 0.0.
        let eq = parse_encoding_with_quality("gzip;q=0");
        assert_eq!(eq.quality, 0.0);
    }

    #[test]
    fn encoding_quality_whitespace_trimmed() {
        // Optional whitespace around tokens and `;` must be ignored.
        let eq = parse_encoding_with_quality(" gzip ; q=0.9 ");
        assert_eq!(eq.encoding, "gzip");
        assert_eq!(eq.quality, 0.9);
    }

    // --- parse_accept_encoding_header ---

    #[test]
    fn accept_header_empty() {
        let (encs, rejected) = parse_accept_encoding_header("");
        assert!(encs.is_empty());
        assert!(!rejected);
    }

    #[test]
    fn accept_header_identity_q0_rejects() {
        // `identity;q=0` explicitly forbids the identity coding.
        let (_, rejected) = parse_accept_encoding_header("identity;q=0");
        assert!(rejected);
    }

    #[test]
    fn accept_header_wildcard_q0_rejects_identity() {
        // `*;q=0` forbids every coding not otherwise listed, identity included.
        let (_, rejected) = parse_accept_encoding_header("*;q=0");
        assert!(rejected);
    }

    #[test]
    fn accept_header_wildcard_q0_but_identity_explicit_accepted() {
        // An explicit `identity` entry overrides the wildcard rejection.
        let (_, rejected) = parse_accept_encoding_header("*;q=0, identity");
        assert!(!rejected);
    }

    // --- parse_accept_encoding (full) ---

    #[test]
    fn accept_encoding_no_header_returns_identity() {
        assert_eq!(parse_accept_encoding(&req_no_headers()).unwrap(), "identity");
    }

    #[test]
    fn accept_encoding_gzip() {
        assert_eq!(parse_accept_encoding(&req_with_accept("gzip")).unwrap(), "gzip");
    }

    #[test]
    fn accept_encoding_identity_explicit() {
        assert_eq!(parse_accept_encoding(&req_with_accept("identity")).unwrap(), "identity");
    }

    #[test]
    fn accept_encoding_gzip_higher_quality() {
        // Highest q-value wins regardless of listing order.
        assert_eq!(
            parse_accept_encoding(&req_with_accept("identity;q=0.1, gzip;q=0.9")).unwrap(),
            "gzip"
        );
    }

    #[test]
    fn accept_encoding_wildcard_returns_identity() {
        // Bare `*` accepts anything; identity is the preferred fallback.
        assert_eq!(parse_accept_encoding(&req_with_accept("*")).unwrap(), "identity");
    }

    #[test]
    fn accept_encoding_wildcard_identity_rejected_returns_gzip() {
        // Identity is forbidden but `*` still permits gzip.
        assert_eq!(
            parse_accept_encoding(&req_with_accept("identity;q=0, *")).unwrap(),
            "gzip"
        );
    }

    #[test]
    fn accept_encoding_all_rejected_errors() {
        // Nothing acceptable at all -> must error (HTTP 406 territory).
        assert!(parse_accept_encoding(&req_with_accept("identity;q=0, *;q=0")).is_err());
    }

    #[test]
    fn accept_encoding_unsupported_only_falls_to_identity() {
        // `br` is not supported; identity was not rejected, so fall back to it.
        assert_eq!(parse_accept_encoding(&req_with_accept("br")).unwrap(), "identity");
    }

    // --- is_identity_acceptable ---

    #[test]
    fn identity_acceptable_no_header() {
        assert!(is_identity_acceptable(&req_no_headers()));
    }

    #[test]
    fn identity_acceptable_gzip_only() {
        // Listing gzip does not forbid identity; it stays acceptable.
        assert!(is_identity_acceptable(&req_with_accept("gzip")));
    }

    #[test]
    fn identity_not_acceptable_identity_q0() {
        assert!(!is_identity_acceptable(&req_with_accept("identity;q=0")));
    }

    #[test]
    fn identity_not_acceptable_wildcard_q0() {
        assert!(!is_identity_acceptable(&req_with_accept("*;q=0")));
    }

    #[test]
    fn identity_acceptable_wildcard_q0_but_identity_explicit() {
        assert!(is_identity_acceptable(&req_with_accept("*;q=0, identity")));
    }

    // --- parse_content_encoding ---

    #[test]
    fn content_encoding_empty_returns_identity() {
        // No Content-Encoding header means the body is unencoded.
        assert_eq!(parse_content_encoding(&req_no_headers()).unwrap(), "identity");
    }

    #[test]
    fn content_encoding_gzip() {
        assert_eq!(parse_content_encoding(&req_with_content("gzip")).unwrap(), "gzip");
    }

    #[test]
    fn content_encoding_identity_explicit() {
        assert_eq!(parse_content_encoding(&req_with_content("identity")).unwrap(), "identity");
    }

    #[test]
    fn content_encoding_unsupported_errors() {
        assert!(parse_content_encoding(&req_with_content("br")).is_err());
    }

    #[test]
    fn content_encoding_case_insensitive() {
        // Header values are normalized to lowercase before matching.
        assert_eq!(parse_content_encoding(&req_with_content("GZIP")).unwrap(), "gzip");
    }
}
|
||||
|
||||
@ -71,9 +71,10 @@ pub async fn get_files(
|
||||
let path_str = params.path.as_deref().unwrap_or("");
|
||||
let header_token = extract_header_token(&req);
|
||||
|
||||
let default_user = state.defaults.user();
|
||||
let username = match execcontext::resolve_default_username(
|
||||
params.username.as_deref(),
|
||||
&state.defaults.user,
|
||||
&default_user,
|
||||
) {
|
||||
Ok(u) => u.to_string(),
|
||||
Err(e) => return json_error(StatusCode::BAD_REQUEST, e),
|
||||
@ -96,7 +97,8 @@ pub async fn get_files(
|
||||
};
|
||||
|
||||
let home_dir = user.dir.to_string_lossy().to_string();
|
||||
let resolved = match expand_and_resolve(path_str, &home_dir, state.defaults.workdir.as_deref())
|
||||
let default_workdir = state.defaults.workdir();
|
||||
let resolved = match expand_and_resolve(path_str, &home_dir, default_workdir.as_deref())
|
||||
{
|
||||
Ok(p) => p,
|
||||
Err(e) => return json_error(StatusCode::BAD_REQUEST, &e),
|
||||
@ -222,9 +224,10 @@ pub async fn post_files(
|
||||
let path_str = params.path.as_deref().unwrap_or("");
|
||||
let header_token = extract_header_token(&req);
|
||||
|
||||
let default_user = state.defaults.user();
|
||||
let username = match execcontext::resolve_default_username(
|
||||
params.username.as_deref(),
|
||||
&state.defaults.user,
|
||||
&default_user,
|
||||
) {
|
||||
Ok(u) => u.to_string(),
|
||||
Err(e) => return json_error(StatusCode::BAD_REQUEST, e),
|
||||
@ -266,6 +269,7 @@ pub async fn post_files(
|
||||
};
|
||||
|
||||
let mut uploaded: Vec<EntryInfo> = Vec::new();
|
||||
let default_workdir = state.defaults.workdir();
|
||||
|
||||
while let Ok(Some(field)) = multipart.next_field().await {
|
||||
let field_name = field.name().unwrap_or("").to_string();
|
||||
@ -274,7 +278,7 @@ pub async fn post_files(
|
||||
}
|
||||
|
||||
let file_path = if !path_str.is_empty() {
|
||||
match expand_and_resolve(path_str, &home_dir, state.defaults.workdir.as_deref()) {
|
||||
match expand_and_resolve(path_str, &home_dir, default_workdir.as_deref()) {
|
||||
Ok(p) => p,
|
||||
Err(e) => return json_error(StatusCode::BAD_REQUEST, &e),
|
||||
}
|
||||
@ -283,7 +287,7 @@ pub async fn post_files(
|
||||
.file_name()
|
||||
.unwrap_or("upload")
|
||||
.to_string();
|
||||
match expand_and_resolve(&fname, &home_dir, state.defaults.workdir.as_deref()) {
|
||||
match expand_and_resolve(&fname, &home_dir, default_workdir.as_deref()) {
|
||||
Ok(p) => p,
|
||||
Err(e) => return json_error(StatusCode::BAD_REQUEST, &e),
|
||||
}
|
||||
|
||||
@ -29,6 +29,8 @@ pub async fn get_health(State(state): State<Arc<AppState>>) -> impl IntoResponse
|
||||
fn post_restore_recovery(state: &AppState) {
|
||||
tracing::info!("restore: post-restore recovery (no GC needed in Rust)");
|
||||
|
||||
state.snapshot_in_progress.store(false, std::sync::atomic::Ordering::Release);
|
||||
|
||||
state.conn_tracker.restore_after_snapshot();
|
||||
tracing::info!("restore: zombie connections closed");
|
||||
|
||||
|
||||
@ -78,11 +78,15 @@ pub async fn post_init(
|
||||
if let Some(ref user) = init_req.default_user {
|
||||
if !user.is_empty() {
|
||||
tracing::debug!(user = %user, "setting default user");
|
||||
let mut defaults = state.defaults.clone();
|
||||
defaults.user = user.clone();
|
||||
// Note: In Rust we'd need interior mutability for this.
|
||||
// For now, env_vars (DashMap) handles concurrent access.
|
||||
// User/workdir mutation deferred to full state refactor.
|
||||
state.defaults.set_user(user.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Set default workdir
|
||||
if let Some(ref workdir) = init_req.default_workdir {
|
||||
if !workdir.is_empty() {
|
||||
tracing::debug!(workdir = %workdir, "setting default workdir");
|
||||
state.defaults.set_workdir(Some(workdir.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,6 +151,9 @@ async fn trigger_restore_and_respond(state: &AppState) -> axum::response::Respon
|
||||
|
||||
fn post_restore_recovery(state: &AppState) {
|
||||
tracing::info!("restore: post-restore recovery (no GC needed in Rust)");
|
||||
|
||||
state.snapshot_in_progress.store(false, std::sync::atomic::Ordering::Release);
|
||||
|
||||
state.conn_tracker.restore_after_snapshot();
|
||||
|
||||
if let Some(ref ps) = state.port_subsystem {
|
||||
|
||||
@ -46,7 +46,8 @@ fn collect_metrics(state: &AppState) -> Result<Metrics, String> {
|
||||
let mut sys = sysinfo::System::new();
|
||||
sys.refresh_memory();
|
||||
let mem_total = sys.total_memory();
|
||||
let mem_used = sys.used_memory();
|
||||
let mem_available = sys.available_memory();
|
||||
let mem_used = mem_total.saturating_sub(mem_available);
|
||||
let mem_total_mib = mem_total / 1024 / 1024;
|
||||
let mem_used_mib = mem_used / 1024 / 1024;
|
||||
|
||||
|
||||
@ -10,10 +10,24 @@ use crate::state::AppState;
|
||||
/// POST /snapshot/prepare — quiesce subsystems before Firecracker snapshot.
|
||||
///
|
||||
/// In Rust there is no GC dance. We just:
|
||||
/// 1. Stop port subsystem
|
||||
/// 2. Close idle connections via conntracker
|
||||
/// 3. Set needs_restore flag
|
||||
/// 1. Drop page cache to shrink snapshot size
|
||||
/// 2. Stop port subsystem
|
||||
/// 3. Close idle connections via conntracker
|
||||
/// 4. Set needs_restore flag
|
||||
pub async fn post_snapshot_prepare(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
// Drop page cache BEFORE blocking the reclaimer — avoids snapshotting
|
||||
// gigabytes of stale cache that inflates the memory dump on disk.
|
||||
// "1" = pagecache only (keep dentries/inodes for faster resume).
|
||||
if let Err(e) = std::fs::write("/proc/sys/vm/drop_caches", "1") {
|
||||
tracing::warn!(error = %e, "snapshot/prepare: drop_caches failed");
|
||||
} else {
|
||||
tracing::info!("snapshot/prepare: page cache dropped");
|
||||
}
|
||||
|
||||
// Block memory reclaimer — prevents drop_caches from running mid-freeze
|
||||
// which would corrupt kernel page table state.
|
||||
state.snapshot_in_progress.store(true, Ordering::Release);
|
||||
|
||||
if let Some(ref ps) = state.port_subsystem {
|
||||
ps.stop();
|
||||
tracing::info!("snapshot/prepare: port subsystem stopped");
|
||||
@ -22,6 +36,9 @@ pub async fn post_snapshot_prepare(State(state): State<Arc<AppState>>) -> impl I
|
||||
state.conn_tracker.prepare_for_snapshot();
|
||||
tracing::info!("snapshot/prepare: connections prepared");
|
||||
|
||||
// Sync filesystem buffers so dirty pages are flushed before freeze.
|
||||
unsafe { libc::sync(); }
|
||||
|
||||
state.needs_restore.store(true, Ordering::Release);
|
||||
tracing::info!("snapshot/prepare: ready for freeze");
|
||||
|
||||
|
||||
Reference in New Issue
Block a user