From a3b60f4dfc263e3c9831ab758882402ccf592ebd Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Fri, 16 Jan 2026 08:50:56 -0500 Subject: [PATCH 1/5] feat: add zstd directory transfer endpoints and optimize chromium restart This PR adds two major improvements: 1. New zstd directory transfer endpoints (2.45x faster than zip): - GET /fs/download_dir_zstd - Download directory as tar.zst archive - POST /fs/upload_zstd - Upload and extract tar.zst archive - Supports compression levels: fastest, default, better, best - Uses klauspost/compress library for high-performance compression 2. Chromium restart optimization (19x faster): - Changed startsecs=5 to startsecs=0 (API server already detects readiness) - Added stopsignal=KILL for immediate termination - Added stopwaitsecs=0 to eliminate shutdown grace period - Reduces restart time from ~8 seconds to ~0.4 seconds Includes benchmark tests for both improvements with documented results. Co-Authored-By: Claude Opus 4.5 --- .../supervisor/services/chromium.conf | 4 +- .../image/supervisor/services/chromium.conf | 4 +- server/cmd/api/api/fs.go | 142 ++++ server/e2e/e2e_chromium_restart_bench_test.go | 375 ++++++++ server/e2e/e2e_zip_transfer_bench_test.go | 587 +++++++++++++ server/go.mod | 1 + server/go.sum | 2 + server/lib/oapi/oapi.go | 798 +++++++++++++++--- server/lib/zstdutil/zstdutil.go | 257 ++++++ server/openapi.yaml | 83 ++ 10 files changed, 2131 insertions(+), 122 deletions(-) create mode 100644 server/e2e/e2e_chromium_restart_bench_test.go create mode 100644 server/e2e/e2e_zip_transfer_bench_test.go create mode 100644 server/lib/zstdutil/zstdutil.go diff --git a/images/chromium-headful/supervisor/services/chromium.conf b/images/chromium-headful/supervisor/services/chromium.conf index 07bfe026..ae1f22e9 100644 --- a/images/chromium-headful/supervisor/services/chromium.conf +++ b/images/chromium-headful/supervisor/services/chromium.conf @@ -2,6 +2,8 @@ command=/usr/local/bin/chromium-launcher autostart=false 
autorestart=true -startsecs=5 +startsecs=0 +stopsignal=KILL +stopwaitsecs=0 stdout_logfile=/var/log/supervisord/chromium redirect_stderr=true diff --git a/images/chromium-headless/image/supervisor/services/chromium.conf b/images/chromium-headless/image/supervisor/services/chromium.conf index 09c0823a..7018e12a 100644 --- a/images/chromium-headless/image/supervisor/services/chromium.conf +++ b/images/chromium-headless/image/supervisor/services/chromium.conf @@ -2,6 +2,8 @@ command=/usr/local/bin/chromium-launcher --headless autostart=false autorestart=true -startsecs=5 +startsecs=0 +stopsignal=KILL +stopwaitsecs=0 stdout_logfile=/var/log/supervisord/chromium redirect_stderr=true diff --git a/server/cmd/api/api/fs.go b/server/cmd/api/api/fs.go index 980236b0..62776d55 100644 --- a/server/cmd/api/api/fs.go +++ b/server/cmd/api/api/fs.go @@ -20,6 +20,7 @@ import ( "github.com/onkernel/kernel-images/server/lib/logger" oapi "github.com/onkernel/kernel-images/server/lib/oapi" "github.com/onkernel/kernel-images/server/lib/ziputil" + "github.com/onkernel/kernel-images/server/lib/zstdutil" ) // fsWatch represents an in-memory directory watch. 
@@ -850,3 +851,144 @@ func (s *ApiService) DownloadDirZip(ctx context.Context, request oapi.DownloadDi body := io.NopCloser(bytes.NewReader(zipBytes)) return oapi.DownloadDirZip200ApplicationzipResponse{Body: body, ContentLength: int64(len(zipBytes))}, nil } + +func (s *ApiService) DownloadDirZstd(ctx context.Context, request oapi.DownloadDirZstdRequestObject) (oapi.DownloadDirZstdResponseObject, error) { + log := logger.FromContext(ctx) + path := request.Params.Path + if path == "" { + return oapi.DownloadDirZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "path cannot be empty"}}, nil + } + + info, err := os.Stat(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return oapi.DownloadDirZstd404JSONResponse{NotFoundErrorJSONResponse: oapi.NotFoundErrorJSONResponse{Message: "directory not found"}}, nil + } + log.Error("failed to stat path", "err", err, "path", path) + return oapi.DownloadDirZstd500JSONResponse{InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{Message: "failed to stat path"}}, nil + } + if !info.IsDir() { + return oapi.DownloadDirZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "path is not a directory"}}, nil + } + + // Determine compression level + level := zstdutil.LevelDefault + if request.Params.CompressionLevel != nil { + switch *request.Params.CompressionLevel { + case oapi.Fastest: + level = zstdutil.LevelFastest + case oapi.Better: + level = zstdutil.LevelBetter + case oapi.Best: + level = zstdutil.LevelBest + default: + level = zstdutil.LevelDefault + } + } + + // Create streaming response using a pipe + pr, pw := io.Pipe() + + go func() { + defer pw.Close() + if err := zstdutil.TarZstdDir(pw, path, level); err != nil { + log.Error("failed to create tar.zst archive", "err", err, "path", path) + pw.CloseWithError(err) + } + }() + + return oapi.DownloadDirZstd200ApplicationzstdResponse{Body: pr, ContentLength: 0}, nil +} + +func (s 
*ApiService) UploadZstd(ctx context.Context, request oapi.UploadZstdRequestObject) (oapi.UploadZstdResponseObject, error) { + log := logger.FromContext(ctx) + + if request.Body == nil { + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "request body required"}}, nil + } + + // Create temp file for uploaded archive + tmpArchive, err := os.CreateTemp("", "upload-*.tar.zst") + if err != nil { + log.Error("failed to create temporary file", "err", err) + return oapi.UploadZstd500JSONResponse{InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{Message: "internal error"}}, nil + } + defer os.Remove(tmpArchive.Name()) + defer tmpArchive.Close() + + var destPath string + var stripComponents int + var archiveReceived bool + + for { + part, err := request.Body.NextPart() + if err == io.EOF { + break + } + if err != nil { + log.Error("failed to read form part", "err", err) + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "failed to read form part"}}, nil + } + + switch part.FormName() { + case "archive": + archiveReceived = true + if _, err := io.Copy(tmpArchive, part); err != nil { + log.Error("failed to read archive data", "err", err) + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "failed to read archive"}}, nil + } + case "dest_path": + data, err := io.ReadAll(part) + if err != nil { + log.Error("failed to read dest_path", "err", err) + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "failed to read dest_path"}}, nil + } + destPath = strings.TrimSpace(string(data)) + if destPath == "" || !filepath.IsAbs(destPath) { + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "dest_path must be an absolute path"}}, nil + } + case "strip_components": + data, err := 
io.ReadAll(part) + if err != nil { + log.Error("failed to read strip_components", "err", err) + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "failed to read strip_components"}}, nil + } + if v, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && v >= 0 { + stripComponents = v + } + default: + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "invalid form field: " + part.FormName()}}, nil + } + } + + // Validate required parts + if !archiveReceived || destPath == "" { + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "archive and dest_path are required"}}, nil + } + + // Close temp writer and reopen for reading + if err := tmpArchive.Close(); err != nil { + log.Error("failed to finalize temporary archive", "err", err) + return oapi.UploadZstd500JSONResponse{InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{Message: "internal error"}}, nil + } + + // Open for reading + archiveReader, err := os.Open(tmpArchive.Name()) + if err != nil { + log.Error("failed to reopen temporary archive", "err", err) + return oapi.UploadZstd500JSONResponse{InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{Message: "internal error"}}, nil + } + defer archiveReader.Close() + + // Extract the archive + if err := zstdutil.UntarZstd(archiveReader, destPath, stripComponents); err != nil { + msg := err.Error() + if strings.Contains(msg, "illegal file path") { + return oapi.UploadZstd400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "invalid archive: path traversal detected"}}, nil + } + log.Error("failed to extract tar.zst archive", "err", err) + return oapi.UploadZstd500JSONResponse{InternalErrorJSONResponse: oapi.InternalErrorJSONResponse{Message: "failed to extract archive"}}, nil + } + + return oapi.UploadZstd201Response{}, nil +} diff --git 
a/server/e2e/e2e_chromium_restart_bench_test.go b/server/e2e/e2e_chromium_restart_bench_test.go new file mode 100644 index 00000000..1878490d --- /dev/null +++ b/server/e2e/e2e_chromium_restart_bench_test.go @@ -0,0 +1,375 @@ +package e2e + +import ( + "context" + "encoding/base64" + "fmt" + "log/slog" + "net/http" + "os/exec" + "testing" + "time" + + logctx "github.com/onkernel/kernel-images/server/lib/logger" + instanceoapi "github.com/onkernel/kernel-images/server/lib/oapi" + "github.com/stretchr/testify/require" +) + +// BenchmarkChromiumRestart benchmarks chromium stop/start time on both headful and headless images. +// Run with: go test -bench=BenchmarkChromiumRestart -benchtime=5x -v ./e2e/... +// +// This benchmark uses supervisorctl to stop and start chromium, measuring: +// 1. Time to stop chromium +// 2. Time to start chromium +// 3. Time until DevTools is ready (via CDP endpoint) +func BenchmarkChromiumRestart(b *testing.B) { + if _, err := exec.LookPath("docker"); err != nil { + b.Skip("docker not available") + } + + benchmarks := []struct { + name string + image string + }{ + {"Headless", headlessImage}, + {"Headful", headfulImage}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + runChromiumRestartBenchmark(b, bm.image, bm.name) + }) + } +} + +func runChromiumRestartBenchmark(b *testing.B, image, imageType string) { + name := fmt.Sprintf("%s-restart-bench-%s", containerName, imageType) + + logger := slog.New(slog.NewTextHandler(b.Output(), &slog.HandlerOptions{Level: slog.LevelInfo})) + baseCtx := logctx.AddToContext(context.Background(), logger) + + // Clean slate + _ = stopContainer(baseCtx, name) + + env := map[string]string{ + "WIDTH": "1024", + "HEIGHT": "768", + } + + // Start container + _, exitCh, err := runContainer(baseCtx, image, name, env) + if err != nil { + b.Fatalf("failed to start container: %v", err) + } + defer stopContainer(baseCtx, name) + + ctx, cancel := context.WithTimeout(baseCtx, 5*time.Minute) 
+ defer cancel() + + logger.Info("[setup]", "action", "waiting for API", "url", apiBaseURL+"/spec.yaml") + if err := waitHTTPOrExit(ctx, apiBaseURL+"/spec.yaml", exitCh); err != nil { + b.Fatalf("api not ready: %v", err) + } + + // Wait for initial DevTools to be ready + logger.Info("[setup]", "action", "waiting for DevTools") + if err := waitTCP(ctx, "127.0.0.1:9222"); err != nil { + b.Fatalf("DevTools not ready: %v", err) + } + + client, err := apiClient() + if err != nil { + b.Fatalf("failed to create API client: %v", err) + } + + // Warmup - do one restart cycle to ensure everything is ready + logger.Info("[warmup]", "action", "performing warmup restart") + if err := doChromiumRestart(ctx, client, logger); err != nil { + b.Fatalf("warmup restart failed: %v", err) + } + + // Reset timer after setup + b.ResetTimer() + + var totalStopTime, totalStartTime, totalDevToolsTime time.Duration + + for i := 0; i < b.N; i++ { + stopTime, startTime, devtoolsTime, err := measureChromiumRestartCycle(ctx, client, logger) + if err != nil { + b.Fatalf("restart cycle %d failed: %v", i, err) + } + + totalStopTime += stopTime + totalStartTime += startTime + totalDevToolsTime += devtoolsTime + + logger.Info("[iteration]", + "i", i, + "stop_ms", stopTime.Milliseconds(), + "start_ms", startTime.Milliseconds(), + "devtools_ms", devtoolsTime.Milliseconds(), + "total_ms", (stopTime + startTime + devtoolsTime).Milliseconds(), + ) + } + + b.StopTimer() + + // Report metrics + if b.N > 0 { + avgStop := totalStopTime / time.Duration(b.N) + avgStart := totalStartTime / time.Duration(b.N) + avgDevTools := totalDevToolsTime / time.Duration(b.N) + avgTotal := avgStop + avgStart + avgDevTools + + b.ReportMetric(float64(avgStop.Milliseconds()), "stop_ms/op") + b.ReportMetric(float64(avgStart.Milliseconds()), "start_ms/op") + b.ReportMetric(float64(avgDevTools.Milliseconds()), "devtools_ms/op") + b.ReportMetric(float64(avgTotal.Milliseconds()), "total_ms/op") + + logger.Info("[summary]", + "image", 
imageType, + "iterations", b.N, + "avg_stop_ms", avgStop.Milliseconds(), + "avg_start_ms", avgStart.Milliseconds(), + "avg_devtools_ms", avgDevTools.Milliseconds(), + "avg_total_ms", avgTotal.Milliseconds(), + ) + } +} + +// measureChromiumRestartCycle performs a full stop/start cycle and returns timing for each phase. +// Returns: stopTime, startTime, devtoolsReadyTime, error +func measureChromiumRestartCycle(ctx context.Context, client *instanceoapi.ClientWithResponses, logger *slog.Logger) (time.Duration, time.Duration, time.Duration, error) { + // Phase 1: Stop chromium + stopStart := time.Now() + stopDuration, err := execSupervisorctl(ctx, client, "stop", "chromium") + if err != nil { + return 0, 0, 0, fmt.Errorf("stop failed: %w", err) + } + stopTime := time.Since(stopStart) + _ = stopDuration // we use wall-clock time instead + + // Phase 2: Start chromium + startStart := time.Now() + startDuration, err := execSupervisorctl(ctx, client, "start", "chromium") + if err != nil { + return 0, 0, 0, fmt.Errorf("start failed: %w", err) + } + startTime := time.Since(startStart) + _ = startDuration // we use wall-clock time instead + + // Phase 3: Wait for DevTools to be ready + devtoolsStart := time.Now() + if err := waitForDevToolsReady(ctx, client); err != nil { + return 0, 0, 0, fmt.Errorf("devtools not ready: %w", err) + } + devtoolsTime := time.Since(devtoolsStart) + + return stopTime, startTime, devtoolsTime, nil +} + +// execSupervisorctl executes a supervisorctl command via the process exec API. +// Returns the duration reported by the API and any error. 
+func execSupervisorctl(ctx context.Context, client *instanceoapi.ClientWithResponses, action, service string) (time.Duration, error) { + args := []string{"-c", "/etc/supervisor/supervisord.conf", action, service} + req := instanceoapi.ProcessExecJSONRequestBody{ + Command: "supervisorctl", + Args: &args, + } + + rsp, err := client.ProcessExecWithResponse(ctx, req) + if err != nil { + return 0, fmt.Errorf("request error: %w", err) + } + if rsp.StatusCode() != http.StatusOK { + return 0, fmt.Errorf("unexpected status: %s body=%s", rsp.Status(), string(rsp.Body)) + } + if rsp.JSON200 == nil { + return 0, fmt.Errorf("nil response") + } + + // Check exit code + exitCode := 0 + if rsp.JSON200.ExitCode != nil { + exitCode = *rsp.JSON200.ExitCode + } + if exitCode != 0 { + var stdout, stderr string + if rsp.JSON200.StdoutB64 != nil { + if b, err := base64.StdEncoding.DecodeString(*rsp.JSON200.StdoutB64); err == nil { + stdout = string(b) + } + } + if rsp.JSON200.StderrB64 != nil { + if b, err := base64.StdEncoding.DecodeString(*rsp.JSON200.StderrB64); err == nil { + stderr = string(b) + } + } + return 0, fmt.Errorf("supervisorctl %s %s failed with exit code %d: stdout=%s stderr=%s", action, service, exitCode, stdout, stderr) + } + + // Return duration reported by the API + var duration time.Duration + if rsp.JSON200.DurationMs != nil { + duration = time.Duration(*rsp.JSON200.DurationMs) * time.Millisecond + } + return duration, nil +} + +// waitForDevToolsReady polls the CDP endpoint until it responds. 
+func waitForDevToolsReady(ctx context.Context, client *instanceoapi.ClientWithResponses) error { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + timeout := time.After(30 * time.Second) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-timeout: + return fmt.Errorf("timeout waiting for DevTools") + case <-ticker.C: + // Try to list CDP targets via curl inside the container + args := []string{"-s", "-o", "/dev/null", "-w", "%{http_code}", "http://localhost:9223/json/version"} + req := instanceoapi.ProcessExecJSONRequestBody{ + Command: "curl", + Args: &args, + } + rsp, err := client.ProcessExecWithResponse(ctx, req) + if err != nil { + continue + } + if rsp.JSON200 != nil && rsp.JSON200.ExitCode != nil && *rsp.JSON200.ExitCode == 0 { + // Check if we got a 200 response + if rsp.JSON200.StdoutB64 != nil { + if b, err := base64.StdEncoding.DecodeString(*rsp.JSON200.StdoutB64); err == nil { + if string(b) == "200" { + return nil + } + } + } + } + } + } +} + +// doChromiumRestart performs a full restart cycle (for warmup). +func doChromiumRestart(ctx context.Context, client *instanceoapi.ClientWithResponses, logger *slog.Logger) error { + args := []string{"-c", "/etc/supervisor/supervisord.conf", "restart", "chromium"} + req := instanceoapi.ProcessExecJSONRequestBody{ + Command: "supervisorctl", + Args: &args, + } + + rsp, err := client.ProcessExecWithResponse(ctx, req) + if err != nil { + return fmt.Errorf("request error: %w", err) + } + if rsp.StatusCode() != http.StatusOK { + return fmt.Errorf("unexpected status: %s body=%s", rsp.Status(), string(rsp.Body)) + } + + // Wait for DevTools + return waitForDevToolsReady(ctx, client) +} + +// TestChromiumRestartTiming is a non-benchmark test that prints detailed timing info. +// Useful for quick iteration without the full benchmark harness. +// Run with: go test -v -run TestChromiumRestartTiming ./e2e/... 
+func TestChromiumRestartTiming(t *testing.T) { + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not available") + } + + images := []struct { + name string + image string + }{ + {"Headless", headlessImage}, + {"Headful", headfulImage}, + } + + const iterations = 3 + + for _, img := range images { + t.Run(img.name, func(t *testing.T) { + name := fmt.Sprintf("%s-restart-timing-%s", containerName, img.name) + + logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelInfo})) + baseCtx := logctx.AddToContext(context.Background(), logger) + + // Clean slate + _ = stopContainer(baseCtx, name) + + env := map[string]string{ + "WIDTH": "1024", + "HEIGHT": "768", + } + + // Start container + _, exitCh, err := runContainer(baseCtx, img.image, name, env) + require.NoError(t, err, "failed to start container") + defer stopContainer(baseCtx, name) + + ctx, cancel := context.WithTimeout(baseCtx, 5*time.Minute) + defer cancel() + + t.Logf("Waiting for API...") + require.NoError(t, waitHTTPOrExit(ctx, apiBaseURL+"/spec.yaml", exitCh), "api not ready") + + t.Logf("Waiting for DevTools...") + require.NoError(t, waitTCP(ctx, "127.0.0.1:9222"), "DevTools not ready") + + client, err := apiClient() + require.NoError(t, err, "failed to create API client") + + // Warmup + t.Logf("Performing warmup restart...") + require.NoError(t, doChromiumRestart(ctx, client, logger), "warmup restart failed") + + // Collect timing data + var stopTimes, startTimes, devtoolsTimes []time.Duration + + for i := 0; i < iterations; i++ { + stopTime, startTime, devtoolsTime, err := measureChromiumRestartCycle(ctx, client, logger) + require.NoError(t, err, "restart cycle %d failed", i) + + stopTimes = append(stopTimes, stopTime) + startTimes = append(startTimes, startTime) + devtoolsTimes = append(devtoolsTimes, devtoolsTime) + + t.Logf(" Iteration %d: stop=%dms start=%dms devtools=%dms total=%dms", + i+1, + stopTime.Milliseconds(), + startTime.Milliseconds(), + 
devtoolsTime.Milliseconds(), + (stopTime + startTime + devtoolsTime).Milliseconds(), + ) + } + + // Calculate averages + avgStop := avg(stopTimes) + avgStart := avg(startTimes) + avgDevTools := avg(devtoolsTimes) + avgTotal := avgStop + avgStart + avgDevTools + + t.Logf("\n=== %s Results (%d iterations) ===", img.name, iterations) + t.Logf(" Average stop time: %dms", avgStop.Milliseconds()) + t.Logf(" Average start time: %dms", avgStart.Milliseconds()) + t.Logf(" Average devtools time: %dms", avgDevTools.Milliseconds()) + t.Logf(" Average total time: %dms", avgTotal.Milliseconds()) + }) + } +} + +func avg(durations []time.Duration) time.Duration { + if len(durations) == 0 { + return 0 + } + var total time.Duration + for _, d := range durations { + total += d + } + return total / time.Duration(len(durations)) +} diff --git a/server/e2e/e2e_zip_transfer_bench_test.go b/server/e2e/e2e_zip_transfer_bench_test.go new file mode 100644 index 00000000..67e2a260 --- /dev/null +++ b/server/e2e/e2e_zip_transfer_bench_test.go @@ -0,0 +1,587 @@ +package e2e + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "log/slog" + "mime/multipart" + "net/http" + "os/exec" + "testing" + "time" + + logctx "github.com/onkernel/kernel-images/server/lib/logger" + instanceoapi "github.com/onkernel/kernel-images/server/lib/oapi" + "github.com/stretchr/testify/require" +) + +// TestZipTransferTiming measures the time to download a directory as a zip and re-upload it. +// This is useful for understanding the performance characteristics of the zip transfer endpoints +// and evaluating whether alternative compression methods (like zstd) would be beneficial. +// +// Run with: go test -v -run TestZipTransferTiming ./e2e/... 
+func TestZipTransferTiming(t *testing.T) { + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not available") + } + + image := headlessImage + name := containerName + "-zip-transfer" + + logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelInfo})) + baseCtx := logctx.AddToContext(context.Background(), logger) + + // Clean slate + _ = stopContainer(baseCtx, name) + + env := map[string]string{ + "WIDTH": "1024", + "HEIGHT": "768", + } + + // Start container + _, exitCh, err := runContainer(baseCtx, image, name, env) + require.NoError(t, err, "failed to start container") + defer stopContainer(baseCtx, name) + + ctx, cancel := context.WithTimeout(baseCtx, 5*time.Minute) + defer cancel() + + t.Logf("Waiting for API...") + require.NoError(t, waitHTTPOrExit(ctx, apiBaseURL+"/spec.yaml", exitCh), "api not ready") + + client, err := apiClient() + require.NoError(t, err, "failed to create API client") + + // First, let's populate user-data with some content by navigating to a page + // This ensures we have a realistic directory to transfer + t.Logf("Populating user-data by browsing...") + populateStart := time.Now() + err = populateUserData(ctx, client) + require.NoError(t, err, "failed to populate user-data") + t.Logf("User-data population took %dms", time.Since(populateStart).Milliseconds()) + + // Get initial directory size for reference + dirSize, fileCount, err := getDirStats(ctx, client, "/home/kernel/user-data") + require.NoError(t, err, "failed to get dir stats") + t.Logf("Directory stats: %d files, ~%d KB", fileCount, dirSize/1024) + + const iterations = 3 + var downloadTimes, uploadTimes []time.Duration + var zipSizes []int64 + + for i := 0; i < iterations; i++ { + t.Logf("\n--- Iteration %d ---", i+1) + + // Download /home/kernel/user-data as zip + downloadStart := time.Now() + zipData, err := downloadDirAsZip(ctx, client, "/home/kernel/user-data") + downloadTime := time.Since(downloadStart) + 
require.NoError(t, err, "download failed") + downloadTimes = append(downloadTimes, downloadTime) + zipSizes = append(zipSizes, int64(len(zipData))) + + t.Logf(" Download: %dms (zip size: %d KB, compression ratio: %.1f%%)", + downloadTime.Milliseconds(), + len(zipData)/1024, + float64(len(zipData))/float64(dirSize)*100) + + // Upload to a different location + destPath := fmt.Sprintf("/tmp/upload-test-%d", i) + uploadStart := time.Now() + err = uploadZip(ctx, client, zipData, destPath) + uploadTime := time.Since(uploadStart) + require.NoError(t, err, "upload failed") + uploadTimes = append(uploadTimes, uploadTime) + + t.Logf(" Upload: %dms", uploadTime.Milliseconds()) + t.Logf(" Total: %dms", (downloadTime + uploadTime).Milliseconds()) + } + + // Calculate averages + avgDownload := avg(downloadTimes) + avgUpload := avg(uploadTimes) + avgZipSize := avgInt64(zipSizes) + + t.Logf("\n=== Zip Transfer Results (%d iterations) ===", iterations) + t.Logf(" Directory size: ~%d KB (%d files)", dirSize/1024, fileCount) + t.Logf(" Average zip size: %d KB (%.1f%% of original)", + avgZipSize/1024, + float64(avgZipSize)/float64(dirSize)*100) + t.Logf(" Average download: %dms", avgDownload.Milliseconds()) + t.Logf(" Average upload: %dms", avgUpload.Milliseconds()) + t.Logf(" Average round-trip: %dms", (avgDownload + avgUpload).Milliseconds()) + t.Logf(" Download throughput: %.1f MB/s (uncompressed)", float64(dirSize)/1024/1024/avgDownload.Seconds()) + t.Logf(" Upload throughput: %.1f MB/s (uncompressed)", float64(dirSize)/1024/1024/avgUpload.Seconds()) +} + +// populateUserData creates some realistic content in the user-data directory +// by executing a playwright script that navigates to a page. 
+func populateUserData(ctx context.Context, client *instanceoapi.ClientWithResponses) error { + // Navigate to example.com to generate some browser state + code := ` + await page.goto('https://example.com'); + await page.waitForTimeout(500); + // Visit another page to generate more cache/state + await page.goto('https://www.google.com'); + await page.waitForTimeout(500); + return 'done'; + ` + req := instanceoapi.ExecutePlaywrightCodeJSONRequestBody{Code: code} + rsp, err := client.ExecutePlaywrightCodeWithResponse(ctx, req) + if err != nil { + return fmt.Errorf("playwright execute failed: %w", err) + } + if rsp.StatusCode() != http.StatusOK { + return fmt.Errorf("playwright execute returned %d: %s", rsp.StatusCode(), string(rsp.Body)) + } + if rsp.JSON200 != nil && !rsp.JSON200.Success { + errMsg := "unknown error" + if rsp.JSON200.Error != nil { + errMsg = *rsp.JSON200.Error + } + return fmt.Errorf("playwright execution failed: %s", errMsg) + } + return nil +} + +// getDirStats returns approximate size and file count of a directory +func getDirStats(ctx context.Context, client *instanceoapi.ClientWithResponses, path string) (int64, int, error) { + // Use du command via process exec to get accurate size + args := []string{"-sb", path} + req := instanceoapi.ProcessExecJSONRequestBody{ + Command: "du", + Args: &args, + } + rsp, err := client.ProcessExecWithResponse(ctx, req) + if err != nil { + return 0, 0, err + } + if rsp.JSON200 == nil || (rsp.JSON200.ExitCode != nil && *rsp.JSON200.ExitCode != 0) { + return 0, 0, fmt.Errorf("du command failed") + } + + var size int64 + if rsp.JSON200.StdoutB64 != nil { + // Parse du output: "SIZE\tPATH" + stdout := decodeBase64(*rsp.JSON200.StdoutB64) + fmt.Sscanf(stdout, "%d", &size) + } + + // Get file count + args2 := []string{path, "-type", "f"} + req2 := instanceoapi.ProcessExecJSONRequestBody{ + Command: "find", + Args: &args2, + } + rsp2, err := client.ProcessExecWithResponse(ctx, req2) + if err != nil { + return size, 0, 
err + } + + fileCount := 0 + if rsp2.JSON200 != nil && rsp2.JSON200.StdoutB64 != nil { + stdout := decodeBase64(*rsp2.JSON200.StdoutB64) + // Count lines + for _, c := range stdout { + if c == '\n' { + fileCount++ + } + } + } + + return size, fileCount, nil +} + +// downloadDirAsZip downloads a directory as a zip file +func downloadDirAsZip(ctx context.Context, client *instanceoapi.ClientWithResponses, path string) ([]byte, error) { + params := &instanceoapi.DownloadDirZipParams{Path: path} + rsp, err := client.DownloadDirZipWithResponse(ctx, params) + if err != nil { + return nil, fmt.Errorf("download request failed: %w", err) + } + if rsp.StatusCode() != http.StatusOK { + return nil, fmt.Errorf("download returned %d: %s", rsp.StatusCode(), string(rsp.Body)) + } + return rsp.Body, nil +} + +// uploadZip uploads a zip file to the specified destination +func uploadZip(ctx context.Context, client *instanceoapi.ClientWithResponses, zipData []byte, destPath string) error { + // Create multipart form + var body bytes.Buffer + writer := multipart.NewWriter(&body) + + // Add zip_file + part, err := writer.CreateFormFile("zip_file", "archive.zip") + if err != nil { + return fmt.Errorf("create form file failed: %w", err) + } + if _, err := io.Copy(part, bytes.NewReader(zipData)); err != nil { + return fmt.Errorf("copy zip data failed: %w", err) + } + + // Add dest_path + if err := writer.WriteField("dest_path", destPath); err != nil { + return fmt.Errorf("write dest_path failed: %w", err) + } + + if err := writer.Close(); err != nil { + return fmt.Errorf("close writer failed: %w", err) + } + + rsp, err := client.UploadZipWithBodyWithResponse(ctx, writer.FormDataContentType(), &body) + if err != nil { + return fmt.Errorf("upload request failed: %w", err) + } + if rsp.StatusCode() != http.StatusCreated { + return fmt.Errorf("upload returned %d: %s", rsp.StatusCode(), string(rsp.Body)) + } + return nil +} + +func decodeBase64(s string) string { + b, _ := 
base64.StdEncoding.DecodeString(s) + return string(b) +} + +func avgInt64(vals []int64) int64 { + if len(vals) == 0 { + return 0 + } + var total int64 + for _, v := range vals { + total += v + } + return total / int64(len(vals)) +} + +// TestZstdTransferTiming measures the time to download a directory as a tar.zst and re-upload it. +// This compares performance against the zip endpoint baseline. +// +// Run with: go test -v -run TestZstdTransferTiming ./e2e/... +func TestZstdTransferTiming(t *testing.T) { + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not available") + } + + image := headlessImage + name := containerName + "-zstd-transfer" + + logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelInfo})) + baseCtx := logctx.AddToContext(context.Background(), logger) + + // Clean slate + _ = stopContainer(baseCtx, name) + + env := map[string]string{ + "WIDTH": "1024", + "HEIGHT": "768", + } + + // Start container + _, exitCh, err := runContainer(baseCtx, image, name, env) + require.NoError(t, err, "failed to start container") + defer stopContainer(baseCtx, name) + + ctx, cancel := context.WithTimeout(baseCtx, 5*time.Minute) + defer cancel() + + t.Logf("Waiting for API...") + require.NoError(t, waitHTTPOrExit(ctx, apiBaseURL+"/spec.yaml", exitCh), "api not ready") + + client, err := apiClient() + require.NoError(t, err, "failed to create API client") + + // Populate user-data with some content + t.Logf("Populating user-data by browsing...") + populateStart := time.Now() + err = populateUserData(ctx, client) + require.NoError(t, err, "failed to populate user-data") + t.Logf("User-data population took %dms", time.Since(populateStart).Milliseconds()) + + // Get directory stats for reference + dirSize, fileCount, err := getDirStats(ctx, client, "/home/kernel/user-data") + require.NoError(t, err, "failed to get dir stats") + t.Logf("Directory stats: %d files, ~%d KB", fileCount, dirSize/1024) + + const iterations = 
3 + levels := []string{"fastest", "default", "better"} + + for _, level := range levels { + t.Logf("\n=== Zstd Level: %s ===", level) + var downloadTimes, uploadTimes []time.Duration + var archiveSizes []int64 + + for i := 0; i < iterations; i++ { + t.Logf("--- Iteration %d ---", i+1) + + // Download as zstd + downloadStart := time.Now() + zstdData, err := downloadDirAsZstd(ctx, client, "/home/kernel/user-data", level) + downloadTime := time.Since(downloadStart) + require.NoError(t, err, "zstd download failed") + downloadTimes = append(downloadTimes, downloadTime) + archiveSizes = append(archiveSizes, int64(len(zstdData))) + + t.Logf(" Download: %dms (size: %d KB, ratio: %.1f%%)", + downloadTime.Milliseconds(), + len(zstdData)/1024, + float64(len(zstdData))/float64(dirSize)*100) + + // Upload to a different location + destPath := fmt.Sprintf("/tmp/zstd-upload-%s-%d", level, i) + uploadStart := time.Now() + err = uploadZstd(ctx, client, zstdData, destPath, 0) + uploadTime := time.Since(uploadStart) + require.NoError(t, err, "zstd upload failed") + uploadTimes = append(uploadTimes, uploadTime) + + t.Logf(" Upload: %dms", uploadTime.Milliseconds()) + t.Logf(" Total: %dms", (downloadTime + uploadTime).Milliseconds()) + } + + // Calculate averages + avgDownload := avg(downloadTimes) + avgUpload := avg(uploadTimes) + avgArchiveSize := avgInt64(archiveSizes) + + t.Logf("\n--- Level %s Results ---", level) + t.Logf(" Average archive size: %d KB (%.1f%% of original)", + avgArchiveSize/1024, + float64(avgArchiveSize)/float64(dirSize)*100) + t.Logf(" Average download: %dms", avgDownload.Milliseconds()) + t.Logf(" Average upload: %dms", avgUpload.Milliseconds()) + t.Logf(" Average round-trip: %dms", (avgDownload + avgUpload).Milliseconds()) + t.Logf(" Download throughput: %.1f MB/s (uncompressed)", float64(dirSize)/1024/1024/avgDownload.Seconds()) + t.Logf(" Upload throughput: %.1f MB/s (uncompressed)", float64(dirSize)/1024/1024/avgUpload.Seconds()) + } +} + +// 
downloadDirAsZstd downloads a directory as a tar.zst archive +func downloadDirAsZstd(ctx context.Context, client *instanceoapi.ClientWithResponses, path string, level string) ([]byte, error) { + compressionLevel := instanceoapi.DownloadDirZstdParamsCompressionLevel(level) + params := &instanceoapi.DownloadDirZstdParams{ + Path: path, + CompressionLevel: &compressionLevel, + } + rsp, err := client.DownloadDirZstdWithResponse(ctx, params) + if err != nil { + return nil, fmt.Errorf("download request failed: %w", err) + } + if rsp.StatusCode() != http.StatusOK { + return nil, fmt.Errorf("download returned %d: %s", rsp.StatusCode(), string(rsp.Body)) + } + return rsp.Body, nil +} + +// uploadZstd uploads a tar.zst archive to the specified destination +func uploadZstd(ctx context.Context, client *instanceoapi.ClientWithResponses, archiveData []byte, destPath string, stripComponents int) error { + // Create multipart form + var body bytes.Buffer + writer := multipart.NewWriter(&body) + + // Add archive file + part, err := writer.CreateFormFile("archive", "archive.tar.zst") + if err != nil { + return fmt.Errorf("create form file failed: %w", err) + } + if _, err := io.Copy(part, bytes.NewReader(archiveData)); err != nil { + return fmt.Errorf("copy archive data failed: %w", err) + } + + // Add dest_path + if err := writer.WriteField("dest_path", destPath); err != nil { + return fmt.Errorf("write dest_path failed: %w", err) + } + + // Add strip_components if non-zero + if stripComponents > 0 { + if err := writer.WriteField("strip_components", fmt.Sprintf("%d", stripComponents)); err != nil { + return fmt.Errorf("write strip_components failed: %w", err) + } + } + + if err := writer.Close(); err != nil { + return fmt.Errorf("close writer failed: %w", err) + } + + rsp, err := client.UploadZstdWithBodyWithResponse(ctx, writer.FormDataContentType(), &body) + if err != nil { + return fmt.Errorf("upload request failed: %w", err) + } + if rsp.StatusCode() != http.StatusCreated { + 
return fmt.Errorf("upload returned %d: %s", rsp.StatusCode(), string(rsp.Body)) + } + return nil +} + +// TestZipVsZstdComparison runs a direct comparison of zip and zstd endpoints +// +// Run with: go test -v -run TestZipVsZstdComparison ./e2e/... +func TestZipVsZstdComparison(t *testing.T) { + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not available") + } + + image := headlessImage + name := containerName + "-comparison" + + logger := slog.New(slog.NewTextHandler(t.Output(), &slog.HandlerOptions{Level: slog.LevelInfo})) + baseCtx := logctx.AddToContext(context.Background(), logger) + + // Clean slate + _ = stopContainer(baseCtx, name) + + env := map[string]string{ + "WIDTH": "1024", + "HEIGHT": "768", + } + + // Start container + _, exitCh, err := runContainer(baseCtx, image, name, env) + require.NoError(t, err, "failed to start container") + defer stopContainer(baseCtx, name) + + ctx, cancel := context.WithTimeout(baseCtx, 5*time.Minute) + defer cancel() + + t.Logf("Waiting for API...") + require.NoError(t, waitHTTPOrExit(ctx, apiBaseURL+"/spec.yaml", exitCh), "api not ready") + + client, err := apiClient() + require.NoError(t, err, "failed to create API client") + + // Populate user-data + t.Logf("Populating user-data by browsing...") + err = populateUserData(ctx, client) + require.NoError(t, err, "failed to populate user-data") + + // Get directory stats + dirSize, fileCount, err := getDirStats(ctx, client, "/home/kernel/user-data") + require.NoError(t, err, "failed to get dir stats") + t.Logf("Directory stats: %d files, ~%d KB\n", fileCount, dirSize/1024) + + const iterations = 3 + type result struct { + name string + downloadMs int64 + uploadMs int64 + archiveSize int64 + } + var results []result + + // Test Zip + { + var downloadTotal, uploadTotal, sizeTotal int64 + for i := 0; i < iterations; i++ { + start := time.Now() + zipData, err := downloadDirAsZip(ctx, client, "/home/kernel/user-data") + downloadTime := 
time.Since(start).Milliseconds() + require.NoError(t, err) + + start = time.Now() + err = uploadZip(ctx, client, zipData, fmt.Sprintf("/tmp/zip-test-%d", i)) + uploadTime := time.Since(start).Milliseconds() + require.NoError(t, err) + + downloadTotal += downloadTime + uploadTotal += uploadTime + sizeTotal += int64(len(zipData)) + } + results = append(results, result{ + name: "Zip", + downloadMs: downloadTotal / iterations, + uploadMs: uploadTotal / iterations, + archiveSize: sizeTotal / iterations, + }) + } + + // Test Zstd (fastest) + { + var downloadTotal, uploadTotal, sizeTotal int64 + for i := 0; i < iterations; i++ { + start := time.Now() + zstdData, err := downloadDirAsZstd(ctx, client, "/home/kernel/user-data", "fastest") + downloadTime := time.Since(start).Milliseconds() + require.NoError(t, err) + + start = time.Now() + err = uploadZstd(ctx, client, zstdData, fmt.Sprintf("/tmp/zstd-fastest-%d", i), 0) + uploadTime := time.Since(start).Milliseconds() + require.NoError(t, err) + + downloadTotal += downloadTime + uploadTotal += uploadTime + sizeTotal += int64(len(zstdData)) + } + results = append(results, result{ + name: "Zstd (fastest)", + downloadMs: downloadTotal / iterations, + uploadMs: uploadTotal / iterations, + archiveSize: sizeTotal / iterations, + }) + } + + // Test Zstd (default) + { + var downloadTotal, uploadTotal, sizeTotal int64 + for i := 0; i < iterations; i++ { + start := time.Now() + zstdData, err := downloadDirAsZstd(ctx, client, "/home/kernel/user-data", "default") + downloadTime := time.Since(start).Milliseconds() + require.NoError(t, err) + + start = time.Now() + err = uploadZstd(ctx, client, zstdData, fmt.Sprintf("/tmp/zstd-default-%d", i), 0) + uploadTime := time.Since(start).Milliseconds() + require.NoError(t, err) + + downloadTotal += downloadTime + uploadTotal += uploadTime + sizeTotal += int64(len(zstdData)) + } + results = append(results, result{ + name: "Zstd (default)", + downloadMs: downloadTotal / iterations, + uploadMs: 
uploadTotal / iterations, + archiveSize: sizeTotal / iterations, + }) + } + + // Print comparison table + t.Logf("\n=== Zip vs Zstd Comparison (%d iterations each) ===", iterations) + t.Logf("Directory: %d files, %d KB\n", fileCount, dirSize/1024) + t.Logf("%-18s | %-10s | %-10s | %-10s | %-12s", "Method", "Download", "Upload", "Total", "Archive Size") + t.Logf("%-18s-+-%-10s-+-%-10s-+-%-10s-+-%-12s", "--------", "--------", "------", "-----", "------------") + + baseline := results[0] + for _, r := range results { + totalMs := r.downloadMs + r.uploadMs + baselineTotal := baseline.downloadMs + baseline.uploadMs + speedup := float64(baselineTotal) / float64(totalMs) + sizeRatio := float64(r.archiveSize) / float64(dirSize) * 100 + + t.Logf("%-18s | %7dms | %7dms | %7dms | %6dKB (%.0f%%)", + r.name, + r.downloadMs, + r.uploadMs, + totalMs, + r.archiveSize/1024, + sizeRatio) + + if r.name != baseline.name { + t.Logf(" -> %.2fx %s than %s", speedup, speedDesc(speedup), baseline.name) + } + } +} + +func speedDesc(ratio float64) string { + if ratio > 1 { + return "faster" + } + return "slower" +} diff --git a/server/go.mod b/server/go.mod index 3a7ea256..a9ce8ec7 100644 --- a/server/go.mod +++ b/server/go.mod @@ -33,6 +33,7 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.18.3 github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect diff --git a/server/go.sum b/server/go.sum index ecefca2e..324dfabf 100644 --- a/server/go.sum +++ b/server/go.sum @@ -44,6 +44,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/kelseyhightower/envconfig v1.4.0 
h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= +github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= diff --git a/server/lib/oapi/oapi.go b/server/lib/oapi/oapi.go index 595f89f4..0fae4552 100644 --- a/server/lib/oapi/oapi.go +++ b/server/lib/oapi/oapi.go @@ -89,6 +89,14 @@ const ( Stdout ProcessStreamEventStream = "stdout" ) +// Defines values for DownloadDirZstdParamsCompressionLevel. +const ( + Best DownloadDirZstdParamsCompressionLevel = "best" + Better DownloadDirZstdParamsCompressionLevel = "better" + Default DownloadDirZstdParamsCompressionLevel = "default" + Fastest DownloadDirZstdParamsCompressionLevel = "fastest" +) + // Defines values for LogsStreamParamsSource. const ( Path LogsStreamParamsSource = "path" @@ -626,6 +634,22 @@ type DownloadDirZipParams struct { Path string `form:"path" json:"path"` } +// DownloadDirZstdParams defines parameters for DownloadDirZstd. +type DownloadDirZstdParams struct { + // Path Absolute directory path to archive and download. + Path string `form:"path" json:"path"` + + // CompressionLevel Compression level. Higher levels produce smaller archives but take longer. 
+ // - fastest: ~zstd level 1, maximum speed (~300-500 MB/s) + // - default: ~zstd level 3, balanced speed/ratio (~150 MB/s) + // - better: ~zstd level 7, better ratio (~50-80 MB/s) + // - best: ~zstd level 11, best ratio (~20-40 MB/s) + CompressionLevel *DownloadDirZstdParamsCompressionLevel `form:"compression_level,omitempty" json:"compression_level,omitempty"` +} + +// DownloadDirZstdParamsCompressionLevel defines parameters for DownloadDirZstd. +type DownloadDirZstdParamsCompressionLevel string + // FileInfoParams defines parameters for FileInfo. type FileInfoParams struct { // Path Absolute path of the file or directory. @@ -660,6 +684,18 @@ type UploadZipMultipartBody struct { ZipFile openapi_types.File `json:"zip_file"` } +// UploadZstdMultipartBody defines parameters for UploadZstd. +type UploadZstdMultipartBody struct { + // Archive The tar.zst archive file. + Archive openapi_types.File `json:"archive"` + + // DestPath Absolute destination directory to extract the archive to. + DestPath string `json:"dest_path"` + + // StripComponents Number of leading path components to strip during extraction (like tar --strip-components). + StripComponents *int `json:"strip_components,omitempty"` +} + // WriteFileParams defines parameters for WriteFile. type WriteFileParams struct { // Path Destination absolute file path. @@ -744,6 +780,9 @@ type UploadFilesMultipartRequestBody UploadFilesMultipartBody // UploadZipMultipartRequestBody defines body for UploadZip for multipart/form-data ContentType. type UploadZipMultipartRequestBody UploadZipMultipartBody +// UploadZstdMultipartRequestBody defines body for UploadZstd for multipart/form-data ContentType. +type UploadZstdMultipartRequestBody UploadZstdMultipartBody + // StartFsWatchJSONRequestBody defines body for StartFsWatch for application/json ContentType. 
type StartFsWatchJSONRequestBody = StartFsWatchRequest @@ -918,6 +957,9 @@ type ClientInterface interface { // DownloadDirZip request DownloadDirZip(ctx context.Context, params *DownloadDirZipParams, reqEditors ...RequestEditorFn) (*http.Response, error) + // DownloadDirZstd request + DownloadDirZstd(ctx context.Context, params *DownloadDirZstdParams, reqEditors ...RequestEditorFn) (*http.Response, error) + // FileInfo request FileInfo(ctx context.Context, params *FileInfoParams, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -943,6 +985,9 @@ type ClientInterface interface { // UploadZipWithBody request with any body UploadZipWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // UploadZstdWithBody request with any body + UploadZstdWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // StartFsWatchWithBody request with any body StartFsWatchWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -1354,6 +1399,18 @@ func (c *Client) DownloadDirZip(ctx context.Context, params *DownloadDirZipParam return c.Client.Do(req) } +func (c *Client) DownloadDirZstd(ctx context.Context, params *DownloadDirZstdParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDownloadDirZstdRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) FileInfo(ctx context.Context, params *FileInfoParams, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewFileInfoRequest(c.Server, params) if err != nil { @@ -1462,6 +1519,18 @@ func (c *Client) UploadZipWithBody(ctx context.Context, contentType string, body return c.Client.Do(req) } +func (c *Client) 
UploadZstdWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewUploadZstdRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) StartFsWatchWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewStartFsWatchRequestWithBody(c.Server, contentType, body) if err != nil { @@ -2392,6 +2461,67 @@ func NewDownloadDirZipRequest(server string, params *DownloadDirZipParams) (*htt return req, nil } +// NewDownloadDirZstdRequest generates requests for DownloadDirZstd +func NewDownloadDirZstdRequest(server string, params *DownloadDirZstdParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/fs/download_dir_zstd") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "path", runtime.ParamLocationQuery, params.Path); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + if params.CompressionLevel != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "compression_level", runtime.ParamLocationQuery, *params.CompressionLevel); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewFileInfoRequest generates requests for FileInfo func NewFileInfoRequest(server string, params *FileInfoParams) (*http.Request, error) { var err error @@ -2665,6 +2795,35 @@ func NewUploadZipRequestWithBody(server string, contentType string, body io.Read return req, nil } +// NewUploadZstdRequestWithBody generates requests for UploadZstd with any type of body +func NewUploadZstdRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/fs/upload_zstd") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + // NewStartFsWatchRequest calls the generic StartFsWatch builder with application/json body func NewStartFsWatchRequest(server string, body StartFsWatchJSONRequestBody) (*http.Request, error) { var bodyReader io.Reader @@ -3568,6 +3727,9 @@ type ClientWithResponsesInterface interface { // DownloadDirZipWithResponse request DownloadDirZipWithResponse(ctx context.Context, params *DownloadDirZipParams, reqEditors ...RequestEditorFn) (*DownloadDirZipResponse, error) + // DownloadDirZstdWithResponse request + DownloadDirZstdWithResponse(ctx context.Context, params *DownloadDirZstdParams, reqEditors ...RequestEditorFn) (*DownloadDirZstdResponse, error) + // FileInfoWithResponse request FileInfoWithResponse(ctx context.Context, params *FileInfoParams, reqEditors ...RequestEditorFn) (*FileInfoResponse, error) @@ -3593,6 +3755,9 @@ type ClientWithResponsesInterface interface { // UploadZipWithBodyWithResponse request with any body UploadZipWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*UploadZipResponse, error) + // UploadZstdWithBodyWithResponse request with any body + UploadZstdWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*UploadZstdResponse, error) + // StartFsWatchWithBodyWithResponse request with any body StartFsWatchWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*StartFsWatchResponse, error) @@ -4019,6 +4184,30 @@ func (r DownloadDirZipResponse) StatusCode() int { return 0 } +type DownloadDirZstdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *BadRequestError + JSON404 
*NotFoundError + JSON500 *InternalError +} + +// Status returns HTTPResponse.Status +func (r DownloadDirZstdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DownloadDirZstdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type FileInfoResponse struct { Body []byte HTTPResponse *http.Response @@ -4189,6 +4378,30 @@ func (r UploadZipResponse) StatusCode() int { return 0 } +type UploadZstdResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *BadRequestError + JSON404 *NotFoundError + JSON500 *InternalError +} + +// Status returns HTTPResponse.Status +func (r UploadZstdResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r UploadZstdResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type StartFsWatchResponse struct { Body []byte HTTPResponse *http.Response @@ -4864,6 +5077,15 @@ func (c *ClientWithResponses) DownloadDirZipWithResponse(ctx context.Context, pa return ParseDownloadDirZipResponse(rsp) } +// DownloadDirZstdWithResponse request returning *DownloadDirZstdResponse +func (c *ClientWithResponses) DownloadDirZstdWithResponse(ctx context.Context, params *DownloadDirZstdParams, reqEditors ...RequestEditorFn) (*DownloadDirZstdResponse, error) { + rsp, err := c.DownloadDirZstd(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseDownloadDirZstdResponse(rsp) +} + // FileInfoWithResponse request returning *FileInfoResponse func (c *ClientWithResponses) FileInfoWithResponse(ctx context.Context, params *FileInfoParams, reqEditors ...RequestEditorFn) (*FileInfoResponse, error) { rsp, err := c.FileInfo(ctx, params, reqEditors...) 
@@ -4943,6 +5165,15 @@ func (c *ClientWithResponses) UploadZipWithBodyWithResponse(ctx context.Context, return ParseUploadZipResponse(rsp) } +// UploadZstdWithBodyWithResponse request with arbitrary body returning *UploadZstdResponse +func (c *ClientWithResponses) UploadZstdWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*UploadZstdResponse, error) { + rsp, err := c.UploadZstdWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseUploadZstdResponse(rsp) +} + // StartFsWatchWithBodyWithResponse request with arbitrary body returning *StartFsWatchResponse func (c *ClientWithResponses) StartFsWatchWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*StartFsWatchResponse, error) { rsp, err := c.StartFsWatchWithBody(ctx, contentType, body, reqEditors...) @@ -5722,6 +5953,46 @@ func ParseDownloadDirZipResponse(rsp *http.Response) (*DownloadDirZipResponse, e return response, nil } +// ParseDownloadDirZstdResponse parses an HTTP response from a DownloadDirZstdWithResponse call +func ParseDownloadDirZstdResponse(rsp *http.Response) (*DownloadDirZstdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DownloadDirZstdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest BadRequestError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest NotFoundError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 
500: + var dest InternalError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + // ParseFileInfoResponse parses an HTTP response from a FileInfoWithResponse call func ParseFileInfoResponse(rsp *http.Response) (*FileInfoResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -6016,6 +6287,46 @@ func ParseUploadZipResponse(rsp *http.Response) (*UploadZipResponse, error) { return response, nil } +// ParseUploadZstdResponse parses an HTTP response from a UploadZstdWithResponse call +func ParseUploadZstdResponse(rsp *http.Response) (*UploadZstdResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &UploadZstdResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest BadRequestError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest NotFoundError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest InternalError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + // ParseStartFsWatchResponse parses an HTTP response from a StartFsWatchWithResponse call func ParseStartFsWatchResponse(rsp *http.Response) (*StartFsWatchResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -6790,6 +7101,9 @@ type ServerInterface interface { // Download a directory as a ZIP archive // (GET /fs/download_dir_zip) DownloadDirZip(w http.ResponseWriter, r *http.Request, params 
DownloadDirZipParams) + // Download a directory as a tar.zst archive + // (GET /fs/download_dir_zstd) + DownloadDirZstd(w http.ResponseWriter, r *http.Request, params DownloadDirZstdParams) // Get information about a file or directory // (GET /fs/file_info) FileInfo(w http.ResponseWriter, r *http.Request, params FileInfoParams) @@ -6811,6 +7125,9 @@ type ServerInterface interface { // Upload a zip archive and extract it // (POST /fs/upload_zip) UploadZip(w http.ResponseWriter, r *http.Request) + // Upload a tar.zst archive and extract it + // (POST /fs/upload_zstd) + UploadZstd(w http.ResponseWriter, r *http.Request) // Watch a directory for changes // (POST /fs/watch) StartFsWatch(w http.ResponseWriter, r *http.Request) @@ -6961,6 +7278,12 @@ func (_ Unimplemented) DownloadDirZip(w http.ResponseWriter, r *http.Request, pa w.WriteHeader(http.StatusNotImplemented) } +// Download a directory as a tar.zst archive +// (GET /fs/download_dir_zstd) +func (_ Unimplemented) DownloadDirZstd(w http.ResponseWriter, r *http.Request, params DownloadDirZstdParams) { + w.WriteHeader(http.StatusNotImplemented) +} + // Get information about a file or directory // (GET /fs/file_info) func (_ Unimplemented) FileInfo(w http.ResponseWriter, r *http.Request, params FileInfoParams) { @@ -7003,6 +7326,12 @@ func (_ Unimplemented) UploadZip(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } +// Upload a tar.zst archive and extract it +// (POST /fs/upload_zstd) +func (_ Unimplemented) UploadZstd(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + // Watch a directory for changes // (POST /fs/watch) func (_ Unimplemented) StartFsWatch(w http.ResponseWriter, r *http.Request) { @@ -7350,6 +7679,48 @@ func (siw *ServerInterfaceWrapper) DownloadDirZip(w http.ResponseWriter, r *http handler.ServeHTTP(w, r) } +// DownloadDirZstd operation middleware +func (siw *ServerInterfaceWrapper) DownloadDirZstd(w http.ResponseWriter, r 
*http.Request) { + + var err error + + // Parameter object where we will unmarshal all parameters from the context + var params DownloadDirZstdParams + + // ------------- Required query parameter "path" ------------- + + if paramValue := r.URL.Query().Get("path"); paramValue != "" { + + } else { + siw.ErrorHandlerFunc(w, r, &RequiredParamError{ParamName: "path"}) + return + } + + err = runtime.BindQueryParameter("form", true, true, "path", r.URL.Query(), &params.Path) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "path", Err: err}) + return + } + + // ------------- Optional query parameter "compression_level" ------------- + + err = runtime.BindQueryParameter("form", true, false, "compression_level", r.URL.Query(), &params.CompressionLevel) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "compression_level", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.DownloadDirZstd(w, r, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + // FileInfo operation middleware func (siw *ServerInterfaceWrapper) FileInfo(w http.ResponseWriter, r *http.Request) { @@ -7508,6 +7879,20 @@ func (siw *ServerInterfaceWrapper) UploadZip(w http.ResponseWriter, r *http.Requ handler.ServeHTTP(w, r) } +// UploadZstd operation middleware +func (siw *ServerInterfaceWrapper) UploadZstd(w http.ResponseWriter, r *http.Request) { + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.UploadZstd(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + // StartFsWatch operation middleware func (siw *ServerInterfaceWrapper) StartFsWatch(w http.ResponseWriter, r *http.Request) { @@ -8080,6 +8465,9 @@ func HandlerWithOptions(si ServerInterface, 
options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/fs/download_dir_zip", wrapper.DownloadDirZip) }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/fs/download_dir_zstd", wrapper.DownloadDirZstd) + }) r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/fs/file_info", wrapper.FileInfo) }) @@ -8101,6 +8489,9 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/fs/upload_zip", wrapper.UploadZip) }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/fs/upload_zstd", wrapper.UploadZstd) + }) r.Group(func(r chi.Router) { r.Post(options.BaseURL+"/fs/watch", wrapper.StartFsWatch) }) @@ -8737,6 +9128,60 @@ func (response DownloadDirZip500JSONResponse) VisitDownloadDirZipResponse(w http return json.NewEncoder(w).Encode(response) } +type DownloadDirZstdRequestObject struct { + Params DownloadDirZstdParams +} + +type DownloadDirZstdResponseObject interface { + VisitDownloadDirZstdResponse(w http.ResponseWriter) error +} + +type DownloadDirZstd200ApplicationzstdResponse struct { + Body io.Reader + ContentLength int64 +} + +func (response DownloadDirZstd200ApplicationzstdResponse) VisitDownloadDirZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/zstd") + if response.ContentLength != 0 { + w.Header().Set("Content-Length", fmt.Sprint(response.ContentLength)) + } + w.WriteHeader(200) + + if closer, ok := response.Body.(io.ReadCloser); ok { + defer closer.Close() + } + _, err := io.Copy(w, response.Body) + return err +} + +type DownloadDirZstd400JSONResponse struct{ BadRequestErrorJSONResponse } + +func (response DownloadDirZstd400JSONResponse) VisitDownloadDirZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type DownloadDirZstd404JSONResponse struct{ NotFoundErrorJSONResponse } + 
+func (response DownloadDirZstd404JSONResponse) VisitDownloadDirZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type DownloadDirZstd500JSONResponse struct{ InternalErrorJSONResponse } + +func (response DownloadDirZstd500JSONResponse) VisitDownloadDirZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + type FileInfoRequestObject struct { Params FileInfoParams } @@ -9051,6 +9496,49 @@ func (response UploadZip500JSONResponse) VisitUploadZipResponse(w http.ResponseW return json.NewEncoder(w).Encode(response) } +type UploadZstdRequestObject struct { + Body *multipart.Reader +} + +type UploadZstdResponseObject interface { + VisitUploadZstdResponse(w http.ResponseWriter) error +} + +type UploadZstd201Response struct { +} + +func (response UploadZstd201Response) VisitUploadZstdResponse(w http.ResponseWriter) error { + w.WriteHeader(201) + return nil +} + +type UploadZstd400JSONResponse struct{ BadRequestErrorJSONResponse } + +func (response UploadZstd400JSONResponse) VisitUploadZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type UploadZstd404JSONResponse struct{ NotFoundErrorJSONResponse } + +func (response UploadZstd404JSONResponse) VisitUploadZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type UploadZstd500JSONResponse struct{ InternalErrorJSONResponse } + +func (response UploadZstd500JSONResponse) VisitUploadZstdResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + type 
StartFsWatchRequestObject struct { Body *StartFsWatchJSONRequestBody } @@ -9965,6 +10453,9 @@ type StrictServerInterface interface { // Download a directory as a ZIP archive // (GET /fs/download_dir_zip) DownloadDirZip(ctx context.Context, request DownloadDirZipRequestObject) (DownloadDirZipResponseObject, error) + // Download a directory as a tar.zst archive + // (GET /fs/download_dir_zstd) + DownloadDirZstd(ctx context.Context, request DownloadDirZstdRequestObject) (DownloadDirZstdResponseObject, error) // Get information about a file or directory // (GET /fs/file_info) FileInfo(ctx context.Context, request FileInfoRequestObject) (FileInfoResponseObject, error) @@ -9986,6 +10477,9 @@ type StrictServerInterface interface { // Upload a zip archive and extract it // (POST /fs/upload_zip) UploadZip(ctx context.Context, request UploadZipRequestObject) (UploadZipResponseObject, error) + // Upload a tar.zst archive and extract it + // (POST /fs/upload_zstd) + UploadZstd(ctx context.Context, request UploadZstdRequestObject) (UploadZstdResponseObject, error) // Watch a directory for changes // (POST /fs/watch) StartFsWatch(ctx context.Context, request StartFsWatchRequestObject) (StartFsWatchResponseObject, error) @@ -10531,6 +11025,32 @@ func (sh *strictHandler) DownloadDirZip(w http.ResponseWriter, r *http.Request, } } +// DownloadDirZstd operation middleware +func (sh *strictHandler) DownloadDirZstd(w http.ResponseWriter, r *http.Request, params DownloadDirZstdParams) { + var request DownloadDirZstdRequestObject + + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.DownloadDirZstd(ctx, request.(DownloadDirZstdRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "DownloadDirZstd") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } 
else if validResponse, ok := response.(DownloadDirZstdResponseObject); ok { + if err := validResponse.VisitDownloadDirZstdResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + // FileInfo operation middleware func (sh *strictHandler) FileInfo(w http.ResponseWriter, r *http.Request, params FileInfoParams) { var request FileInfoRequestObject @@ -10733,6 +11253,37 @@ func (sh *strictHandler) UploadZip(w http.ResponseWriter, r *http.Request) { } } +// UploadZstd operation middleware +func (sh *strictHandler) UploadZstd(w http.ResponseWriter, r *http.Request) { + var request UploadZstdRequestObject + + if reader, err := r.MultipartReader(); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode multipart body: %w", err)) + return + } else { + request.Body = reader + } + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.UploadZstd(ctx, request.(UploadZstdRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "UploadZstd") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(UploadZstdResponseObject); ok { + if err := validResponse.VisitUploadZstdResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + // StartFsWatch operation middleware func (sh *strictHandler) StartFsWatch(w http.ResponseWriter, r *http.Request) { var request StartFsWatchRequestObject @@ -11260,126 +11811,133 @@ func (sh *strictHandler) StopRecording(w http.ResponseWriter, r *http.Request) { // Base64 encoded, 
gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+w9a3PbNrZ/BcO7M4nv6pVXd+r9lCZO65ukydju7W7rXC1MHklYgwALgJKVjP/7HRyA", - "D5GgXrbjuLMznUaWSOAA5/3AwZcolmkmBQijo8MvkQKdSaEB//iBJifwRw7aHCkllf0qlsKAMPYjzTLO", - "YmqYFMN/aynsdzqeQUrtp78omESH0X8Nq/GH7lc9dKNdX1/3ogR0rFhmB4kO7YTEzxhd96JXUkw4i7/W", - "7MV0dupjYUAJyr/S1MV05BTUHBTxD/ain6V5I3ORfCU4fpaG4HyR/c0/bkd7xVl8+V7mGgr8WACShNkX", - "Kf+oZAbKMEs3E8o19KKs9tWX6CI3xkG4OiEOSdyvxEjC7EbQ2JAFM7OoF4HI0+jw94jDxES9SLHpzP6b", - "siThEPWiCxpfRr1oItWCqiT61IvMMoPoMNJGMTG1Wxhb0Mfu6+b0Z8sMiJwQfIbQGL+uZk3kwv6ZZ5Ef", - "JjjBTPJkfAlLHVpewiYMFLE/2/XZZ0mS21eJmYGbOOpFzECK77dG919QpejS/i3ydIxv+ekmNOcmOnzS", - "QmWeXoCyizMsBZxcQQbUrMzrR7fbPgWkuKv2Kv5BYilVwgQ1uFvlACSTmvk9a4+0bI/0z31Guu5FCv7I", - "mYLEIuUqskNXiJAX/wbHtK8UUAOvmYLYSLXcj1JTmQQI5UPmXidJMTqxD5LHMjaUE4euHoHBdED+9uLF", - "wYC8dpjBjf/bixeDqBdl1Fg2jw6j//t91P/bpy/Pes+v/xIFSCqjZtYG4uWFljw3UAPCPmhniHHpjUmG", - "g/9uD97YTZwptJmvgYOBj9TM9tvHDUsoAE9wmtsH/ARiJLTpftCzpA37cQLCOHb2pKuKSWorIS95NqMi", - "T0GxmEhFZstsBqKJf9r//LL/26j/ff/TX/8SXGx7YUxnnC6tmmLTHdczA5ScrTW9ypUCYUjixibuOcIE", - "ydgVcB1kbAUTBXo2VtTA5iH908Q+bQf+6TN5nNIluQAics4JmxAhDUnAQGzoBYeD4KQLloQIqjkbPrYW", - "/uDWKjr9CtotUXTaodlKjeZUXEjPJMDpckXoj5pC/7V9xK4+ZZwzDbEUiSYXYBYAogDEajVCRUK0ocp4", - "6k3lHAjl0usly10DBEuw1AI6CuHkJprP7sVOii8sUD6oBBQkhDNtLFv+ftUjy091NZNRpnS5RDNTMp/O", - "yGLGuANiysR0QN7n2hBrXFEmCDWEA9WGPCWZZMLoQR3SJsi1DUnp1bH79SnuXfVHczVrf9QGsjGie5yu", - "qvkXO6JcAaeGzYHYIXVj1eSxZTyLDCaYYVa72cEONiMeRxtnoMYapqm3RytbZNRtjJQAITYcVBko4sex", - "Cynpj7x3QJAnKxA92WgidOqG0oxu6HzQmk4hQIaNgYsHg2NfQZwb+MjpcoFMvK0sWd0q/5YlWHAjkmpI", - "ElvrpCl+4qDJYm3bU/x7+D90Tt1HHKA29oCcWRPMfjmjmtA4Bo3M8iijU3jUI4/Q4bgyj3ooMh5dKLnQ", - "oB6ROVXMSms9OBdHVzTNOByS84guKDPEvjyYSiMfP5oZk+nD4RDcM4NYpo8O/k4UmFwJUnvcMMPh8cHf", - "z6NzEbKJrBkrczPWEK9Q23ctantPr5Bs3BqZlb0sRd3j2aO0zgjT5LsRUpd7Jzp8NhrtRGu4+VvSg0aA", - "dyQH+5LlnAYVVKtr0QMUVL46FBI/8SRs1W61PxPKOCShXVcl0A3qmgGZU56DxyQk5GLp7Hm0i9mEULE8", - 
"cMIiARWA59RQkVCVEISXTJRMcYD6wlrwaJPI3KwZTOYmy822o+VI8O3hfp2BmYGqFuT5JSH+lUnO+bIa", - "8kJKDlS0qKOYIEQgbxiHYzGRbXnE9Dhhaj1UaEAzTWjlDQwC8PSsQzO29N8e7p1VcSkqahdGQD4ZOH86", - "pSY6jBJqoI9vB3Yv7CrZZTnn6IIZTR5bn6hHzqNELa5U3/53Hlm7+Dzqq0Vf9e1/59HBIDSDoCG4f6Aa", - "iP2psMMndkqpgjuxtVNVmDxtImGfYXyxNBCgk1P2GQUL/jwgIzKpgcFADzb7s7hGD93KZL2CDmo49Jve", - "RU6nS20gPZqXGrmJGI0PkHhGxRQI2AcHLfmxDfnRyQRiyw9b0+G+uCyn2hepu1FJOFCEW0rsb4Oa7f7q", - "5Ojl2VHUi349OcZ/Xx+9O8IPJ0c/v3x/FDDjG8jHX3vdBss7pg3iLbBGay3atbV3jAnHwJalQZiCEEvD", - "dV1ssJRKARP8nZx20NZLwuUU51pWorcWoGwTWc3makglOS2VlLU8Bl3GgDY0zQKayep6O30F0YJqkimZ", - "5LGjom3EW4flV586hLD3cg438CRv4lFZi3onj2pTqK/ymYDEudJSESP3CvVtO9LWoT67zfvHphLQZrwp", - "xgbaWOAtDxWqYVOIqhdpFW8aWMtcxbD1mE2DopigV1tFaIc+XJ74XM6OFuePIDB09eEtKbJBbe6Vlys2", - "uFE5tHMaiWV+0IXJNNhsLsnL4Fo+UhPPfPhrT77qiH+97o57lT7A0+ej3aNgrzujXwNyPCEyZcZA0iO5", - "Bo1sMWPTmfX76Jwybh0r94q1J1yoEcnHi1KvgL4b9Z6Nek9f9J6MPoVBxK0ds4TDZnxNCH5tQc41uISB", - "NUfIYgaCcOu0zxksrKopA59DBbhMawDE1q8P634FGGsaxzMlU2Zh/9I9Oz5KXvlHCZ0YULX1F8aLdWKF", - "zhUQZghNaOZi7QIWxEK94uMhTeBezoAmk5z3cLbyG95Bnp1hx9ed4caSbJ49HW0XfPyoQOu3sCdlJ7mi", - "Dqi1gUH/VKk3LE2hIsFoYCN8VCdRi+5Rzz1LFRBDs8xp0b1jg2UyJd2k0i5hSTK7PUTbzRExDHbScOH5", - "3/lYoR1dL9MLyXFynGhAjmg8I3YKomcy5wm5AEJrzxKdZ5lUxnm8V4k0UvJz8VgDkH88eYJrWaYkgQlG", - "1aTQBwPiIySaMBHzPAFyHp2g33weWd/odMYmxn18ZRR3n15y/9WbF+fR4NzFC12AjGkX8IwRQMq1tFDG", - "Mr3wKkv7XJQb76+mcLnwL5ztr2f0AofdYUMb0hp3NyivlbQC/+gK4lsLglG7vBTD1kth5YiQuebLtmqi", - "aroaM/39UzvT70aiapqn0IzvbqQqqsdKytWYZ3gZuY9muv3A0D+xr5JMsTnjMIUOsUP1ONcQ8MGaQ1Lt", - "yME+bYcSOUftUcj4djrcrT3g4uBGo+aRiugZcF5uudUFuQha4vEiMNavUl1aHq5ckse07pId+BF9fMVN", - "wkRoAZttLhDzbvL6EsqjeJx9adU/HIk5U1JgJLoMcFpYNZhSFfutr+1GRfmtIOVuccluBHaHHx06N7Lh", - "jWKPtM50JcLKdbSZsNBKZf6iTWl2/cVjLQUU9DLgiplxONjtl0rsIxiwC4/gQpHji++ehyMR3z3vg7Cv", - "J8Q9Si7yycRxVkcoctvBZG66B7vuxt5bxvl+QvSUTa2SRep1PNyg3lWUaXx8RahFZ0cn76P149bjIf7x", - "t8fv3kW96Pjns6gX/fTLx81hED/3GiI+QVN0X22CZiwlH8/+2b+g8SUk3dsQSx4g2Z9hQQyolNmVx5Ln", - 
"qdCbklK9SMnFprHsIztmt3DUngN0zY6dZnQh6hvG+YdJdPj7+vBPQHVf95rxacq5tK7d2JjlZi340j9N", - "KMk05Insl6t//PHsnwdNweose1RERTkYZjCtRupQl2GkHfusZhNxzqGpL8L6CFbc7ovS1kz2sf2naYuD", - "Ty287iHPj2thQXphBRIl2o62jh+yUCnMh9MSWcevw6LW/z4Ove6qHPtUW76HhLCqsiagZMtoXZ6zJCyI", - "qTXHx9SEo4EYrXPYqJOZf22HgGAnqxlqcr0jNorKFY0vOy3bLZWyfJzFgfUdacNSap2RVx9/ITlGTTNQ", - "MQhDp3UtKDAFv0GNHhXqk7DJyl7NqNOtbrs22Si9KIW0K2VSQaxAI+ZJCqm1ER30ZTalQ4MHwy0fK5ya", - "lRC9yoWw6HPLhiSsi7oRmzCxn9J5TQ21kmyhmAuANkjPZSuZyPJABiahhm5lWCT1WQYbo4fluJ82rvlG", - "9qIFxxcWaTtce4X2CQOii0iqghF8gPjHB9G2IRW/FAW0SoftYjudHpGMLrmklkwzBdpKKDEtMejTzFIR", - "ziYQL2Pu02n6ptgs0ycVsdhVBE1QCGdj3q2C1MpbWVYIlphtJRpKQeoGZ5qc44vnURfLWvgDWsAFwt3P", - "RZIOtyCe5eKyDrDP+pe1BNsxsasBBRVOsk+YYHq2ndqoCj2Lt7qUxkb/2+nD9te6rFit/V4zcXZQchW0", - "/qU9gW0ID1S+dThDQuQ0VgBCz6Q5gakPS95CnP4nF58v626n3mlcU6XaEbn9FSO2uwy0ZUW8G+uRNV+z", - "PoeJ5RYlQN2kNn6HMYOps2IXesXGbkLZPhFoVSJ6nWPRIowgy57GSm7v7zazetzQ8dX6QPhPUrHPUmDR", - "Ps5FaCpzYQbkI55AsI4Gfq8J1lr1iIApXfne4iEs6RwEG2p0/9dCHG8xfyIXIjB9noUnv0nq2I19q8lj", - "ashixmIs8s9AWfmzOtXuTLHzkFunk0/BvMK09J7ZRZYkIDZUkbm0d5VT8C9tzIn65zrAfsM4fLRep9ZM", - "Cr0f/FMl8ywcqMCffIGOIj+ueHu7VoIFzsp89/z5wW5HY+RChOLiFlb8CSPhBby/dMC7TdXQYiY1+lLF", - "3rr0l8u0YAoy2ffYypoqrlOrsd/oX6mJb/XgTXkqCr0FO/ogXO5p6ZTNYXNYpyRuPx4p3+XLLVL9nYUL", - "uAM3PL4zUTSFcGL+pDLlioes/p9klkDnoBRLQBPtzmH6HTioFwg/HW2KEQUjJkXOMxDrqNlrgKR2S4eI", - "EOgi83ssTl1svTsvUcFRj8sXRwrW787aDUnpFVYnss9wLN7/0A0BlrJpX1P5/octMfJkNFot2t4y8X5q", - "ZHZTQpMqBjvOZn45TlNIGDXAl0QbmWE2UOaGTBWNYZJzome5sUp/QM5mTJMUy0fQpWYC859K5ZmBhMxZ", - "AhI3KxwO3eX0muNgC9AdHl07W2ZwBldmb8PuZgefrNljlLwEvbFswcBVyMGCK0xGGzwv7LzfmcQEfJrl", - "pm6QdxV62nHb4s4+xrx7igcgosPoLSgBnByndAqavPx4HPWiOSjtQBkNngxGqAgzEDRj0WH0bDAaPPNV", - "pLhhw6LOZjjhdFpohTigFt6DmgLWzOCTLkMNV0xjsEMK0D2SZ9ZnJI1BA5U6c0aJzjNQc6alSnrngoqE", - "4AmPXBjGcdvKp1/D/ExKrsl5xJk2IJiYnkdYtcmZAMI0kRfI9dZcmkhVHDVAQelLyrB8wdKKk3FJdOiK", - "xYpZ3uD6HSpAmx9kstzpFH2D24vdbERyiyW5PTSSpLitvvT99/Oo379kUl+6co5+P2Haut39aZafR58O", - 
"9q/AcACFyap6zjr3rgir6u3wdDQKGGwIv8N3gud9yqV5ZDcPQFz3oudupJDvV844bLaSuO5FL7Z5b7UP", - "AzYlyNOUqmV0GP3i6LIEkdNcxDOPBAu8hxlfq6g3z7ikSR+uDAi06/pUJP3iWYtzqQMi4Bd8zbKElYyp", - "JcdyCPKZZYSqeMbmlmHgymAPAzODlOTCitjhTKYwvETOHlZTD8/z0ehZbM1V/AS9c6HBEGX5Ja3P4FbF", - "xB5sSAouPBdfkQ3dfh2VS30pkhO/x+vYMc25YRlVZmjdu35CDV3HkdVWdpd5Vc9Y1nToxz3BxKI1Emv8", - "tzp8+MzCG8ktTtHJsK4opzH4s0YFunbDekPBvuz/RvufR/3vB+P+py9Pek9fvAj7Qp9ZNrZWQBvE3yqC", - "LE61WnxRC1nmMuAlBVRQP05zbcoStZQKNgFtBlYsHtRjiBdMWBbcpPNK8Pzhj5C1v1a81bC7n4x7Eopj", - "l9TgSAGSXkDMOa4pmYNpooAm9y3wWiKoxGaNyB9TbQWSPqgLwXKJXhp6u2XouqOkMnd14oXsW+XlqvvL", - "DVTpuuBgu73MvirMHbl3nVyKIBEk94q2U5bm3NU/4D6vdJsJW5MNHGHoqBs9ZfTqjrDTio5tj5xbmb92", - "lCHUtskF1uZMswvGmVmWBsw3Y6n8xBJfVCkXtWBgA82JotM2Jzbz3Fj0KRIXwi0oynV26BHpowx86czu", - "iVSE2mmVcWf7e3Z60ez2MGVzcKdcvMjgQDUMzsXZykHTDT0WQlZA2Vjjjkiz1bhjX7lhB/pG5AWC4g50", - "oSxDNFHEQ4NiLBo3ye7yQNodYaB14O1mktuHye3K7hcL74vzamkdLl/HoTOI2YRBUmMCvY0oxzMG40tY", - "bmBxfyiomgczN8jOouTyMkw3IG/tz1VuoXay4VyEzisMyBsUDRYwBTNrOsyhZPDa6z2iAc6FBSZ8uIFQ", - "Q4oeD/GUmcFEASSgL43MBlJNh1f2f5mSRg6vnjxxHzJOmRi6wRKYDGZO1PgY30wKqXQ9lNPnMIdqvZrk", - "2kdwY78VmgNk2tvdDgsyCYYH/GmbO2KH5mGefbkBEYrU8i0pMqd+6gYo0uUWhK/L9G+3qDqjl1Clie/K", - "mGllu689jtZaLyylUxhmrjqjmmmzS9SyVyoACA56rwh9RTOTK2uaVggq4sMb0Ck57xZiLo9P5j7XzZfW", - "sBhKy9tF/t1+Z2rmR02Srhoy2LPImjuW5VeOjHkLZSWR7tJ0TBAup5hmNyy+1K7VkSvycH5RjYLIBczo", - "nFmSpksyp2r5d2JydJh9o7KCgQfn4ldrP11IM6stBQcs1kqwCsCBkSk5Z+hhmkq84cxOwKf+XJthuNTH", - "5RhopVUTHLhQ6gU18QywsBi4LzfzovBfXrB756Lf980efyb9Plp+ZERc2MHZii7w8K+QhDwt0ul3xH61", - "Ao99paMnr2/Ev3PAVLaCQw811mjzbS23EZFF54kO4ehTKHeEl2aGZl/MuEzJMvuWtBZ2eTUWsG4s+P6B", - "K6mSQF7Bn/u9K+MhcM79K/vaq00mA+rrF+9cFw0XY3yyOIR8AzQ/H32/+b3VltC3mEXoWI4ljYkeuvaq", - "4/I4I5JJHoqUrbagvatwWbjR7b4h0ao2xK3zG2Jdt1JCMUVZbX+BF9dzdQu8uKawd42Xds/cvcMRJUrc", - "EpObcdbzze+tdhq/lTgGQl5vDNXEW5G7WIOyNy5/8G1jCwvd/gSIQnyUOJILwSVNLHeNPzOscJmCCVVU", - "mVwJTSj57fijK+GppZzcCW9Ely48iyqssdKLq4F/P/9rpn5jGabIFE3BgNJ4inHr3thFHsxa0MWi8MC/", - 
"fe+PHFAcuExfUZ63SgO9evpxU7nfp52Us9/XGzmUdteLNZalPUhY9Q1+iHTpkVUXIYQWhOaXXNKrJbxx", - "UUvjCXWVosrWZtvS0sbucd8CCe0m9Kr2bm1CQjFW6x33AEnmRzAr3e+KY64t7JVkw5k2qIh0J91UTfj2", - "E0IPk1KqVQdIpbJPuKsVe4C0gvUhiHlXX9mmDeyo12WfFC3o7jCvchu2CeYxKnv+AeIJV4BNx7DiZh0z", - "K6BJaVUGefkEaOJtyu1YGScrTAk7/rfCzTI2YPrV4cob2RAo+u3qbs31uydisfitbFC81qogDg1O0I9r", - "Zzo6ubt9tObuiis6zvDsy/G1oYpSiAeIyFMwgc62NdQN8biPnrGsxLAr6OrOSrzkXC6Kui+sX2Ri6qZw", - "dYccvELweV4FqfQywHVOHnTUORbmwa0VNpYWSUdl4j4tTGvtCLxBu11T00Kg7lr/52v/1vcpXV/fjLtw", - "a7V/iKWy7O+hi7pAOeDE22t1dih897VlzRRLmJHfXGcvV8HMjK6c91btQ6hFbog5nPt+a6yxK+kn9aNv", - "tdrs0mk2cjs+qJfb3qAWdh0/7EnYv7GsIusaAv80RE7rJfYNEi3pfVEkbjrKJGtHK+9KmQdOb26P0z1P", - "peCyg32WfhHsjxxCRw4rnlj47dh4iqttNOIyyW2fC7knQnOLqUea7F65g756lcSGX4otv/bH08CdNG3S", - "m8wqcmt4G+hBeJfBOxAlHtc5EZt9hkCbmQJRMssePqJO8eykXRGeaAh4gU0kDV2lRKdP6NoEvdFH7rGv", - "iKumf2fgyjhog47dpsBe/fqPUOXR6VGt205l1PpKEuwSQhNc9ZfoH/3T06P+Kwdb/yx4K8Z7SBj1hyIn", - "xA6P7Xt8YcrjphA7iOq7U/T2aYm6QHOf64dIprjRrV32NdlO7JYUa63y9emwX+0j20QuXtdMH9qKYtxd", - "9KLXeeR9UvaB6GwBsXJd6nfPn3eBmbr7z4JgrW0c4ZhvG41/w7jKnm5J0eHswatR9C+t5iwy91VSkcup", - "HlYbG461y6lv29YhhxsE4W7TWEu5haApblgqj0YG24iFp5lIzuVihfIalym021000SwFX5aVhIRNiptA", - "mCYetDWM2a1VdpmntvbwbNUDY99+Lro3jVbeNrRRlVnC+qa1V0gzWKCJnIOyUzsGycor/oa+7X23435U", - "9MVXF8woqpatCwIxqeFuH6k6jvvrHAmdUia084P9nY7E98o8F1IQLmPKZ1Kbw++fPn16O9dEnrl7THyP", - "yMbVethtRFe3CfqLQMsraAKFqq0bFl857XAXnl3n7Z5fuT6v61bJ0MG47nsL77Ok66h1q+mwuqrUUUSA", - "OD2DOJmE3NHt6Nd6WN/ZKY92l+yvSwft3v4BCqga7ftrPL8FvHdc5LGKYGxLvRHD2Ar7blG80kX9fnBc", - "b/gdUoWug/c3hlu6Brlfqt7g18NLtnqOJIjotwwPJGz2y2tdx9eZhBtaim/vLOyF0PqVDt/UUeoPbx9k", - "otCKkvJOisJs7aY4d6vZRppzt0b8eahu9QaN/9DdzSsNOm8VWUN8urwqIOj+rl4o8LVp7471mFtUSIX5", - "Xx5kuWGtp79bXjfqE7aFTYNP/WmkzsoNCvdkP9UuNAgQ3w/1CwYebMSt0nzuxoX1dChzsykQV22ezM3a", - "iNw9yaMbRJYC10NsjDE1Ln6wNm7z5of/JFDuIIFSo2qZm0bArLqHtkrChqWrO+NS3V1wl0eKWj1luzsM", - "dPUmvrfDRPd0CrM8gpQpmDP0GYv+tPV2ty2s+xMinVKsOEJSR/za7FmZtCq741bVEwOCh//Lu5trZ/rL", - 
"a5x9VqB8vSuRhUIvnMba1F93s2jEDRum2fMb1wXXumW71OOKgCt/7b/x16L0X669nkROqttj2neqDMiP", - "OVVUGIDEN1o/efPq2bNn3w/WZ0BWQDl19Sh7QVJcCbYnIBaUp6On6xibWUnGOMc7R5ScKtC6RzJsuEWM", - "WrrYJ+HUNRWubfcJGLXsv5yYUPv703w6dQe+sO9X417RWuNOtXRMUC1i7UV01w/41JhryKCRF0GY7SQK", - "Z057dB4EKi4VctW+N7Bcy8LedQpl5QqjdrVsi1+LnqeqhPLWTspQzuvDrm5bq3luoPTurpVv+OKAoO59", - "so5Fi0uTHl4vA9yBspdPJdcG5IPgS6wUrmRdBoocvyYxFa7DzZRpAwoS17jESpBBG8syW4fkWjv9O8Nx", - "oGX/7uaVL4W737YxRmar6gcX8v8BAAD//4GIUOSSnQAA", + "H4sIAAAAAAAC/+x9eXMbN/bgV0H1TpWlHV7ykWw0fzm2nGhtxy5L2cwk9PIHdj+S+Kkb6ABoUrTL89m3", + "8IC+0bwkWVZ2qlIxRXYDD3gn3oXPQSiSVHDgWgWnnwMJKhVcAf7xI40+wJ8ZKH0mpZDmq1BwDVybjzRN", + "YxZSzQQf/rcS3HynwgUk1Hz6m4RZcBr8j2E5/tD+qoZ2tC9fvvSCCFQoWWoGCU7NhMTNGHzpBS8En8Us", + "/Fqz59OZqc+5Bslp/JWmzqcjFyCXIIl7sBf8IvQrkfHoK8Hxi9AE5wvMb+5xM9qLmIVXb0WmIMePASCK", + "mHmRxu+lSEFqZuhmRmMFvSCtfPU5mGZaWwjrE+KQxP5KtCDMbAQNNVkxvQh6AfAsCU7/CGKY6aAXSDZf", + "mH8TFkUxBL1gSsOroBfMhFxRGQUfe4FepxCcBkpLxudmC0MD+sR+3Zz+cp0CETOCzxAa4tflrJFYmT+z", + "NHDDeCdYiDiaXMFa+ZYXsRkDSczPZn3mWRJl5lWiF2AnDnoB05Dg+63R3RdUSro2f/MsmeBbbroZzWId", + "nJ60UJklU5BmcZolgJNLSIHq2rxudLPtc0CKu26v4p8kFEJGjFONu1UMQFKhmNuz9kjr9kj/OmSkL71A", + "wp8ZkxAZpFwHZugSEWL632CZ9oUEquElkxBqIdeHUWoiIg+hvEvt6yTKRyfmQXIkQk1jYtHVIzCYD8j3", + "z54dD8hLixnc+O+fPRsEvSCl2rB5cBr83z9G/e8/fn7Se/rlb4GHpFKqF20gnk+ViDMNFSDMg2aGEJfe", + "mGQ4+J/twRu7iTP5NvMlxKDhPdWLw/ZxyxJywCOc5vYB/wAhEtr8MOhZ1Ib9PAKuLTs70pX5JJWVkOdx", + "uqA8S0CykAhJFut0AbyJf9r/9Lz/+6j/Q//j3//mXWx7YUylMV0bNcXme65nASg5W2t6kUkJXJPIjk3s", + "c4RxkrJriJWXsSXMJKjFRFIN24d0TxPztBn450/kKKFrMgXCszgmbEa40CQCDaGm0xiOvZOuWOQjqOZs", + "+NhG+L1bK+n8K2i3SNJ5h2YrNJpVcT49E0FM1zWhP2oK/ZfmEbP6hMUxUxAKHikyBb0C4DkgRqsRyiOi", + "NJXaUW8ilkBoLJxeMtw1QLA4SwygIx9ObqL5zF7spfj8AuWdjEBCRGKmtGHLP657ZP2xqmZSyqQqlqgX", + "UmTzBVktWGyBmDM+H5C3mdLEGFeUcUI1iYEqTR6TVDCu1aAKaRPkyoYk9Prc/voY9678o7majT8qDekE", + "0T1J6mr+2Z4olxBTzZZAzJCqsWpyZBjPIINxppnRbmaw4+2Ix9EmKciJgnni7NHSFhl1GyMFQIgNC1UK", + 
"krhxzEIK+iNvLRDkpAbRyVYToVM3FGZ0Q+eDUnQOHjJsDJw/6B37GsJMw/uYrlfIxLvKkvpWubcMwYId", + "kZRDktBYJ03xE3pNFmPbXuDfw/9Nl9R+xAEqYw/IpTHBzJcLqggNQ1DILI9SOodHPfIIDxzX+lEPRcaj", + "qRQrBfIRWVLJjLRWgzE/u6ZJGsMpGQd0RZkm5uXBXGhx9GihdapOh0OwzwxCkTw6/geRoDPJSeVxzXQM", + "R8f/GAdj7rOJjBkrMj1RENao7bsWtb2l10g2do3MyF6WoO5x7FFYZ4Qp8t0Iqcu+E5w+GY32ojXc/B3p", + "QSHAe5KDeclwToMKytW16AFyKq8PhcRPHAkbtVvuz4yyGCLfrssC6AZ1LYAsaZyBwyREZLq29jzaxWxG", + "KF8fW2ERgfTAc6Epj6iMCMJLZlIkOEB1YS14lI5EpjcMJjKdZnrX0TIk+PZwvy1AL0CWC3L8EhH3yiyL", + "43U55FSIGChvUUc+gY9AXrEYzvlMtOURU5OIyc1QoQHNFKHlaWDggadnDjQTQ//t4d4YFZegorZuBOST", + "gT1PJ1QHp0FENfTxbc/u+Y9KZln2cDRlWpEjcybqkXEQydW17Jv/xoGxi8dBX676sm/+GwfHA98MnPrg", + "/pEqIOan3A6fmSmF9O7Ezoeq3ORpEwn7BJPpWoOHTi7YJxQs+POAjMisAgYDNdh+nsU1Ouhqk/VyOqjg", + "0G16FzldrJWG5GxZaOQmYhQ+QMIF5XMgYB4ctOTHLuRHZzMIDT/sTIeH4rKY6lCk7kclfkcRbikxvw0q", + "tvuLD2fPL8+CXvDbh3P89+XZmzP88OHsl+dvzzxmfAP5+Guv22B5w5RGvHnWaKxFs7b2jjFuGdiwNHCd", + "E2JhuG7yDRZSyWOCvxHzDtp6TmIxx7nWpeitOCjbRFaxuRpSScwLJWUsj0GXMaA0TVKPZjK63kxfQrSi", + "iqRSRFloqWgX8dZh+VWn9iHsrVjCDU6SNzlRGYt6rxPVNldfeWYCEmZSCUm0OMjVt+tIO7v6zDYf7puK", + "QOnJNh8bKG2ANzyUq4ZtLqpeoGS4bWAlMhnCzmM2DYp8gl5lFb4denf1wcVy9rQ4fwKOrqt3r0keDWpz", + "r7iq2eBaZtCOaUSG+UHlJtNgu7kkrrxreU91uHDurwP5qsP/9bLb71WcAR4/He3vBXvZ6f0akPMZEQnT", + "GqIeyRQoZIsFmy/MuY8uKYvNwcq+YuwJ62pE8nGi1Cmg70a9J6Pe42e9k9FHP4i4tRMWxbAdXzOCXxuQ", + "MwU2YGDMEbJaACexObQvGayMqikcn0MJuExjAITmXO/X/RLQ1zQJF1IkzMD+uXt2fJS8cI8SOtMgK+vP", + "jRdziOUqk0CYJjSiqfW1c1gRA3XtjIc0gXu5ABrNsriHsxXfxB3k2el2fNnpbizI5snj0W7Ox/cSlHoN", + "B1J2lElqgdroGHRPFXrD0BQqEvQGNtxHVRI16B717LNUAtE0Ta0WPdg3WARTkm0q7QrWJDXbQ5TZHB7C", + "YC8N55//jfMVmtHVOpmKGCfHiQbkjIYLYqYgaiGyOCJTILTyLFFZmgqp7Yn3OhJaiHjMjxQA+efJCa5l", + "nZAIZuhVE1wdD4jzkCjCeBhnEZBx8AHPzePAnI0uFmym7ccXWsb20/PYffXq2TgYjK2/0DrImLIOzxAB", + "pLESBspQJFOnspSLRdnx/q7zIxf+hbP9/ZJOcdg9NrQhrXF3vfJaCiPwz64hvDUnGDXLS9BtveZGjnCR", + "qXjdVk1Uzus+0z8+tiP9diQq51kCTf/uVqqiaiKFqPs8/cvInDfT7ge6/ol5laSSLVkMc+gQO1RNMgWe", + 
"M1hzSKosOZinzVA8i1F75DK+HQ63a/cccXCjUfMISdQC4rjYcqMLMu61xMOVZ6zfhLwyPFweSY5o9Uh2", + "7EZ0/hU7CeO+BWy3uYAvu8nrsy+O4nD2uZX/cMaXTAqOnujCwWlgVaALVey2vrIbJeW3nJT7+SW7Edjt", + "frTo3MqGN/I90irTFQgr1tFmwlwrFfGLNqWZ9eePtRSQ95QB10xP/M5ut1RiHkGHnX8E64qcTL976vdE", + "fPe0D9y8HhH7KJlms5nlrA5X5K6DiUx3D/alG3uvWRwfJkQv2NwoWaRey8MN6q2jTOHjNaEWXJ59eBts", + "HrfqD3GPvz5/8yboBee/XAa94Odf3293g7i5NxDxBzRFD9UmaMZS8v7yX/0pDa8g6t6GUMQekv0FVkSD", + "TJhZeSjiLOFqW1CqF0ix2jaWeWTP6BaO2rOAbtixi5SueHXD4vjdLDj9Y7P7x6O6v/Sa/mkax8Ic7SZa", + "r7drwefuaUJJqiCLRL9Y/dH7y38dNwWrtexREeXpYBjBNBqpQ136kXbuoppNxNkDTXUR5oxgxO2hKG3N", + "ZB47fJq2OPjYwusB8vy84hakUyOQKFFmtE38kPpSYd5dFMg6f+kXte73ie91m+XYp8rwPUSElZk1HiVb", + "eOuyjEV+QUyNOT6h2u8NRG+dxUaVzNxrezgEO1lNU52pPbGRZ64ofNlq2W6plGaTNPSs70xpllBzGHnx", + "/leSodc0BRkC13Re1YIcQ/Bb1OhZrj4Jm9X2akGtbrXbtc1G6QUJJF0hkxJiCQoxTxJIjI1ooS+iKR0a", + "3OtueV/iVNdc9DLj3KDPLhsivy7qRmzE+GFK5yXV1EiylWTWAdogPRutZDzNPBGYiGq6k2ERVWcZbPUe", + "FuN+3LrmG9mLBhyXWKTMcO0Vmic08C4iKRNG8AHiHh8Eu7pU3FIk0DIcto/tdHFGUrqOBTVkmkpQRkLx", + "eYFBF2YWksRsBuE6jF04Td0Um0X4pCQWswqvCQr+aMybOkituJVhBW+K2U6ioRCkdnCmyBhfHAddLGvg", + "92gB6wi3P+dBOtyCcJHxqyrALupf5BLsxsQ2BxSkP8g+Y5ypxW5qo0z0zN/qUhpbz99WH7a/VkXGauX3", + "iomzh5IroXUvHQhsQ3ig8q3C6RMiF6EE4Goh9AeYO7fkLfjpf7b++SLvdu4OjRuyVDs8t7+hx3afgXbM", + "iLdjPTLma9qPYWa4RXKQN8mN32NMb+gs34VevrHbUHaIB1oWiN50sGgRhpdlL0Ipdj/vNqN6saaT682O", + "8J+FZJ8Ex6R9nIvQRGRcD8h7rEAwBw38XhHMteoRDnNa+97gwS/pLARbcnT/j4E43GH+SKy4Z/os9U9+", + "k9CxHftWg8dUk9WChZjkn4I08qc+1f5MsfeQO4eTL0C/wLD0gdFFFkXAt2SR2bB3GVNwL22NibrnOsB+", + "xWJ4b06dSjHB1WHwz6XIUr+jAn9yCTqS/FQ77e2bCeaplfnu6dPj/UpjxIr7/OIGVvwJPeE5vL92wLtL", + "1tBqIRSepfK9teEvG2nBEGR0aNnKhiyuC6OxX6nfqA5vtfCmqIrC04IZfeBP9zR0ypaw3a1TELcbjxTv", + "xusdQv2diQu4Azcs35lJmoA/MP+hNOXyh4z+n6WGQJcgJYtAEWXrMN0OHFcThB+PtvmIvB6TPObp8XVU", + "7DVAUrulIiIEOo/8nvML61vvjkuUcFT98nlJwebd2bghCb3G7ET2Cc752x+7IcBUNuVyKt/+uCNGTkaj", + "etL2joH3Cy3SmxKakCGYcbbzy3mSQMSohnhNlBYpRgNFpslc0hBmWUzUItNG6Q/I5YIpkmD6CB6pGcf4", + 
"p5RZqiEiSxaBwM3yu0P3qV6zHGwAusPStct1CpdwrQ827G5W+GTMHi3FFaitaQsarn0HLLjGYLTGemF7", + "+l0IDMAnaaarBnlXoqcZty3uzGPMHU+xACI4DV6D5BCT84TOQZHn78+DXrAEqSwoo8HJYISKMAVOUxac", + "Bk8Go8ETl0WKGzbM82yGs5jOc60QetTCW5BzwJwZfNJGqOGaKXR2CA6qR7LUnBlJY1BPps6SUaKyFOSS", + "KSGj3phTHhGs8Mi4ZjFuW/H0S1heChErMg5ipjRwxufjALM2Y8aBMEXEFLnemEszIfNSAxSULqUM0xcM", + "rVgZFwWnNlksn+UVrt+iApT+UUTrvaroG9ye72bDk5svye6hFiTBbXWp73+Mg37/igl1ZdM5+v2IKXPs", + "7s/TbBx8PD48A8MC5Cer8jlzuLdJWGVvh8ejkcdgQ/gtviOs9ymW5pDdLID40gue2pF8Z79ixmGzlcSX", + "XvBsl/fqfRiwKUGWJFSug9PgV0uXBYgxzXi4cEgwwDuY8bWSerM0FjTqw7UGjnZdn/Konz9rcC6URwT8", + "iq8ZljCSMTHkWAxBPrGUUBku2NIwDFxr7GGgF5CQjBsRO1yIBIZXyNnDcurhOBuNnoTGXMVP0BtzBZpI", + "wy9JdQa7KsYPYEOSc+GYf0U2tPt1Viz1OY8+uD3exI5JFmuWUqmH5njXj6immziy3MruNK/yGcOaFv24", + "JxhYNEZihf/qw/trFl6J2OAUDxnmKBrTEFytUY6u/bDeULDP+7/T/qdR/4fBpP/x80nv8bNn/rPQJ5ZO", + "jBXQBvH3kiDzqlaDL2ogS20EvKCAEuqjJFO6SFFLKGczUHpgxOJx1Yc4Zdyw4DadV4Dnij981v5G8VbB", + "7mEy7sTnxy6owZICRD2PmLNcUzAHU0QCje5b4LVEUIHNCpEfUWUEkjquCsFiiU4aOrtlaLujJCKzeeK5", + "7Kvzctn95QaqdJNzsN1e5lAVZkvubSeX3EkE0b2i7YIlWWzzH3Cfa91m/NZkA0foOupGT+G9uiPstLxj", + "uyPnVuavlDL42jZZx9qSKTZlMdPrwoD5ZiyVn1nkkirFquIMbKA5knTe5sRmnBuTPnlkXbg5RdnODj0i", + "nJchXluzeyYkoWZaqW1tf89Mz5vdHuZsCbbKxYmMGKiCwZhf1gpNt/RY8FkBRWONOyLNVuOOQ+WGGegb", + "kRcIii3oQlmGaKKIhwbFGDRuk91FQdodYaBV8HYzye3c5GZl94uFt3m9WlKFy+VxqBRCNmMQVZhA7SLK", + "scZgcgXrLSzuioLKeTByg+zMCy4v3HQD8tr8XMYWKpUNY+6rVxiQVygaDGASFsZ0WELB4JXXe0QBjLkB", + "xl/cQKgmeY+HcM70YCYBIlBXWqQDIefDa/O/VAothtcnJ/ZDGlPGh3awCGaDhRU1zse3EFxIVXXl9GNY", + "QrleRTLlPLih2woVA6TK2d0WCyLyugdctc0dsUOzmOdQbkCEIrV8S4rMqp+qAYp0uQPhqyL82y2qLukV", + "lGHiuzJmWtHuLw5HG60XltA5DFObnVHOtP1I1LJXSgAIDnqvCH1BU51JY5qWCMr9w1vQKeK4W4jZOD5Z", + "ulh3vDaGxVAY3s7j7+Y7XTE/KpK0bshgzyJj7hiWr5WMOQulFki3YTrGSSzmGGbXLLxSttWRTfKw56IK", + "BZEpLOiSGZKma7Kkcv0PojM8MLtGZTkDD8b8N2M/TYVeVJaCA+ZrJZgFYMFIpVgyPGHqUrzhzFbAJ66u", + "TTNc6lExBlpp5QTH1pU6pTpcACYWQ+zSzZwo/C8n2N3hot93zR5/If0+Wn5kRKzbwdqK1vHwXz4JeZGH", + 
"0++I/SoJHodKR0de38j5zgJT2goWPVQbo821tdxFROadJzqEowuh3BFemhGaQzFjIyXr9FvSWtjlVRvA", + "urHg+gfWQiWeuIKr+70r48FT5/6Vz9r1JpMe9fWrO1znDRdDfDIvQr4Bmp+Oftj+Xr0l9C1GETqWY0hj", + "poa2veqkKGdEMsl8nrJ6C9q7cpf5G90e6hItc0PsOr8h1rUrJRRDlOX253ixPVd3wIttCnvXeGn3zD3Y", + "HVGgxC4xuhlnPd3+Xr3T+K34MRDyamOoJt7y2MUGlL2y8YNvG1uY6PYXQBTio8CRWPFY0Mhw1+QTwwyX", + "OWhfRpXOJFeEkt/P39sUnkrIyVZ4I7pUfrIo3Rq1XlwN/Lv5XzL5O0sxRCZpAhqkwirGnXtj53EwY0Hn", + "i8KCf/PenxmgOLCRvjw9r04DvWr4cVu638e9lLPb1xsdKM2u52ssUnuQsKob/BDp0iGrKkIIzQnNLbmD", + "XpWOdiBYTeXgk9LkSFNZiZcmueMF02HMWMcb6XrMNxA2+V3piIjZDKQiis05tlvkOl6TGVUaZDEh1mXy", + "aMwjqH5lPlMJWMH9iaXuQEzDBYOlgWQKujkKspHfIV/hKrNHD4Wtep/bPTyK5aJ3cEB+ZvMFSPtX0fCN", + "qITGMRToVWSaaaLpFZBY8DnIwZj3LSaUPiX/Nti2Q5CTHnE5hgaxEJGjfz8ZjfrPRiPy9sehOjYvupS0", + "+otPemRKY8pDY0qZN4eIAXL075NnlXct4uqvft/L8Zm/8mzU/1+1l1pgnvTw2+KNx6P+0+KNDoxUqGWC", + "wwRVdJQdAPJPZS2W26qgV/nNgowflK+ybF+p6Lj3RmLx0vH2/2eiUdeXXYhHI78meaqhE4t10VB0ftxV", + "JmxtrvktaNj9bMKy+2WboNDKq7TWfIBk8xPoWnPQvAtAC3sF2cRMabTTVSfdlD1KD1MmD5NSylV7SKU8", + "vsU2lfYB0gqmzyHmbfp5mzaw4WjX8S3v0HmHYefbOLphmLd0dzxAPOEKsCcjJiRuYmYJNCoO3V5e/gA0", + "ckfu3VgZJ8tNQjP+t8LNItSg+2Xt+Y1sCRT9ZnW35hm7J2Ix+C2PMnjrX04cCqygn1RK3jq5u115eHe5", + "Zx0ljodyfGWoPFPsASLyArSn8XcFdUOshlQLlhYYtvmu3UHb53EsVnlaLKZ3Mz63U9i07BicQnBpMBIS", + "4WSAbSw/6EgDz82DW8v7LiySjsTtQzo8V7q1OIN2t57PuUDdNz3apUZvbuO8ufwDd+HWUqMRS0VW9EMX", + "dZ5s6Zmz16rskLs2N1Z9UHS8IL/Zxoe2wINpVfo2W6lhvg7iPuaw3s1bY419ST+qVgZXSleKg7MWu/FB", + "tRrhBqUCm/jhQML+naUlWVcQ+JchclqtQGqQaIvenXNlC8Hv6xrt4osx384Y212kNY/omDdcot31R87H", + "eWvMlXtVvPchNVwvhQrZygy9+2Na8ymd1C+B7i6zLdtyxWBNBFSc5eu2lliyNO8u4mDD6qKYXeEmkX4f", + "n+mX7229c64hL3I83Im4eO728C8uMprk2iE2Vnk6TEfxSaVhxV2dATw9MXbH7YG1vrhsb/fKXzn7MwNf", + "I4eSK1duO7bWxrfPmrhMctvVtvdEbHYxVSe12SvbPkXVSWz4Od/yL67oH2z/jia9ibQkt4aTAh0PztPg", + "/A4FHjf5Hra7GjzN+3JEiTR9+Ii6wI4UZkVYJ+pxHjWRNLT5p52uJNt88ZU6s499RVw13UIarrWF1usP", + "2hYPqF6q5svnvjir9DAsz8IuPxd7r9EIV/05+Gf/4uKs/8LC1r/03jX2FiJGXauJGTHDY1NEl+571BRi", + 
"x7XIXR6la4k6T1Duy0MkU9zo1i67SjcrdguKNYf5zUlGv5lHdnF4vqwYX7Tl/PyKce+ikdCs6K7V2Vir", + "dgn9d0+fdoGZ2FtlvWBtbMdlmW8XjX9Dd+yB3oy8b+yDV6PoljKaM8+HLFO1YjFXw3Jj/SE6MXfNcDvk", + "cIMg7B1lGyk3FzT5vZVFwwlvc1b/NDMRx2LlzzyodSStNBFrolnweF3UZxA2y+9XY4o40DYwZrdW2Wee", + "ytr9s5UPTFxT3+DeNFpxh+NWVWYI65vWXj7NYIAmYgnSTG0ZJC0uTh66y4S63R9n+W1Dcsq0pHLdunYZ", + "Y6H2TrfyHhd3STahc8q4sidxd1M2cR3Ix1xwEouQxguh9OkPjx8/vp3Lty/t7XCu83bjwmLs4abKO5rd", + "9erFxX4ex0nr3uoXVjvcxcmu8870r1z10HVXt6/dQPdt0PeZKH/Wuit+WF4AbynCQ5yOQaxMQu7oPuhX", + "bga5s9rZ9t0jX5cO2jcmeSigvL7IXY7+LeC943q0OoLxso+tGMYLRu4WxbW7ae4Hx9VrVHyq0N6L8o3h", + "lm5A7ufyxpUvwytWr871Ivo1wzLP7efyyl0um0zCLRe17H5YOAih1YuyvqkGNe9eP8j8AiNKipu+crO1", + "m+LsXbFbac7exfXXobr6vWT/obubJyh13tW2gfhUcQGT9/hbv6bpa9PeHesxuyifCnO/PMgs5cpNSXZ5", + "3aiP2A42DT71l5E6tXup7sl+qlwT5SG+H6vXNj1Yj1up+ew9VpvpUGR6myOu3DyR6Y0euXuSRzfwLHku", + "3drqY2pcp2Vs3OZ9Wv8JoNxBAKVC1SLTDYdZebt/GYT1S1dbOVzeCHWXhdqtTv3dfZu6bny4txLte+pt", + "URR2pxKWDM+Medf/6iUCLay74rJOKZZXn1URvzF6VgStijsHyuyJAcGWSiIxqqLeKSnL++C5qEDxelcg", + "C4WeP4y17daC7aIRN2yYpE9vXE5QuYPEhh5rAq74tf/KXTbXf77x0jcxK+/ka99UNyA/ZVRSrsHmy02B", + "fHj14smTJz8MNkdAaqBc2HyUgyDJL1o9EBADyuPR402MzYwkY3GMN7lJMZegVI+k2MaUaLm2vk8SU1ul", + "WtnuD6Dluv98pn2XCl1k87mtFcVuqo3b2ivt0OXaMkG5iI3X+355wAWnts2VQl4ETNHcQaLEzGqPzvrB", + "/KpGWyRwA8u1qAfYpFBqF0O2k+xb/Jp3kpcFlLdWYEfjuDpsfdtaVxJ4Uu/uWvn6r2Py6t6TTSyaX0X5", + "8DpE4Q4UHRJLuTYg73i8xgKDUtalIMn5SxJSbvsGzpnSICGy7eCMBBm0sSzSTUiuXFJ0Zzj2XIS0v3nl", + "UuHutxmfFmld/eBC/l8AAAD//90+RAzopgAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/server/lib/zstdutil/zstdutil.go b/server/lib/zstdutil/zstdutil.go new file mode 100644 index 00000000..94e9f406 --- /dev/null +++ b/server/lib/zstdutil/zstdutil.go @@ -0,0 +1,257 @@ +// Package zstdutil provides utilities for creating and extracting tar.zst archives. 
+package zstdutil + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/klauspost/compress/zstd" +) + +// CompressionLevel represents the zstd compression level. +type CompressionLevel string + +const ( + LevelFastest CompressionLevel = "fastest" + LevelDefault CompressionLevel = "default" + LevelBetter CompressionLevel = "better" + LevelBest CompressionLevel = "best" +) + +// ToZstdLevel converts a CompressionLevel to a zstd.EncoderLevel. +func (l CompressionLevel) ToZstdLevel() zstd.EncoderLevel { + switch l { + case LevelFastest: + return zstd.SpeedFastest + case LevelBetter: + return zstd.SpeedBetterCompression + case LevelBest: + return zstd.SpeedBestCompression + default: + return zstd.SpeedDefault + } +} + +// TarZstdDir creates a tar.zst archive from a directory and writes it to the provided writer. +// This is a streaming implementation that doesn't buffer the entire archive in memory. +func TarZstdDir(w io.Writer, sourceDir string, level CompressionLevel) error { + // Create zstd encoder + zw, err := zstd.NewWriter(w, + zstd.WithEncoderLevel(level.ToZstdLevel()), + zstd.WithEncoderConcurrency(1), // Synchronous for predictable streaming + ) + if err != nil { + return fmt.Errorf("create zstd encoder: %w", err) + } + defer zw.Close() + + // Create tar writer on top of zstd + tw := tar.NewWriter(zw) + defer tw.Close() + + // Walk directory and write to tar + err = filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("walk error at %s: %w", path, err) + } + + // Create relative path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return fmt.Errorf("rel path for %s: %w", path, err) + } + + // Skip the root directory itself + if relPath == "." 
{ + return nil + } + + // Create tar header + header, err := tar.FileInfoHeader(info, "") + if err != nil { + return fmt.Errorf("header for %s: %w", path, err) + } + header.Name = relPath + + // Handle symlinks + if info.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(path) + if err != nil { + return fmt.Errorf("readlink %s: %w", path, err) + } + header.Linkname = link + } + + // Write header + if err := tw.WriteHeader(header); err != nil { + return fmt.Errorf("write header for %s: %w", path, err) + } + + // If it's a regular file, write the content + if info.Mode().IsRegular() { + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("open file %s: %w", path, err) + } + defer f.Close() + + if _, err := io.Copy(tw, f); err != nil { + return fmt.Errorf("copy file %s: %w", path, err) + } + } + + return nil + }) + + if err != nil { + return err + } + + // Close tar writer first to flush tar footer + if err := tw.Close(); err != nil { + return fmt.Errorf("close tar writer: %w", err) + } + + // Close zstd writer to flush compression + if err := zw.Close(); err != nil { + return fmt.Errorf("close zstd writer: %w", err) + } + + return nil +} + +// UntarZstd extracts a tar.zst archive from the reader to the destination directory. +// stripComponents specifies the number of leading path components to strip. 
+func UntarZstd(r io.Reader, destDir string, stripComponents int) error { + // Create zstd decoder + zr, err := zstd.NewReader(r, + zstd.WithDecoderConcurrency(1), // Synchronous for predictable streaming + ) + if err != nil { + return fmt.Errorf("create zstd decoder: %w", err) + } + defer zr.Close() + + // Create tar reader on top of zstd + tr := tar.NewReader(zr) + + // Ensure destination directory exists + if err := os.MkdirAll(destDir, 0755); err != nil { + return fmt.Errorf("create dest dir: %w", err) + } + + // Extract files + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("read tar header: %w", err) + } + + // Apply strip-components + name := header.Name + if stripComponents > 0 { + parts := strings.Split(name, string(os.PathSeparator)) + if len(parts) <= stripComponents { + continue // Skip this entry, not enough components + } + name = filepath.Join(parts[stripComponents:]...) + } + + // Skip empty names (can happen after stripping) + if name == "" || name == "." 
{ + continue + } + + // Create full destination path + destPath := filepath.Join(destDir, name) + + // Security check: prevent path traversal + if !strings.HasPrefix(filepath.Clean(destPath), filepath.Clean(destDir)+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", header.Name) + } + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(destPath, os.FileMode(header.Mode)); err != nil { + return fmt.Errorf("create directory %s: %w", destPath, err) + } + + case tar.TypeReg: + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return fmt.Errorf("create parent dir for %s: %w", destPath, err) + } + + // Create file + f, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode)) + if err != nil { + return fmt.Errorf("create file %s: %w", destPath, err) + } + + if _, err := io.Copy(f, tr); err != nil { + f.Close() + return fmt.Errorf("extract file %s: %w", destPath, err) + } + f.Close() + + case tar.TypeSymlink: + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return fmt.Errorf("create parent dir for symlink %s: %w", destPath, err) + } + + // Remove existing file if present + os.Remove(destPath) + + if err := os.Symlink(header.Linkname, destPath); err != nil { + return fmt.Errorf("create symlink %s: %w", destPath, err) + } + + case tar.TypeLink: + // Hard link + linkPath := filepath.Join(destDir, header.Linkname) + if stripComponents > 0 { + parts := strings.Split(header.Linkname, string(os.PathSeparator)) + if len(parts) > stripComponents { + linkPath = filepath.Join(destDir, filepath.Join(parts[stripComponents:]...)) + } + } + + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return fmt.Errorf("create parent dir for link %s: %w", destPath, err) + } + + // Remove existing file if present + os.Remove(destPath) + + if err := 
os.Link(linkPath, destPath); err != nil { + return fmt.Errorf("create hard link %s: %w", destPath, err) + } + + default: + // Skip other types (devices, FIFOs, etc.) + continue + } + } + + return nil +} + +// TarZstdDirToBytes creates a tar.zst archive from a directory and returns it as bytes. +// This is a convenience function for smaller directories where buffering is acceptable. +func TarZstdDirToBytes(sourceDir string, level CompressionLevel) ([]byte, error) { + var buf bytes.Buffer + if err := TarZstdDir(&buf, sourceDir, level); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/server/openapi.yaml b/server/openapi.yaml index f2b8d3ac..cb7cd4bd 100644 --- a/server/openapi.yaml +++ b/server/openapi.yaml @@ -666,6 +666,89 @@ paths: "500": $ref: "#/components/responses/InternalError" + /fs/download_dir_zstd: + get: + summary: Download a directory as a tar.zst archive + description: | + Returns a tar.zst (tar archive compressed with zstd) file containing the contents + of the specified directory. Zstd offers significantly faster compression and + decompression compared to zip while achieving better compression ratios. + operationId: downloadDirZstd + parameters: + - name: path + in: query + required: true + schema: + type: string + pattern: "^/.*" + description: Absolute directory path to archive and download. + - name: compression_level + in: query + required: false + schema: + type: string + enum: [fastest, default, better, best] + default: default + description: | + Compression level. Higher levels produce smaller archives but take longer. 
+ - fastest: ~zstd level 1, maximum speed (~300-500 MB/s) + - default: ~zstd level 3, balanced speed/ratio (~150 MB/s) + - better: ~zstd level 7, better ratio (~50-80 MB/s) + - best: ~zstd level 11, best ratio (~20-40 MB/s) + responses: + "200": + description: Tar.zst archive of the requested directory + content: + application/zstd: + schema: + type: string + format: binary + "400": + $ref: "#/components/responses/BadRequestError" + "404": + $ref: "#/components/responses/NotFoundError" + "500": + $ref: "#/components/responses/InternalError" + + /fs/upload_zstd: + post: + summary: Upload a tar.zst archive and extract it + description: | + Upload a tar.zst (tar archive compressed with zstd) file and extract its contents + to the specified destination path. Zstd offers significantly faster decompression + compared to zip. + operationId: uploadZstd + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + archive: + type: string + format: binary + description: The tar.zst archive file. + dest_path: + type: string + description: Absolute destination directory to extract the archive to. + pattern: "^/.*" + strip_components: + type: integer + minimum: 0 + default: 0 + description: Number of leading path components to strip during extraction (like tar --strip-components). 
+ required: [archive, dest_path] + responses: + "201": + description: Archive uploaded and extracted successfully + "400": + $ref: "#/components/responses/BadRequestError" + "404": + $ref: "#/components/responses/NotFoundError" + "500": + $ref: "#/components/responses/InternalError" + /fs/list_files: get: summary: List files in a directory From f43dce1fcd191e5864efb3d1332bce64fd7b7797 Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Fri, 16 Jan 2026 09:19:18 -0500 Subject: [PATCH 2/5] fix: add symlink and hardlink path traversal protection in zstdutil Addresses security issue found by Cursor Bugbot: symlink targets were not validated, allowing malicious archives to write files outside the destination directory via symlink path traversal. Changes: - Reject absolute symlink targets - Validate that symlink targets resolve within destDir - Validate that hardlink targets are within destDir Co-Authored-By: Claude Opus 4.5 --- server/lib/zstdutil/zstdutil.go | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/server/lib/zstdutil/zstdutil.go b/server/lib/zstdutil/zstdutil.go index 94e9f406..1fb9552e 100644 --- a/server/lib/zstdutil/zstdutil.go +++ b/server/lib/zstdutil/zstdutil.go @@ -203,6 +203,20 @@ func UntarZstd(r io.Reader, destDir string, stripComponents int) error { f.Close() case tar.TypeSymlink: + // Security check: reject absolute symlink targets + if filepath.IsAbs(header.Linkname) { + return fmt.Errorf("illegal symlink target (absolute path): %s -> %s", header.Name, header.Linkname) + } + + // Security check: resolve symlink target and ensure it stays within destDir + // The target is relative to the symlink's directory + symlinkDir := filepath.Dir(destPath) + resolvedTarget := filepath.Clean(filepath.Join(symlinkDir, header.Linkname)) + if !strings.HasPrefix(resolvedTarget, filepath.Clean(destDir)+string(os.PathSeparator)) && + resolvedTarget != filepath.Clean(destDir) { + return fmt.Errorf("illegal symlink target 
(escapes destination): %s -> %s", header.Name, header.Linkname) + } + // Ensure parent directory exists if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { return fmt.Errorf("create parent dir for symlink %s: %w", destPath, err) @@ -216,14 +230,20 @@ func UntarZstd(r io.Reader, destDir string, stripComponents int) error { } case tar.TypeLink: - // Hard link - linkPath := filepath.Join(destDir, header.Linkname) + // Hard link - apply strip-components to link target + linkName := header.Linkname if stripComponents > 0 { - parts := strings.Split(header.Linkname, string(os.PathSeparator)) + parts := strings.Split(linkName, string(os.PathSeparator)) if len(parts) > stripComponents { - linkPath = filepath.Join(destDir, filepath.Join(parts[stripComponents:]...)) + linkName = filepath.Join(parts[stripComponents:]...) } } + linkPath := filepath.Join(destDir, linkName) + + // Security check: ensure hard link target is within destDir + if !strings.HasPrefix(filepath.Clean(linkPath), filepath.Clean(destDir)+string(os.PathSeparator)) { + return fmt.Errorf("illegal hard link target: %s -> %s", header.Name, header.Linkname) + } // Ensure parent directory exists if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { From 52ad257288da71af62380dbeff537f1778135ddb Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Fri, 16 Jan 2026 09:44:32 -0500 Subject: [PATCH 3/5] fix: skip hard links when target has insufficient strip-components When stripComponents > 0, hard link entries whose target paths have too few components should be skipped (like regular files), not processed with unchanged paths that would cause extraction failures. 
Co-Authored-By: Claude Opus 4.5 --- server/lib/zstdutil/zstdutil.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/lib/zstdutil/zstdutil.go b/server/lib/zstdutil/zstdutil.go index 1fb9552e..5e9a815c 100644 --- a/server/lib/zstdutil/zstdutil.go +++ b/server/lib/zstdutil/zstdutil.go @@ -234,9 +234,10 @@ func UntarZstd(r io.Reader, destDir string, stripComponents int) error { linkName := header.Linkname if stripComponents > 0 { parts := strings.Split(linkName, string(os.PathSeparator)) - if len(parts) > stripComponents { - linkName = filepath.Join(parts[stripComponents:]...) + if len(parts) <= stripComponents { + continue // Skip this hard link, target has insufficient components } + linkName = filepath.Join(parts[stripComponents:]...) } linkPath := filepath.Join(destDir, linkName) From 1753ebbfb3e28626fd72a61f8da4909cf8bc7725 Mon Sep 17 00:00:00 2001 From: Rafael Garcia Date: Fri, 16 Jan 2026 10:11:32 -0500 Subject: [PATCH 4/5] Fix port binding race condition after SIGKILL termination - Add lock file cleanup (SingletonLock, SingletonSocket, SingletonCookie) to handle stale files from SIGKILL termination - Add waitForPort function that checks port availability before starting chromium, with SO_REUSEADDR disabled to accurately match chromium's bind behavior - This allows keeping stopwaitsecs=0 for fast restarts while ensuring the port is actually free before chromium tries to bind Co-Authored-By: Claude Opus 4.5 --- server/cmd/chromium-launcher/main.go | 54 ++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/server/cmd/chromium-launcher/main.go b/server/cmd/chromium-launcher/main.go index 698cba28..f0a8cd50 100644 --- a/server/cmd/chromium-launcher/main.go +++ b/server/cmd/chromium-launcher/main.go @@ -1,13 +1,16 @@ package main import ( + "context" "flag" "fmt" + "net" "os" "os/exec" "path/filepath" "strings" "syscall" + "time" "github.com/onkernel/kernel-images/server/lib/chromiumflags" ) @@ -18,11 +21,21 @@ func 
main() { runtimeFlagsPath := flag.String("runtime-flags", "/chromium/flags", "Path to runtime flags overlay file") flag.Parse() + // Clean up stale lock file from previous SIGKILL termination + // Chromium creates this lock and doesn't clean it up when killed + _ = os.Remove("/home/kernel/user-data/SingletonLock") + _ = os.Remove("/home/kernel/user-data/SingletonSocket") + _ = os.Remove("/home/kernel/user-data/SingletonCookie") + // Inputs internalPort := strings.TrimSpace(os.Getenv("INTERNAL_PORT")) if internalPort == "" { internalPort = "9223" } + + // Wait for devtools port to be available (handles SIGKILL socket cleanup delay) + waitForPort(internalPort, 5*time.Second) + baseFlags := os.Getenv("CHROMIUM_FLAGS") runtimeTokens, err := chromiumflags.ReadOptionalFlagFile(*runtimeFlagsPath) if err != nil { @@ -104,3 +117,44 @@ func execLookPath(file string) (string, error) { } return exec.LookPath(file) } + +// waitForPort waits until the given port is available for binding on both IPv4 and IPv6. +// This handles the delay after SIGKILL before the kernel releases the socket. +// We disable SO_REUSEADDR to get an accurate check matching chromium's bind behavior. 
+func waitForPort(port string, timeout time.Duration) {
+	deadline := time.Now().Add(timeout)
+	addrs := []string{"127.0.0.1:" + port, "[::1]:" + port}
+
+	// ListenConfig with Control to disable SO_REUSEADDR for accurate port availability check
+	lc := &net.ListenConfig{
+		Control: func(network, address string, c syscall.RawConn) error {
+			var sockErr error
+			err := c.Control(func(fd uintptr) {
+				// Disable SO_REUSEADDR to match chromium's behavior
+				sockErr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 0)
+			})
+			if err != nil {
+				return err
+			}
+			return sockErr
+		},
+	}
+
+	ctx := context.Background()
+	for time.Now().Before(deadline) {
+		allFree := true
+		for _, addr := range addrs {
+			ln, err := lc.Listen(ctx, "tcp", addr)
+			if err != nil {
+				allFree = false
+				break
+			}
+			ln.Close()
+		}
+		if allFree {
+			return
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+	// Timeout reached, proceed anyway and let chromium report the error
+}

From 2316b9ef794c58285a3b14c90c7915e7755c7e77 Mon Sep 17 00:00:00 2001
From: Rafael Garcia
Date: Fri, 16 Jan 2026 11:28:11 -0500
Subject: [PATCH 5/5] Fix zstd extraction to allow absolute symlinks

Chromium creates symlinks to /tmp (e.g., SingletonSocket ->
/tmp/org.chromium.Chromium.xxx/SingletonSocket). The previous security
check rejected all absolute symlinks. Now we only check relative
symlinks (which can use ../ to escape the destination) for path
traversal, while allowing absolute symlinks. NOTE: an absolute symlink
target can still point anywhere on the filesystem, and later archive
entries extracted through such a symlink are only validated lexically,
so this extraction path should only be used with trusted archives.
Co-Authored-By: Claude Opus 4.5 --- server/lib/zstdutil/zstdutil.go | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/server/lib/zstdutil/zstdutil.go b/server/lib/zstdutil/zstdutil.go index 5e9a815c..e721faa5 100644 --- a/server/lib/zstdutil/zstdutil.go +++ b/server/lib/zstdutil/zstdutil.go @@ -203,18 +203,15 @@ func UntarZstd(r io.Reader, destDir string, stripComponents int) error { f.Close() case tar.TypeSymlink: - // Security check: reject absolute symlink targets - if filepath.IsAbs(header.Linkname) { - return fmt.Errorf("illegal symlink target (absolute path): %s -> %s", header.Name, header.Linkname) - } - - // Security check: resolve symlink target and ensure it stays within destDir - // The target is relative to the symlink's directory - symlinkDir := filepath.Dir(destPath) - resolvedTarget := filepath.Clean(filepath.Join(symlinkDir, header.Linkname)) - if !strings.HasPrefix(resolvedTarget, filepath.Clean(destDir)+string(os.PathSeparator)) && - resolvedTarget != filepath.Clean(destDir) { - return fmt.Errorf("illegal symlink target (escapes destination): %s -> %s", header.Name, header.Linkname) + // Security check for relative symlinks: ensure they don't escape destDir + // Absolute symlinks are allowed (e.g., chromium creates symlinks to /tmp) + if !filepath.IsAbs(header.Linkname) { + symlinkDir := filepath.Dir(destPath) + resolvedTarget := filepath.Clean(filepath.Join(symlinkDir, header.Linkname)) + if !strings.HasPrefix(resolvedTarget, filepath.Clean(destDir)+string(os.PathSeparator)) && + resolvedTarget != filepath.Clean(destDir) { + return fmt.Errorf("illegal symlink target (escapes destination): %s -> %s", header.Name, header.Linkname) + } } // Ensure parent directory exists