diff --git a/.github/workflows/api-reference-validation.yml b/.github/workflows/api-reference-validation.yml
new file mode 100644
index 00000000..578b7ebd
--- /dev/null
+++ b/.github/workflows/api-reference-validation.yml
@@ -0,0 +1,130 @@
+name: API Reference Validation
+
+on:
+  # schedule:
+  #   # Every Thursday at 8 PM UTC
+  #   - cron: '0 20 * * 4'
+  workflow_dispatch:
+
+concurrency:
+  group: api-reference-validation
+  cancel-in-progress: false
+
+jobs:
+  validate:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    permissions:
+      contents: write
+      pull-requests: write
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: pip install pyyaml
+
+      # Step 1: Generate spec from source
+      - name: Generate OpenAPI spec
+        run: python3 scripts/generate_openapi_reference.py --output openapi-generated.yml
+
+      # Step 2: Compare with committed spec
+      - name: Compare specs
+        id: diff
+        run: |
+          if diff -q openapi-public.yml openapi-generated.yml > /dev/null 2>&1; then
+            echo "Spec is up to date — nothing to do"
+            echo "changed=false" >> $GITHUB_OUTPUT
+          else
+            echo "Spec has drifted from source"
+            echo "changed=true" >> $GITHUB_OUTPUT
+          fi
+
+      # Step 3: If no difference, exit early
+      # (all subsequent steps are gated on changed == 'true')
+
+      # Step 4: Run validation against the NEW generated spec
+      - name: Run validation
+        if: steps.diff.outputs.changed == 'true'
+        id: validate
+        continue-on-error: true
+        env:
+          E2B_API_KEY: ${{ secrets.E2B_API_KEY }}
+          E2B_ACCESS_TOKEN: ${{ secrets.E2B_ACCESS_TOKEN }}
+        run: |
+          # Replace committed spec with generated one before validating
+          cp openapi-generated.yml openapi-public.yml
+
+          # Capture the full output; the script exits 1 on critical findings
+          python3 scripts/validate_api_reference.py \
+            --output openapi-validation-report.md \
+            --verbose 2>&1 | tee validation-output.txt
+
+          # Extract the final
+          # summary block (everything after the last ===... line)
+          SUMMARY=$(awk '/^={50,}/{buf=""} {buf=buf"\n"$0} END{print buf}' validation-output.txt)
+          # Store for PR body (escape newlines for GitHub output)
+          EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
+          echo "summary<<$EOF" >> $GITHUB_OUTPUT
+          echo "$SUMMARY" >> $GITHUB_OUTPUT
+          echo "$EOF" >> $GITHUB_OUTPUT
+
+      # Step 5+6: Create PR with status indicator
+      - name: Create PR
+        if: steps.diff.outputs.changed == 'true'
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          VALIDATION_OUTCOME: ${{ steps.validate.outcome }}
+          VALIDATION_SUMMARY: ${{ steps.validate.outputs.summary }}
+        run: |
+          if [ "$VALIDATION_OUTCOME" = "success" ]; then
+            STATUS_ICON="🟢"
+            STATUS_TEXT="Validation passed"
+          else
+            STATUS_ICON="🔴"
+            STATUS_TEXT="Validation failed — critical findings detected"
+          fi
+
+          BRANCH="api-spec-update-$(date +%Y-%m-%d)"
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+
+          git checkout -b "$BRANCH"
+          git add openapi-public.yml
+          git commit -m "docs: update openapi-public.yml from source specs $(date +%Y-%m-%d)"
+          git push -u origin "$BRANCH"
+
+          gh pr create \
+            --title "$STATUS_ICON Update API spec $(date +%Y-%m-%d)" \
+            --body "$(cat <<PRBODY
+## $STATUS_TEXT
+
+Automated update of openapi-public.yml from the source specs.
+
+$VALIDATION_SUMMARY
+PRBODY
+)"
+
+      # NOTE(review): the PR-body heredoc above and the start of this summary step
+      # were lost in extraction and reconstructed from context — verify against
+      # the original file before relying on exact wording.
+      - name: Summary
+        if: always()
+        env:
+          CHANGED: ${{ steps.diff.outputs.changed }}
+          VALIDATION_OUTCOME: ${{ steps.validate.outcome }}
+        run: |
+          echo "## API Reference Validation" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "| Result | Value |" >> $GITHUB_STEP_SUMMARY
+          echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
+          echo "| Spec changed | ${CHANGED:-false} |" >> $GITHUB_STEP_SUMMARY
+          echo "| Validation | ${VALIDATION_OUTCOME:-skipped} |" >> $GITHUB_STEP_SUMMARY
diff --git a/docs.json b/docs.json
index cac6de66..45695415 100644
--- a/docs.json
+++ b/docs.json
@@ -222,6 +222,14 @@
         "anchor": "SDK reference",
         "icon": "brackets-curly",
         "href": "https://e2b.dev/docs/sdk-reference"
+      },
+      {
+        "anchor": "API reference",
+        "icon": "code",
+        "openapi": {
+          "source": "openapi-public.yml",
+          "directory": "docs/api-reference"
+        }
+      }
     ],
     "global": {}
diff --git
a/openapi-public.yml b/openapi-public.yml new file mode 100644 index 00000000..5058c5e5 --- /dev/null +++ b/openapi-public.yml @@ -0,0 +1,4082 @@ +openapi: 3.1.0 +info: + title: E2B API + version: 0.1.0 + description: Complete E2B developer API. Platform endpoints are served on api.e2b.app. + Sandbox endpoints (envd) are served on {port}-{sandboxID}.e2b.app. +servers: +- &id001 + url: https://api.e2b.app + description: E2B Platform API +paths: + /sandboxes: + get: + description: List all running sandboxes + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - name: metadata + in: query + description: Metadata query used to filter the sandboxes (e.g. "user=abc&app=prod"). + Each key and values must be URL encoded. + required: false + schema: + type: string + responses: + '200': + description: Successfully returned all running sandboxes + content: + application/json: + schema: + type: array + items: + allOf: + - $ref: '#/components/schemas/ListedSandbox' + '401': + $ref: '#/components/responses/401' + '400': + $ref: '#/components/responses/400' + '500': + $ref: '#/components/responses/500' + operationId: listSandboxes + summary: List sandboxes + post: + description: Create a sandbox from the template + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NewSandbox' + responses: + '201': + description: The sandbox was created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Sandbox' + '401': + $ref: '#/components/responses/401' + '400': + $ref: '#/components/responses/400' + '500': + $ref: '#/components/responses/500' + operationId: postSandboxes + summary: Create sandbox + servers: + - *id001 + /v2/sandboxes: + get: + description: List all sandboxes + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - name: metadata + in: query + description: Metadata query used to filter the sandboxes (e.g. 
"user=abc&app=prod"). + Each key and values must be URL encoded. + required: false + schema: + type: string + - name: state + in: query + description: Filter sandboxes by one or more states + required: false + schema: + type: array + items: + $ref: '#/components/schemas/SandboxState' + style: form + explode: false + - $ref: '#/components/parameters/paginationNextToken' + - $ref: '#/components/parameters/paginationLimit' + responses: + '200': + description: Successfully returned all running sandboxes + content: + application/json: + schema: + type: array + items: + allOf: + - $ref: '#/components/schemas/ListedSandbox' + '401': + $ref: '#/components/responses/401' + '400': + $ref: '#/components/responses/400' + '500': + $ref: '#/components/responses/500' + operationId: listSandboxesV2 + summary: List sandboxes (v2) + servers: + - *id001 + /sandboxes/metrics: + get: + description: List metrics for given sandboxes + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - name: sandbox_ids + in: query + required: true + description: Comma-separated list of sandbox IDs to get metrics for + explode: false + schema: + type: array + items: + type: string + maxItems: 100 + uniqueItems: true + responses: + '200': + description: Successfully returned all running sandboxes with metrics + content: + application/json: + schema: + $ref: '#/components/schemas/SandboxesWithMetrics' + '401': + $ref: '#/components/responses/401' + '400': + $ref: '#/components/responses/400' + '500': + $ref: '#/components/responses/500' + operationId: listSandboxesMetrics + summary: List sandbox metrics + servers: + - *id001 + /sandboxes/{sandboxID}/logs: + get: + description: Get sandbox logs. 
+ deprecated: true + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + - in: query + name: start + schema: + type: integer + format: int64 + minimum: 0 + description: Starting timestamp of the logs that should be returned in milliseconds + - in: query + name: limit + schema: + default: 1000 + format: int32 + minimum: 0 + type: integer + description: Maximum number of logs that should be returned + responses: + '200': + description: Successfully returned the sandbox logs + content: + application/json: + schema: + $ref: '#/components/schemas/SandboxLogs' + '404': + $ref: '#/components/responses/404' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: getSandboxLogs + summary: Get sandbox logs + servers: + - *id001 + /sandboxes/{sandboxID}: + get: + description: Get a sandbox by id + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + responses: + '200': + description: Successfully returned the sandbox + content: + application/json: + schema: + $ref: '#/components/schemas/SandboxDetail' + '404': + $ref: '#/components/responses/404' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: getSandbox + summary: Get sandbox + delete: + description: Kill a sandbox + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + responses: + '204': + description: The sandbox was killed successfully + '404': + $ref: '#/components/responses/404' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: deleteSandbox + summary: Delete sandbox + servers: + - *id001 + /sandboxes/{sandboxID}/metrics: + get: + description: Get sandbox metrics + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + - 
in: query + name: start + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the start of the interval, in seconds, for + which the metrics are returned. + - in: query + name: end + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the end of the interval, in seconds, for which + the metrics are returned. + responses: + '200': + description: Successfully returned the sandbox metrics + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/SandboxMetric' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: getSandboxMetrics + summary: Get sandbox metrics + servers: + - *id001 + /sandboxes/{sandboxID}/pause: + post: + description: Pause the sandbox + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + responses: + '204': + description: The sandbox was paused successfully and can be resumed + '409': + $ref: '#/components/responses/409' + '404': + $ref: '#/components/responses/404' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postSandboxPause + summary: Pause sandbox + servers: + - *id001 + /sandboxes/{sandboxID}/resume: + post: + deprecated: true + description: Resume the sandbox + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ResumedSandbox' + responses: + '201': + description: The sandbox was resumed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Sandbox' + '409': + $ref: '#/components/responses/409' + '404': + $ref: '#/components/responses/404' + '401': + $ref: '#/components/responses/401' + 
'500': + $ref: '#/components/responses/500' + operationId: postSandboxResume + summary: Resume sandbox + servers: + - *id001 + /sandboxes/{sandboxID}/connect: + post: + description: Returns sandbox details. If the sandbox is paused, it will be resumed. + TTL is only extended. + tags: + - Sandboxes + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/sandboxID' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ConnectSandbox' + responses: + '200': + description: The sandbox was already running + content: + application/json: + schema: + $ref: '#/components/schemas/Sandbox' + '201': + description: The sandbox was resumed successfully + content: + application/json: + schema: + $ref: '#/components/schemas/Sandbox' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: postSandboxConnect + summary: Connect to sandbox + servers: + - *id001 + /sandboxes/{sandboxID}/timeout: + post: + description: Set the timeout for the sandbox. The sandbox will expire x seconds + from the time of the request. Calling this method multiple times overwrites + the TTL, each time using the current timestamp as the starting point to measure + the timeout duration. 
+ security: + - ApiKeyAuth: [] + tags: + - Sandboxes + requestBody: + content: + application/json: + schema: + type: object + required: + - timeout + properties: + timeout: + description: Timeout in seconds from the current time after which + the sandbox should expire + type: integer + format: int32 + minimum: 0 + parameters: + - $ref: '#/components/parameters/sandboxID' + responses: + '204': + description: Successfully set the sandbox timeout + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: postSandboxTimeout + summary: Set sandbox timeout + servers: + - *id001 + /sandboxes/{sandboxID}/refreshes: + post: + description: Refresh the sandbox extending its time to live + security: + - ApiKeyAuth: [] + tags: + - Sandboxes + requestBody: + content: + application/json: + schema: + type: object + properties: + duration: + description: Duration for which the sandbox should be kept alive + in seconds + type: integer + maximum: 3600 + minimum: 0 + parameters: + - $ref: '#/components/parameters/sandboxID' + responses: + '204': + description: Successfully refreshed the sandbox + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: postSandboxRefreshes + summary: Refresh sandbox + servers: + - *id001 + /v3/templates: + post: + description: Create a new template + tags: + - Templates + security: + - ApiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildRequestV3' + responses: + '202': + description: The build was requested successfully + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateRequestResponseV3' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplatesV3 + 
summary: Create template (v3) + servers: + - *id001 + /v2/templates: + post: + description: Create a new template + deprecated: true + tags: + - Templates + security: + - ApiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildRequestV2' + responses: + '202': + description: The build was requested successfully + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateLegacy' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplatesV2 + summary: Create template (v2) + servers: + - *id001 + /templates/{templateID}/files/{hash}: + get: + description: Get an upload link for a tar file containing build layer files + tags: + - Templates + security: + - AccessTokenAuth: [] + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - in: path + name: hash + required: true + schema: + type: string + description: Hash of the files + responses: + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + '200': + description: Upload link for the tar file containing build layer files + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildFileUpload' + operationId: getTemplateFile + summary: Get build upload link + servers: + - *id001 + /templates: + get: + description: List all templates + tags: + - Templates + security: + - ApiKeyAuth: [] + - AccessTokenAuth: [] + parameters: + - in: query + required: false + name: teamID + schema: + type: string + description: Identifier of the team + responses: + '200': + description: Successfully returned all templates + content: + application/json: + schema: + type: array + items: + allOf: + - $ref: '#/components/schemas/Template' + '401': + $ref: 
'#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: listTemplates + summary: List templates + post: + description: Create a new template + deprecated: true + tags: + - Templates + security: + - AccessTokenAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildRequest' + responses: + '202': + description: The build was accepted + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateLegacy' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplates + summary: Create template + servers: + - *id001 + /templates/{templateID}: + get: + description: List all builds for a template + tags: + - Templates + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - $ref: '#/components/parameters/paginationNextToken' + - $ref: '#/components/parameters/paginationLimit' + responses: + '200': + description: Successfully returned the template with its builds + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateWithBuilds' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: getTemplate + summary: Get template + post: + description: Rebuild an template + deprecated: true + tags: + - Templates + security: + - AccessTokenAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildRequest' + responses: + '202': + description: The build was accepted + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateLegacy' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplate + summary: Rebuild template + delete: + description: Delete a 
template + tags: + - Templates + security: + - ApiKeyAuth: [] + - AccessTokenAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + responses: + '204': + description: The template was deleted successfully + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: deleteTemplate + summary: Delete template + patch: + description: Update template + deprecated: true + tags: + - Templates + security: + - ApiKeyAuth: [] + - AccessTokenAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateUpdateRequest' + responses: + '200': + description: The template was updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateUpdateResponse' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: patchTemplate + summary: Update template + servers: + - *id001 + /templates/{templateID}/builds/{buildID}: + post: + description: Start the build + deprecated: true + tags: + - Templates + security: + - AccessTokenAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - $ref: '#/components/parameters/buildID' + responses: + '202': + description: The build has started + content: &id002 + application/json: + schema: + type: object + description: Empty response + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplateBuild + summary: Start build + servers: + - *id001 + /v2/templates/{templateID}/builds/{buildID}: + post: + description: Start the build + tags: + - Templates + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - $ref: '#/components/parameters/buildID' + requestBody: + required: true + content: + application/json: + schema: + $ref: 
'#/components/schemas/TemplateBuildStartV2' + responses: + '202': + description: The build has started + content: *id002 + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: postTemplateBuildV2 + summary: Start build (v2) + servers: + - *id001 + /v2/templates/{templateID}: + patch: + description: Update template + tags: + - Templates + security: + - ApiKeyAuth: [] + - AccessTokenAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateUpdateRequest' + responses: + '200': + description: The template was updated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateUpdateResponse' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '500': + $ref: '#/components/responses/500' + operationId: patchTemplateV2 + summary: Update template (v2) + servers: + - *id001 + /templates/{templateID}/builds/{buildID}/status: + get: + description: Get template build info + tags: + - Templates + security: + - AccessTokenAuth: [] + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - $ref: '#/components/parameters/buildID' + - in: query + name: logsOffset + schema: + default: 0 + type: integer + format: int32 + minimum: 0 + description: Index of the starting build log that should be returned with + the template + - in: query + name: limit + schema: + default: 100 + type: integer + format: int32 + minimum: 0 + maximum: 100 + description: Maximum number of logs that should be returned + - in: query + name: level + schema: + $ref: '#/components/schemas/LogLevel' + responses: + '200': + description: Successfully returned the template + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildInfo' + '401': + $ref: '#/components/responses/401' + '404': + $ref: 
'#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: getTemplateBuildStatus + summary: Get build status + servers: + - *id001 + /templates/{templateID}/builds/{buildID}/logs: + get: + description: Get template build logs + tags: + - Templates + security: + - AccessTokenAuth: [] + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + - $ref: '#/components/parameters/buildID' + - in: query + name: cursor + schema: + type: integer + format: int64 + minimum: 0 + description: Starting timestamp of the logs that should be returned in milliseconds + - in: query + name: limit + schema: + default: 100 + type: integer + format: int32 + minimum: 0 + maximum: 100 + description: Maximum number of logs that should be returned + - in: query + name: direction + schema: + $ref: '#/components/schemas/LogsDirection' + - in: query + name: level + schema: + $ref: '#/components/schemas/LogLevel' + - in: query + name: source + schema: + $ref: '#/components/schemas/LogsSource' + description: Source of the logs that should be returned from + responses: + '200': + description: Successfully returned the template build logs + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateBuildLogsResponse' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: getTemplateBuildLogs + summary: Get build logs + servers: + - *id001 + /templates/aliases/{alias}: + get: + description: Check if template with given alias exists + tags: + - Templates + security: + - ApiKeyAuth: [] + parameters: + - name: alias + in: path + required: true + schema: + type: string + description: Template alias + responses: + '200': + description: Successfully queried template by alias + content: + application/json: + schema: + $ref: '#/components/schemas/TemplateAliasResponse' + '400': + $ref: '#/components/responses/400' + '403': + $ref: 
'#/components/responses/403' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: getTemplatesAlias + summary: Get template by alias + servers: + - *id001 + /filesystem.Filesystem/CreateWatcher: + post: + tags: + - Filesystem + summary: CreateWatcher + description: Non-streaming versions of WatchDir + operationId: filesystem.Filesystem.CreateWatcher + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.CreateWatcherRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.CreateWatcherResponse' + '502': &id003 + description: Sandbox not found + content: + application/json: + schema: + type: object + required: + - sandboxID + - message + - code + properties: + message: + type: string + description: Error message + example: The sandbox was not found + code: + type: integer + description: Error code + example: 502 + sandboxID: + type: string + description: Identifier of the sandbox + example: i1234abcd5678efgh90jk + security: + - &id004 + SandboxAccessTokenAuth: [] + servers: + - &id005 + url: https://{port}-{sandboxID}.e2b.app + description: Sandbox API (envd) — runs inside each sandbox + variables: + port: + default: '49983' + description: Port number + sandboxID: + default: $SANDBOX_ID + description: Sandbox identifier + /filesystem.Filesystem/GetWatcherEvents: + post: + tags: + - Filesystem + summary: GetWatcherEvents + operationId: filesystem.Filesystem.GetWatcherEvents + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: 
Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.GetWatcherEventsRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.GetWatcherEventsResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/ListDir: + post: + tags: + - Filesystem + summary: ListDir + operationId: filesystem.Filesystem.ListDir + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.ListDirRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.ListDirResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/MakeDir: + post: + tags: + - Filesystem + summary: MakeDir + operationId: filesystem.Filesystem.MakeDir + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.MakeDirRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.MakeDirResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/Move: + post: + tags: + - Filesystem + summary: Move + operationId: filesystem.Filesystem.Move 
+ parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.MoveRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.MoveResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/Remove: + post: + tags: + - Filesystem + summary: Remove + operationId: filesystem.Filesystem.Remove + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.RemoveRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.RemoveResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/RemoveWatcher: + post: + tags: + - Filesystem + summary: RemoveWatcher + operationId: filesystem.Filesystem.RemoveWatcher + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.RemoveWatcherRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.RemoveWatcherResponse' + '502': *id003 + 
security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/Stat: + post: + tags: + - Filesystem + summary: Stat + operationId: filesystem.Filesystem.Stat + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.StatRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/filesystem.StatResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /filesystem.Filesystem/WatchDir: + post: + tags: + - Filesystem + summary: WatchDir + description: Server-streaming RPC. Use the Connect protocol with streaming support. + operationId: filesystem.Filesystem.WatchDir + requestBody: + content: + application/connect+json: + schema: + $ref: '#/components/schemas/filesystem.WatchDirRequest' + required: true + responses: + '200': + description: Stream of WatchDirResponse events + content: + application/connect+json: + schema: + $ref: '#/components/schemas/filesystem.WatchDirResponse' + '502': *id003 + security: + - *id004 + parameters: + - &id006 + name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - &id007 + name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + servers: + - *id005 + /process.Process/CloseStdin: + post: + tags: + - Process + summary: CloseStdin + description: "Close stdin to signal EOF to the process.\n Only works for non-PTY\ + \ processes. For PTY, send Ctrl+D (0x04) instead." 
+ operationId: process.Process.CloseStdin + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/process.CloseStdinRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/process.CloseStdinResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /process.Process/Connect: + post: + tags: + - Process + summary: Connect + description: Server-streaming RPC. Use the Connect protocol with streaming support. + operationId: process.Process.Connect + requestBody: + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.ConnectRequest' + required: true + responses: + '200': + description: Stream of ConnectResponse events + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.ConnectResponse' + '502': *id003 + security: + - *id004 + parameters: + - *id006 + - *id007 + servers: + - *id005 + /process.Process/List: + post: + tags: + - Process + summary: List + operationId: process.Process.List + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/process.ListRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/process.ListResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /process.Process/SendInput: + post: + tags: + - Process + summary: SendInput + 
operationId: process.Process.SendInput + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/process.SendInputRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/process.SendInputResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /process.Process/SendSignal: + post: + tags: + - Process + summary: SendSignal + operationId: process.Process.SendSignal + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/process.SendSignalRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/process.SendSignalResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /process.Process/Start: + post: + tags: + - Process + summary: Start + description: Server-streaming RPC. Use the Connect protocol with streaming support. 
+ operationId: process.Process.Start + requestBody: + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.StartRequest' + required: true + responses: + '200': + description: Stream of StartResponse events + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.StartResponse' + '502': *id003 + security: + - *id004 + parameters: + - *id006 + - *id007 + servers: + - *id005 + /process.Process/StreamInput: + post: + tags: + - Process + summary: StreamInput + description: Client-streaming RPC. Client input stream ensures ordering of messages. + Use the Connect protocol with streaming support. + operationId: process.Process.StreamInput + requestBody: + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.StreamInputRequest' + required: true + responses: + '200': + description: Stream of StreamInputResponse events + content: + application/connect+json: + schema: + $ref: '#/components/schemas/process.StreamInputResponse' + '502': *id003 + security: + - *id004 + parameters: + - *id006 + - *id007 + servers: + - *id005 + /process.Process/Update: + post: + tags: + - Process + summary: Update + operationId: process.Process.Update + parameters: + - name: Connect-Protocol-Version + in: header + required: true + schema: + $ref: '#/components/schemas/connect-protocol-version' + - name: Connect-Timeout-Ms + in: header + schema: + $ref: '#/components/schemas/connect-timeout-header' + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/process.UpdateRequest' + required: true + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/process.UpdateResponse' + '502': *id003 + security: + - *id004 + servers: + - *id005 + /templates/tags: + post: + description: Assign tag(s) to a template build + tags: + - Tags + security: + - ApiKeyAuth: [] + requestBody: + required: true + content: + application/json: + 
schema: + $ref: '#/components/schemas/AssignTemplateTagsRequest' + responses: + '201': + description: Tag assigned successfully + content: + application/json: + schema: + $ref: '#/components/schemas/AssignedTemplateTags' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: postTemplatesTags + summary: Assign tags + delete: + description: Delete multiple tags from templates + tags: + - Tags + security: + - ApiKeyAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteTemplateTagsRequest' + responses: + '204': + description: Tags deleted successfully + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: deleteTemplatesTags + summary: Delete tags + servers: + - *id001 + /templates/{templateID}/tags: + get: + description: List all tags for a template + tags: + - Tags + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/templateID' + responses: + '200': + description: Successfully returned the template tags + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/TemplateTag' + '401': + $ref: '#/components/responses/401' + '403': + $ref: '#/components/responses/403' + '404': + $ref: '#/components/responses/404' + '500': + $ref: '#/components/responses/500' + operationId: getTemplateTags + summary: List template tags + servers: + - *id001 + /teams: + get: + description: List all teams + tags: + - Teams + security: + - AccessTokenAuth: [] + responses: + '200': + description: Successfully returned all teams + content: + application/json: + schema: + type: array + items: + allOf: + - $ref: '#/components/schemas/Team' + '401': + $ref: '#/components/responses/401' + 
'500': + $ref: '#/components/responses/500' + operationId: listTeams + summary: List teams + servers: + - *id001 + /teams/{teamID}/metrics: + get: + description: Get metrics for the team + tags: + - Teams + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/teamID' + - in: query + name: start + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the start of the interval, in seconds, for + which the metrics are returned. + - in: query + name: end + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the end of the interval, in seconds, for which + the metrics are returned. + responses: + '200': + description: Successfully returned the team metrics + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/TeamMetric' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '403': + $ref: '#/components/responses/403' + '500': + $ref: '#/components/responses/500' + operationId: getTeamMetrics + summary: Get team metrics + servers: + - *id001 + /teams/{teamID}/metrics/max: + get: + description: Get the maximum metrics for the team in the given interval + tags: + - Teams + security: + - ApiKeyAuth: [] + parameters: + - $ref: '#/components/parameters/teamID' + - in: query + name: start + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the start of the interval, in seconds, for + which the metrics are returned. + - in: query + name: end + schema: + type: integer + format: int64 + minimum: 0 + description: Unix timestamp for the end of the interval, in seconds, for which + the metrics are returned. 
+ - in: query + name: metric + required: true + schema: + type: string + enum: + - concurrent_sandboxes + - sandbox_start_rate + description: Metric to retrieve the maximum value for + responses: + '200': + description: Successfully returned the team metrics + content: + application/json: + schema: + $ref: '#/components/schemas/MaxTeamMetric' + '400': + $ref: '#/components/responses/400' + '401': + $ref: '#/components/responses/401' + '403': + $ref: '#/components/responses/403' + '500': + $ref: '#/components/responses/500' + operationId: getTeamMetricsMax + summary: Get team metrics max + servers: + - *id001 + /health: + get: + summary: Check the health of the service + responses: + '204': + description: The service is healthy + '502': *id003 + operationId: getHealth + security: [] + tags: + - Others + servers: + - *id005 + /metrics: + get: + summary: Get the stats of the service + security: + - *id004 + responses: + '200': + description: The resource usage metrics of the service + content: + application/json: + schema: + $ref: '#/components/schemas/Metrics' + '502': *id003 + operationId: getMetrics + tags: + - Others + servers: + - *id005 + /envs: + get: + summary: Get the environment variables + security: + - *id004 + responses: + '200': + description: Environment variables + content: + application/json: + schema: + $ref: '#/components/schemas/EnvVars' + '502': *id003 + operationId: getEnvVars + tags: + - Others + servers: + - *id005 + /files: + get: + summary: Download a file + tags: + - Others + security: + - *id004 + parameters: + - $ref: '#/components/parameters/FilePath' + - $ref: '#/components/parameters/User' + - $ref: '#/components/parameters/Signature' + - $ref: '#/components/parameters/SignatureExpiration' + responses: + '200': + description: Entire file downloaded successfully. 
+ content: + application/octet-stream: + schema: + type: string + format: binary + description: The file content + '401': + description: Invalid user + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: &id009 + code: 401 + message: 'Authentication error: missing or invalid API key' + '400': + description: Invalid path + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: &id008 + code: 400 + message: 'Bad request: invalid or missing request parameters' + '404': + description: File not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: &id011 + code: 404 + message: 'Not found: the requested resource does not exist' + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: &id010 + code: 500 + message: 'Server error: an unexpected error occurred' + '502': *id003 + operationId: downloadFile + post: + summary: Upload a file and ensure the parent directories exist. If the file + exists, it will be overwritten. + tags: + - Others + security: + - *id004 + parameters: + - $ref: '#/components/parameters/FilePath' + - $ref: '#/components/parameters/User' + - $ref: '#/components/parameters/Signature' + - $ref: '#/components/parameters/SignatureExpiration' + requestBody: + $ref: '#/components/requestBodies/File' + responses: + '200': + description: The file was uploaded successfully. 
+ content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/EntryInfo' + '400': + description: Invalid path + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id008 + '401': + description: Invalid user + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id009 + '500': + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id010 + '507': + description: Not enough disk space + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: 507 + message: 'Insufficient storage: not enough disk space' + '502': *id003 + operationId: uploadFile + servers: + - *id005 +components: + securitySchemes: + AccessTokenAuth: + type: http + scheme: bearer + ApiKeyAuth: + type: apiKey + in: header + name: X-API-Key + SandboxAccessTokenAuth: + type: apiKey + in: header + name: X-Access-Token + description: 'Sandbox access token (`envdAccessToken`) for authenticating requests + to a running sandbox. Returned by: [POST /sandboxes](/docs/api-reference/sandboxes/create-a-sandbox) + (on create), [POST /sandboxes/{sandboxID}/connect](/docs/api-reference/sandboxes/connect-to-a-sandbox) + (on connect), [POST /sandboxes/{sandboxID}/resume](/docs/api-reference/sandboxes/resume-a-sandbox) + (on resume), and [GET /sandboxes/{sandboxID}](/docs/api-reference/sandboxes/get-a-sandbox) + (for running or paused sandboxes).' + parameters: + FilePath: + name: path + in: query + required: false + description: Path to the file, URL encoded. Can be relative to user's home directory. + schema: + type: string + User: + name: username + in: query + required: false + description: User used for setting the owner, or resolving relative paths. + schema: + type: string + Signature: + name: signature + in: query + required: false + description: Signature used for file access permission verification. 
+ schema: + type: string + SignatureExpiration: + name: signature_expiration + in: query + required: false + description: Signature expiration used for defining the expiration time of the + signature. + schema: + type: integer + templateID: + name: templateID + in: path + required: true + schema: + type: string + buildID: + name: buildID + in: path + required: true + schema: + type: string + sandboxID: + name: sandboxID + in: path + required: true + schema: + type: string + teamID: + name: teamID + in: path + required: true + schema: + type: string + nodeID: + name: nodeID + in: path + required: true + schema: + type: string + apiKeyID: + name: apiKeyID + in: path + required: true + schema: + type: string + accessTokenID: + name: accessTokenID + in: path + required: true + schema: + type: string + snapshotID: + name: snapshotID + in: path + required: true + schema: + type: string + description: Identifier of the snapshot (template ID) + tag: + name: tag + in: path + required: true + schema: + type: string + description: Tag name + paginationLimit: + name: limit + in: query + description: Maximum number of items to return per page + required: false + schema: + type: integer + format: int32 + minimum: 1 + default: 100 + maximum: 100 + paginationNextToken: + name: nextToken + in: query + description: Cursor to start the list from + required: false + schema: + type: string + volumeID: + name: volumeID + in: path + required: true + schema: + type: string + requestBodies: + File: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + file: + type: string + format: binary + responses: + UploadSuccess: + description: The file was uploaded successfully. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/EntryInfo' + DownloadSuccess: + description: Entire file downloaded successfully. 
+ content: + application/octet-stream: + schema: + type: string + format: binary + description: The file content + InvalidPath: + description: Invalid path + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + InternalServerError: + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + FileNotFound: + description: File not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + InvalidUser: + description: Invalid user + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + NotEnoughDiskSpace: + description: Not enough disk space + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + '400': + description: Bad request + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id008 + '401': + description: Authentication error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id009 + '403': + description: Forbidden + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: 403 + message: 'Forbidden: insufficient permissions' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id011 + '409': + description: Conflict + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + code: 409 + message: 'Conflict: the resource is in a conflicting state' + '500': + description: Server error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: *id010 + schemas: + Error: + required: + - code + - message + properties: + code: + type: integer + format: int32 + description: Error code + message: + type: string + description: Error + type: object + EntryInfo: + required: + - path + - name + - type + properties: + path: + type: string + description: Path to the file + name: + 
type: string + description: Name of the file + type: + type: string + description: Type of the file + enum: + - file + - directory + type: object + EnvVars: + additionalProperties: + type: string + description: Environment variables for the sandbox + type: object + Metrics: + type: object + description: Resource usage metrics + properties: + ts: + type: integer + format: int64 + description: Unix timestamp in UTC for current sandbox time + cpu_count: + type: integer + description: Number of CPU cores + cpu_used_pct: + type: number + format: float + description: CPU usage percentage + mem_total: + type: integer + description: Total virtual memory in bytes + format: int64 + mem_used: + type: integer + description: Used virtual memory in bytes + format: int64 + disk_used: + type: integer + description: Used disk space in bytes + format: int64 + disk_total: + type: integer + description: Total disk space in bytes + format: int64 + mem_used_mib: + type: integer + description: Used virtual memory in MiB + format: int64 + mem_total_mib: + type: integer + description: Total virtual memory in MiB + format: int64 + connect-protocol-version: + type: number + title: Connect-Protocol-Version + description: Define the version of the Connect protocol + const: 1 + connect-timeout-header: + type: number + title: Connect-Timeout-Ms + description: Define the timeout, in ms + filesystem.CreateWatcherRequest: + type: object + properties: + path: + type: string + title: path + recursive: + type: boolean + title: recursive + title: CreateWatcherRequest + additionalProperties: false + filesystem.CreateWatcherResponse: + type: object + properties: + watcherId: + type: string + title: watcher_id + title: CreateWatcherResponse + additionalProperties: false + filesystem.EntryInfo: + type: object + properties: + name: + type: string + title: name + type: + title: type + $ref: '#/components/schemas/filesystem.FileType' + path: + type: string + title: path + size: + type: + - integer + - string 
+ title: size + format: int64 + description: File size in bytes. Encoded as string for values exceeding + JSON number precision (int64). + mode: + type: integer + title: mode + permissions: + type: string + title: permissions + owner: + type: string + title: owner + group: + type: string + title: group + modifiedTime: + title: modified_time + $ref: '#/components/schemas/google.protobuf.Timestamp' + symlinkTarget: + type: + - string + - 'null' + title: symlink_target + description: If the entry is a symlink, this field contains the target of + the symlink. + title: EntryInfo + additionalProperties: false + filesystem.EventType: + type: string + title: EventType + enum: + - EVENT_TYPE_UNSPECIFIED + - EVENT_TYPE_CREATE + - EVENT_TYPE_WRITE + - EVENT_TYPE_REMOVE + - EVENT_TYPE_RENAME + - EVENT_TYPE_CHMOD + filesystem.FileType: + type: string + title: FileType + enum: + - FILE_TYPE_UNSPECIFIED + - FILE_TYPE_FILE + - FILE_TYPE_DIRECTORY + filesystem.FilesystemEvent: + type: object + properties: + name: + type: string + title: name + type: + title: type + $ref: '#/components/schemas/filesystem.EventType' + title: FilesystemEvent + additionalProperties: false + filesystem.GetWatcherEventsRequest: + type: object + properties: + watcherId: + type: string + title: watcher_id + title: GetWatcherEventsRequest + additionalProperties: false + filesystem.GetWatcherEventsResponse: + type: object + properties: + events: + type: array + items: + $ref: '#/components/schemas/filesystem.FilesystemEvent' + title: events + title: GetWatcherEventsResponse + additionalProperties: false + filesystem.ListDirRequest: + type: object + properties: + path: + type: string + title: path + depth: + type: integer + title: depth + title: ListDirRequest + additionalProperties: false + filesystem.ListDirResponse: + type: object + properties: + entries: + type: array + items: + $ref: '#/components/schemas/filesystem.EntryInfo' + title: entries + title: ListDirResponse + additionalProperties: false + 
filesystem.MakeDirRequest: + type: object + properties: + path: + type: string + title: path + title: MakeDirRequest + additionalProperties: false + filesystem.MakeDirResponse: + type: object + properties: + entry: + title: entry + $ref: '#/components/schemas/filesystem.EntryInfo' + title: MakeDirResponse + additionalProperties: false + filesystem.MoveRequest: + type: object + properties: + source: + type: string + title: source + destination: + type: string + title: destination + title: MoveRequest + additionalProperties: false + filesystem.MoveResponse: + type: object + properties: + entry: + title: entry + $ref: '#/components/schemas/filesystem.EntryInfo' + title: MoveResponse + additionalProperties: false + filesystem.RemoveRequest: + type: object + properties: + path: + type: string + title: path + title: RemoveRequest + additionalProperties: false + filesystem.RemoveResponse: + type: object + title: RemoveResponse + additionalProperties: false + filesystem.RemoveWatcherRequest: + type: object + properties: + watcherId: + type: string + title: watcher_id + title: RemoveWatcherRequest + additionalProperties: false + filesystem.RemoveWatcherResponse: + type: object + title: RemoveWatcherResponse + additionalProperties: false + filesystem.StatRequest: + type: object + properties: + path: + type: string + title: path + title: StatRequest + additionalProperties: false + filesystem.StatResponse: + type: object + properties: + entry: + title: entry + $ref: '#/components/schemas/filesystem.EntryInfo' + title: StatResponse + additionalProperties: false + filesystem.WatchDirRequest: + type: object + properties: + path: + type: string + title: path + recursive: + type: boolean + title: recursive + title: WatchDirRequest + additionalProperties: false + filesystem.WatchDirResponse: + type: object + oneOf: + - properties: + filesystem: + title: filesystem + $ref: '#/components/schemas/filesystem.FilesystemEvent' + title: filesystem + required: + - filesystem + - properties: 
+ keepalive: + title: keepalive + $ref: '#/components/schemas/filesystem.WatchDirResponse.KeepAlive' + title: keepalive + required: + - keepalive + - properties: + start: + title: start + $ref: '#/components/schemas/filesystem.WatchDirResponse.StartEvent' + title: start + required: + - start + title: WatchDirResponse + additionalProperties: false + filesystem.WatchDirResponse.KeepAlive: + type: object + title: KeepAlive + additionalProperties: false + filesystem.WatchDirResponse.StartEvent: + type: object + title: StartEvent + additionalProperties: false + google.protobuf.Timestamp: + type: string + examples: + - '2023-01-15T01:30:15.01Z' + - '2024-12-25T12:00:00Z' + format: date-time + description: "A Timestamp represents a point in time independent of any time\ + \ zone or local\n calendar, encoded as a count of seconds and fractions of\ + \ seconds at\n nanosecond resolution. The count is relative to an epoch at\ + \ UTC midnight on\n January 1, 1970, in the proleptic Gregorian calendar which\ + \ extends the\n Gregorian calendar backwards to year one.\n\n All minutes\ + \ are 60 seconds long. 
Leap seconds are \"smeared\" so that no leap\n second\ + \ table is needed for interpretation, using a [24-hour linear\n smear](https://developers.google.com/time/smear).\n\ + \n The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.\ + \ By\n restricting to that range, we ensure that we can convert to and from\ + \ [RFC\n 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.\n\n # Examples\n\ + \n Example 1: Compute Timestamp from POSIX `time()`.\n\n Timestamp timestamp;\n\ + \ timestamp.set_seconds(time(NULL));\n timestamp.set_nanos(0);\n\n\ + \ Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n\n struct\ + \ timeval tv;\n gettimeofday(&tv, NULL);\n\n Timestamp timestamp;\n\ + \ timestamp.set_seconds(tv.tv_sec);\n timestamp.set_nanos(tv.tv_usec\ + \ * 1000);\n\n Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n\ + \n FILETIME ft;\n GetSystemTimeAsFileTime(&ft);\n UINT64 ticks\ + \ = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n\n // A Windows\ + \ tick is 100 nanoseconds. 
Windows epoch 1601-01-01T00:00:00Z\n // is\ + \ 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n Timestamp\ + \ timestamp;\n timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n\ + \ timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n\n Example\ + \ 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n\n long\ + \ millis = System.currentTimeMillis();\n\n Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis\ + \ / 1000)\n .setNanos((int) ((millis % 1000) * 1000000)).build();\n\ + \n Example 5: Compute Timestamp from Java `Instant.now()`.\n\n Instant\ + \ now = Instant.now();\n\n Timestamp timestamp =\n Timestamp.newBuilder().setSeconds(now.getEpochSecond())\n\ + \ .setNanos(now.getNano()).build();\n\n Example 6: Compute Timestamp\ + \ from current time in Python.\n\n timestamp = Timestamp()\n timestamp.GetCurrentTime()\n\ + \n # JSON Mapping\n\n In JSON format, the Timestamp type is encoded as a string\ + \ in the\n [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is,\ + \ the\n format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\"\ + \n where {year} is always expressed using four digits while {month}, {day},\n\ + \ {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional\n\ + \ seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),\n\ + \ are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone\n\ + \ is required. 
A proto3 JSON serializer should always use UTC (as indicated\ + \ by\n \"Z\") when printing the Timestamp type and a proto3 JSON parser should\ + \ be\n able to accept both UTC and other timezones (as indicated by an offset).\n\ + \n For example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past\n 01:30\ + \ UTC on January 15, 2017.\n\n In JavaScript, one can convert a Date object\ + \ to this format using the\n standard\n [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)\n\ + \ method. In Python, a standard `datetime.datetime` object can be converted\n\ + \ to this format using\n [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)\ + \ with\n the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java,\ + \ one can use\n the Joda Time's [`ISODateTimeFormat.dateTime()`](\n http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()\n\ + \ ) to obtain a formatter capable of generating timestamps in this format." 
+ process.CloseStdinRequest: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + title: CloseStdinRequest + additionalProperties: false + process.CloseStdinResponse: + type: object + title: CloseStdinResponse + additionalProperties: false + process.ConnectRequest: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + title: ConnectRequest + additionalProperties: false + process.ConnectResponse: + type: object + properties: + event: + title: event + $ref: '#/components/schemas/process.ProcessEvent' + title: ConnectResponse + additionalProperties: false + process.ListRequest: + type: object + title: ListRequest + additionalProperties: false + process.ListResponse: + type: object + properties: + processes: + type: array + items: + $ref: '#/components/schemas/process.ProcessInfo' + title: processes + title: ListResponse + additionalProperties: false + process.PTY: + type: object + properties: + size: + title: size + $ref: '#/components/schemas/process.PTY.Size' + title: PTY + additionalProperties: false + process.PTY.Size: + type: object + properties: + cols: + type: integer + title: cols + rows: + type: integer + title: rows + title: Size + additionalProperties: false + process.ProcessConfig: + type: object + properties: + cmd: + type: string + title: cmd + args: + type: array + items: + type: string + title: args + envs: + type: object + title: envs + additionalProperties: + type: string + title: value + cwd: + type: + - string + - 'null' + title: cwd + title: ProcessConfig + additionalProperties: false + process.ProcessEvent: + type: object + oneOf: + - properties: + data: + title: data + $ref: '#/components/schemas/process.ProcessEvent.DataEvent' + title: data + required: + - data + - properties: + end: + title: end + $ref: '#/components/schemas/process.ProcessEvent.EndEvent' + title: end + required: + - end + - properties: + keepalive: + title: 
keepalive + $ref: '#/components/schemas/process.ProcessEvent.KeepAlive' + title: keepalive + required: + - keepalive + - properties: + start: + title: start + $ref: '#/components/schemas/process.ProcessEvent.StartEvent' + title: start + required: + - start + title: ProcessEvent + additionalProperties: false + process.ProcessEvent.DataEvent: + type: object + oneOf: + - properties: + pty: + type: string + title: pty + format: byte + title: pty + required: + - pty + - properties: + stderr: + type: string + title: stderr + format: byte + title: stderr + required: + - stderr + - properties: + stdout: + type: string + title: stdout + format: byte + title: stdout + required: + - stdout + title: DataEvent + additionalProperties: false + process.ProcessEvent.EndEvent: + type: object + properties: + exitCode: + type: integer + title: exit_code + format: int32 + deprecated: true + description: 'Deprecated: not populated by the server. Parse the exit code + from the `status` string (e.g. "exit status 0").' + exited: + type: boolean + title: exited + status: + type: string + title: status + description: Process exit status string (e.g. "exit status 0"). Parse the + integer exit code from this field. 
+ error: + type: + - string + - 'null' + title: error + title: EndEvent + additionalProperties: false + process.ProcessEvent.KeepAlive: + type: object + title: KeepAlive + additionalProperties: false + process.ProcessEvent.StartEvent: + type: object + properties: + pid: + type: integer + title: pid + title: StartEvent + additionalProperties: false + process.ProcessInfo: + type: object + properties: + config: + title: config + $ref: '#/components/schemas/process.ProcessConfig' + pid: + type: integer + title: pid + tag: + type: + - string + - 'null' + title: tag + title: ProcessInfo + additionalProperties: false + process.ProcessInput: + type: object + oneOf: + - properties: + pty: + type: string + title: pty + format: byte + title: pty + required: + - pty + - properties: + stdin: + type: string + title: stdin + format: byte + title: stdin + required: + - stdin + title: ProcessInput + additionalProperties: false + process.ProcessSelector: + type: object + oneOf: + - properties: + pid: + type: integer + title: pid + title: pid + required: + - pid + - properties: + tag: + type: string + title: tag + title: tag + required: + - tag + title: ProcessSelector + additionalProperties: false + process.SendInputRequest: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + input: + title: input + $ref: '#/components/schemas/process.ProcessInput' + title: SendInputRequest + additionalProperties: false + process.SendInputResponse: + type: object + title: SendInputResponse + additionalProperties: false + process.SendSignalRequest: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + signal: + title: signal + $ref: '#/components/schemas/process.Signal' + title: SendSignalRequest + additionalProperties: false + process.SendSignalResponse: + type: object + title: SendSignalResponse + additionalProperties: false + process.Signal: + type: string + title: Signal + 
enum: + - SIGNAL_UNSPECIFIED + - SIGNAL_SIGTERM + - SIGNAL_SIGKILL + process.StartRequest: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessConfig' + pty: + oneOf: + - $ref: '#/components/schemas/process.PTY' + - type: 'null' + title: pty + tag: + type: + - string + - 'null' + title: tag + stdin: + type: + - boolean + - 'null' + title: stdin + description: "This is optional for backwards compatibility.\n We default\ + \ to true. New SDK versions will set this to false by default." + title: StartRequest + additionalProperties: false + process.StartResponse: + type: object + properties: + event: + title: event + $ref: '#/components/schemas/process.ProcessEvent' + title: StartResponse + additionalProperties: false + process.StreamInputRequest: + type: object + oneOf: + - properties: + data: + title: data + $ref: '#/components/schemas/process.StreamInputRequest.DataEvent' + title: data + required: + - data + - properties: + keepalive: + title: keepalive + $ref: '#/components/schemas/process.StreamInputRequest.KeepAlive' + title: keepalive + required: + - keepalive + - properties: + start: + title: start + $ref: '#/components/schemas/process.StreamInputRequest.StartEvent' + title: start + required: + - start + title: StreamInputRequest + additionalProperties: false + process.StreamInputRequest.DataEvent: + type: object + properties: + input: + title: input + $ref: '#/components/schemas/process.ProcessInput' + title: DataEvent + additionalProperties: false + process.StreamInputRequest.KeepAlive: + type: object + title: KeepAlive + additionalProperties: false + process.StreamInputRequest.StartEvent: + type: object + properties: + process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + title: StartEvent + additionalProperties: false + process.StreamInputResponse: + type: object + title: StreamInputResponse + additionalProperties: false + process.UpdateRequest: + type: object + properties: + 
process: + title: process + $ref: '#/components/schemas/process.ProcessSelector' + pty: + oneOf: + - $ref: '#/components/schemas/process.PTY' + - type: 'null' + title: pty + title: UpdateRequest + additionalProperties: false + process.UpdateResponse: + type: object + title: UpdateResponse + additionalProperties: false + Team: + required: + - teamID + - name + - apiKey + - isDefault + properties: + teamID: + type: string + description: Identifier of the team + name: + type: string + description: Name of the team + apiKey: + type: string + description: API key for the team + isDefault: + type: boolean + description: Whether the team is the default team + type: object + TeamUser: + required: + - id + - email + properties: + id: + type: string + format: uuid + description: Identifier of the user + email: + type: string + description: Email of the user + type: object + TemplateUpdateRequest: + properties: + public: + type: boolean + description: Whether the template is public or only accessible by the team + type: object + TemplateUpdateResponse: + required: + - names + properties: + names: + type: array + description: Names of the template (namespace/alias format when namespaced) + items: + type: string + type: object + CPUCount: + type: integer + format: int32 + minimum: 1 + description: CPU cores for the sandbox + MemoryMB: + type: integer + format: int32 + minimum: 128 + description: Memory for the sandbox in MiB + DiskSizeMB: + type: integer + format: int32 + minimum: 0 + description: Disk size for the sandbox in MiB + EnvdVersion: + type: string + description: Version of the envd running in the sandbox + SandboxMetadata: + additionalProperties: + type: string + description: Metadata of the sandbox + type: object + SandboxState: + type: string + description: State of the sandbox + enum: + - running + - paused + Mcp: + type: object + description: MCP configuration for the sandbox + additionalProperties: {} + nullable: true + SandboxNetworkConfig: + type: object + 
properties: + allowPublicTraffic: + type: boolean + default: true + description: Specify if the sandbox URLs should be accessible only with + authentication. + allowOut: + type: array + description: List of allowed CIDR blocks or IP addresses for egress traffic. + Allowed addresses always take precedence over blocked addresses. + items: + type: string + denyOut: + type: array + description: List of denied CIDR blocks or IP addresses for egress traffic + items: + type: string + maskRequestHost: + type: string + description: Specify host mask which will be used for all sandbox requests + SandboxAutoResumePolicy: + type: string + description: Auto-resume policy for paused sandboxes. Default is off. + default: 'off' + enum: + - any + - 'off' + SandboxAutoResumeConfig: + type: object + description: Auto-resume configuration for paused sandboxes. Default is off. + required: + - policy + properties: + policy: + $ref: '#/components/schemas/SandboxAutoResumePolicy' + SandboxLog: + description: Log entry with timestamp and line + required: + - timestamp + - line + properties: + timestamp: + type: string + format: date-time + description: Timestamp of the log entry + line: + type: string + description: Log line content + type: object + SandboxLogEntry: + required: + - timestamp + - level + - message + - fields + properties: + timestamp: + type: string + format: date-time + description: Timestamp of the log entry + message: + type: string + description: Log message content + level: + $ref: '#/components/schemas/LogLevel' + fields: + type: object + additionalProperties: + type: string + type: object + SandboxLogs: + required: + - logs + - logEntries + properties: + logs: + description: Logs of the sandbox + type: array + items: + $ref: '#/components/schemas/SandboxLog' + logEntries: + description: Structured logs of the sandbox + type: array + items: + $ref: '#/components/schemas/SandboxLogEntry' + type: object + SandboxMetric: + description: Metric entry with timestamp and 
line + required: + - timestamp + - timestampUnix + - cpuCount + - cpuUsedPct + - memUsed + - memTotal + - diskUsed + - diskTotal + properties: + timestamp: + type: string + format: date-time + deprecated: true + description: Timestamp of the metric entry + timestampUnix: + type: integer + format: int64 + description: Timestamp of the metric entry in Unix time (seconds since epoch) + cpuCount: + type: integer + format: int32 + description: Number of CPU cores + cpuUsedPct: + type: number + format: float + description: CPU usage percentage + memUsed: + type: integer + format: int64 + description: Memory used in bytes + memTotal: + type: integer + format: int64 + description: Total memory in bytes + diskUsed: + type: integer + format: int64 + description: Disk used in bytes + diskTotal: + type: integer + format: int64 + description: Total disk space in bytes + type: object + SandboxVolumeMount: + type: object + properties: + name: + type: string + description: Name of the volume + path: + type: string + description: Path of the volume + required: + - name + - path + Sandbox: + required: + - templateID + - sandboxID + - clientID + - envdVersion + properties: + templateID: + type: string + description: Identifier of the template from which is the sandbox created + sandboxID: + type: string + description: Identifier of the sandbox + alias: + type: string + description: Alias of the template + clientID: + type: string + deprecated: true + description: Identifier of the client + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + envdAccessToken: + type: + - string + - 'null' + description: 'Access token for authenticating envd requests to this sandbox. + Only returned when the sandbox is created with `secure: true`. Null for + non-secure sandboxes (envd endpoints work without auth).' + trafficAccessToken: + type: + - string + - 'null' + description: Token required for accessing sandbox via proxy. 
+ domain: + type: + - string + - 'null' + description: 'Deprecated: always null. Construct sandbox URLs as `https://{port}-{sandboxID}.e2b.app`.' + deprecated: true + type: object + SandboxDetail: + required: + - templateID + - sandboxID + - clientID + - startedAt + - cpuCount + - memoryMB + - diskSizeMB + - endAt + - state + - envdVersion + properties: + templateID: + type: string + description: Identifier of the template from which is the sandbox created + alias: + type: string + description: Alias of the template + sandboxID: + type: string + description: Identifier of the sandbox + clientID: + type: string + deprecated: true + description: Identifier of the client + startedAt: + type: string + format: date-time + description: Time when the sandbox was started + endAt: + type: string + format: date-time + description: Time when the sandbox will expire + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + envdAccessToken: + type: + - string + - 'null' + description: 'Access token for authenticating envd requests to this sandbox. + Only returned when the sandbox is created with `secure: true`. Null for + non-secure sandboxes (envd endpoints work without auth).' + domain: + type: + - string + - 'null' + description: 'Deprecated: always null. Construct sandbox URLs as `https://{port}-{sandboxID}.e2b.app`.' 
+ deprecated: true + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + diskSizeMB: + $ref: '#/components/schemas/DiskSizeMB' + metadata: + $ref: '#/components/schemas/SandboxMetadata' + state: + $ref: '#/components/schemas/SandboxState' + volumeMounts: + type: array + items: + $ref: '#/components/schemas/SandboxVolumeMount' + type: object + ListedSandbox: + required: + - templateID + - sandboxID + - clientID + - startedAt + - cpuCount + - memoryMB + - diskSizeMB + - endAt + - state + - envdVersion + properties: + templateID: + type: string + description: Identifier of the template from which is the sandbox created + alias: + type: string + description: Alias of the template + sandboxID: + type: string + description: Identifier of the sandbox + clientID: + type: string + deprecated: true + description: Identifier of the client + startedAt: + type: string + format: date-time + description: Time when the sandbox was started + endAt: + type: string + format: date-time + description: Time when the sandbox will expire + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + diskSizeMB: + $ref: '#/components/schemas/DiskSizeMB' + metadata: + $ref: '#/components/schemas/SandboxMetadata' + state: + $ref: '#/components/schemas/SandboxState' + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + volumeMounts: + type: array + items: + $ref: '#/components/schemas/SandboxVolumeMount' + type: object + SandboxesWithMetrics: + required: + - sandboxes + properties: + sandboxes: + additionalProperties: + $ref: '#/components/schemas/SandboxMetric' + type: object + NewSandbox: + required: + - templateID + properties: + templateID: + type: string + description: Identifier of the required template + timeout: + type: integer + format: int32 + minimum: 0 + default: 15 + description: Time to live for the sandbox in seconds. 
+ autoPause: + type: boolean + default: false + description: Automatically pauses the sandbox after the timeout + autoResume: + $ref: '#/components/schemas/SandboxAutoResumeConfig' + secure: + type: boolean + description: Secure all system communication with sandbox + allow_internet_access: + type: boolean + description: Allow sandbox to access the internet. When set to false, it + behaves the same as specifying denyOut to 0.0.0.0/0 in the network config. + network: + $ref: '#/components/schemas/SandboxNetworkConfig' + metadata: + $ref: '#/components/schemas/SandboxMetadata' + envVars: + $ref: '#/components/schemas/EnvVars' + mcp: + $ref: '#/components/schemas/Mcp' + volumeMounts: + type: array + items: + $ref: '#/components/schemas/SandboxVolumeMount' + type: object + ResumedSandbox: + properties: + timeout: + type: integer + format: int32 + minimum: 0 + default: 15 + description: Time to live for the sandbox in seconds. + autoPause: + type: boolean + deprecated: true + description: Automatically pauses the sandbox after the timeout + type: object + ConnectSandbox: + type: object + required: + - timeout + properties: + timeout: + description: Timeout in seconds from the current time after which the sandbox + should expire + type: integer + format: int32 + minimum: 0 + TeamMetric: + description: Team metric with timestamp + required: + - timestamp + - timestampUnix + - concurrentSandboxes + - sandboxStartRate + properties: + timestamp: + type: string + format: date-time + deprecated: true + description: Timestamp of the metric entry + timestampUnix: + type: integer + format: int64 + description: Timestamp of the metric entry in Unix time (seconds since epoch) + concurrentSandboxes: + type: integer + format: int32 + description: The number of concurrent sandboxes for the team + sandboxStartRate: + type: number + format: float + description: Number of sandboxes started per second + type: object + MaxTeamMetric: + description: Team metric with timestamp + required: + 
- timestamp + - timestampUnix + - value + properties: + timestamp: + type: string + format: date-time + deprecated: true + description: Timestamp of the metric entry + timestampUnix: + type: integer + format: int64 + description: Timestamp of the metric entry in Unix time (seconds since epoch) + value: + type: number + description: The maximum value of the requested metric in the given interval + type: object + Template: + required: + - templateID + - buildID + - cpuCount + - memoryMB + - diskSizeMB + - public + - createdAt + - updatedAt + - createdBy + - lastSpawnedAt + - spawnCount + - buildCount + - envdVersion + - aliases + - names + - buildStatus + properties: + templateID: + type: string + description: Identifier of the template + buildID: + type: string + description: Identifier of the last successful build for given template + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + diskSizeMB: + $ref: '#/components/schemas/DiskSizeMB' + public: + type: boolean + description: Whether the template is public or only accessible by the team + aliases: + type: array + description: Aliases of the template + deprecated: true + items: + type: string + names: + type: array + description: Names of the template (namespace/alias format when namespaced) + items: + type: string + createdAt: + type: string + format: date-time + description: Time when the template was created + updatedAt: + type: string + format: date-time + description: Time when the template was last updated + createdBy: + oneOf: + - $ref: '#/components/schemas/TeamUser' + - type: 'null' + lastSpawnedAt: + type: + - string + - 'null' + format: date-time + description: Time when the template was last used + spawnCount: + type: integer + format: int64 + description: Number of times the template was used + buildCount: + type: integer + format: int32 + description: Number of times the template was built + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + 
buildStatus: + $ref: '#/components/schemas/TemplateBuildStatus' + type: object + TemplateRequestResponseV3: + required: + - templateID + - buildID + - public + - aliases + - names + - tags + properties: + templateID: + type: string + description: Identifier of the template + buildID: + type: string + description: Identifier of the last successful build for given template + public: + type: boolean + description: Whether the template is public or only accessible by the team + names: + type: array + description: Names of the template + items: + type: string + tags: + type: array + description: Tags assigned to the template build + items: + type: string + aliases: + type: array + description: Aliases of the template + deprecated: true + items: + type: string + type: object + TemplateLegacy: + required: + - templateID + - buildID + - cpuCount + - memoryMB + - diskSizeMB + - public + - createdAt + - updatedAt + - createdBy + - lastSpawnedAt + - spawnCount + - buildCount + - envdVersion + - aliases + properties: + templateID: + type: string + description: Identifier of the template + buildID: + type: string + description: Identifier of the last successful build for given template + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + diskSizeMB: + $ref: '#/components/schemas/DiskSizeMB' + public: + type: boolean + description: Whether the template is public or only accessible by the team + aliases: + type: array + description: Aliases of the template + items: + type: string + createdAt: + type: string + format: date-time + description: Time when the template was created + updatedAt: + type: string + format: date-time + description: Time when the template was last updated + createdBy: + oneOf: + - $ref: '#/components/schemas/TeamUser' + - type: 'null' + lastSpawnedAt: + type: + - string + - 'null' + format: date-time + description: Time when the template was last used + spawnCount: + type: integer + format: int64 + 
description: Number of times the template was used + buildCount: + type: integer + format: int32 + description: Number of times the template was built + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + names: + type: array + description: Names of the template (namespace/alias format when namespaced) + items: + type: string + buildStatus: + $ref: '#/components/schemas/TemplateBuildStatus' + type: object + TemplateBuild: + required: + - buildID + - status + - createdAt + - updatedAt + - cpuCount + - memoryMB + properties: + buildID: + type: string + format: uuid + description: Identifier of the build + status: + $ref: '#/components/schemas/TemplateBuildStatus' + createdAt: + type: string + format: date-time + description: Time when the build was created + updatedAt: + type: string + format: date-time + description: Time when the build was last updated + finishedAt: + type: string + format: date-time + description: Time when the build was finished + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + diskSizeMB: + $ref: '#/components/schemas/DiskSizeMB' + envdVersion: + $ref: '#/components/schemas/EnvdVersion' + type: object + TemplateWithBuilds: + required: + - templateID + - public + - aliases + - names + - createdAt + - updatedAt + - lastSpawnedAt + - spawnCount + - builds + properties: + templateID: + type: string + description: Identifier of the template + public: + type: boolean + description: Whether the template is public or only accessible by the team + aliases: + type: array + description: Aliases of the template + deprecated: true + items: + type: string + names: + type: array + description: Names of the template (namespace/alias format when namespaced) + items: + type: string + createdAt: + type: string + format: date-time + description: Time when the template was created + updatedAt: + type: string + format: date-time + description: Time when the template was last updated + lastSpawnedAt: + 
type: + - string + - 'null' + format: date-time + description: Time when the template was last used + spawnCount: + type: integer + format: int64 + description: Number of times the template was used + builds: + type: array + description: List of builds for the template + items: + $ref: '#/components/schemas/TemplateBuild' + type: object + TemplateAliasResponse: + required: + - templateID + - public + properties: + templateID: + type: string + description: Identifier of the template + public: + type: boolean + description: Whether the template is public or only accessible by the team + type: object + TemplateBuildRequest: + required: + - dockerfile + properties: + alias: + description: Alias of the template + type: string + dockerfile: + description: Dockerfile for the template + type: string + teamID: + type: string + description: Identifier of the team + startCmd: + description: Start command to execute in the template after the build + type: string + readyCmd: + description: Ready check command to execute in the template after the build + type: string + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + type: object + TemplateStep: + description: Step in the template build process + required: + - type + properties: + type: + type: string + description: Type of the step + args: + default: [] + type: array + description: Arguments for the step + items: + type: string + filesHash: + type: string + description: Hash of the files used in the step + force: + default: false + type: boolean + description: Whether the step should be forced to run regardless of the + cache + type: object + TemplateBuildRequestV3: + properties: + name: + description: Name of the template. Can include a tag with colon separator + (e.g. "my-template" or "my-template:v1"). If tag is included, it will + be treated as if the tag was provided in the tags array. 
+ type: string + tags: + type: array + description: Tags to assign to the template build + items: + type: string + alias: + description: Alias of the template. Deprecated, use name instead. + type: string + deprecated: true + teamID: + deprecated: true + type: string + description: Identifier of the team + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + type: object + TemplateBuildRequestV2: + required: + - alias + properties: + alias: + description: Alias of the template + type: string + teamID: + deprecated: true + type: string + description: Identifier of the team + cpuCount: + $ref: '#/components/schemas/CPUCount' + memoryMB: + $ref: '#/components/schemas/MemoryMB' + type: object + FromImageRegistry: + oneOf: + - $ref: '#/components/schemas/AWSRegistry' + - $ref: '#/components/schemas/GCPRegistry' + - $ref: '#/components/schemas/GeneralRegistry' + discriminator: + propertyName: type + mapping: + aws: '#/components/schemas/AWSRegistry' + gcp: '#/components/schemas/GCPRegistry' + registry: '#/components/schemas/GeneralRegistry' + AWSRegistry: + type: object + required: + - type + - awsAccessKeyId + - awsSecretAccessKey + - awsRegion + properties: + type: + type: string + enum: + - aws + description: Type of registry authentication + awsAccessKeyId: + type: string + description: AWS Access Key ID for ECR authentication + awsSecretAccessKey: + type: string + description: AWS Secret Access Key for ECR authentication + awsRegion: + type: string + description: AWS Region where the ECR registry is located + GCPRegistry: + type: object + required: + - type + - serviceAccountJson + properties: + type: + type: string + enum: + - gcp + description: Type of registry authentication + serviceAccountJson: + type: string + description: Service Account JSON for GCP authentication + GeneralRegistry: + type: object + required: + - type + - username + - password + properties: + type: + type: string + enum: + - registry + 
description: Type of registry authentication + username: + type: string + description: Username to use for the registry + password: + type: string + description: Password to use for the registry + TemplateBuildStartV2: + type: object + properties: + fromImage: + type: string + description: Image to use as a base for the template build + fromTemplate: + type: string + description: Template to use as a base for the template build + fromImageRegistry: + $ref: '#/components/schemas/FromImageRegistry' + force: + default: false + type: boolean + description: Whether the whole build should be forced to run regardless + of the cache + steps: + default: [] + description: List of steps to execute in the template build + type: array + items: + $ref: '#/components/schemas/TemplateStep' + startCmd: + description: Start command to execute in the template after the build + type: string + readyCmd: + description: Ready check command to execute in the template after the build + type: string + TemplateBuildFileUpload: + required: + - present + properties: + present: + type: boolean + description: Whether the file is already present in the cache + url: + description: Url where the file should be uploaded to + type: string + type: object + LogLevel: + type: string + description: Severity level for log entries (e.g. 
info, warn, error) + BuildLogEntry: + required: + - timestamp + - message + - level + properties: + timestamp: + type: string + format: date-time + description: Timestamp of the log entry + message: + type: string + description: Log message content + level: + $ref: '#/components/schemas/LogLevel' + step: + type: string + description: Step in the build process related to the log entry + type: object + BuildStatusReason: + required: + - message + properties: + message: + type: string + description: Message with the status reason, currently reporting only for + error status + step: + type: string + description: Step that failed + logEntries: + default: [] + description: Log entries related to the status reason + type: array + items: + $ref: '#/components/schemas/BuildLogEntry' + type: object + TemplateBuildStatus: + type: string + description: Status of the template build + enum: + - building + - waiting + - ready + - error + - uploaded + TemplateBuildInfo: + required: + - templateID + - buildID + - status + - logs + - logEntries + properties: + logs: + default: [] + description: Build logs + type: array + items: + type: string + logEntries: + default: [] + description: Build logs structured + type: array + items: + $ref: '#/components/schemas/BuildLogEntry' + templateID: + type: string + description: Identifier of the template + buildID: + type: string + description: Identifier of the build + status: + $ref: '#/components/schemas/TemplateBuildStatus' + reason: + $ref: '#/components/schemas/BuildStatusReason' + type: object + TemplateBuildLogsResponse: + required: + - logs + properties: + logs: + default: [] + description: Build logs structured + type: array + items: + $ref: '#/components/schemas/BuildLogEntry' + type: object + LogsDirection: + type: string + description: Direction of the logs that should be returned + enum: + - forward + - backward + x-enum-varnames: + - LogsDirectionForward + - LogsDirectionBackward + LogsSource: + type: string + description: Source 
of the logs that should be returned + enum: + - temporary + - persistent + x-enum-varnames: + - LogsSourceTemporary + - LogsSourcePersistent + AssignedTemplateTags: + required: + - tags + - buildID + properties: + tags: + type: array + items: + type: string + description: Assigned tags of the template + buildID: + type: string + format: uuid + description: Identifier of the build associated with these tags + type: object + TemplateTag: + required: + - tag + - buildID + - createdAt + properties: + tag: + type: string + description: The tag name + buildID: + type: string + format: uuid + description: Identifier of the build associated with this tag + createdAt: + type: string + format: date-time + description: Time when the tag was assigned + type: object + AssignTemplateTagsRequest: + required: + - target + - tags + properties: + target: + type: string + description: Target template in "name:tag" format + tags: + description: Tags to assign to the template + type: array + items: + type: string + type: object + DeleteTemplateTagsRequest: + required: + - name + - tags + properties: + name: + type: string + description: Name of the template + tags: + description: Tags to delete + type: array + items: + type: string + type: object +tags: +- name: Sandboxes +- name: Templates +- name: Filesystem +- name: Process +- name: Tags +- name: Teams +- name: Others +security: [] diff --git a/scripts/generate_openapi_reference.py b/scripts/generate_openapi_reference.py new file mode 100755 index 00000000..e4dc4b56 --- /dev/null +++ b/scripts/generate_openapi_reference.py @@ -0,0 +1,1531 @@ +#!/usr/bin/env python3 +"""Generate a merged OpenAPI spec for the full E2B developer-facing API. 
+ +Fetches specs from e2b-dev/infra at specified commits (or latest main), +combines multiple sources into a single openapi-public.yml: + + Sandbox API (served on -.e2b.app): + - Proto-generated OpenAPI for process/filesystem Connect RPC + - Hand-written REST spec (packages/envd/spec/envd.yaml) + - Auto-generated stubs for streaming RPCs (parsed from .proto files) + + Platform API (served on api.e2b.app): + - Main E2B API spec (spec/openapi.yml) + +Usage: + python3 scripts/generate_openapi_reference.py [options] + +Options: + --envd-commit HASH Commit/branch/tag in e2b-dev/infra for envd specs (default: main) + --api-commit HASH Commit/branch/tag in e2b-dev/infra for platform API spec (default: main) + --output FILE Output path (default: openapi-public.yml in repo root) + +Requires: Docker, PyYAML (pip install pyyaml). +""" + +from __future__ import annotations + +import copy +import os +import re +import subprocess +import sys +import tempfile +from dataclasses import dataclass +from glob import glob +from typing import Any + +import yaml + +# --------------------------------------------------------------------------- +# Configuration +# --------------------------------------------------------------------------- + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +DOCS_REPO_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, "..")) + +INFRA_REPO = "https://github.com/e2b-dev/infra.git" + +# Paths within e2b-dev/infra +INFRA_ENVD_SPEC_DIR = "packages/envd/spec" +INFRA_ENVD_REST_SPEC = "packages/envd/spec/envd.yaml" +INFRA_API_SPEC = "spec/openapi.yml" + +DOCKER_IMAGE = "e2b-openapi-generator" + +DOCKERFILE = """\ +FROM golang:1.25-alpine +RUN apk add --no-cache git +RUN go install github.com/bufbuild/buf/cmd/buf@v1.50.0 +RUN go install github.com/sudorandom/protoc-gen-connect-openapi@v0.25.3 +ENV PATH="/go/bin:${PATH}" +""" + +BUF_GEN_YAML = """\ +version: v1 +plugins: + - plugin: connect-openapi + out: /output/generated + opt: + - format=yaml +""" + +# Server 
definitions for the two API surfaces +SANDBOX_SERVER = { + "url": "https://{port}-{sandboxID}.e2b.app", + "description": "Sandbox API (envd) — runs inside each sandbox", + "variables": { + "port": {"default": "49983", "description": "Port number"}, + "sandboxID": {"default": "$SANDBOX_ID", "description": "Sandbox identifier"}, + }, +} + +PLATFORM_SERVER = { + "url": "https://api.e2b.app", + "description": "E2B Platform API", +} + +# Tag used to mark sandbox-specific paths so we can attach the right server +SANDBOX_TAG = "x-e2b-server" + +# Security scheme name for envd endpoints (must not collide with platform's AccessTokenAuth) +SANDBOX_AUTH_SCHEME = "SandboxAccessTokenAuth" + +# --------------------------------------------------------------------------- +# Proto parsing — auto-detect streaming RPCs +# --------------------------------------------------------------------------- + +@dataclass +class RpcMethod: + """An RPC method parsed from a .proto file.""" + + package: str + service: str + method: str + request_type: str + response_type: str + client_streaming: bool + server_streaming: bool + comment: str + + @property + def path(self) -> str: + return f"/{self.package}.{self.service}/{self.method}" + + @property + def tag(self) -> str: + return f"{self.package}.{self.service}" + + @property + def operation_id(self) -> str: + return f"{self.package}.{self.service}.{self.method}" + + @property + def request_schema_ref(self) -> str: + return f"#/components/schemas/{self.package}.{self.request_type}" + + @property + def response_schema_ref(self) -> str: + return f"#/components/schemas/{self.package}.{self.response_type}" + + @property + def is_streaming(self) -> bool: + return self.client_streaming or self.server_streaming + + @property + def streaming_label(self) -> str: + if self.client_streaming and self.server_streaming: + return "Bidirectional-streaming" + if self.client_streaming: + return "Client-streaming" + if self.server_streaming: + return 
"Server-streaming" + return "Unary" + + +_PACKAGE_RE = re.compile(r"^package\s+(\w+)\s*;", re.MULTILINE) +_SERVICE_RE = re.compile(r"service\s+(\w+)\s*\{", re.MULTILINE) +_RPC_RE = re.compile( + r"rpc\s+(\w+)\s*\(\s*(stream\s+)?(\w+)\s*\)\s*returns\s*\(\s*(stream\s+)?(\w+)\s*\)" +) + + +def parse_proto_file(path: str) -> list[RpcMethod]: + """Parse a .proto file and return all RPC methods found.""" + with open(path) as f: + content = f.read() + + pkg_match = _PACKAGE_RE.search(content) + if not pkg_match: + return [] + package = pkg_match.group(1) + + methods: list[RpcMethod] = [] + + for svc_match in _SERVICE_RE.finditer(content): + service_name = svc_match.group(1) + brace_start = content.index("{", svc_match.start()) + depth, pos = 1, brace_start + 1 + while depth > 0 and pos < len(content): + if content[pos] == "{": + depth += 1 + elif content[pos] == "}": + depth -= 1 + pos += 1 + service_body = content[brace_start:pos] + + for rpc_match in _RPC_RE.finditer(service_body): + rpc_start = service_body.rfind("\n", 0, rpc_match.start()) + comment = _extract_comment(service_body, rpc_start) + + methods.append(RpcMethod( + package=package, + service=service_name, + method=rpc_match.group(1), + request_type=rpc_match.group(3), + response_type=rpc_match.group(5), + client_streaming=bool(rpc_match.group(2)), + server_streaming=bool(rpc_match.group(4)), + comment=comment, + )) + + return methods + + +def _extract_comment(text: str, before_pos: int) -> str: + """Extract // comment lines immediately above a position in text.""" + lines = text[:before_pos].rstrip().split("\n") + comment_lines: list[str] = [] + for line in reversed(lines): + stripped = line.strip() + if stripped.startswith("//"): + comment_lines.append(stripped.lstrip("/ ")) + elif stripped == "": + continue + else: + break + comment_lines.reverse() + return " ".join(comment_lines) + + +def find_streaming_rpcs(spec_dir: str) -> list[RpcMethod]: + """Scan all .proto files under spec_dir and return streaming 
RPCs.""" + streaming: list[RpcMethod] = [] + for proto_path in sorted(glob(os.path.join(spec_dir, "**/*.proto"), recursive=True)): + for rpc in parse_proto_file(proto_path): + if rpc.is_streaming: + streaming.append(rpc) + return streaming + + +def build_streaming_path(rpc: RpcMethod) -> dict[str, Any]: + """Build an OpenAPI path item for a streaming RPC.""" + description = ( + f"{rpc.streaming_label} RPC. " + f"{rpc.comment + '. ' if rpc.comment else ''}" + f"Use the Connect protocol with streaming support." + ) + return { + "post": { + "tags": [rpc.tag], + "summary": rpc.method, + "description": description, + "operationId": rpc.operation_id, + "requestBody": { + "content": { + "application/json": { + "schema": {"$ref": rpc.request_schema_ref} + } + }, + "required": True, + }, + "responses": { + "200": { + "description": f"Stream of {rpc.response_type} events", + "content": { + "application/json": { + "schema": {"$ref": rpc.response_schema_ref} + } + }, + }, + }, + } + } + + +# --------------------------------------------------------------------------- +# Docker: fetch specs from e2b-dev/infra and generate OpenAPI from protos +# --------------------------------------------------------------------------- + +def docker_build_image() -> None: + """Build the Docker image with buf + protoc-gen-connect-openapi.""" + print("==> Building Docker image") + with tempfile.NamedTemporaryFile(mode="w", suffix=".Dockerfile", delete=False) as f: + f.write(DOCKERFILE) + dockerfile_path = f.name + try: + subprocess.run( + ["docker", "build", "-t", DOCKER_IMAGE, "-f", dockerfile_path, "."], + check=True, + cwd=DOCS_REPO_ROOT, + ) + finally: + os.unlink(dockerfile_path) + + +@dataclass +class FetchedSpecs: + """Paths to specs fetched from e2b-dev/infra.""" + envd_spec_dir: str # directory containing .proto files + envd_rest_spec: str # path to envd.yaml + api_spec: str # path to spec/openapi.yml + generated_docs: list[str] # raw YAML strings from buf generate + tmpdir: str # temp 
def docker_fetch_and_generate(envd_commit: str, api_commit: str) -> FetchedSpecs:
    """Clone e2b-dev/infra at specified commits, run buf generate, return paths.

    Uses a single Docker container that:
    1. Clones the repo at the envd commit
    2. Copies envd spec files to /output/envd/
    3. Runs buf generate on the proto files
    4. If api_commit differs, checks out that commit
    5. Copies spec/openapi.yml to /output/api/
    """
    print(f"==> Fetching specs from e2b-dev/infra")
    print(f"    envd commit: {envd_commit}")
    print(f"    api commit: {api_commit}")

    workdir = tempfile.mkdtemp(prefix="e2b-openapi-")

    # Subdirectories the container copies its results into.
    for subdir in ("envd", "api", "generated"):
        os.makedirs(os.path.join(workdir, subdir), exist_ok=True)

    # Build the shell script that runs inside Docker. A single shallow clone
    # is enough when both surfaces come from the same commit.
    if envd_commit == api_commit:
        docker_script = f"""
set -e
echo "--- Cloning e2b-dev/infra at {envd_commit} ---"
git clone --depth 1 --branch {envd_commit} {INFRA_REPO} /repo 2>/dev/null || {{
    git clone {INFRA_REPO} /repo
    cd /repo
    git checkout {envd_commit}
}}
cd /repo

echo "--- Copying envd specs ---"
cp -r {INFRA_ENVD_SPEC_DIR}/. /output/envd/

echo "--- Copying platform API spec ---"
cp {INFRA_API_SPEC} /output/api/openapi.yml

echo "--- Running buf generate ---"
cd {INFRA_ENVD_SPEC_DIR}
buf generate --template /config/buf.gen.yaml

echo "--- Done ---"
"""
    else:
        docker_script = f"""
set -e
echo "--- Cloning e2b-dev/infra ---"
git clone {INFRA_REPO} /repo
cd /repo

echo "--- Checking out envd commit: {envd_commit} ---"
git checkout {envd_commit}

echo "--- Copying envd specs ---"
cp -r {INFRA_ENVD_SPEC_DIR}/. /output/envd/

echo "--- Running buf generate ---"
cd {INFRA_ENVD_SPEC_DIR}
buf generate --template /config/buf.gen.yaml
cd /repo

echo "--- Checking out api commit: {api_commit} ---"
git checkout {api_commit}

echo "--- Copying platform API spec ---"
cp {INFRA_API_SPEC} /output/api/openapi.yml

echo "--- Done ---"
"""

    # Write buf.gen.yaml and the driver script where the container can see them.
    buf_gen_path = os.path.join(workdir, "buf.gen.yaml")
    with open(buf_gen_path, "w") as handle:
        handle.write(BUF_GEN_YAML)

    script_path = os.path.join(workdir, "run.sh")
    with open(script_path, "w") as handle:
        handle.write(docker_script)

    subprocess.run(
        [
            "docker", "run", "--rm",
            "-v", f"{workdir}:/output",
            "-v", f"{buf_gen_path}:/config/buf.gen.yaml:ro",
            "-v", f"{script_path}:/run.sh:ro",
            DOCKER_IMAGE,
            "sh", "/run.sh",
        ],
        check=True,
    )

    # Collect every OpenAPI YAML document buf generated.
    generated_dir = os.path.join(workdir, "generated")
    generated_docs: list[str] = []
    for root, _, files in os.walk(generated_dir):
        for name in sorted(files):
            if name.endswith((".yaml", ".yml")):
                path = os.path.join(root, name)
                rel = os.path.relpath(path, generated_dir)
                print(f"    Generated: {rel}")
                with open(path) as handle:
                    generated_docs.append(handle.read())

    if not generated_docs:
        print("ERROR: No files were generated by buf", file=sys.stderr)
        sys.exit(1)

    envd_spec_dir = os.path.join(workdir, "envd")
    envd_rest_spec = os.path.join(envd_spec_dir, "envd.yaml")
    api_spec = os.path.join(workdir, "api", "openapi.yml")

    # Fail fast when the clone/copy steps did not produce the expected files.
    for path, label in [(envd_rest_spec, "envd.yaml"), (api_spec, "openapi.yml")]:
        if not os.path.exists(path):
            print(f"ERROR: {label} not found at {path}", file=sys.stderr)
            sys.exit(1)

    return FetchedSpecs(
        envd_spec_dir=envd_spec_dir,
        envd_rest_spec=envd_rest_spec,
        api_spec=api_spec,
        generated_docs=generated_docs,
        tmpdir=workdir,
    )
# ---------------------------------------------------------------------------
# OpenAPI merging & post-processing
# ---------------------------------------------------------------------------

def load_yaml_file(path: str) -> str:
    """Load a YAML file and return its raw content."""
    print(f"==> Loading spec: {os.path.basename(path)}")
    with open(path) as f:
        return f.read()


def merge_specs(raw_docs: list[str], protected_paths: set[str] | None = None) -> dict[str, Any]:
    """Merge multiple raw YAML OpenAPI docs into a single spec.

    Args:
        raw_docs: Raw YAML strings to merge (order matters — later docs
            overwrite earlier ones for paths and component entries).
        protected_paths: Paths that should not be overwritten once set.
            Used to prevent the platform API from overwriting
            envd paths that share the same name (e.g. /health).

    Returns:
        A single OpenAPI 3.1 document with a fixed info block, the platform
        server, and the union of all paths/components/tags/security entries.
    """
    merged: dict[str, Any] = {
        "openapi": "3.1.0",
        "info": {
            "title": "E2B API",
            "version": "0.1.0",
            "description": (
                "Complete E2B developer API. "
                "Platform endpoints are served on api.e2b.app. "
                "Sandbox endpoints (envd) are served on {port}-{sandboxID}.e2b.app."
            ),
        },
        "servers": [PLATFORM_SERVER],
        "paths": {},
        "components": {},
    }

    for raw in raw_docs:
        doc = yaml.safe_load(raw)
        if not doc:
            continue

        for path, methods in doc.get("paths", {}).items():
            # Protected paths win on first write; later docs cannot clobber them.
            if protected_paths and path in protected_paths and path in merged["paths"]:
                continue
            merged["paths"][path] = methods

        for section, entries in doc.get("components", {}).items():
            if isinstance(entries, dict):
                merged["components"].setdefault(section, {}).update(entries)

        # Deduplicate tags: OpenAPI requires tag names to be unique, and the
        # same tag may be declared by several source docs (mirrors the
        # dedup already applied to `security` below).
        if "tags" in doc:
            existing_tags = merged.setdefault("tags", [])
            for tag in doc["tags"]:
                if tag not in existing_tags:
                    existing_tags.append(tag)

        if "security" in doc:
            existing = merged.setdefault("security", [])
            for entry in doc["security"]:
                if entry not in existing:
                    existing.append(entry)

    return merged


def tag_paths_with_server(
    spec: dict[str, Any],
    paths: set[str],
    server: dict[str, Any],
) -> None:
    """Attach a specific server override to a set of paths.

    OpenAPI 3.1 allows per-path server overrides so clients know which
    base URL to use for each endpoint.
    """
    for path, path_item in spec["paths"].items():
        if path in paths:
            path_item["servers"] = [server]


def fill_streaming_endpoints(spec: dict[str, Any], streaming_rpcs: list[RpcMethod]) -> None:
    """Replace empty {} streaming path items with proper OpenAPI definitions.

    protoc-gen-connect-openapi emits {} for streaming RPCs because OpenAPI
    has no native streaming representation. We detect these from the proto
    files and fill them in with proper request/response schemas.
    """
    for rpc in streaming_rpcs:
        if rpc.path in spec["paths"]:
            print(f"  Filling streaming endpoint: {rpc.path} ({rpc.streaming_label})")
            spec["paths"][rpc.path] = build_streaming_path(rpc)
+ """ + for rpc in streaming_rpcs: + if rpc.path in spec["paths"]: + print(f" Filling streaming endpoint: {rpc.path} ({rpc.streaming_label})") + spec["paths"][rpc.path] = build_streaming_path(rpc) + + +# Endpoints that don't require access token auth (matched as "METHOD/path") +AUTH_EXEMPT_ENDPOINTS = { + "get/health", +} + + +def apply_sandbox_auth(spec: dict[str, Any], envd_paths: set[str]) -> None: + """Ensure all envd/sandbox endpoints declare the SandboxAccessTokenAuth security. + + The hand-written envd.yaml already has security declarations, but the + proto-generated Connect RPC endpoints don't. Endpoints listed in + AUTH_EXEMPT_ENDPOINTS are left without auth requirements. + """ + auth_security = [{SANDBOX_AUTH_SCHEME: []}] + for path in envd_paths: + path_item = spec["paths"].get(path) + if not path_item: + continue + for method in ("get", "post", "put", "patch", "delete"): + op = path_item.get(method) + if not op: + continue + key = f"{method}{path}" + if key in AUTH_EXEMPT_ENDPOINTS: + op.pop("security", None) + else: + op["security"] = auth_security + + +def fix_security_schemes(spec: dict[str, Any]) -> None: + """Fix invalid apiKey securityScheme syntax. + + The source envd.yaml uses `scheme: header` which is wrong for + type: apiKey — OpenAPI requires `in: header` instead. + """ + for scheme in spec.get("components", {}).get("securitySchemes", {}).values(): + if scheme.get("type") == "apiKey" and "scheme" in scheme: + scheme["in"] = scheme.pop("scheme") + + +def setup_sandbox_auth_scheme(spec: dict[str, Any]) -> None: + """Define the SandboxAccessTokenAuth security scheme. + + Sandbox endpoints use X-Access-Token header (apiKey type), + not Bearer auth. The envd.yaml source defines an AccessTokenAuth + scheme that conflicts with the platform's AccessTokenAuth + (Authorization: Bearer), so we replace the envd one and keep + the platform one intact. 
+ """ + schemes = spec.setdefault("components", {}).setdefault("securitySchemes", {}) + # The platform API's AccessTokenAuth is Authorization: Bearer. + # Ensure it is correctly defined (the source spec may already have it). + schemes["AccessTokenAuth"] = { + "type": "http", + "scheme": "bearer", + } + # Define the sandbox-specific scheme + schemes[SANDBOX_AUTH_SCHEME] = { + "type": "apiKey", + "in": "header", + "name": "X-Access-Token", + "description": ( + "Sandbox access token (`envdAccessToken`) for authenticating requests to a running sandbox. " + "Returned by: " + "[POST /sandboxes](/docs/api-reference/sandboxes/create-a-sandbox) (on create), " + "[POST /sandboxes/{sandboxID}/connect](/docs/api-reference/sandboxes/connect-to-a-sandbox) (on connect), " + "[POST /sandboxes/{sandboxID}/resume](/docs/api-reference/sandboxes/resume-a-sandbox) (on resume), " + "and [GET /sandboxes/{sandboxID}](/docs/api-reference/sandboxes/get-a-sandbox) (for running or paused sandboxes)." + ), + } + + +# Mapping of (path, method) to desired operationId for the public docs. +# These are added at post-processing time to avoid breaking Go code generation +# (oapi-codegen derives type names from operationIds). +ENVD_OPERATION_IDS: dict[tuple[str, str], str] = { + ("/health", "get"): "getHealth", + ("/metrics", "get"): "getMetrics", + ("/init", "post"): "initSandbox", + ("/envs", "get"): "getEnvVars", + ("/files", "get"): "downloadFile", + ("/files", "post"): "uploadFile", +} + + +def add_operation_ids(spec: dict[str, Any]) -> None: + """Add operationIds to envd endpoints for clean documentation. + + These are added at post-processing time (not in the source spec) to + avoid changing generated Go type names. 
+ """ + count = 0 + for (path, method), op_id in ENVD_OPERATION_IDS.items(): + path_item = spec.get("paths", {}).get(path) + if not path_item: + continue + op = path_item.get(method) + if op and "operationId" not in op: + op["operationId"] = op_id + count += 1 + if count: + print(f"==> Added {count} operationIds to envd endpoints") + + +STREAMING_ENDPOINTS = { + "/filesystem.Filesystem/WatchDir", + "/process.Process/Start", + "/process.Process/Connect", + "/process.Process/StreamInput", +} + + +def fix_spec_issues(spec: dict[str, Any]) -> None: + """Fix known discrepancies between the source spec and the live API. + + These are upstream spec issues that we patch during post-processing + so the published docs match actual API behavior. + """ + schemas = spec.get("components", {}).get("schemas", {}) + paths = spec.get("paths", {}) + fixes = [] + + # 1. TemplateBuildStatus enum missing 'uploaded' + build_status = schemas.get("TemplateBuildStatus") + if build_status and "uploaded" not in build_status.get("enum", []): + build_status["enum"].append("uploaded") + fixes.append("TemplateBuildStatus: added 'uploaded' to enum") + + # 2. volumeMounts required but API doesn't always return it + for name in ("SandboxDetail", "ListedSandbox"): + schema = schemas.get(name, {}) + req = schema.get("required", []) + if "volumeMounts" in req: + req.remove("volumeMounts") + fixes.append(f"{name}: made 'volumeMounts' optional") + + # 3. LogLevel enum too strict — server returns empty/whitespace values + log_level = schemas.get("LogLevel") + if log_level: + if "enum" in log_level: + del log_level["enum"] + log_level["description"] = "Severity level for log entries (e.g. info, warn, error)" + fixes.append("LogLevel: removed enum constraint, fixed description") + + # 4. 
Metrics schema: add missing fields and set format: int64 on byte/MiB fields + metrics = schemas.get("Metrics") + if metrics and "properties" in metrics: + props = metrics["properties"] + if "mem_used_mib" not in props: + props["mem_used_mib"] = { + "type": "integer", + "description": "Used virtual memory in MiB", + } + fixes.append("Metrics: added 'mem_used_mib'") + if "mem_total_mib" not in props: + props["mem_total_mib"] = { + "type": "integer", + "description": "Total virtual memory in MiB", + } + fixes.append("Metrics: added 'mem_total_mib'") + # Byte and MiB values can exceed int32 — set format: int64 + int64_fields = ("mem_total", "mem_used", "disk_used", "disk_total", + "mem_used_mib", "mem_total_mib") + for field in int64_fields: + if field in props and props[field].get("format") != "int64": + props[field]["format"] = "int64" + fixes.append("Metrics: set format int64 on memory/disk fields") + + # 5. Streaming RPC endpoints: wrong content-type and missing headers + # Server requires application/connect+json with envelope framing, + # not application/json. 
+ connect_version_param = { + "name": "Connect-Protocol-Version", + "in": "header", + "required": True, + "schema": {"$ref": "#/components/schemas/connect-protocol-version"}, + } + connect_timeout_param = { + "name": "Connect-Timeout-Ms", + "in": "header", + "schema": {"$ref": "#/components/schemas/connect-timeout-header"}, + } + for ep_path in STREAMING_ENDPOINTS: + path_item = paths.get(ep_path, {}) + op = path_item.get("post") + if not op: + continue + # Fix request content-type + rb = op.get("requestBody", {}).get("content", {}) + if "application/json" in rb and "application/connect+json" not in rb: + rb["application/connect+json"] = rb.pop("application/json") + # Fix response content-type + for status_code, resp in op.get("responses", {}).items(): + if not isinstance(resp, dict): + continue + rc = resp.get("content", {}) + if "application/json" in rc and "application/connect+json" not in rc: + rc["application/connect+json"] = rc.pop("application/json") + # Add Connect-Protocol-Version and Connect-Timeout-Ms headers + params = op.setdefault("parameters", []) + has_cpv = any(p.get("name") == "Connect-Protocol-Version" for p in params) + if not has_cpv: + params.insert(0, connect_version_param) + params.insert(1, connect_timeout_param) + fixes.append(f"{ep_path}: content-type → application/connect+json, added Connect headers") + + # 6. EndEvent.exitCode not populated — API returns status string instead + end_event = schemas.get("process.ProcessEvent.EndEvent") + if end_event and "properties" in end_event: + ec = end_event["properties"].get("exitCode") + if ec: + ec["deprecated"] = True + ec["description"] = ( + "Deprecated: not populated by the server. " + "Parse the exit code from the `status` string (e.g. \"exit status 0\")." + ) + st = end_event["properties"].get("status") + if st and not st.get("description"): + st["description"] = ( + "Process exit status string (e.g. \"exit status 0\"). " + "Parse the integer exit code from this field." 
+ ) + fixes.append("EndEvent: marked exitCode as deprecated, documented status string") + + # 7. envdAccessToken description misleading — only returned when secure: true + for schema_name in ("Sandbox", "SandboxDetail"): + schema = schemas.get(schema_name, {}) + eat = schema.get("properties", {}).get("envdAccessToken") + if eat: + eat["nullable"] = True + eat["description"] = ( + "Access token for authenticating envd requests to this sandbox. " + "Only returned when the sandbox is created with `secure: true`. " + "Null for non-secure sandboxes (envd endpoints work without auth)." + ) + fixes.append("envdAccessToken: clarified secure-only behavior, marked nullable") + + # 8. Sandbox.domain always null — mark as deprecated + for schema_name in ("Sandbox", "SandboxDetail"): + schema = schemas.get(schema_name, {}) + dom = schema.get("properties", {}).get("domain") + if dom: + dom["deprecated"] = True + dom["description"] = ( + "Deprecated: always null. Construct sandbox URLs as " + "`https://{port}-{sandboxID}.e2b.app`." + ) + fixes.append("Sandbox.domain: marked as deprecated (always null)") + + # 9. GET /templates/{templateID}/files/{hash} returns 201, not 200 + files_path = paths.get("/templates/{templateID}/files/{hash}", {}) + files_get = files_path.get("get") + if files_get: + responses = files_get.get("responses", {}) + if "201" in responses and "200" not in responses: + responses["200"] = responses.pop("201") + responses["200"]["description"] = "Upload link for the tar file containing build layer files" + fixes.append("/templates/{templateID}/files/{hash}: changed 201 → 200 response") + + # 10. 
Generate operationId for platform endpoints that lack one + def _singularize(word: str) -> str: + """Simple singularization for common API resource names.""" + irregulars = {"aliases": "alias", "statuses": "status", "indices": "index"} + if word in irregulars: + return irregulars[word] + if word.endswith("sses"): + return word # "addresses" etc — skip + if word.endswith("ies"): + return word[:-3] + "y" + if word.endswith("ses") or word.endswith("xes") or word.endswith("zes"): + return word[:-2] + if word.endswith("s") and not word.endswith("ss"): + return word[:-1] + return word + + op_id_count = 0 + seen_ids: dict[str, str] = {} # operationId → path (for dedup) + for ep_path, path_item in paths.items(): + # Skip envd endpoints (already have operationIds) + if "/" in ep_path.lstrip("/") and "." in ep_path.split("/")[1]: + continue + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op or op.get("operationId"): + continue + # Build operationId from method + path segments + # Include path params to distinguish e.g. /sandboxes vs /sandboxes/{sandboxID} + # e.g. GET /sandboxes/{sandboxID}/logs → getSandboxLogs + # e.g. 
GET /v2/sandboxes → listSandboxesV2 + raw_segments = ep_path.strip("/").split("/") + version_suffix = "" + parts = [] + i = 0 + while i < len(raw_segments): + seg = raw_segments[i] + if seg in ("v2", "v3"): + version_suffix = seg.upper() + i += 1 + continue + if seg.startswith("{") and seg.endswith("}"): + # Path param — singularize the previous part if it was a collection + if parts: + parts[-1] = _singularize(parts[-1]) + i += 1 + continue + parts.append(seg) + i += 1 + + # For top-level list endpoints (GET /sandboxes, GET /templates), + # use "list" prefix instead of "get" to distinguish from single-resource GETs + prefix = method + if method == "get" and parts and not any( + s.startswith("{") for s in raw_segments[1:] + ): + # No path params → it's a list/collection endpoint + # But only if the last segment is plural (a collection name) + last = parts[-1] if parts else "" + if last.endswith("s") and last != "status": + prefix = "list" + + name = "".join(p.capitalize() for p in parts) + op_id = f"{prefix}{name}{version_suffix}" + + # Dedup: if collision, append a disambiguator + if op_id in seen_ids: + # Try adding "ById" for single-resource variants + if any(s.startswith("{") for s in raw_segments): + op_id = f"{method}{name}ById{version_suffix}" + if op_id in seen_ids: + op_id = f"{method}{name}{version_suffix}_{len(seen_ids)}" + + seen_ids[op_id] = ep_path + op["operationId"] = op_id + op_id_count += 1 + if op_id_count: + fixes.append(f"Generated operationId for {op_id_count} platform endpoints") + + # 11. Phantom deprecation reference: /v2/sandboxes/{sandboxID}/logs doesn't exist + logs_path = paths.get("/sandboxes/{sandboxID}/logs", {}) + logs_get = logs_path.get("get") + if logs_get and "/v2/" in logs_get.get("description", ""): + logs_get["description"] = "Get sandbox logs." + fixes.append("/sandboxes/{sandboxID}/logs: removed phantom /v2 deprecation reference") + + # 12. 
Truncated parameter descriptions on metrics endpoints + metrics_desc_suffix = " are returned." + for ep_path in paths: + for method in ("get", "post"): + op = (paths[ep_path] or {}).get(method) + if not op: + continue + for param in op.get("parameters", []): + if not isinstance(param, dict) or param.get("name") not in ("start", "end"): + continue + # Description could be on param or nested in schema + for target in (param, param.get("schema", {})): + desc = target.get("description", "") + if desc and desc.rstrip().endswith("the metrics"): + target["description"] = desc.rstrip() + metrics_desc_suffix + fixes.append(f"{ep_path}: completed truncated '{param['name']}' description") + + # 13. sandboxId → sandboxID casing in 502 error schema + # The 502 response defined on /health uses "sandboxId" (lowercase d) + health_path = paths.get("/health", {}) + health_get = health_path.get("get") + if health_get: + for status_code, resp in health_get.get("responses", {}).items(): + if not isinstance(resp, dict): + continue + for ct, media in resp.get("content", {}).items(): + schema = media.get("schema", {}) + props = schema.get("properties", {}) + if "sandboxId" in props and "sandboxID" not in props: + props["sandboxID"] = props.pop("sandboxId") + req = schema.get("required", []) + for i, r in enumerate(req): + if r == "sandboxId": + req[i] = "sandboxID" + fixes.append("502 error schema: sandboxId → sandboxID") + + # 14. /health missing security: [] and tags + if health_get: + if "security" not in health_get: + health_get["security"] = [] + fixes.append("/health: added security: [] (explicitly no auth)") + if "tags" not in health_get: + health_get["tags"] = ["health"] + fixes.append("/health: added 'health' tag") + + # 15. /files responses: inline $ref responses so Mintlify renders them correctly + # The upstream spec uses YAML anchors that cause issues, and some renderers + # don't resolve response-level $refs properly. 
+ comp_responses = spec.get("components", {}).get("responses", {}) + files_path = paths.get("/files", {}) + for method in ("get", "post"): + op = files_path.get(method) + if not op: + continue + responses = op.get("responses", {}) + for status_code, resp in list(responses.items()): + if not isinstance(resp, dict): + continue + # Inline any $ref to components/responses + ref = resp.get("$ref", "") + if ref.startswith("#/components/responses/"): + ref_name = ref.split("/")[-1] + resolved = comp_responses.get(ref_name) + if resolved: + # Replace with a copy so we don't mutate the shared component + responses[status_code] = copy.deepcopy(resolved) + # Also clean up any anchor-overlaid empty content + elif "$ref" not in resp and "content" in resp: + content = resp["content"] + for ct, media in list(content.items()): + s = media.get("schema", {}) + if s.get("description") == "Empty response": + del content[ct] + if not content: + del resp["content"] + fixes.append("/files: inlined response definitions for GET and POST") + + # 16. Missing type: object on schemas that have properties + obj_fixed = 0 + for schema_name, schema in schemas.items(): + if "properties" in schema and "type" not in schema and "allOf" not in schema and "oneOf" not in schema: + schema["type"] = "object" + obj_fixed += 1 + if obj_fixed: + fixes.append(f"Added type: object to {obj_fixed} schemas") + + # 17. end parameter nesting: description inside schema instead of sibling + for ep_path in paths: + for method in ("get", "post"): + op = (paths[ep_path] or {}).get(method) + if not op: + continue + for param in op.get("parameters", []): + if not isinstance(param, dict) or param.get("name") != "end": + continue + schema = param.get("schema", {}) + if "description" in schema and "description" not in param: + param["description"] = schema.pop("description") + fixes.append(f"{ep_path}: moved 'end' description out of schema") + + # 18. 
EntryInfo.type enum incomplete — missing "directory" + entry_info = schemas.get("EntryInfo") + if entry_info: + type_prop = entry_info.get("properties", {}).get("type") + if type_prop and type_prop.get("enum") == ["file"]: + type_prop["enum"] = ["file", "directory"] + fixes.append("EntryInfo.type: added 'directory' to enum") + + # 19. SandboxMetadata and EnvVars lack type: object + for name in ("SandboxMetadata", "EnvVars"): + schema = schemas.get(name, {}) + if "additionalProperties" in schema and "type" not in schema: + schema["type"] = "object" + fixes.append(f"{name}: added type: object") + + # 20. TemplateLegacy missing 'names' and 'buildStatus' fields + tpl_legacy = schemas.get("TemplateLegacy") + if tpl_legacy and "properties" in tpl_legacy: + props = tpl_legacy["properties"] + if "names" not in props: + props["names"] = { + "type": "array", + "description": "Names of the template (namespace/alias format when namespaced)", + "items": {"type": "string"}, + } + fixes.append("TemplateLegacy: added 'names' property") + if "buildStatus" not in props: + props["buildStatus"] = {"$ref": "#/components/schemas/TemplateBuildStatus"} + fixes.append("TemplateLegacy: added 'buildStatus' property") + + # 21. connect-protocol-version: redundant enum + const + cpv = schemas.get("connect-protocol-version") + if cpv and "enum" in cpv and "const" in cpv: + del cpv["enum"] + fixes.append("connect-protocol-version: removed redundant enum (const is sufficient)") + + # 22. filesystem.EntryInfo.size union type undocumented + fs_entry = schemas.get("filesystem.EntryInfo") + if fs_entry and "properties" in fs_entry: + size_prop = fs_entry["properties"].get("size") + if size_prop and isinstance(size_prop.get("type"), list): + size_prop["description"] = ( + "File size in bytes. Encoded as string for values exceeding " + "JSON number precision (int64)." + ) + fixes.append("filesystem.EntryInfo.size: documented integer/string union type") + + # 23. 
GET /health 502 uses application/connect+json — change to application/json + if health_get: + for status_code, resp in health_get.get("responses", {}).items(): + if not isinstance(resp, dict): + continue + content = resp.get("content", {}) + if "application/connect+json" in content and "application/json" not in content: + content["application/json"] = content.pop("application/connect+json") + fixes.append(f"/health {status_code}: content-type → application/json") + + # 24. PATCH /templates/{templateID} (deprecated) returns empty object — + # use TemplateUpdateResponse like v2 + patch_v1_path = paths.get("/templates/{templateID}", {}) + patch_v1 = patch_v1_path.get("patch") + if patch_v1: + resp_200 = patch_v1.get("responses", {}).get("200", {}) + # Replace the entire content dict (don't modify shared YAML anchor object) + resp_200["content"] = { + "application/json": { + "schema": {"$ref": "#/components/schemas/TemplateUpdateResponse"} + } + } + fixes.append("PATCH /templates/{templateID}: response → TemplateUpdateResponse") + + # 25. POST /sandboxes/{sandboxID}/refreshes missing 500 response + refreshes_path = paths.get("/sandboxes/{sandboxID}/refreshes", {}) + refreshes_post = refreshes_path.get("post") + if refreshes_post: + responses = refreshes_post.get("responses", {}) + if "500" not in responses: + responses["500"] = {"$ref": "#/components/responses/500"} + fixes.append("/sandboxes/{sandboxID}/refreshes: added 500 response") + + # 25. 
Add per-status error examples to every error response in every operation + status_examples = { + "400": {"code": 400, "message": "Bad request: invalid or missing request parameters"}, + "401": {"code": 401, "message": "Authentication error: missing or invalid API key"}, + "403": {"code": 403, "message": "Forbidden: insufficient permissions"}, + "404": {"code": 404, "message": "Not found: the requested resource does not exist"}, + "409": {"code": 409, "message": "Conflict: the resource is in a conflicting state"}, + "500": {"code": 500, "message": "Server error: an unexpected error occurred"}, + "507": {"code": 507, "message": "Insufficient storage: not enough disk space"}, + } + for path_item in spec.get("paths", {}).values(): + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op: + continue + for status_code, resp in op.get("responses", {}).items(): + if not isinstance(resp, dict) or "$ref" in resp: + continue + example = status_examples.get(str(status_code)) + if not example: + continue + json_media = resp.get("content", {}).get("application/json") + if not json_media: + continue + schema = json_media.get("schema", {}) + # Only add example if schema references Error + ref = schema.get("$ref", "") + if ref.endswith("/Error") and "example" not in json_media: + json_media["example"] = example + # Also set examples on component-level responses + comp_responses = spec.get("components", {}).get("responses", {}) + for status_code, example in status_examples.items(): + resp = comp_responses.get(status_code) + if not resp or "content" not in resp: + continue + json_media = resp["content"].get("application/json") + if json_media and "example" not in json_media: + json_media["example"] = example + fixes.append("Error responses: added per-status example values") + + # 26. 
Add short summary fields to platform endpoints for Mintlify sidebar names + SUMMARIES: dict[tuple[str, str], str] = { + # Sandboxes + ("/sandboxes", "get"): "List sandboxes", + ("/sandboxes", "post"): "Create sandbox", + ("/v2/sandboxes", "get"): "List sandboxes (v2)", + ("/sandboxes/metrics", "get"): "List sandbox metrics", + ("/sandboxes/{sandboxID}/logs", "get"): "Get sandbox logs", + ("/sandboxes/{sandboxID}", "get"): "Get sandbox", + ("/sandboxes/{sandboxID}", "delete"): "Delete sandbox", + ("/sandboxes/{sandboxID}/metrics", "get"): "Get sandbox metrics", + ("/sandboxes/{sandboxID}/pause", "post"): "Pause sandbox", + ("/sandboxes/{sandboxID}/resume", "post"): "Resume sandbox", + ("/sandboxes/{sandboxID}/connect", "post"): "Connect to sandbox", + ("/sandboxes/{sandboxID}/timeout", "post"): "Set sandbox timeout", + ("/sandboxes/{sandboxID}/refreshes", "post"): "Refresh sandbox", + # Templates + ("/v3/templates", "post"): "Create template (v3)", + ("/v2/templates", "post"): "Create template (v2)", + ("/templates/{templateID}/files/{hash}", "get"): "Get build upload link", + ("/templates", "get"): "List templates", + ("/templates", "post"): "Create template", + ("/templates/{templateID}", "get"): "Get template", + ("/templates/{templateID}", "post"): "Rebuild template", + ("/templates/{templateID}", "delete"): "Delete template", + ("/templates/{templateID}", "patch"): "Update template", + ("/templates/{templateID}/builds/{buildID}", "post"): "Start build", + ("/v2/templates/{templateID}/builds/{buildID}", "post"): "Start build (v2)", + ("/v2/templates/{templateID}", "patch"): "Update template (v2)", + ("/templates/{templateID}/builds/{buildID}/status", "get"): "Get build status", + ("/templates/{templateID}/builds/{buildID}/logs", "get"): "Get build logs", + ("/templates/aliases/{alias}", "get"): "Get template by alias", + # Tags + ("/templates/tags", "post"): "Assign tags", + ("/templates/tags", "delete"): "Delete tags", + ("/templates/{templateID}/tags", "get"): 
"List template tags", + # Teams + ("/teams", "get"): "List teams", + ("/teams/{teamID}/metrics", "get"): "Get team metrics", + ("/teams/{teamID}/metrics/max", "get"): "Get team metrics max", + } + summary_count = 0 + for (path_str, method), summary in SUMMARIES.items(): + op = paths.get(path_str, {}).get(method) + if op: + op["summary"] = summary + summary_count += 1 + if summary_count: + fixes.append(f"Added summary to {summary_count} platform endpoints") + + # 27. Replace nullable: true with OpenAPI 3.1.0 type arrays + # In 3.1.0, nullable was removed. Use type: ["string", "null"] instead, + # or oneOf with type: 'null' for $ref properties. + nullable_fixed = 0 + for schema_name, schema in schemas.items(): + if "properties" not in schema: + continue + for prop_name, prop in schema["properties"].items(): + if not isinstance(prop, dict) or not prop.pop("nullable", False): + continue + # allOf + nullable → oneOf: [allOf[...], type: 'null'] + if "allOf" in prop: + all_of = prop.pop("allOf") + prop["oneOf"] = all_of + [{"type": "null"}] + # plain type + nullable → type: [original, "null"] + elif "type" in prop: + orig_type = prop["type"] + if isinstance(orig_type, list): + if "null" not in orig_type: + orig_type.append("null") + else: + prop["type"] = [orig_type, "null"] + # $ref + nullable → oneOf: [$ref, type: 'null'] + elif "$ref" in prop: + ref = prop.pop("$ref") + prop["oneOf"] = [{"$ref": ref}, {"type": "null"}] + # additionalProperties + nullable (e.g. McpConfig) + elif "additionalProperties" in prop: + prop["type"] = ["object", "null"] + nullable_fixed += 1 + if nullable_fixed: + fixes.append(f"Replaced nullable: true with 3.1.0 type arrays on {nullable_fixed} properties") + + if fixes: + print(f"==> Fixed {len(fixes)} spec issues:") + for f in fixes: + print(f" {f}") + + +def _strip_supabase_security(path_item: dict[str, Any]) -> None: + """Remove Supabase security entries from all operations in a path item. 
+ + Each operation's security list is an OR of auth options. We remove + any option that references a Supabase scheme, keeping the rest. + """ + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op or "security" not in op: + continue + op["security"] = [ + sec_req for sec_req in op["security"] + if not any("supabase" in key.lower() for key in sec_req) + ] + + +def _has_admin_token_security(path_item: dict[str, Any]) -> bool: + """Check if any operation in a path item references AdminToken auth.""" + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op: + continue + for sec_req in op.get("security", []): + if any("admin" in key.lower() for key in sec_req): + return True + return False + + +def filter_paths(spec: dict[str, Any]) -> None: + """Clean up paths that should not appear in the public spec. + + - Removes access-token and api-key endpoints + - Removes endpoints using AdminToken auth + - Strips Supabase auth entries from all operations + - Removes Supabase and AdminToken securityScheme definitions + """ + # Remove excluded paths + excluded_prefixes = ("/access-tokens", "/api-keys", "/volumes", "/snapshots") + excluded_exact = {"/v2/sandboxes/{sandboxID}/logs", "/init", + "/sandboxes/{sandboxID}/snapshots"} + to_remove = [ + p for p in spec["paths"] + if p.startswith(excluded_prefixes) or p in excluded_exact + ] + + # Remove admin-only paths + for path, path_item in spec["paths"].items(): + if path not in to_remove and _has_admin_token_security(path_item): + to_remove.append(path) + + for path in to_remove: + del spec["paths"][path] + if to_remove: + print(f"==> Removed {len(to_remove)} paths (volumes, snapshots, admin, internal)") + + # Strip supabase security entries from all operations + for path_item in spec["paths"].values(): + _strip_supabase_security(path_item) + + # Remove supabase and admin security scheme definitions + 
def remove_orphaned_schemas(spec: dict[str, Any]) -> None:
    """Remove component schemas that are not referenced anywhere in the spec.

    Runs iteratively since removing schemas may orphan others.

    Detection is textual: everything that can hold a $ref is serialized to
    YAML once per pass and searched for the exact pattern "schemas/<Name>'".
    yaml always quotes '#/components/...' values (they start with '#'), so
    the closing quote reliably terminates the name and avoids substring
    collisions (e.g. "schemas/Foo" matching inside "schemas/FooBar").
    """
    all_orphaned: list[str] = []

    while True:
        # Serialize paths + top-level refs (excluding components.schemas
        # itself — a schema's own definition must not count as a use).
        spec_text = ""
        for section in ("paths", "security"):
            if section in spec:
                spec_text += yaml.dump(spec[section], default_flow_style=False)
        for section, entries in spec.get("components", {}).items():
            if section != "schemas":
                spec_text += yaml.dump(entries, default_flow_style=False)

        schemas = spec.get("components", {}).get("schemas", {})
        # Dump each schema once per pass. (The previous version dumped every
        # other schema inside the candidate loop — O(n^2) serializations —
        # and also built an unused dump of the whole schemas mapping.)
        schema_dumps = {
            name: yaml.dump(schema, default_flow_style=False)
            for name, schema in schemas.items()
        }

        orphaned = []
        for name in list(schemas.keys()):
            ref_pattern = f"schemas/{name}'"
            # Referenced from paths/responses/params?
            if ref_pattern in spec_text:
                continue
            # Referenced from another schema (self-references don't count)?
            used = any(
                ref_pattern in dump
                for other, dump in schema_dumps.items()
                if other != name
            )
            if not used:
                orphaned.append(name)

        if not orphaned:
            break

        for name in orphaned:
            del schemas[name]
        all_orphaned.extend(orphaned)

    if all_orphaned:
        print(f"==> Removed {len(all_orphaned)} orphaned schemas: {', '.join(sorted(all_orphaned))}")


# Shared 502 response injected into every sandbox (envd) operation.
# NOTE: this single dict object is shared by reference across operations —
# treat it as immutable.
SANDBOX_NOT_FOUND_RESPONSE = {
    "description": "Sandbox not found",
    "content": {
        "application/json": {
            "schema": {
                "type": "object",
                "required": ["sandboxId", "message", "code"],
                "properties": {
                    "sandboxId": {
                        "type": "string",
                        "description": "Identifier of the sandbox",
                        "example": "i1234abcd5678efgh90jk",
                    },
                    "message": {
                        "type": "string",
                        "description": "Error message",
                        "example": "The sandbox was not found",
                    },
                    "code": {
                        "type": "integer",
                        "description": "Error code",
                        "example": 502,
                    },
                },
            }
        }
    },
}


# Placeholder content block for 2xx responses that declare no body
# (Mintlify requires a content block to render the response at all).
EMPTY_RESPONSE_CONTENT = {
    "application/json": {
        "schema": {"type": "object", "description": "Empty response"}
    }
}
+ """ + count = 0 + for path in envd_paths: + path_item = spec["paths"].get(path) + if not path_item: + continue + for method in ("get", "post", "put", "patch", "delete"): + op = path_item.get(method) + if op and "502" not in op.get("responses", {}): + op.setdefault("responses", {})["502"] = SANDBOX_NOT_FOUND_RESPONSE + count += 1 + if count: + print(f"==> Added 502 sandbox-not-found response to {count} operations") + + +def fill_empty_responses(spec: dict[str, Any]) -> None: + """Add an empty content block to any 2xx response that lacks one. + + Mintlify requires a content block on every response to render correctly. + """ + filled = 0 + stripped = 0 + for path, path_item in spec.get("paths", {}).items(): + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op: + continue + responses = op.get("responses", {}) + # Remove "default" responses (generic Connect error envelopes) + if "default" in responses: + del responses["default"] + stripped += 1 + for status, resp in responses.items(): + if not isinstance(resp, dict): + continue + # 204 = No Content: remove any content block + if str(status) == "204": + resp.pop("content", None) + continue + # Skip responses that use $ref (content comes from the referenced response) + if "$ref" in resp: + continue + if str(status).startswith("2") and "content" not in resp: + resp["content"] = EMPTY_RESPONSE_CONTENT + filled += 1 + if filled: + print(f"==> Added empty content block to {filled} responses") + if stripped: + print(f"==> Removed {stripped} default error responses") + + +def rename_and_reorder_tags(spec: dict[str, Any]) -> None: + """Rename tags and reorder them for the documentation sidebar.""" + TAG_RENAME = { + "sandboxes": "Sandboxes", + "templates": "Templates", + "filesystem.Filesystem": "Filesystem", + "process.Process": "Process", + "tags": "Tags", + "auth": "Teams", + "health": "Others", + "files": "Others", + } + TAG_ORDER = ["Sandboxes", 
"Templates", "Filesystem", "Process", "Tags", "Teams", "Others"] + + # Rename tags on all operations; tag untagged ones as "Others" + for path_item in spec.get("paths", {}).values(): + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if not op: + continue + if "tags" not in op: + op["tags"] = ["Others"] + else: + op["tags"] = [TAG_RENAME.get(t, t) for t in op["tags"]] + + # Rebuild the top-level tags list in the desired order + spec["tags"] = [{"name": t} for t in TAG_ORDER] + + # Reorder paths so Mintlify renders sections in the desired order. + # Mintlify uses path order (not the tags array) to determine sidebar order. + tag_priority = {t: i for i, t in enumerate(TAG_ORDER)} + + def path_sort_key(item: tuple[str, dict]) -> int: + path_str, path_item = item + for method in ("get", "post", "put", "patch", "delete", "head", "options"): + op = path_item.get(method) + if op and "tags" in op: + return tag_priority.get(op["tags"][0], len(TAG_ORDER)) + return len(TAG_ORDER) + + spec["paths"] = dict(sorted(spec["paths"].items(), key=path_sort_key)) + print(f"==> Renamed and reordered {len(TAG_ORDER)} tags") + + +# --------------------------------------------------------------------------- +# Entrypoint +# --------------------------------------------------------------------------- + +def main() -> None: + # Parse CLI args + envd_commit = "main" + api_commit = "main" + output_path = os.path.join(DOCS_REPO_ROOT, "openapi-public.yml") + + args = sys.argv[1:] + i = 0 + while i < len(args): + if args[i] == "--envd-commit" and i + 1 < len(args): + envd_commit = args[i + 1] + i += 2 + elif args[i] == "--api-commit" and i + 1 < len(args): + api_commit = args[i + 1] + i += 2 + elif args[i] == "--output" and i + 1 < len(args): + output_path = args[i + 1] + i += 2 + elif args[i] in ("--help", "-h"): + print(__doc__) + sys.exit(0) + else: + print(f"Unknown argument: {args[i]}", file=sys.stderr) + print(__doc__, file=sys.stderr) 
def main() -> None:
    """CLI entrypoint: build the image, fetch/merge specs, fix them, write output."""
    import shutil  # needed only for cleanup; hoisted out of the finally block

    # --- Parse CLI args ---
    envd_commit = "main"
    api_commit = "main"
    output_path = os.path.join(DOCS_REPO_ROOT, "openapi-public.yml")

    args = sys.argv[1:]
    i = 0
    while i < len(args):
        arg = args[i]
        # Each branch advances `i` itself. The previous version additionally
        # did `i += 1` at the bottom of the loop, so every two-token flag
        # skipped the following argument (e.g. `--envd-commit A --api-commit B`
        # failed with "Unknown argument: B").
        if arg == "--envd-commit" and i + 1 < len(args):
            envd_commit = args[i + 1]
            i += 2
        elif arg == "--api-commit" and i + 1 < len(args):
            api_commit = args[i + 1]
            i += 2
        elif arg == "--output" and i + 1 < len(args):
            output_path = args[i + 1]
            i += 2
        elif arg in ("--help", "-h"):
            print(__doc__)
            sys.exit(0)
        else:
            print(f"Unknown argument: {arg}", file=sys.stderr)
            print(__doc__, file=sys.stderr)
            sys.exit(2)

    print("=" * 60)
    print(" E2B OpenAPI Reference Generator")
    print("=" * 60)
    print(f" Source repo: {INFRA_REPO}")
    print(f" envd commit: {envd_commit}")
    print(f" api commit: {api_commit}")
    print(f" Output: {output_path}")
    print()

    # Build Docker image
    docker_build_image()

    # Fetch specs and generate proto OpenAPI
    specs = docker_fetch_and_generate(envd_commit, api_commit)

    try:
        # --- Sandbox API (envd) ---
        envd_rest_doc = load_yaml_file(specs.envd_rest_spec)
        proto_docs = specs.generated_docs

        # Track which paths come from envd so we can set their server
        envd_raw_docs = [envd_rest_doc] + proto_docs
        envd_paths: set[str] = set()
        for raw in envd_raw_docs:
            doc = yaml.safe_load(raw)
            if doc and "paths" in doc:
                envd_paths.update(doc["paths"].keys())

        # --- Platform API ---
        api_doc = load_yaml_file(specs.api_spec)

        # --- Merge everything ---
        # Order: envd first, then platform API (platform schemas take precedence
        # for shared names like Error since they're more complete).
        # Protect envd paths so the platform API doesn't overwrite them
        # (e.g. /health exists in both but the envd version is authoritative).
        merged = merge_specs(envd_raw_docs + [api_doc], protected_paths=envd_paths)

        # Auto-detect and fill streaming RPC endpoints
        streaming_rpcs = find_streaming_rpcs(specs.envd_spec_dir)
        print(f"==> Found {len(streaming_rpcs)} streaming RPCs in proto files")
        fill_streaming_endpoints(merged, streaming_rpcs)
        for rpc in streaming_rpcs:
            envd_paths.add(rpc.path)

        # Attach per-path server overrides so each path has exactly one server
        tag_paths_with_server(merged, envd_paths, SANDBOX_SERVER)
        platform_paths = set(merged["paths"].keys()) - envd_paths
        tag_paths_with_server(merged, platform_paths, PLATFORM_SERVER)

        # Ensure all sandbox endpoints declare auth
        apply_sandbox_auth(merged, envd_paths)

        # Add 502 sandbox-not-found to all envd endpoints
        add_sandbox_not_found(merged, envd_paths)

        # Fix known issues
        fix_security_schemes(merged)
        setup_sandbox_auth_scheme(merged)
        add_operation_ids(merged)
        fix_spec_issues(merged)

        # Remove internal/unwanted paths
        filter_paths(merged)

        # Ensure all 2xx responses have a content block (required by Mintlify)
        fill_empty_responses(merged)

        # Clean up unreferenced schemas left over from filtered paths
        remove_orphaned_schemas(merged)

        # Rename and reorder tags for documentation sidebar
        rename_and_reorder_tags(merged)

        # Write output
        with open(output_path, "w") as f:
            yaml.dump(merged, f, default_flow_style=False, sort_keys=False, allow_unicode=True)

        print(f"\n==> Written to {output_path}")

    finally:
        # Clean up temp directory
        shutil.rmtree(specs.tmpdir, ignore_errors=True)


if __name__ == "__main__":
    main()
+every endpoint and deeply comparing response schemas. + +Usage: + E2B_API_KEY=e2b_... python3 scripts/validate_api_reference.py [options] + +Environment: + E2B_API_KEY Required. API key for X-API-Key auth. + E2B_ACCESS_TOKEN Optional. Bearer token for AccessTokenAuth (needed for + GET /teams and legacy template endpoints). + E2B_TEAM_ID Optional. Team ID (auto-discovered if not set). + +Options: + --output FILE Report output path (default: openapi-validation-report.md) + --verbose Show detailed request/response logs + --skip-sandbox Skip phases requiring sandbox creation + --phase N Run only phase N (1-12) + --timeout SECS HTTP request timeout (default: 15) + --help Show this help message + +Dependencies: stdlib + PyYAML +""" + +import json +import os +import re +import ssl +import struct +import sys +import time +import urllib.error +import urllib.parse +import urllib.request +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path + +import yaml + +# --------------------------------------------------------------------------- +# CONFIG +# --------------------------------------------------------------------------- + +E2B_DOMAIN = os.getenv("E2B_DOMAIN", "e2b.app") +PLATFORM_URL = f"https://api.{E2B_DOMAIN}" +ENVD_PORT = 49983 +SPEC_PATH = Path(__file__).resolve().parent.parent / "openapi-public.yml" + +FAKE_SANDBOX_ID = "nonexistent-sandbox-000000" +FAKE_TEMPLATE_ID = "nonexistent-template-000000" +FAKE_BUILD_ID = "00000000-0000-0000-0000-000000000000" +FAKE_ALIAS = "nonexistent-alias-000000" +FAKE_HASH = "0" * 64 +FAKE_TEAM_ID = "00000000-0000-0000-0000-000000000000" + +# RFC3339 regex for date-time format validation +RFC3339_RE = re.compile( + r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[+-]\d{2}:\d{2})$" +) +UUID_RE = re.compile( + r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", re.I +) + + +# --------------------------------------------------------------------------- +# DATA STRUCTURES 
+# --------------------------------------------------------------------------- + + +@dataclass +class Finding: + severity: str # "critical" | "minor" + category: str # "schema" | "status_code" | "extra_field" | "missing_field" | "type_mismatch" | "auth" | "format" + endpoint: str # "POST /sandboxes" + message: str + expected: str = "" + actual: str = "" + + +@dataclass +class EndpointResult: + method: str + path: str + tested: bool = False + expected_status: int = 0 + actual_status: int = 0 + findings: list[Finding] = field(default_factory=list) + skip_reason: str | None = None + response_body: object = None + surface: str = "platform" # "platform" | "sandbox" + + +@dataclass +class SpecIssue: + category: str + description: str + location: str = "" + + +# --------------------------------------------------------------------------- +# HTTP HELPERS +# --------------------------------------------------------------------------- + +_ctx = ssl.create_default_context() +VERBOSE = False + + +def _redact(headers: dict | None) -> dict: + if not headers: + return {} + out = {} + for k, v in headers.items(): + if k.lower() in ("x-api-key", "x-access-token", "authorization"): + out[k] = v[:12] + "..." if len(v) > 12 else "***" + else: + out[k] = v + return out + + +def _parse_connect_frames(data: bytes): + """Parse Connect streaming envelope frames. 
Returns first data frame as parsed JSON, or list of frames.""" + frames = [] + offset = 0 + while offset + 5 <= len(data): + flags = data[offset] + length = struct.unpack(">I", data[offset + 1:offset + 5])[0] + offset += 5 + if offset + length > len(data): + break + payload = data[offset:offset + length] + offset += length + if flags & 0x02: + # End-of-stream / trailers frame + try: + frames.append({"_trailers": json.loads(payload.decode("utf-8", errors="replace"))}) + except json.JSONDecodeError: + frames.append({"_trailers_raw": payload.decode("utf-8", errors="replace")}) + else: + # Data frame + try: + frames.append(json.loads(payload.decode("utf-8", errors="replace"))) + except json.JSONDecodeError: + pass + if not frames: + return None + if len(frames) == 1: + return frames[0] + return frames + + +def http_request( + method: str, + url: str, + headers: dict | None = None, + params: dict | None = None, + body: dict | None = None, + raw_body: bytes | None = None, + content_type: str | None = None, + timeout: int = 15, +) -> tuple[int, dict | str | list | None, dict]: + """Make HTTP request. Returns (status, parsed_body, response_headers).""" + if params: + url += "?" + urllib.parse.urlencode(params, doseq=True) + + if VERBOSE: + print(f" >>> {method} {url}") + if headers: + print(f" Headers: {_redact(headers)}") + if body is not None: + s = json.dumps(body, indent=2) + print(f" Body: {s[:300]}{'...' 
if len(s) > 300 else ''}") + + if raw_body is not None: + data = raw_body + elif body is not None: + data = json.dumps(body).encode("utf-8") + else: + data = None + + req = urllib.request.Request(url, data=data, method=method) + for k, v in (headers or {}).items(): + req.add_header(k, v) + if content_type: + req.add_header("Content-Type", content_type) + elif body is not None and not req.has_header("Content-type"): + req.add_header("Content-Type", "application/json") + + resp_headers = {} + raw_bytes = b"" + raw = "" + status = 0 + try: + resp = urllib.request.urlopen(req, timeout=timeout, context=_ctx) + status = resp.status + resp_headers = dict(resp.headers) + # Read in chunks to capture partial streaming data on timeout. + # For streaming responses, read() blocks until stream ends or timeout. + # Reading in small chunks allows us to capture early frames. + chunks = [] + import socket as _sock + try: + while True: + # read1() returns after a single system read call, unlike read() + # which blocks until it gets the full amount. This is essential + # for streaming responses where data arrives in small frames. 
+ chunk = resp.read1(65536) if hasattr(resp, "read1") else resp.read(65536) + if not chunk: + break + chunks.append(chunk) + except (_sock.timeout, TimeoutError, OSError): + pass # Timeout during streaming read — use whatever we got + raw_bytes = b"".join(chunks) + raw = raw_bytes.decode("utf-8", errors="replace") + except urllib.error.HTTPError as e: + status = e.code + resp_headers = dict(e.headers) if e.headers else {} + raw_bytes = e.read() if e.fp else b"" + raw = raw_bytes.decode("utf-8", errors="replace") + except Exception as e: + if VERBOSE: + print(f" <<< ERROR: {e}") + return 0, f"Connection error: {e}", {} + + # Try to decode Connect streaming envelopes if content type suggests it + resp_ct = resp_headers.get("Content-Type", "") or resp_headers.get("content-type", "") + parsed = None + if "application/connect+" in resp_ct and len(raw_bytes) >= 5: + parsed = _parse_connect_frames(raw_bytes) + if parsed is None: + try: + parsed = json.loads(raw) if raw else None + except json.JSONDecodeError: + parsed = raw[:500] if raw else None + + if VERBOSE: + print(f" <<< {status}") + if parsed is not None: + s = json.dumps(parsed, indent=2) if isinstance(parsed, (dict, list)) else str(parsed) + print(f" Response: {s[:500]}{'...' 
if len(s) > 500 else ''}") + + return status, parsed, resp_headers + + +def ctrl(method: str, path: str, **kwargs): + """Platform API request.""" + return http_request(method, f"{PLATFORM_URL}{path}", **kwargs) + + +def envd(method: str, sandbox_id: str, path: str, **kwargs): + """Sandbox (envd) API request.""" + url = f"https://{ENVD_PORT}-{sandbox_id}.{E2B_DOMAIN}{path}" + return http_request(method, url, **kwargs) + + +def api_key_hdr(api_key: str) -> dict: + return {"X-API-Key": api_key} + + +def bearer_hdr(token: str) -> dict: + return {"Authorization": f"Bearer {token}"} + + +def sandbox_hdr(token: str) -> dict: + """Headers for sandbox REST calls (X-Access-Token + Basic user identity).""" + return { + "X-Access-Token": token, + "Authorization": "Basic dXNlcjo=", + } + + +def connect_hdr(token: str | None = None) -> dict: + """Headers for Connect RPC unary calls.""" + h = { + "Connect-Protocol-Version": "1", + "Content-Type": "application/json", + "Authorization": "Basic dXNlcjo=", + } + if token: + h["X-Access-Token"] = token + return h + + +def connect_stream_hdr(token: str | None = None) -> dict: + """Headers for Connect RPC streaming calls (server-stream / client-stream).""" + h = { + "Connect-Protocol-Version": "1", + "Content-Type": "application/connect+json", + "Authorization": "Basic dXNlcjo=", + } + if token: + h["X-Access-Token"] = token + return h + + +def connect_envelope(payload: dict) -> bytes: + """Wrap a JSON payload in a Connect streaming envelope (flags + uint32 length + data).""" + data = json.dumps(payload).encode("utf-8") + return struct.pack(">BI", 0, len(data)) + data + + +def multipart_upload(sandbox_id: str, file_path: str, content: bytes, token: str | None = None) -> tuple[int, object, dict]: + """Upload file via multipart/form-data.""" + boundary = "----E2BValidation" + str(int(time.time())) + body_parts = [] + body_parts.append(f"--{boundary}".encode()) + body_parts.append(f'Content-Disposition: form-data; name="file"; 
def multipart_upload(sandbox_id: str, file_path: str, content: bytes, token: str | None = None) -> tuple[int, object, dict]:
    """Upload a file to the sandbox via multipart/form-data POST /files."""
    boundary = "----E2BValidation" + str(int(time.time()))
    # Extracted to a variable: re-using double quotes inside an f-string
    # expression that is itself double-quote-delimited is a SyntaxError
    # before Python 3.12.
    filename = file_path.split("/")[-1]
    body_parts = [
        f"--{boundary}".encode(),
        f'Content-Disposition: form-data; name="file"; filename="{filename}"'.encode(),
        b"Content-Type: application/octet-stream",
        b"",
        content,
        f"--{boundary}--".encode(),
    ]
    raw_body = b"\r\n".join(body_parts)

    headers = {"Authorization": "Basic dXNlcjo="}
    if token:
        headers["X-Access-Token"] = token

    url = f"https://{ENVD_PORT}-{sandbox_id}.{E2B_DOMAIN}/files"
    params = {"path": file_path}
    return http_request(
        "POST", url, headers=headers, params=params,
        raw_body=raw_body,
        content_type=f"multipart/form-data; boundary={boundary}",
    )


# ---------------------------------------------------------------------------
# SPEC LOADING & REF RESOLUTION
# ---------------------------------------------------------------------------

def load_spec(path: Path) -> dict:
    """Load and parse the YAML spec at *path*."""
    with open(path) as f:
        return yaml.safe_load(f)


def resolve_ref(spec: dict, ref: str, _seen: set | None = None) -> dict:
    """Resolve a $ref like '#/components/schemas/Foo' into the actual schema.

    Follows chained $refs; returns {} on cycles or unresolvable paths.
    """
    if _seen is None:
        _seen = set()
    if ref in _seen:
        return {}  # cycle
    _seen.add(ref)

    parts = ref.lstrip("#/").split("/")
    node = spec
    for p in parts:
        node = node.get(p, {})
        if not isinstance(node, dict):
            return {}

    # If resolved node itself has a $ref, follow it
    if "$ref" in node:
        return resolve_ref(spec, node["$ref"], _seen)
    return node


def resolve_schema(spec: dict, schema: dict) -> dict:
    """Resolve a schema, following $ref if present."""
    if not isinstance(schema, dict):
        return schema
    if "$ref" in schema:
        return resolve_ref(spec, schema["$ref"])
    return schema


# ---------------------------------------------------------------------------
# SCHEMA VALIDATION ENGINE
# ---------------------------------------------------------------------------

def validate_schema(actual, schema: dict, spec: dict, path: str = "$") -> list[Finding]:
    """Validate actual data against an OpenAPI 3.1 schema.

    Returns a list of findings (mismatches); empty list means conformant.
    Only the first 10 array items are checked, and oneOf/anyOf mismatches
    are reported as minor rather than critical.
    """
    findings = []
    if not isinstance(schema, dict):
        return findings

    # Resolve $ref
    schema = resolve_schema(spec, schema)

    # Handle nullable (OpenAPI 3.0 style)
    is_nullable = schema.get("nullable", False)
    schema_type = schema.get("type")

    # OpenAPI 3.1 nullable as type list: type: [string, "null"]
    if isinstance(schema_type, list):
        if actual is None:
            if "null" in schema_type:
                return findings
            findings.append(Finding(
                "critical", "type_mismatch", "",
                f"At {path}: got null but type {schema_type} doesn't include null",
                str(schema_type), "null",
            ))
            return findings
        # Filter out "null" to get the real type
        real_types = [t for t in schema_type if t != "null"]
        if len(real_types) == 1:
            schema_type = real_types[0]
        else:
            # Multiple types (union) - check if any match
            for rt in real_types:
                test_schema = {**schema, "type": rt}
                test_findings = validate_schema(actual, test_schema, spec, path)
                if not test_findings:
                    return []
            findings.append(Finding(
                "critical", "type_mismatch", "",
                f"At {path}: value doesn't match any of types {real_types}",
                str(real_types), type(actual).__name__,
            ))
            return findings

    if actual is None:
        if is_nullable:
            return findings
        # Some fields may legitimately be absent (not required) — handled by caller
        return findings

    # Handle allOf
    if "allOf" in schema:
        for i, sub in enumerate(schema["allOf"]):
            findings.extend(validate_schema(actual, sub, spec, f"{path}/allOf[{i}]"))
        return findings

    # Handle oneOf
    if "oneOf" in schema:
        matched = False
        for sub in schema["oneOf"]:
            sub_resolved = resolve_schema(spec, sub)
            sub_findings = validate_schema(actual, sub_resolved, spec, path)
            if not sub_findings:
                matched = True
                break
        if not matched:
            # Don't report as critical for oneOf - just note it
            findings.append(Finding(
                "minor", "schema", "",
                f"At {path}: value didn't match any oneOf variant",
                "one of oneOf variants", str(type(actual).__name__),
            ))
        return findings

    # Handle anyOf
    if "anyOf" in schema:
        for sub in schema["anyOf"]:
            sub_resolved = resolve_schema(spec, sub)
            sub_findings = validate_schema(actual, sub_resolved, spec, path)
            if not sub_findings:
                return findings
        findings.append(Finding(
            "minor", "schema", "",
            f"At {path}: value didn't match any anyOf variant",
            "one of anyOf variants", str(type(actual).__name__),
        ))
        return findings

    # Type checking
    if schema_type:
        type_ok = _check_type(actual, schema_type)
        if not type_ok:
            findings.append(Finding(
                "critical", "type_mismatch", "",
                f"At {path}: expected type '{schema_type}', got {type(actual).__name__}",
                schema_type, type(actual).__name__,
            ))
            return findings  # Skip deeper checks if type is wrong

    # Format checking
    fmt = schema.get("format")
    if fmt and isinstance(actual, str):
        fmt_err = _check_format(actual, fmt, path)
        if fmt_err:
            findings.append(fmt_err)

    # Enum checking
    enum_vals = schema.get("enum")
    if enum_vals is not None and actual not in enum_vals:
        findings.append(Finding(
            "critical", "schema", "",
            f"At {path}: value '{actual}' not in enum {enum_vals}",
            str(enum_vals), str(actual),
        ))

    # Object validation
    if schema_type == "object" or (schema_type is None and "properties" in schema):
        if isinstance(actual, dict):
            findings.extend(_validate_object(actual, schema, spec, path))

    # Array validation
    if schema_type == "array" and isinstance(actual, list):
        items_schema = schema.get("items")
        if items_schema:
            for i, item in enumerate(actual[:10]):  # Validate first 10 items
                findings.extend(validate_schema(item, items_schema, spec, f"{path}[{i}]"))

    return findings
bool) + elif expected_type == "number": + return isinstance(value, (int, float)) and not isinstance(value, bool) + elif expected_type == "boolean": + return isinstance(value, bool) + elif expected_type == "array": + return isinstance(value, list) + elif expected_type == "object": + return isinstance(value, dict) + return True # Unknown type, don't fail + + +def _check_format(value: str, fmt: str, path: str) -> Finding | None: + """Check string format. Returns Finding if invalid.""" + if fmt == "date-time": + if not RFC3339_RE.match(value): + return Finding( + "minor", "format", "", + f"At {path}: '{value}' doesn't match date-time format (RFC3339)", + "RFC3339 date-time", value, + ) + elif fmt == "uuid": + if not UUID_RE.match(value): + return Finding( + "minor", "format", "", + f"At {path}: '{value}' doesn't match UUID format", + "UUID", value, + ) + return None + + +def _validate_object(actual: dict, schema: dict, spec: dict, path: str) -> list[Finding]: + """Validate object properties, required fields, and extra fields.""" + findings = [] + properties = schema.get("properties", {}) + required = schema.get("required", []) + + # Check required fields + for req_field in required: + if req_field not in actual: + findings.append(Finding( + "critical", "missing_field", "", + f"At {path}: required field '{req_field}' is missing", + f"field '{req_field}'", "absent", + )) + + # Validate each property that exists + for prop_name, prop_schema in properties.items(): + if prop_name in actual: + resolved = resolve_schema(spec, prop_schema) + findings.extend(validate_schema(actual[prop_name], resolved, spec, f"{path}.{prop_name}")) + + # Check for extra undocumented fields + if properties: + additional = schema.get("additionalProperties") + if additional is False or (additional is None and schema.get("additionalProperties") is not None): + pass # additionalProperties: false — strict + known_fields = set(properties.keys()) + extra = set(actual.keys()) - known_fields + if extra 
and properties: + for ef in extra: + findings.append(Finding( + "minor", "extra_field", "", + f"At {path}: undocumented field '{ef}'", + "not in spec", str(type(actual[ef]).__name__), + )) + + return findings + + +# --------------------------------------------------------------------------- +# SANDBOX LIFECYCLE MANAGER +# --------------------------------------------------------------------------- + + +class SandboxManager: + def __init__(self, api_key: str): + self.api_key = api_key + self.sandbox_id: str | None = None + self.access_token: str | None = None + + def create(self) -> bool: + """Create a sandbox. Returns True on success.""" + print("\n Creating test sandbox...") + h = api_key_hdr(self.api_key) + status, body, _ = ctrl("POST", "/sandboxes", headers=h, + body={"templateID": "base", "timeout": 600, "secure": True}) + if status != 201 or not isinstance(body, dict): + print(f" FAILED to create sandbox: {status}") + return False + self.sandbox_id = body.get("sandboxID") + self.access_token = body.get("envdAccessToken") or body.get("trafficAccessToken") or "" + + if not self.access_token: + # Try /connect to get token + c_status, c_body, _ = ctrl("POST", f"/sandboxes/{self.sandbox_id}/connect", + headers=h, body={"timeout": 600}) + if c_status in (200, 201) and isinstance(c_body, dict): + self.access_token = (c_body.get("envdAccessToken") + or c_body.get("accessToken") + or c_body.get("trafficAccessToken") or "") + + print(f" Sandbox ID: {self.sandbox_id}") + print(f" Token: {self.access_token[:20]}..." if self.access_token else " No access token!") + time.sleep(2) # Allow boot + return True + + def ensure_alive(self) -> bool: + """Check if sandbox is alive. 
Returns False if dead."""
        if not self.sandbox_id:
            return False
        try:
            # Probe the sandbox's own envd /health endpoint; either 2xx answer
            # counts as alive.
            status, _, _ = envd("GET", self.sandbox_id, "/health", timeout=5)
            return status in (200, 204)
        except Exception:
            # Any transport error (timeout, refused, DNS) means "not reachable".
            return False

    def set_timeout(self, seconds: int = 600) -> None:
        """Extend the sandbox deadline so long phases don't lose it mid-run."""
        if self.sandbox_id:
            ctrl("POST", f"/sandboxes/{self.sandbox_id}/timeout",
                 headers=api_key_hdr(self.api_key), body={"timeout": seconds})

    def cleanup(self) -> None:
        """Best-effort delete of the sandbox; safe to call more than once."""
        if self.sandbox_id:
            print(f"\n Cleaning up sandbox {self.sandbox_id}...")
            ctrl("DELETE", f"/sandboxes/{self.sandbox_id}",
                 headers=api_key_hdr(self.api_key))
            self.sandbox_id = None


# ---------------------------------------------------------------------------
# TEAM ID DISCOVERY
# ---------------------------------------------------------------------------

def discover_team_id(api_key: str, env_team_id: str | None,
                     access_token: str | None = None) -> str | None:
    """Resolve a team ID to test against.

    Precedence: explicit env value; then GET /teams with Bearer auth
    (preferring the default team, falling back to the first one); then any
    team ID found on templates or sandboxes listed with the API key.
    Returns None if nothing is found.
    """
    if env_team_id:
        return env_team_id
    # Try GET /teams with Bearer token first (most reliable source)
    if access_token:
        status, body, _ = ctrl("GET", "/teams", headers=bearer_hdr(access_token))
        if status == 200 and isinstance(body, list):
            for team in body:
                if team.get("isDefault"):
                    tid = team.get("teamID")
                    if tid:
                        return tid
            # Fall back to first team if none is default
            for team in body:
                tid = team.get("teamID")
                if tid:
                    return tid
    # Fall back to templates/sandboxes with API key
    h = api_key_hdr(api_key)
    status, body, _ = ctrl("GET", "/templates", headers=h)
    if status == 200 and isinstance(body, list):
        for tpl in body:
            # Try the known spellings of the team ID field.
            for key in ("teamID", "team_id", "teamId"):
                tid = tpl.get(key)
                if tid:
                    return tid
    status, body, _ = ctrl("GET", "/sandboxes", headers=h)
    if status == 200 and isinstance(body, list):
        for sbx in body:
            for key in ("teamID", "team_id", "teamId"):
                tid = sbx.get(key)
                if tid:
                    return tid
    return None


# ---------------------------------------------------------------------------
# SPEC-LEVEL ANALYSIS
# 
--------------------------------------------------------------------------- + +def analyze_spec(spec: dict) -> list[SpecIssue]: + """Analyze the spec for best-practice issues.""" + issues = [] + paths = spec.get("paths", {}) + schemas = spec.get("components", {}).get("schemas", {}) + + # Collect all $ref targets used + all_refs = set() + _collect_refs(spec, all_refs) + + # Track operation details + for path_str, methods in paths.items(): + for method, op in methods.items(): + if not isinstance(op, dict) or "responses" not in op: + continue + op_label = f"{method.upper()} {path_str}" + + # 1. Missing operationId + if "operationId" not in op: + issues.append(SpecIssue( + "missing_operationId", + f"Operation '{op_label}' has no operationId", + op_label, + )) + + # 2. Missing summary + if "summary" not in op and "description" not in op: + issues.append(SpecIssue( + "missing_summary", + f"Operation '{op_label}' has no summary or description", + op_label, + )) + + # 3. Check parameters for missing descriptions + for param in op.get("parameters", []): + if isinstance(param, dict) and "$ref" not in param: + if "description" not in param: + issues.append(SpecIssue( + "missing_param_description", + f"Parameter '{param.get('name', '?')}' in '{op_label}' has no description", + op_label, + )) + + # 10. Deprecated without migration note + if op.get("deprecated"): + desc = op.get("description", "") + if not any(w in desc.lower() for w in ["use ", "replaced", "instead", "v2", "v3", "migration"]): + issues.append(SpecIssue( + "deprecated_no_migration", + f"Deprecated operation '{op_label}' has no migration note in description", + op_label, + )) + + # 4. 
Schema properties missing descriptions + for schema_name, schema_def in schemas.items(): + if not isinstance(schema_def, dict): + continue + for prop_name, prop_def in schema_def.get("properties", {}).items(): + if isinstance(prop_def, dict) and "$ref" not in prop_def: + if "description" not in prop_def and "title" not in prop_def: + issues.append(SpecIssue( + "missing_schema_description", + f"Property '{prop_name}' in schema '{schema_name}' has no description", + f"schemas/{schema_name}", + )) + + # 7. Naming inconsistencies (camelCase vs snake_case in parameters) + param_names = set() + for path_str, methods in paths.items(): + for method, op in methods.items(): + if not isinstance(op, dict): + continue + for param in op.get("parameters", []): + if isinstance(param, dict): + name = param.get("name", "") + if name: + param_names.add(name) + camel = [n for n in param_names if "_" not in n and n[0].islower()] + snake = [n for n in param_names if "_" in n] + if camel and snake: + issues.append(SpecIssue( + "naming_inconsistency", + f"Mixed naming: camelCase params ({', '.join(sorted(camel)[:5])}) and " + f"snake_case params ({', '.join(sorted(snake)[:5])})", + "parameters", + )) + + # 8. Orphaned schemas + for schema_name in schemas: + ref_str = f"#/components/schemas/{schema_name}" + if ref_str not in all_refs: + issues.append(SpecIssue( + "orphaned_schema", + f"Schema '{schema_name}' is defined but never referenced", + f"schemas/{schema_name}", + )) + + # 9. 
Truncated descriptions + for path_str, methods in paths.items(): + for method, op in methods.items(): + if not isinstance(op, dict): + continue + for param in op.get("parameters", []): + if isinstance(param, dict): + desc = param.get("description", "") + if isinstance(desc, str) and desc and not desc.rstrip().endswith(('.', ')', '"', ':', ']', '`')): + last_word = desc.rstrip().split()[-1] if desc.strip() else "" + if last_word and last_word[0].islower() and len(desc) > 30: + issues.append(SpecIssue( + "truncated_description", + f"Possible truncated description: '...{desc[-40:]}'", + f"{method.upper()} {path_str}", + )) + + # 11. Check LogLevel description correctness + log_level = schemas.get("LogLevel", {}) + if log_level.get("description", "").lower() == "state of the sandbox": + issues.append(SpecIssue( + "wrong_description", + "LogLevel description says 'State of the sandbox' — should describe log severity levels", + "schemas/LogLevel", + )) + + # 13. Server override correctness + for path_str, methods in paths.items(): + servers = methods.get("servers", []) + # Check if any operation on this path is a sandbox endpoint + sandbox_tags = {"Others", "Filesystem", "Process"} + is_sandbox_path = False + for method, op in methods.items(): + if not isinstance(op, dict): + continue + for tag in (op.get("tags") or []): + if tag in sandbox_tags: + is_sandbox_path = True + break + if is_sandbox_path and not servers: + issues.append(SpecIssue( + "missing_server_override", + f"Sandbox endpoint '{path_str}' has no server override", + path_str, + )) + + return issues + + +def _collect_refs(node, refs: set): + """Recursively collect all $ref values in the spec.""" + if isinstance(node, dict): + if "$ref" in node: + refs.add(node["$ref"]) + for v in node.values(): + _collect_refs(v, refs) + elif isinstance(node, list): + for item in node: + _collect_refs(item, refs) + + +# --------------------------------------------------------------------------- +# TEST PHASES +# 
--------------------------------------------------------------------------- + +def run_phase_1_teams(api_key: str, team_id: str | None, spec: dict, + access_token: str | None = None) -> list[EndpointResult]: + """Phase 1: Platform — Teams (auth checks + teams read).""" + results = [] + h = api_key_hdr(api_key) + + # GET /teams (requires AccessTokenAuth — Bearer token, not ApiKeyAuth) + print("\n Teams") + print(" GET /teams") + ep = EndpointResult("GET", "/teams", surface="platform") + if access_token: + status, body, _ = ctrl("GET", "/teams", headers=bearer_hdr(access_token)) + else: + status, body, _ = ctrl("GET", "/teams", headers=h) + ep.tested = True + ep.expected_status = 200 + ep.actual_status = status + ep.response_body = body + if status == 401 and not access_token: + ep.findings.append(Finding( + "minor", "auth", "GET /teams", + "GET /teams requires AccessTokenAuth (Bearer) — set E2B_ACCESS_TOKEN to test", + )) + elif status == 401: + ep.findings.append(Finding( + "critical", "auth", "GET /teams", + f"Bearer token rejected: got {status}", "200", str(status), + )) + elif status == 200 and isinstance(body, list): + schema = {"type": "array", "items": {"allOf": [{"$ref": "#/components/schemas/Team"}]}} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /teams")) + results.append(ep) + + if not team_id: + print(" [SKIP] No team ID — skipping team metrics") + return results + + now = int(time.time()) + + # GET /teams/{teamID}/metrics + print(f" GET /teams/{team_id[:16]}../metrics") + ep = EndpointResult("GET", "/teams/{teamID}/metrics", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/teams/{team_id}/metrics", headers=h, + params={"start": now - 3600, "end": now}) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"type": "array", "items": {"$ref": "#/components/schemas/TeamMetric"}} + ep.findings.extend(_tag_findings(validate_schema(body, schema, 
spec), "GET /teams/{teamID}/metrics")) + elif status != 200: + ep.findings.append(Finding("critical", "status_code", "GET /teams/{teamID}/metrics", + f"Expected 200, got {status}", "200", str(status))) + results.append(ep) + + # Error case: missing params + ep2 = EndpointResult("GET", "/teams/{teamID}/metrics", surface="platform") + ep2.tested = True + ep2.expected_status = 400 + status, body, _ = ctrl("GET", f"/teams/{team_id}/metrics", headers=h) + ep2.actual_status = status + if status != 400: + ep2.findings.append(Finding("minor", "status_code", "GET /teams/{teamID}/metrics", + f"Missing params: expected 400, got {status}", "400", str(status))) + results.append(ep2) + + # GET /teams/{teamID}/metrics/max + print(f" GET /teams/{team_id[:16]}../metrics/max") + ep = EndpointResult("GET", "/teams/{teamID}/metrics/max", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/teams/{team_id}/metrics/max", headers=h, + params={"start": now - 3600, "end": now, "metric": "concurrent_sandboxes"}) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/MaxTeamMetric"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /teams/{teamID}/metrics/max")) + elif status != 200: + ep.findings.append(Finding("critical", "status_code", "GET /teams/{teamID}/metrics/max", + f"Expected 200, got {status}", "200", str(status))) + results.append(ep) + + # Error: 403 wrong team + ep2 = EndpointResult("GET", "/teams/{teamID}/metrics/max", surface="platform") + ep2.tested = True + ep2.expected_status = 403 + status, body, _ = ctrl("GET", f"/teams/{FAKE_TEAM_ID}/metrics/max", headers=h, + params={"start": now - 3600, "end": now, "metric": "concurrent_sandboxes"}) + ep2.actual_status = status + if status != 403: + ep2.findings.append(Finding("minor", "status_code", "GET /teams/{teamID}/metrics/max", + f"Wrong team: expected 403, got {status}", "403", str(status))) + 
results.append(ep2) + + return results + + +def run_phase_2_templates_read(api_key: str, spec: dict) -> tuple[list[EndpointResult], str | None, str | None, str | None]: + """Phase 2: Templates read-only. Returns (results, template_id, build_id, alias, template_name, template_tag).""" + results = [] + h = api_key_hdr(api_key) + template_id = None + build_id = None + alias = None + template_name = None + template_tag = None + + print("\n Phase 2: Platform — Templates (read-only)") + + # GET /templates + print(" GET /templates") + ep = EndpointResult("GET", "/templates", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", "/templates", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, list): + schema = {"type": "array", "items": {"allOf": [{"$ref": "#/components/schemas/Template"}]}} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /templates")) + for tpl in body: + if not template_id: + template_id = tpl.get("templateID") + if not build_id: + build_id = tpl.get("buildID") + aliases = tpl.get("aliases") + if aliases and isinstance(aliases, list) and aliases and not alias: + alias = aliases[0] + names = tpl.get("names") + if names and isinstance(names, list) and names and not template_name: + template_name = names[0] + if template_id and build_id and alias and template_name: + break + results.append(ep) + + # GET /templates/{templateID} + if template_id: + print(f" GET /templates/{template_id}") + ep = EndpointResult("GET", "/templates/{templateID}", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/templates/{template_id}", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/TemplateWithBuilds"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /templates/{templateID}")) + # Extract build_id 
from builds list + if isinstance(body, dict) and not build_id: + builds = body.get("builds", []) + if builds and isinstance(builds, list): + build_id = builds[0].get("buildID") + results.append(ep) + + # GET /templates/{templateID}/tags + if template_id: + print(f" GET /templates/{template_id}/tags") + ep = EndpointResult("GET", "/templates/{templateID}/tags", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/templates/{template_id}/tags", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, list) and body: + template_tag = body[0].get("tag") + results.append(ep) + + # GET /templates/{templateID} 404 + print(" GET /templates/{templateID} -> 404") + ep = EndpointResult("GET", "/templates/{templateID}", surface="platform") + ep.tested = True + ep.expected_status = 404 + status, body, _ = ctrl("GET", f"/templates/{FAKE_TEMPLATE_ID}", headers=h) + ep.actual_status = status + if status != 404: + ep.findings.append(Finding("minor", "status_code", "GET /templates/{templateID}", + f"Non-existent: expected 404, got {status}", "404", str(status))) + results.append(ep) + + # GET /templates/{templateID}/builds/{buildID}/status + if template_id and build_id: + print(f" GET .../builds/{build_id[:16]}../status") + ep = EndpointResult("GET", "/templates/{templateID}/builds/{buildID}/status", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/templates/{template_id}/builds/{build_id}/status", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/TemplateBuildInfo"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "GET /templates/{templateID}/builds/{buildID}/status")) + results.append(ep) + + # GET /templates/{templateID}/builds/{buildID}/logs + if template_id and build_id: + print(f" GET .../builds/{build_id[:16]}../logs") + ep = 
EndpointResult("GET", "/templates/{templateID}/builds/{buildID}/logs", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/templates/{template_id}/builds/{build_id}/logs", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/TemplateBuildLogsResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "GET /templates/{templateID}/builds/{buildID}/logs")) + results.append(ep) + + # GET /templates/{templateID}/files/{hash} — expect 404 + print(f" GET /templates/{{templateID}}/files/{{hash}} -> 404") + ep = EndpointResult("GET", "/templates/{templateID}/files/{hash}", surface="platform") + ep.tested = True + ep.expected_status = 404 + tid = template_id or FAKE_TEMPLATE_ID + status, body, _ = ctrl("GET", f"/templates/{tid}/files/{FAKE_HASH}", headers=h) + ep.actual_status = status + if status not in (400, 404): + ep.findings.append(Finding("minor", "status_code", "GET /templates/{templateID}/files/{hash}", + f"Expected 404, got {status}", "404", str(status))) + results.append(ep) + + return results, template_id, build_id, alias, template_name, template_tag + + +def run_phase_3_templates_write(api_key: str, spec: dict, template_id: str | None, template_name: str | None = None, template_tag: str | None = None) -> list[EndpointResult]: + """Phase 3: Templates write operations.""" + results = [] + h = api_key_hdr(api_key) + test_template_name = "_validation_test_template" + + print("\n Phase 3: Platform — Templates (write)") + + # ------------------------------------------------------------------ + # POST /v3/templates (202 — create template, then clean up) + # ------------------------------------------------------------------ + print(" POST /v3/templates (202 — create)") + ep = EndpointResult("POST", "/v3/templates", surface="platform") + ep.tested = True + ep.expected_status = 202 + status, body, _ = ctrl("POST", 
"/v3/templates", headers=h,
                           body={"name": test_template_name, "cpuCount": 1, "memoryMB": 128})
    ep.actual_status = status
    ep.response_body = body
    # IDs of the v3 test template; used by later tests and cleanup below.
    v3_template_id = None
    v3_build_id = None
    if status == 202 and isinstance(body, dict):
        schema = {"$ref": "#/components/schemas/TemplateRequestResponseV3"}
        ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /v3/templates"))
        v3_template_id = body.get("templateID")
        v3_build_id = body.get("buildID")
    elif status != 202:
        ep.findings.append(Finding("critical", "status_code", "POST /v3/templates",
                                   f"Expected 202, got {status}", "202", str(status)))
    results.append(ep)

    # GET /templates/aliases/{alias} — use the template we just created
    if test_template_name and v3_template_id:
        print(f" GET /templates/aliases/{test_template_name}")
        ep = EndpointResult("GET", "/templates/aliases/{alias}", surface="platform")
        ep.tested = True
        ep.expected_status = 200
        status, body, _ = ctrl("GET", f"/templates/aliases/{test_template_name}", headers=h)
        ep.actual_status = status
        ep.response_body = body
        if status == 200:
            schema = {"$ref": "#/components/schemas/TemplateAliasResponse"}
            ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /templates/aliases/{alias}"))
        results.append(ep)
    else:
        ep = EndpointResult("GET", "/templates/aliases/{alias}", surface="platform")
        ep.skip_reason = "No test template created"
        results.append(ep)

    # POST /v3/templates (400 — empty body)
    print(" POST /v3/templates (400 — empty)")
    ep = EndpointResult("POST", "/v3/templates", surface="platform")
    ep.tested = True
    ep.expected_status = 400
    status, body, _ = ctrl("POST", "/v3/templates", headers=h, body={})
    ep.actual_status = status
    if status != 400:
        ep.findings.append(Finding("minor", "status_code", "POST /v3/templates",
                                   f"Empty body: expected 400, got {status}", "400", str(status)))
    results.append(ep)

    # ------------------------------------------------------------------
    # PATCH /v2/templates/{templateID} (200 — update, then restore)
    # ------------------------------------------------------------------
    patch_tid = v3_template_id or template_id
    if patch_tid:
        print(f" PATCH /v2/templates/{patch_tid} (200 — toggle public)")
        ep = EndpointResult("PATCH", "/v2/templates/{templateID}", surface="platform")
        ep.tested = True
        ep.expected_status = 200
        status, body, _ = ctrl("PATCH", f"/v2/templates/{patch_tid}", headers=h,
                               body={"public": False})
        ep.actual_status = status
        ep.response_body = body
        if status == 200 and isinstance(body, dict):
            schema = {"$ref": "#/components/schemas/TemplateUpdateResponse"}
            ep.findings.extend(_tag_findings(validate_schema(body, schema, spec),
                                             "PATCH /v2/templates/{templateID}"))
        elif status != 200:
            ep.findings.append(Finding("critical", "status_code", "PATCH /v2/templates/{templateID}",
                                       f"Expected 200, got {status}", "200", str(status)))
        results.append(ep)

        # PATCH /templates/{templateID} (deprecated, same test)
        print(f" PATCH /templates/{patch_tid} (deprecated, 200)")
        ep = EndpointResult("PATCH", "/templates/{templateID}", surface="platform")
        ep.tested = True
        ep.expected_status = 200
        status, body, _ = ctrl("PATCH", f"/templates/{patch_tid}", headers=h,
                               body={"public": False})
        ep.actual_status = status
        if status not in (200, 400):
            ep.findings.append(Finding("minor", "status_code", "PATCH /templates/{templateID}",
                                       f"Expected 200, got {status}", "200", str(status)))
        results.append(ep)
    else:
        # Fallback: 404 tests with fake IDs
        print(" PATCH /v2/templates/{templateID} (404 — no template)")
        ep = EndpointResult("PATCH", "/v2/templates/{templateID}", surface="platform")
        ep.tested = True
        ep.expected_status = 404
        status, body, _ = ctrl("PATCH", f"/v2/templates/{FAKE_TEMPLATE_ID}", headers=h, body={})
        ep.actual_status = status
        if status not in (400, 404):
            ep.findings.append(Finding("minor", "status_code", "PATCH /v2/templates/{templateID}",
                                       f"Expected 404, got {status}", "404", str(status)))
        results.append(ep)

        print(" PATCH /templates/{templateID} (deprecated, 404)")
        ep = EndpointResult("PATCH", "/templates/{templateID}", surface="platform")
        ep.tested = True
        ep.expected_status = 404
        status, body, _ = ctrl("PATCH", f"/templates/{FAKE_TEMPLATE_ID}", headers=h, body={})
        ep.actual_status = status
        if status not in (400, 404):
            ep.findings.append(Finding("minor", "status_code", "PATCH /templates/{templateID}",
                                       f"Expected 404, got {status}", "404", str(status)))
        results.append(ep)

    # ------------------------------------------------------------------
    # POST /v2/templates/{templateID}/builds/{buildID} (202 — start build)
    # ------------------------------------------------------------------
    if v3_template_id and v3_build_id:
        print(f" POST /v2/.../builds/{v3_build_id[:16]}.. (202 — start build)")
        ep = EndpointResult("POST", "/v2/templates/{templateID}/builds/{buildID}", surface="platform")
        ep.tested = True
        ep.expected_status = 202
        status, body, _ = ctrl("POST",
                               f"/v2/templates/{v3_template_id}/builds/{v3_build_id}",
                               headers=h, body={"fromImage": "ubuntu:latest"})
        ep.actual_status = status
        if status not in (202, 400):
            ep.findings.append(Finding("minor", "status_code",
                                       "POST /v2/.../builds/{buildID}",
                                       f"Expected 202, got {status}", "202", str(status)))
        results.append(ep)
    else:
        print(" POST /v2/.../builds/{buildID} (404 — no template)")
        ep = EndpointResult("POST", "/v2/templates/{templateID}/builds/{buildID}", surface="platform")
        ep.tested = True
        ep.expected_status = 404
        status, body, _ = ctrl("POST",
                               f"/v2/templates/{FAKE_TEMPLATE_ID}/builds/{FAKE_BUILD_ID}",
                               headers=h, body={})
        ep.actual_status = status
        if status not in (400, 404):
            ep.findings.append(Finding("minor", "status_code",
                                       "POST /v2/.../builds/{buildID}",
                                       f"Expected 404, got {status}", "404", str(status)))
        results.append(ep)

    # ------------------------------------------------------------------
    # POST /v2/templates (deprecated, 202 — create template, then clean up)
    # ------------------------------------------------------------------
    v2_test_name = "_validation_test_v2"
    print(f" POST /v2/templates (202 — create)")
    ep = EndpointResult("POST", "/v2/templates", surface="platform")
    ep.tested = True
    ep.expected_status = 202
    status, body, _ = ctrl("POST", "/v2/templates", headers=h,
                           body={"alias": v2_test_name, "cpuCount": 1, "memoryMB": 128})
    ep.actual_status = status
    ep.response_body = body
    v2_template_id = None
    if status == 202 and isinstance(body, dict):
        schema = {"$ref": "#/components/schemas/TemplateLegacy"}
        ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /v2/templates"))
        v2_template_id = body.get("templateID")
    elif status != 202:
        ep.findings.append(Finding("critical", "status_code", "POST /v2/templates",
                                   f"Expected 202, got {status}", "202", str(status)))
    results.append(ep)

    # POST /v2/templates (400 — empty body)
    print(" POST /v2/templates (400 — empty)")
    ep = EndpointResult("POST", "/v2/templates", surface="platform")
    ep.tested = True
    ep.expected_status = 400
    status, body, _ = ctrl("POST", "/v2/templates", headers=h, body={})
    ep.actual_status = status
    if status != 400:
        ep.findings.append(Finding("minor", "status_code", "POST /v2/templates",
                                   f"Empty body: expected 400, got {status}", "400", str(status)))
    results.append(ep)

    # ------------------------------------------------------------------
    # Tags
    # ------------------------------------------------------------------

    # POST /templates/tags (400 — empty body)
    print(" POST /templates/tags (400 — empty)")
    ep = EndpointResult("POST", "/templates/tags", surface="platform")
    ep.tested = True
    ep.expected_status = 400
    status, body, _ = ctrl("POST", "/templates/tags", headers=h, body={})
    ep.actual_status = status
    if status != 400:
        ep.findings.append(Finding("minor", "status_code", "POST /templates/tags",
                                   f"Empty body: expected 400, got {status}", "400", str(status)))
    results.append(ep)

    # POST /templates/tags (201 — assign tag)
    test_tag = "_validation_test"
    if template_name and template_tag:
        # template_name may include a tag (e.g. "team/name:latest"), strip it
        base_name = template_name.split(":")[0]
        # target references the existing build via name:existing_tag
        target = f"{base_name}:{template_tag}"
        print(f" POST /templates/tags (201 — assign '{test_tag}')")
        ep = EndpointResult("POST", "/templates/tags", surface="platform")
        ep.tested = True
        ep.expected_status = 201
        status, body, _ = ctrl("POST", "/templates/tags", headers=h,
                               body={"target": target, "tags": [test_tag]})
        ep.actual_status = status
        ep.response_body = body
        if status == 201 and isinstance(body, dict):
            schema = {"$ref": "#/components/schemas/AssignedTemplateTags"}
            ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /templates/tags"))
        elif status != 201:
            ep.findings.append(Finding("critical", "status_code", "POST /templates/tags",
                                       f"Expected 201, got {status}", "201", str(status)))
        results.append(ep)

        # DELETE /templates/tags (204 — remove the test tag)
        print(f" DELETE /templates/tags (204 — remove '{test_tag}')")
        ep = EndpointResult("DELETE", "/templates/tags", surface="platform")
        ep.tested = True
        ep.expected_status = 204
        status, body, _ = ctrl("DELETE", "/templates/tags", headers=h,
                               body={"name": base_name, "tags": [test_tag]})
        ep.actual_status = status
        if status != 204:
            ep.findings.append(Finding("critical", "status_code", "DELETE /templates/tags",
                                       f"Expected 204, got {status}", "204", str(status)))
        results.append(ep)
    else:
        print(" POST /templates/tags (skip — no template name/tag discovered)")
        print(" DELETE /templates/tags (skip — no template name/tag discovered)")

    # DELETE /templates/tags (400 — empty body)
    print(" DELETE /templates/tags (400 — empty)")
    ep = EndpointResult("DELETE", "/templates/tags", surface="platform")
    ep.tested = True
    ep.expected_status = 400
    status, body, _ = ctrl("DELETE", "/templates/tags", headers=h, body={})
    ep.actual_status = status
    if status != 400:
        ep.findings.append(Finding("minor", "status_code", "DELETE /templates/tags",
                                   f"Empty body: expected 400, got {status}", "400", str(status)))
    results.append(ep)

    # ------------------------------------------------------------------
    # Clean up test templates + DELETE /templates/{templateID}
    # ------------------------------------------------------------------
    for cleanup_id, label in [(v2_template_id, "v2 test"), (v3_template_id, "v3 test")]:
        if cleanup_id:
            print(f" DELETE /templates/{cleanup_id} ({label} cleanup)")
            ep = EndpointResult("DELETE", "/templates/{templateID}", surface="platform")
            ep.tested = True
            ep.expected_status = 204
            status, body, _ = ctrl("DELETE", f"/templates/{cleanup_id}", headers=h)
            ep.actual_status = status
            if status != 204:
                ep.findings.append(Finding("minor", "status_code", "DELETE /templates/{templateID}",
                                           f"Cleanup {label}: expected 204, got {status}", "204", str(status)))
            results.append(ep)

    # DELETE /templates/{templateID} (404 — non-existent)
    print(" DELETE /templates/{templateID} (404)")
    ep = EndpointResult("DELETE", "/templates/{templateID}", surface="platform")
    ep.tested = True
    ep.expected_status = 404
    status, body, _ = ctrl("DELETE", f"/templates/{FAKE_TEMPLATE_ID}", headers=h)
    ep.actual_status = status
    if status not in (400, 404):
        ep.findings.append(Finding("minor", "status_code", "DELETE /templates/{templateID}",
                                   f"Expected 404, got {status}", "404", str(status)))
    results.append(ep)

    return results


def run_phase_4_sandboxes_read(api_key: str, spec: dict, sbx: SandboxManager) -> list[EndpointResult]:
    """Phase 4: Sandboxes create + read.

    Creates (and immediately deletes) an extra sandbox to validate the
    POST /sandboxes response shape, then exercises the list/detail reads.
    """
    results = []
    h = api_key_hdr(api_key)

    print("\n Phase 4: Platform — Sandboxes (create + read)")

    # POST /sandboxes -> 201 (already created, validate the response shape)
    # We re-create to capture schema
    print(" POST /sandboxes (validate schema)")
    ep = EndpointResult("POST", "/sandboxes", surface="platform")
    ep.tested = True
    ep.expected_status = 201
    status, body, _ = ctrl("POST", "/sandboxes", headers=h,
                           body={"templateID": "base", "timeout": 30, "secure": True})
    ep.actual_status = status
    ep.response_body = body
    if status == 201 and isinstance(body, dict):
        schema = {"$ref": "#/components/schemas/Sandbox"}
        ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /sandboxes"))
        # Clean up the extra sandbox
        extra_id = body.get("sandboxID")
        if extra_id:
            ctrl("DELETE", f"/sandboxes/{extra_id}", headers=h)
    elif status != 201:
        ep.findings.append(Finding("critical", "status_code", "POST /sandboxes",
                                   f"Expected 201, got {status}", "201", str(status)))
    results.append(ep)

    # POST /sandboxes 400 (empty body)
    print(" POST /sandboxes (400 — empty)")
    ep = EndpointResult("POST", "/sandboxes", surface="platform")
    ep.tested = True
    ep.expected_status = 400
    status, body, _ = ctrl("POST", "/sandboxes", headers=h, body={})
    ep.actual_status = status
    if status != 400:
        ep.findings.append(Finding("minor", "status_code", "POST /sandboxes",
                                   f"Empty body: expected 400, got {status}", "400", str(status)))
    results.append(ep)

    # GET /sandboxes
    print(" GET /sandboxes")
    ep = EndpointResult("GET", "/sandboxes", surface="platform")
    ep.tested = True
    ep.expected_status = 200
    status, body, _ = ctrl("GET", "/sandboxes", headers=h)
    ep.actual_status = status
    ep.response_body = body
    if status == 200 and isinstance(body, list):
        schema = {"type": "array", "items": {"allOf": [{"$ref": "#/components/schemas/ListedSandbox"}]}}
        ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /sandboxes"))
    results.append(ep)

    # GET /v2/sandboxes
    print(" GET /v2/sandboxes")
    ep = EndpointResult("GET", "/v2/sandboxes", surface="platform")
    ep.tested = True
    ep.expected_status = 200
    status, body, _ = ctrl("GET", "/v2/sandboxes", headers=h, params={"state": "running"})
    ep.actual_status = status
    ep.response_body = body
    if status == 200 and isinstance(body, list):
        schema = {"type": "array", "items": {"allOf": [{"$ref": "#/components/schemas/ListedSandbox"}]}}
        ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /v2/sandboxes"))
    results.append(ep)

    # GET /sandboxes/{sandboxID}
    if sbx.sandbox_id:
        print(f" GET /sandboxes/{sbx.sandbox_id}")
        ep = EndpointResult("GET", "/sandboxes/{sandboxID}", surface="platform")
        ep.tested = True
        ep.expected_status = 200
        status, body, _ = ctrl("GET", f"/sandboxes/{sbx.sandbox_id}", headers=h)
        ep.actual_status = status
        ep.response_body = body
        if status == 200:
            schema = {"$ref": "#/components/schemas/SandboxDetail"}
            ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /sandboxes/{sandboxID}"))
        results.append(ep)

    # GET /sandboxes/{sandboxID} 404
    print(" GET /sandboxes/{sandboxID} (404)")
    ep = EndpointResult("GET", "/sandboxes/{sandboxID}", surface="platform")
    ep.tested = True
    ep.expected_status = 404
    status, body, _ = ctrl("GET", f"/sandboxes/{FAKE_SANDBOX_ID}", headers=h)
    ep.actual_status = status
    if status != 404:
        ep.findings.append(Finding("minor", "status_code", "GET /sandboxes/{sandboxID}",
                                   f"Non-existent: expected 404, got {status}", "404", str(status)))
    results.append(ep)

    return results


def run_phase_5_sandbox_actions(api_key: str, spec: dict, sbx: SandboxManager) -> list[EndpointResult]:
    """Phase 5: Sandbox actions (timeout, refreshes, connect, logs, metrics)."""
    results = []
    h = api_key_hdr(api_key)
    sid = sbx.sandbox_id

    print("\n Phase 5: Platform — Sandbox actions")

    if not sid:
print(" [SKIP] No sandbox") + return results + + # POST /sandboxes/{sandboxID}/timeout -> 204 + print(" POST .../timeout") + ep = EndpointResult("POST", "/sandboxes/{sandboxID}/timeout", surface="platform") + ep.tested = True + ep.expected_status = 204 + status, body, _ = ctrl("POST", f"/sandboxes/{sid}/timeout", headers=h, body={"timeout": 600}) + ep.actual_status = status + if status != 204: + ep.findings.append(Finding("critical", "status_code", "POST /sandboxes/{sandboxID}/timeout", + f"Expected 204, got {status}", "204", str(status))) + results.append(ep) + + # POST /sandboxes/{sandboxID}/refreshes -> 204 + print(" POST .../refreshes") + ep = EndpointResult("POST", "/sandboxes/{sandboxID}/refreshes", surface="platform") + ep.tested = True + ep.expected_status = 204 + status, body, _ = ctrl("POST", f"/sandboxes/{sid}/refreshes", headers=h, body={"duration": 60}) + ep.actual_status = status + if status not in (200, 204): + ep.findings.append(Finding("critical", "status_code", "POST /sandboxes/{sandboxID}/refreshes", + f"Expected 204, got {status}", "204", str(status))) + results.append(ep) + + # POST /sandboxes/{sandboxID}/connect -> 200/201 + print(" POST .../connect") + ep = EndpointResult("POST", "/sandboxes/{sandboxID}/connect", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("POST", f"/sandboxes/{sid}/connect", headers=h, body={"timeout": 600}) + ep.actual_status = status + ep.response_body = body + if status in (200, 201) and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/Sandbox"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /sandboxes/{sandboxID}/connect")) + elif status not in (200, 201): + ep.findings.append(Finding("critical", "status_code", "POST /sandboxes/{sandboxID}/connect", + f"Expected 200/201, got {status}", "200 or 201", str(status))) + results.append(ep) + + # GET /sandboxes/{sandboxID}/logs (deprecated) + print(" GET .../logs (deprecated)") + ep = 
EndpointResult("GET", "/sandboxes/{sandboxID}/logs", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/sandboxes/{sid}/logs", headers=h) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/SandboxLogs"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /sandboxes/{sandboxID}/logs")) + results.append(ep) + + # GET /v2/sandboxes/{sandboxID}/logs — endpoint doesn't exist on server, skipped + + # GET /sandboxes/{sandboxID}/metrics + now = int(time.time()) + print(" GET .../metrics") + ep = EndpointResult("GET", "/sandboxes/{sandboxID}/metrics", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", f"/sandboxes/{sid}/metrics", headers=h, + params={"start": now - 300, "end": now}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, list): + schema = {"type": "array", "items": {"$ref": "#/components/schemas/SandboxMetric"}} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /sandboxes/{sandboxID}/metrics")) + results.append(ep) + + # GET /sandboxes/metrics + print(" GET /sandboxes/metrics") + ep = EndpointResult("GET", "/sandboxes/metrics", surface="platform") + ep.tested = True + ep.expected_status = 200 + status, body, _ = ctrl("GET", "/sandboxes/metrics", headers=h, + params={"sandbox_ids": sid}) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/SandboxesWithMetrics"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /sandboxes/metrics")) + results.append(ep) + + return results + + +def run_phase_6_health_system(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 6: Sandbox — Health & System endpoints.""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 6: Sandbox — Health & 
System") + + if not sid: + print(" [SKIP] No sandbox") + return results + + # GET /health — returns 204 (no content) + print(" GET /health") + ep = EndpointResult("GET", "/health", surface="sandbox") + ep.tested = True + ep.expected_status = 204 + status, body, _ = envd("GET", sid, "/health") + ep.actual_status = status + if status != 204: + ep.findings.append(Finding("critical", "status_code", "GET /health", + f"Expected 204, got {status}", "204", str(status))) + results.append(ep) + + # GET /metrics + print(" GET /metrics") + ep = EndpointResult("GET", "/metrics", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("GET", sid, "/metrics", headers=sandbox_hdr(token)) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/Metrics"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /metrics")) + results.append(ep) + + # GET /envs + print(" GET /envs") + ep = EndpointResult("GET", "/envs", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("GET", sid, "/envs", headers=sandbox_hdr(token)) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/EnvVars"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "GET /envs")) + results.append(ep) + + return results + + +def run_phase_7_filesystem_rpc(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 7: Filesystem Connect RPC endpoints.""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 7: Sandbox — Filesystem (Connect RPC)") + + if not sid: + print(" [SKIP] No sandbox") + return results + + h = connect_hdr(token) + + # MakeDir + print(" MakeDir /tmp/test-validation-dir") + ep = EndpointResult("POST", "/filesystem.Filesystem/MakeDir", surface="sandbox") + ep.tested = True + 
ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/MakeDir", + headers=h, body={"path": "/tmp/test-validation-dir"}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.MakeDirResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/MakeDir")) + results.append(ep) + + # Stat + print(" Stat /tmp/test-validation-dir") + ep = EndpointResult("POST", "/filesystem.Filesystem/Stat", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/Stat", + headers=h, body={"path": "/tmp/test-validation-dir"}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.StatResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/Stat")) + results.append(ep) + + # ListDir + print(" ListDir /tmp") + ep = EndpointResult("POST", "/filesystem.Filesystem/ListDir", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/ListDir", + headers=h, body={"path": "/tmp"}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.ListDirResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/ListDir")) + results.append(ep) + + # Move + print(" Move /tmp/test-validation-dir -> /tmp/test-validation-moved") + ep = EndpointResult("POST", "/filesystem.Filesystem/Move", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/Move", + headers=h, body={"source": "/tmp/test-validation-dir", + "destination": 
"/tmp/test-validation-moved"}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.MoveResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/Move")) + results.append(ep) + + # Remove + print(" Remove /tmp/test-validation-moved") + ep = EndpointResult("POST", "/filesystem.Filesystem/Remove", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/Remove", + headers=h, body={"path": "/tmp/test-validation-moved"}) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/filesystem.RemoveResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/Remove")) + results.append(ep) + + # Error case: Stat non-existent + print(" Stat /nonexistent -> error") + ep = EndpointResult("POST", "/filesystem.Filesystem/Stat", surface="sandbox") + ep.tested = True + ep.expected_status = 404 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/Stat", + headers=h, body={"path": "/nonexistent/path/xyz"}) + ep.actual_status = status + if isinstance(body, dict) and "code" in body: + # Validate connect.error schema + schema = {"$ref": "#/components/schemas/connect.error"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/Stat (error)")) + results.append(ep) + + return results + + +def run_phase_8_files_rest(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 8: Files REST endpoints.""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 8: Sandbox — Files (REST)") + + if not sid: + print(" [SKIP] No sandbox") + return results + + # POST /files — upload + print(" POST /files (upload test-file.txt)") + ep = EndpointResult("POST", "/files", 
surface="sandbox") + ep.tested = True + ep.expected_status = 200 + test_content = b"Hello from E2B validation script" + status, body, _ = multipart_upload(sid, "/tmp/test-file.txt", test_content, token=token) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"type": "array", "items": {"$ref": "#/components/schemas/EntryInfo"}} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /files")) + results.append(ep) + + # GET /files — download + print(" GET /files (download test-file.txt)") + ep = EndpointResult("GET", "/files", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, resp_headers = envd("GET", sid, "/files", + headers=sandbox_hdr(token) if token else None, + params={"path": "/tmp/test-file.txt"}) + ep.actual_status = status + if status == 200: + # Verify content + ct = resp_headers.get("Content-Type", "") + if "octet-stream" not in ct and "text" not in ct: + ep.findings.append(Finding("minor", "schema", "GET /files", + f"Expected application/octet-stream, got Content-Type: {ct}", + "application/octet-stream", ct)) + results.append(ep) + + # GET /files 404 + print(" GET /files (404)") + ep = EndpointResult("GET", "/files", surface="sandbox") + ep.tested = True + ep.expected_status = 404 + status, body, _ = envd("GET", sid, "/files", + headers=sandbox_hdr(token) if token else None, + params={"path": "/nonexistent/file.txt"}) + ep.actual_status = status + if status != 404: + ep.findings.append(Finding("minor", "status_code", "GET /files", + f"Non-existent file: expected 404, got {status}", "404", str(status))) + results.append(ep) + + return results + + +def run_phase_9_watcher(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 9: Filesystem Watcher.""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 9: Sandbox — Filesystem Watcher") + + if not sid: + print(" [SKIP] No sandbox") + return results + + h = 
connect_hdr(token) + watcher_id = None + + # CreateWatcher + print(" CreateWatcher /tmp") + ep = EndpointResult("POST", "/filesystem.Filesystem/CreateWatcher", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/CreateWatcher", + headers=h, body={"path": "/tmp", "recursive": False}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.CreateWatcherResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/CreateWatcher")) + watcher_id = body.get("watcherId") + results.append(ep) + + # GetWatcherEvents + if watcher_id: + print(f" GetWatcherEvents (watcher: {watcher_id})") + ep = EndpointResult("POST", "/filesystem.Filesystem/GetWatcherEvents", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/GetWatcherEvents", + headers=h, body={"watcherId": watcher_id}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/filesystem.GetWatcherEventsResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/GetWatcherEvents")) + results.append(ep) + + # RemoveWatcher + print(f" RemoveWatcher {watcher_id}") + ep = EndpointResult("POST", "/filesystem.Filesystem/RemoveWatcher", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/RemoveWatcher", + headers=h, body={"watcherId": watcher_id}) + ep.actual_status = status + if status == 200: + schema = {"$ref": "#/components/schemas/filesystem.RemoveWatcherResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /filesystem.Filesystem/RemoveWatcher")) + results.append(ep) + else: + # Still 
record them as skipped (not tested) + for op_name in ("GetWatcherEvents", "RemoveWatcher"): + ep = EndpointResult("POST", f"/filesystem.Filesystem/{op_name}", surface="sandbox") + ep.tested = False + ep.skip_reason = "No watcher_id from CreateWatcher" + results.append(ep) + + return results + + +def run_phase_10_processes(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 10: Process Management.""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 10: Sandbox — Process Management") + + if not sid: + print(" [SKIP] No sandbox") + return results + + h = connect_hdr(token) + h_stream = connect_stream_hdr(token) + + # Start echo hello (streaming — uses application/connect+json) + print(" Start: echo hello") + ep = EndpointResult("POST", "/process.Process/Start", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + start_payload = {"process": {"cmd": "/bin/echo", "args": ["hello"], "envs": {}}, "tag": "test-echo"} + status, body, _ = envd("POST", sid, "/process.Process/Start", + headers=h_stream, raw_body=connect_envelope(start_payload), timeout=5) + ep.actual_status = status + ep.response_body = body + if status == 200: + # Streaming response may be NDJSON (newline-delimited JSON) + # Try to parse as regular JSON first, then as NDJSON + if isinstance(body, dict): + schema = {"$ref": "#/components/schemas/process.StartResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/Start")) + results.append(ep) + + # List processes (unary — application/json) + print(" List") + ep = EndpointResult("POST", "/process.Process/List", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + status, body, _ = envd("POST", sid, "/process.Process/List", + headers=h, body={}) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/process.ListResponse"} + 
ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/List")) + results.append(ep) + + # Start sleep 60 (long-running, streaming) + print(" Start: sleep 60") + sleep_payload = {"process": {"cmd": "/bin/sleep", "args": ["60"], "envs": {}}, "tag": "test-sleep"} + status_start, body_start, _ = envd("POST", sid, "/process.Process/Start", + headers=h_stream, + raw_body=connect_envelope(sleep_payload), timeout=5) + sleep_pid = None + if isinstance(body_start, dict): + event = body_start.get("event", {}) + start_event = event.get("start", {}) + sleep_pid = start_event.get("pid") + elif isinstance(body_start, str): + # NDJSON: try to parse first line + for line in body_start.strip().split("\n"): + line = line.strip() + if line: + try: + parsed = json.loads(line) + if isinstance(parsed, dict): + event = parsed.get("event", {}) + start_event = event.get("start", {}) + if start_event.get("pid"): + sleep_pid = start_event["pid"] + break + except json.JSONDecodeError: + pass + print(f" sleep PID: {sleep_pid}") + + # Connect to process (streaming) + print(" Connect to sleep process") + ep = EndpointResult("POST", "/process.Process/Connect", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + connect_payload = {"process": {"pid": sleep_pid}} if sleep_pid else {"process": {"tag": "test-sleep"}} + status, body, _ = envd("POST", sid, "/process.Process/Connect", + headers=h_stream, raw_body=connect_envelope(connect_payload), timeout=3) + ep.actual_status = status + ep.response_body = body + if status == 200 and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/process.ConnectResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/Connect")) + results.append(ep) + + # SendInput (unary) + print(" SendInput") + ep = EndpointResult("POST", "/process.Process/SendInput", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + sel = {"pid": sleep_pid} if 
sleep_pid else {"tag": "test-sleep"} + status, body, _ = envd("POST", sid, "/process.Process/SendInput", + headers=h, body={"process": sel, "input": {"stdin": "dGVzdA=="}}) # base64 "test" + ep.actual_status = status + if status == 200: + schema = {"$ref": "#/components/schemas/process.SendInputResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/SendInput")) + results.append(ep) + + # StreamInput (client-streaming — uses application/connect+json) + print(" StreamInput (client-streaming — limited test)") + ep = EndpointResult("POST", "/process.Process/StreamInput", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + sel = {"pid": sleep_pid} if sleep_pid else {"tag": "test-sleep"} + stream_input_payload = {"start": {"process": sel}} + status, body, _ = envd("POST", sid, "/process.Process/StreamInput", + headers=h_stream, raw_body=connect_envelope(stream_input_payload), timeout=3) + ep.actual_status = status + ep.response_body = body + results.append(ep) + + + + # Update — select existing process without PTY resize (resize requires + # the process to have been started with a PTY, which the streaming Start + # envelope doesn't reliably support in this test harness). 
+ print(" Update (no-op, verify endpoint accepts request)") + ep = EndpointResult("POST", "/process.Process/Update", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + sel = {"pid": sleep_pid} if sleep_pid else {"tag": "test-sleep"} + status, body, _ = envd("POST", sid, "/process.Process/Update", + headers=h, body={"process": sel}) + ep.actual_status = status + ep.response_body = body + if status == 200: + schema = {"$ref": "#/components/schemas/process.UpdateResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/Update")) + results.append(ep) + + # SendSignal — kill the sleep process + print(" SendSignal SIGTERM") + ep = EndpointResult("POST", "/process.Process/SendSignal", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + sel = {"pid": sleep_pid} if sleep_pid else {"tag": "test-sleep"} + status, body, _ = envd("POST", sid, "/process.Process/SendSignal", + headers=h, body={"process": sel, "signal": "SIGNAL_SIGTERM"}) + ep.actual_status = status + if status == 200: + schema = {"$ref": "#/components/schemas/process.SendSignalResponse"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), "POST /process.Process/SendSignal")) + results.append(ep) + + return results + + +def run_phase_11_streaming(spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 11: Streaming (best-effort).""" + results = [] + sid = sbx.sandbox_id + token = sbx.access_token + + print("\n Phase 11: Sandbox — Streaming (best-effort)") + + if not sid: + print(" [SKIP] No sandbox") + return results + + h = connect_stream_hdr(token) + + # WatchDir (server-streaming) + print(" WatchDir /tmp (server-streaming)") + ep = EndpointResult("POST", "/filesystem.Filesystem/WatchDir", surface="sandbox") + ep.tested = True + ep.expected_status = 200 + watchdir_payload = {"path": "/tmp", "recursive": False} + status, body, _ = envd("POST", sid, "/filesystem.Filesystem/WatchDir", + headers=h, 
raw_body=connect_envelope(watchdir_payload), timeout=3) + ep.actual_status = status + ep.response_body = body + ep.findings.append(Finding("minor", "schema", "POST /filesystem.Filesystem/WatchDir", + "Server-streaming: only initial frame captured (stdlib limitation)")) + results.append(ep) + + return results + + +def run_phase_12_destructive(api_key: str, spec: dict, sbx: SandboxManager) -> list[EndpointResult]: + """Phase 12: Destructive (last).""" + results = [] + h = api_key_hdr(api_key) + sid = sbx.sandbox_id + + print("\n Phase 12: Platform — Destructive") + + if not sid: + print(" [SKIP] No sandbox") + return results + + # POST /sandboxes/{sandboxID}/pause + print(" POST .../pause") + ep = EndpointResult("POST", "/sandboxes/{sandboxID}/pause", surface="platform") + ep.tested = True + ep.expected_status = 204 + status, body, _ = ctrl("POST", f"/sandboxes/{sid}/pause", headers=h) + ep.actual_status = status + if status not in (204, 409): + ep.findings.append(Finding("critical", "status_code", "POST /sandboxes/{sandboxID}/pause", + f"Expected 204, got {status}", "204", str(status))) + results.append(ep) + + # POST /sandboxes/{sandboxID}/resume (deprecated) + if status == 204: + time.sleep(1) + print(" POST .../resume (deprecated)") + ep = EndpointResult("POST", "/sandboxes/{sandboxID}/resume", surface="platform") + ep.tested = True + ep.expected_status = 201 + status, body, _ = ctrl("POST", f"/sandboxes/{sid}/resume", headers=h, + body={"timeout": 60}) + ep.actual_status = status + ep.response_body = body + if status in (200, 201) and isinstance(body, dict): + schema = {"$ref": "#/components/schemas/Sandbox"} + ep.findings.extend(_tag_findings(validate_schema(body, schema, spec), + "POST /sandboxes/{sandboxID}/resume")) + elif status not in (200, 201): + ep.findings.append(Finding("minor", "status_code", "POST /sandboxes/{sandboxID}/resume", + f"Expected 201, got {status}", "201", str(status))) + results.append(ep) + else: + ep = EndpointResult("POST", 
"/sandboxes/{sandboxID}/resume", surface="platform") + ep.tested = False + ep.skip_reason = "Pause failed, cannot test resume" + results.append(ep) + + # DELETE /sandboxes/{sandboxID} + print(f" DELETE /sandboxes/{sid}") + ep = EndpointResult("DELETE", "/sandboxes/{sandboxID}", surface="platform") + ep.tested = True + ep.expected_status = 204 + status, body, _ = ctrl("DELETE", f"/sandboxes/{sid}", headers=h) + ep.actual_status = status + if status != 204: + ep.findings.append(Finding("critical", "status_code", "DELETE /sandboxes/{sandboxID}", + f"Expected 204, got {status}", "204", str(status))) + results.append(ep) + sbx.sandbox_id = None # Mark as cleaned up + + return results + + +# --------------------------------------------------------------------------- +# HELPERS +# --------------------------------------------------------------------------- + +def _tag_findings(findings: list[Finding], endpoint: str) -> list[Finding]: + """Tag all findings with the endpoint name.""" + for f in findings: + if not f.endpoint: + f.endpoint = endpoint + return findings + + +# --------------------------------------------------------------------------- +# REPORT GENERATION +# --------------------------------------------------------------------------- + +def generate_report( + all_results: list[EndpointResult], + spec_issues: list[SpecIssue], + start_time: float, + end_time: float, +) -> str: + """Generate the markdown validation report.""" + now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC") + duration = end_time - start_time + + # Count findings — only critical matters for CI + all_findings = [] + for r in all_results: + all_findings.extend(f for f in r.findings if f.severity == "critical") + tested = sum(1 for r in all_results if r.tested) + total = len(all_results) + + lines = [] + lines.append("# E2B OpenAPI Spec Validation Report\n") + lines.append(f"**Date**: {now}") + lines.append(f"**Spec Version**: 0.1.0") + lines.append(f"**Endpoints Tested**: {tested} / 
{total}") + lines.append(f"**Critical Findings**: {len(all_findings)}") + lines.append(f"**Duration**: {duration:.1f}s\n") + + # Executive Summary + lines.append("## Executive Summary\n") + if not all_findings: + lines.append("No critical findings. The spec matches the live API behavior.") + else: + lines.append(f"Found {len(all_findings)} critical discrepancies between the spec and the live API. " + f"See details below.") + lines.append("") + + # Endpoint Results — Platform + lines.append("## Endpoint Results\n") + lines.append("### Platform API\n") + platform_results = [r for r in all_results if r.surface == "platform"] + for r in platform_results: + _render_endpoint_result(lines, r) + + # Endpoint Results — Sandbox + lines.append("### Sandbox API (envd)\n") + sandbox_results = [r for r in all_results if r.surface == "sandbox"] + for r in sandbox_results: + _render_endpoint_result(lines, r) + + # Critical Findings Summary + lines.append("## Critical Findings\n") + lines.append("Issues where the spec does not match the actual API behavior.\n") + if all_findings: + lines.append("| # | Endpoint | Category | Finding | Expected | Actual |") + lines.append("|---|----------|----------|---------|----------|--------|") + for i, f in enumerate(all_findings, 1): + lines.append(f"| {i} | {f.endpoint} | {f.category} | {f.message[:80]} | {f.expected} | {f.actual} |") + else: + lines.append("None found.") + lines.append("") + + # Best-Practice Recommendations + lines.append("### Best-Practice Recommendations\n") + lines.append("Holistic improvements to make the spec production-quality.\n") + if spec_issues: + lines.append("| # | Category | Recommendation |") + lines.append("|---|----------|----------------|") + for i, issue in enumerate(spec_issues, 1): + lines.append(f"| {i} | {issue.category} | {issue.description} |") + else: + lines.append("None found.") + lines.append("") + + # Streaming Endpoints + lines.append("## Streaming Endpoints\n") + lines.append("Document what was 
tested and what could not be validated for each of the 4 streaming endpoints.\n") + lines.append("| Endpoint | What was tested | Limitations |") + lines.append("|----------|----------------|-------------|") + streaming_eps = [ + ("POST /filesystem.Filesystem/WatchDir", "Initial HTTP response captured", "Server-streaming: only first frame via stdlib urllib"), + ("POST /process.Process/Connect", "Initial HTTP response captured", "Server-streaming: only first frame via stdlib urllib"), + ("POST /process.Process/Start", "Initial HTTP response captured", "Server-streaming: only first frame via stdlib urllib"), + ("POST /process.Process/StreamInput", "Initial HTTP request sent", "Client-streaming: cannot maintain stream via stdlib urllib"), + ] + for ep, tested_desc, limitation in streaming_eps: + lines.append(f"| {ep} | {tested_desc} | {limitation} |") + lines.append("") + + # Deprecated Endpoints + lines.append("## Deprecated Endpoints\n") + lines.append("For each deprecated endpoint: does it still work? What does the spec say the replacement is?\n") + lines.append("| Endpoint | Still works? 
| Replacement | Notes |") + lines.append("|----------|-------------|-------------|-------|") + deprecated_eps = [ + ("GET /sandboxes/{sandboxID}/logs", "Yes", "N/A (v2 endpoint doesn't exist)", "v1 returns 200"), + ("POST /sandboxes/{sandboxID}/resume", "Yes", "POST /sandboxes/{sandboxID}/connect", "Returns Sandbox schema"), + ("POST /v2/templates", "Yes", "POST /v3/templates", "v2 requires alias field"), + ("POST /templates", "Needs Bearer", "POST /v3/templates", "Uses AccessTokenAuth"), + ("POST /templates/{templateID}", "Needs Bearer", "POST /v3/templates", "Rebuild, uses AccessTokenAuth"), + ("PATCH /templates/{templateID}", "Yes", "PATCH /v2/templates/{templateID}", "Update template"), + ("POST /templates/{templateID}/builds/{buildID}", "Needs Bearer", "POST /v2/.../builds/{buildID}", "Start build"), + ] + for ep, works, replacement, notes in deprecated_eps: + lines.append(f"| {ep} | {works} | {replacement} | {notes} |") + lines.append("") + + # Untested Scenarios + lines.append("## Untested Scenarios\n") + lines.append("List any endpoints or scenarios you could not test, and why.\n") + lines.append("| Endpoint | Reason |") + lines.append("|----------|--------|") + untested = [r for r in all_results if not r.tested] + for r in untested: + lines.append(f"| {r.method} {r.path} | {r.skip_reason or 'Unknown'} |") + # General limitations + lines.append("| Rate limiting (429) | Cannot safely trigger without affecting quota |") + lines.append("| Conflict (409) | Requires specific data state |") + lines.append("| Internal errors (500) | Cannot reliably reproduce |") + lines.append("") + + return "\n".join(lines) + + +def _render_endpoint_result(lines: list[str], r: EndpointResult): + """Render a single endpoint result to markdown.""" + icon = "YES" if r.tested else "NO" + critical_findings = [f for f in r.findings if f.severity == "critical"] + lines.append(f"#### {r.method} {r.path}") + lines.append(f"- **Tested**: {icon}" + (f" ({r.skip_reason})" if not r.tested and 
r.skip_reason else "")) + if r.tested: + lines.append(f"- **Expected Status**: {r.expected_status}") + lines.append(f"- **Actual Status**: {r.actual_status}") + lines.append(f"- **Response Schema**:") + if critical_findings: + missing = [f for f in critical_findings if f.category == "missing_field"] + extra = [f for f in critical_findings if f.category == "extra_field"] + types = [f for f in critical_findings if f.category == "type_mismatch"] + other = [f for f in critical_findings if f.category not in ("missing_field", "extra_field", "type_mismatch")] + lines.append(f" - Required fields present: {'list missing: ' + ', '.join(f.message for f in missing) if missing else 'YES'}") + lines.append(f" - Extra undocumented fields: {', '.join(f.message for f in extra) if extra else 'none'}") + lines.append(f" - Type mismatches: {', '.join(f.message for f in types) if types else 'none'}") + if other: + lines.append(f"- **Findings**:") + for f in other: + lines.append(f" - [CRITICAL] {f.message}") + else: + lines.append(f" - Required fields present: YES") + lines.append(f" - Extra undocumented fields: none") + lines.append(f" - Type mismatches: none") + lines.append("") + + +# --------------------------------------------------------------------------- +# CLI & MAIN +# --------------------------------------------------------------------------- + +def print_help(): + print(__doc__) + sys.exit(0) + + +def main(): + if "--help" in sys.argv or "-h" in sys.argv: + print_help() + + api_key = os.environ.get("E2B_API_KEY") + if not api_key: + print("Error: E2B_API_KEY environment variable is required") + sys.exit(2) + + global VERBOSE + VERBOSE = "--verbose" in sys.argv + + skip_sandbox = "--skip-sandbox" in sys.argv + output_path = "openapi-validation-report.md" + phase_filter = None + http_timeout = 15 + + # Parse args + args = sys.argv[1:] + i = 0 + while i < len(args): + if args[i] == "--output" and i + 1 < len(args): + output_path = args[i + 1] + i += 2 + elif args[i] == 
"--phase" and i + 1 < len(args): + phase_filter = int(args[i + 1]) + i += 2 + elif args[i] == "--timeout" and i + 1 < len(args): + http_timeout = int(args[i + 1]) + i += 2 + else: + i += 1 + + env_team_id = os.environ.get("E2B_TEAM_ID") + access_token = os.environ.get("E2B_ACCESS_TOKEN") + + print("=" * 60) + print(" E2B OpenAPI Spec Validation") + print("=" * 60) + print(f" Spec: {SPEC_PATH.name}") + print(f" Platform URL: {PLATFORM_URL}") + print(f" Envd port: {ENVD_PORT}") + print(f" API Key: {api_key[:10]}...{api_key[-4:]}") + print(f" Access Token: {access_token[:10]}...{access_token[-4:]}" if access_token else " Access Token: (not set)") + print(f" Skip sandbox: {skip_sandbox}") + print(f" Verbose: {VERBOSE}") + print(f" Output: {output_path}") + if phase_filter: + print(f" Phase filter: {phase_filter}") + + # Load spec + spec = load_spec(SPEC_PATH) + print(f" Spec paths: {len(spec.get('paths', {}))}") + + # Discover team ID + team_id = discover_team_id(api_key, env_team_id, access_token=access_token) + print(f" Team ID: {team_id[:16]}..." 
if team_id else " Team ID: (not found)") + + start_time = time.time() + all_results: list[EndpointResult] = [] + sbx = SandboxManager(api_key) + + def should_run(phase: int) -> bool: + return phase_filter is None or phase_filter == phase + + try: + # Phase 1: Teams (includes 401 auth checks) + if should_run(1): + all_results.extend(run_phase_1_teams(api_key, team_id, spec, access_token=access_token)) + + # Phase 2: Templates (read) + template_id = None + build_id = None + alias = None + template_name = None + template_tag = None + if should_run(2): + phase2_results, template_id, build_id, alias, template_name, template_tag = run_phase_2_templates_read(api_key, spec) + all_results.extend(phase2_results) + + # Phase 3: Templates (write) + if should_run(3): + all_results.extend(run_phase_3_templates_write(api_key, spec, template_id, template_name, template_tag)) + + # Create sandbox for phases 4-12 + if not skip_sandbox and any(should_run(p) for p in range(4, 13)): + if not sbx.create(): + print(" FATAL: Cannot create sandbox. 
Skipping sandbox-dependent phases.") + skip_sandbox = True + + # Phase 4: Sandboxes (read) + if should_run(4): + all_results.extend(run_phase_4_sandboxes_read(api_key, spec, sbx)) + + # Phase 5: Sandbox actions + if should_run(5) and not skip_sandbox: + all_results.extend(run_phase_5_sandbox_actions(api_key, spec, sbx)) + + # Phase 6: Health & System + if should_run(6) and not skip_sandbox: + all_results.extend(run_phase_6_health_system(spec, sbx)) + + # Phase 7: Filesystem RPC + if should_run(7) and not skip_sandbox: + all_results.extend(run_phase_7_filesystem_rpc(spec, sbx)) + + # Phase 8: Files REST + if should_run(8) and not skip_sandbox: + all_results.extend(run_phase_8_files_rest(spec, sbx)) + + # Phase 9: Watcher + if should_run(9) and not skip_sandbox: + all_results.extend(run_phase_9_watcher(spec, sbx)) + + # Phase 10: Processes + if should_run(10) and not skip_sandbox: + all_results.extend(run_phase_10_processes(spec, sbx)) + + # Phase 11: Streaming + if should_run(11) and not skip_sandbox: + all_results.extend(run_phase_11_streaming(spec, sbx)) + + # Phase 12: Destructive + if should_run(12) and not skip_sandbox: + all_results.extend(run_phase_12_destructive(api_key, spec, sbx)) + + finally: + # Ensure cleanup + if sbx.sandbox_id: + sbx.cleanup() + + end_time = time.time() + + # Flag status-code mismatches that individual tests didn't already catch. + # Skip endpoints that already raised a status_code or auth finding. 
+ for r in all_results: + if (r.tested and r.expected_status and r.actual_status + and r.actual_status != r.expected_status + and not any(f.category in ("status_code", "auth") for f in r.findings)): + r.findings.append(Finding( + "critical", "status_code", f"{r.method} {r.path}", + f"Expected {r.expected_status}, got {r.actual_status}", + str(r.expected_status), str(r.actual_status))) + + # Spec-level analysis + print("\n Analyzing spec for best-practice issues...") + spec_issues = analyze_spec(spec) + print(f" Found {len(spec_issues)} spec-level issues") + + # Generate report + print(f"\n Generating report: {output_path}") + report = generate_report(all_results, spec_issues, start_time, end_time) + with open(output_path, "w") as f: + f.write(report) + + # Summary — only critical findings matter (CI pass/fail) + all_findings = [] + for r in all_results: + all_findings.extend(f for f in r.findings if f.severity == "critical") + tested = sum(1 for r in all_results if r.tested) + + print("\n" + "=" * 60) + print(f" Results: {tested} endpoints tested") + print(f" Findings: {len(all_findings)} critical") + if all_findings: + for f in all_findings: + print(f" - {f.endpoint}: {f.message}") + print(f" Report written to: {output_path}") + print("=" * 60) + + sys.exit(1 if all_findings else 0) + + +if __name__ == "__main__": + main() diff --git a/style.css b/style.css index 1b297867..42c4455d 100644 --- a/style.css +++ b/style.css @@ -90,3 +90,13 @@ code { background-color: rgba(255, 136, 0, 0.25) !important; border-color: rgba(255, 136, 0, 0.6) !important; } + +/* Hide scrollbar on sidebar API endpoint rows with deprecated badges */ +#sidebar a .overflow-x-hidden { + scrollbar-width: none !important; + -ms-overflow-style: none !important; +} + +#sidebar a .overflow-x-hidden::-webkit-scrollbar { + display: none !important; +}