diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml new file mode 100644 index 00000000000..f907486b3e8 --- /dev/null +++ b/.github/workflows/deploy-docs.yml @@ -0,0 +1,49 @@ +name: Deploy Docs to GitHub Pages + +on: + # Trigger the workflow every time you push to the `main` branch + # Using a different branch name? Replace `main` with your branch’s name + push: + branches: [main] + # Allows you to run this workflow manually from the Actions tab on GitHub. + workflow_dispatch: + +# Allow this job to clone the repo and create a page deployment +permissions: + contents: read + pages: write + id-token: write + +# Allow one concurrent deployment +concurrency: + group: 'pages' + cancel-in-progress: true + +jobs: + build: + runs-on: ubuntu-latest + defaults: + run: + working-directory: docs + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Install, build, and upload site output + uses: withastro/action@fc88a7002f0c62327f26fb1b15f2a1d581249278 # v5.1.0 + with: + path: docs # The root location of your Astro project inside the repository. (optional) + # node-version: 22 # The specific version of Node that should be used to build your site. Defaults to 22. (optional) + # package-manager: pnpm@latest # The Node package manager that should be used to install dependencies and build your site. Automatically detected based on your lockfile. (optional) + # build-cmd: pnpm run build # The command to run to build your site. Runs the package build script/task by default. 
(optional) + + deploy: + needs: build + runs-on: ubuntu-latest + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5 diff --git a/.github/workflows/translations.yml b/.github/workflows/translations.yml new file mode 100644 index 00000000000..111ea4c81ba --- /dev/null +++ b/.github/workflows/translations.yml @@ -0,0 +1,78 @@ +name: Crowdin Translations + +on: + # Allow manual runs from the Actions tab + workflow_dispatch: + inputs: + upload_sources: + description: 'Upload source strings to Crowdin' + type: boolean + default: true + download_translations: + description: 'Download translations from Crowdin' + type: boolean + default: true + + # Upload sources & download translations when source files change on main + push: + branches: + - main + paths: + - 'invokeai/frontend/web/public/locales/en.json' + - 'docs/src/content/i18n/en.json' + - 'docs/src/content/docs/**/*.md' + - 'docs/src/content/docs/**/*.mdx' + - '!docs/src/content/docs/[a-z][a-z]/**' + - '!docs/src/content/docs/[a-z][a-z]-*/**' + - 'crowdin.yml' + +permissions: + contents: write + pull-requests: write + +jobs: + crowdin-sync: + name: Sync with Crowdin + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Crowdin Sync + uses: crowdin/github-action@v2 + with: + # Upload sources on push to main or when manually requested + upload_sources: ${{ github.event_name != 'workflow_dispatch' || inputs.upload_sources }} + upload_translations: false + + # Download translations on push to main or when manually requested + download_translations: ${{ github.event_name != 'workflow_dispatch' || inputs.download_translations }} + + # PR settings for downloaded translations + create_pull_request: true + pull_request_title: 'i18n: update translations from Crowdin' + pull_request_body: | + Automated pull request from 
[Crowdin](https://crowdin.com). + + This PR updates translations for: + - **Web App UI** (`invokeai/frontend/web/public/locales/`) + - **Documentation UI Strings** (`docs/src/content/i18n/`) + - **Documentation Content** (`docs/src/content/docs/`) + pull_request_base_branch_name: main + pull_request_labels: 'i18n' + + # Commit settings + localization_branch_name: crowdin/translations + commit_message: 'i18n: update translations from Crowdin' + + # Use the config file at the repo root + config: crowdin.yml + + # Skip untranslated strings/files to keep partial translations clean + download_translations_args: '--skip-untranslated-strings' + + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CROWDIN_PROJECT_ID: ${{ secrets.CROWDIN_PROJECT_ID }} + CROWDIN_PERSONAL_TOKEN: ${{ secrets.CROWDIN_PERSONAL_TOKEN }} diff --git a/crowdin.yml b/crowdin.yml new file mode 100644 index 00000000000..a9105c260e3 --- /dev/null +++ b/crowdin.yml @@ -0,0 +1,30 @@ +# Crowdin Configuration +# https://developer.crowdin.com/configuration-file/ + +project_id_env: CROWDIN_PROJECT_ID +api_token_env: CROWDIN_PERSONAL_TOKEN + +preserve_hierarchy: true + +# Map Crowdin's zh-TW to zh-Hant to match the existing file convention +languages_mapping: + locale: + zh-TW: zh-Hant + +files: + # Web App UI Translations + - source: /invokeai/frontend/web/public/locales/en.json + translation: /invokeai/frontend/web/public/locales/%locale%.json + + # Documentation - Starlight UI Strings + - source: /docs/src/content/i18n/en.json + translation: /docs/src/content/i18n/%locale%.json + + # Documentation - Content Pages (MD and MDX) + - source: /docs/src/content/docs/**/*.{md,mdx} + translation: /docs/src/content/docs/%locale%/**/%original_file_name% + # Exclude translations directory to avoid re-uploading them as source files + ignore: + - /docs/src/content/docs/%locale%/**/* + # Translate full paragraphs rather than splitting into sentences + content_segmentation: 0 diff --git a/docs/CODE_OF_CONDUCT.md 
b/docs-old/CODE_OF_CONDUCT.md similarity index 100% rename from docs/CODE_OF_CONDUCT.md rename to docs-old/CODE_OF_CONDUCT.md diff --git a/docs/RELEASE.md b/docs-old/RELEASE.md similarity index 100% rename from docs/RELEASE.md rename to docs-old/RELEASE.md diff --git a/docs/assets/Lincoln-and-Parrot-512-transparent.png b/docs-old/assets/Lincoln-and-Parrot-512-transparent.png similarity index 100% rename from docs/assets/Lincoln-and-Parrot-512-transparent.png rename to docs-old/assets/Lincoln-and-Parrot-512-transparent.png diff --git a/docs/assets/Lincoln-and-Parrot-512.png b/docs-old/assets/Lincoln-and-Parrot-512.png similarity index 100% rename from docs/assets/Lincoln-and-Parrot-512.png rename to docs-old/assets/Lincoln-and-Parrot-512.png diff --git a/docs/assets/canvas/biker_granny.png b/docs-old/assets/canvas/biker_granny.png similarity index 100% rename from docs/assets/canvas/biker_granny.png rename to docs-old/assets/canvas/biker_granny.png diff --git a/docs/assets/canvas/biker_jacket_granny.png b/docs-old/assets/canvas/biker_jacket_granny.png similarity index 100% rename from docs/assets/canvas/biker_jacket_granny.png rename to docs-old/assets/canvas/biker_jacket_granny.png diff --git a/docs/assets/canvas/mask_granny.png b/docs-old/assets/canvas/mask_granny.png similarity index 100% rename from docs/assets/canvas/mask_granny.png rename to docs-old/assets/canvas/mask_granny.png diff --git a/docs/assets/canvas/staging_area.png b/docs-old/assets/canvas/staging_area.png similarity index 100% rename from docs/assets/canvas/staging_area.png rename to docs-old/assets/canvas/staging_area.png diff --git a/docs/assets/canvas_preview.png b/docs-old/assets/canvas_preview.png similarity index 100% rename from docs/assets/canvas_preview.png rename to docs-old/assets/canvas_preview.png diff --git a/docs/assets/colab_notebook.png b/docs-old/assets/colab_notebook.png similarity index 100% rename from docs/assets/colab_notebook.png rename to 
docs-old/assets/colab_notebook.png diff --git a/docs/assets/concepts/image1.png b/docs-old/assets/concepts/image1.png similarity index 100% rename from docs/assets/concepts/image1.png rename to docs-old/assets/concepts/image1.png diff --git a/docs/assets/concepts/image2.png b/docs-old/assets/concepts/image2.png similarity index 100% rename from docs/assets/concepts/image2.png rename to docs-old/assets/concepts/image2.png diff --git a/docs/assets/concepts/image3.png b/docs-old/assets/concepts/image3.png similarity index 100% rename from docs/assets/concepts/image3.png rename to docs-old/assets/concepts/image3.png diff --git a/docs/assets/concepts/image4.png b/docs-old/assets/concepts/image4.png similarity index 100% rename from docs/assets/concepts/image4.png rename to docs-old/assets/concepts/image4.png diff --git a/docs/assets/concepts/image5.png b/docs-old/assets/concepts/image5.png similarity index 100% rename from docs/assets/concepts/image5.png rename to docs-old/assets/concepts/image5.png diff --git a/docs/assets/contributing/html-detail.png b/docs-old/assets/contributing/html-detail.png similarity index 100% rename from docs/assets/contributing/html-detail.png rename to docs-old/assets/contributing/html-detail.png diff --git a/docs/assets/contributing/html-overview.png b/docs-old/assets/contributing/html-overview.png similarity index 100% rename from docs/assets/contributing/html-overview.png rename to docs-old/assets/contributing/html-overview.png diff --git a/docs/assets/contributing/resize_invocation.png b/docs-old/assets/contributing/resize_invocation.png similarity index 100% rename from docs/assets/contributing/resize_invocation.png rename to docs-old/assets/contributing/resize_invocation.png diff --git a/docs/assets/contributing/resize_node_editor.png b/docs-old/assets/contributing/resize_node_editor.png similarity index 100% rename from docs/assets/contributing/resize_node_editor.png rename to docs-old/assets/contributing/resize_node_editor.png diff 
--git a/docs/assets/control-panel-2.png b/docs-old/assets/control-panel-2.png similarity index 100% rename from docs/assets/control-panel-2.png rename to docs-old/assets/control-panel-2.png diff --git a/docs/assets/dream-py-demo.png b/docs-old/assets/dream-py-demo.png similarity index 100% rename from docs/assets/dream-py-demo.png rename to docs-old/assets/dream-py-demo.png diff --git a/docs/assets/dream_web_server.png b/docs-old/assets/dream_web_server.png similarity index 100% rename from docs/assets/dream_web_server.png rename to docs-old/assets/dream_web_server.png diff --git a/docs/assets/features/restoration-montage.png b/docs-old/assets/features/restoration-montage.png similarity index 100% rename from docs/assets/features/restoration-montage.png rename to docs-old/assets/features/restoration-montage.png diff --git a/docs/assets/features/upscale-dialog.png b/docs-old/assets/features/upscale-dialog.png similarity index 100% rename from docs/assets/features/upscale-dialog.png rename to docs-old/assets/features/upscale-dialog.png diff --git a/docs/assets/features/upscaling-montage.png b/docs-old/assets/features/upscaling-montage.png similarity index 100% rename from docs/assets/features/upscaling-montage.png rename to docs-old/assets/features/upscaling-montage.png diff --git a/docs/assets/gallery/board_settings.png b/docs-old/assets/gallery/board_settings.png similarity index 100% rename from docs/assets/gallery/board_settings.png rename to docs-old/assets/gallery/board_settings.png diff --git a/docs/assets/gallery/board_tabs.png b/docs-old/assets/gallery/board_tabs.png similarity index 100% rename from docs/assets/gallery/board_tabs.png rename to docs-old/assets/gallery/board_tabs.png diff --git a/docs/assets/gallery/board_thumbnails.png b/docs-old/assets/gallery/board_thumbnails.png similarity index 100% rename from docs/assets/gallery/board_thumbnails.png rename to docs-old/assets/gallery/board_thumbnails.png diff --git a/docs/assets/gallery/gallery.png 
b/docs-old/assets/gallery/gallery.png similarity index 100% rename from docs/assets/gallery/gallery.png rename to docs-old/assets/gallery/gallery.png diff --git a/docs/assets/gallery/image_menu.png b/docs-old/assets/gallery/image_menu.png similarity index 100% rename from docs/assets/gallery/image_menu.png rename to docs-old/assets/gallery/image_menu.png diff --git a/docs/assets/gallery/info_button.png b/docs-old/assets/gallery/info_button.png similarity index 100% rename from docs/assets/gallery/info_button.png rename to docs-old/assets/gallery/info_button.png diff --git a/docs/assets/gallery/thumbnail_menu.png b/docs-old/assets/gallery/thumbnail_menu.png similarity index 100% rename from docs/assets/gallery/thumbnail_menu.png rename to docs-old/assets/gallery/thumbnail_menu.png diff --git a/docs/assets/gallery/top_controls.png b/docs-old/assets/gallery/top_controls.png similarity index 100% rename from docs/assets/gallery/top_controls.png rename to docs-old/assets/gallery/top_controls.png diff --git a/docs/assets/img2img/000019.1592514025.png b/docs-old/assets/img2img/000019.1592514025.png similarity index 100% rename from docs/assets/img2img/000019.1592514025.png rename to docs-old/assets/img2img/000019.1592514025.png diff --git a/docs/assets/img2img/000019.steps.png b/docs-old/assets/img2img/000019.steps.png similarity index 100% rename from docs/assets/img2img/000019.steps.png rename to docs-old/assets/img2img/000019.steps.png diff --git a/docs/assets/img2img/000030.1592514025.png b/docs-old/assets/img2img/000030.1592514025.png similarity index 100% rename from docs/assets/img2img/000030.1592514025.png rename to docs-old/assets/img2img/000030.1592514025.png diff --git a/docs/assets/img2img/000030.step-0.png b/docs-old/assets/img2img/000030.step-0.png similarity index 100% rename from docs/assets/img2img/000030.step-0.png rename to docs-old/assets/img2img/000030.step-0.png diff --git a/docs/assets/img2img/000030.steps.gravity.png 
b/docs-old/assets/img2img/000030.steps.gravity.png similarity index 100% rename from docs/assets/img2img/000030.steps.gravity.png rename to docs-old/assets/img2img/000030.steps.gravity.png diff --git a/docs/assets/img2img/000032.1592514025.png b/docs-old/assets/img2img/000032.1592514025.png similarity index 100% rename from docs/assets/img2img/000032.1592514025.png rename to docs-old/assets/img2img/000032.1592514025.png diff --git a/docs/assets/img2img/000032.step-0.png b/docs-old/assets/img2img/000032.step-0.png similarity index 100% rename from docs/assets/img2img/000032.step-0.png rename to docs-old/assets/img2img/000032.step-0.png diff --git a/docs/assets/img2img/000032.steps.gravity.png b/docs-old/assets/img2img/000032.steps.gravity.png similarity index 100% rename from docs/assets/img2img/000032.steps.gravity.png rename to docs-old/assets/img2img/000032.steps.gravity.png diff --git a/docs/assets/img2img/000034.1592514025.png b/docs-old/assets/img2img/000034.1592514025.png similarity index 100% rename from docs/assets/img2img/000034.1592514025.png rename to docs-old/assets/img2img/000034.1592514025.png diff --git a/docs/assets/img2img/000034.steps.png b/docs-old/assets/img2img/000034.steps.png similarity index 100% rename from docs/assets/img2img/000034.steps.png rename to docs-old/assets/img2img/000034.steps.png diff --git a/docs/assets/img2img/000035.1592514025.png b/docs-old/assets/img2img/000035.1592514025.png similarity index 100% rename from docs/assets/img2img/000035.1592514025.png rename to docs-old/assets/img2img/000035.1592514025.png diff --git a/docs/assets/img2img/000035.steps.gravity.png b/docs-old/assets/img2img/000035.steps.gravity.png similarity index 100% rename from docs/assets/img2img/000035.steps.gravity.png rename to docs-old/assets/img2img/000035.steps.gravity.png diff --git a/docs/assets/img2img/000045.1592514025.png b/docs-old/assets/img2img/000045.1592514025.png similarity index 100% rename from 
docs/assets/img2img/000045.1592514025.png rename to docs-old/assets/img2img/000045.1592514025.png diff --git a/docs/assets/img2img/000045.steps.gravity.png b/docs-old/assets/img2img/000045.steps.gravity.png similarity index 100% rename from docs/assets/img2img/000045.steps.gravity.png rename to docs-old/assets/img2img/000045.steps.gravity.png diff --git a/docs/assets/img2img/000046.1592514025.png b/docs-old/assets/img2img/000046.1592514025.png similarity index 100% rename from docs/assets/img2img/000046.1592514025.png rename to docs-old/assets/img2img/000046.1592514025.png diff --git a/docs/assets/img2img/000046.steps.gravity.png b/docs-old/assets/img2img/000046.steps.gravity.png similarity index 100% rename from docs/assets/img2img/000046.steps.gravity.png rename to docs-old/assets/img2img/000046.steps.gravity.png diff --git a/docs/assets/img2img/fire-drawing.png b/docs-old/assets/img2img/fire-drawing.png similarity index 100% rename from docs/assets/img2img/fire-drawing.png rename to docs-old/assets/img2img/fire-drawing.png diff --git a/docs/assets/inpainting/000019.curly.hair.deselected.png b/docs-old/assets/inpainting/000019.curly.hair.deselected.png similarity index 100% rename from docs/assets/inpainting/000019.curly.hair.deselected.png rename to docs-old/assets/inpainting/000019.curly.hair.deselected.png diff --git a/docs/assets/inpainting/000019.curly.hair.masked.png b/docs-old/assets/inpainting/000019.curly.hair.masked.png similarity index 100% rename from docs/assets/inpainting/000019.curly.hair.masked.png rename to docs-old/assets/inpainting/000019.curly.hair.masked.png diff --git a/docs/assets/inpainting/000019.curly.hair.selected.png b/docs-old/assets/inpainting/000019.curly.hair.selected.png similarity index 100% rename from docs/assets/inpainting/000019.curly.hair.selected.png rename to docs-old/assets/inpainting/000019.curly.hair.selected.png diff --git a/docs/assets/inpainting/000024.801380492.png b/docs-old/assets/inpainting/000024.801380492.png 
similarity index 100% rename from docs/assets/inpainting/000024.801380492.png rename to docs-old/assets/inpainting/000024.801380492.png diff --git a/docs/assets/installer-walkthrough/choose-gpu.png b/docs-old/assets/installer-walkthrough/choose-gpu.png similarity index 100% rename from docs/assets/installer-walkthrough/choose-gpu.png rename to docs-old/assets/installer-walkthrough/choose-gpu.png diff --git a/docs/assets/installer-walkthrough/confirm-directory.png b/docs-old/assets/installer-walkthrough/confirm-directory.png similarity index 100% rename from docs/assets/installer-walkthrough/confirm-directory.png rename to docs-old/assets/installer-walkthrough/confirm-directory.png diff --git a/docs/assets/installer-walkthrough/downloading-models.png b/docs-old/assets/installer-walkthrough/downloading-models.png similarity index 100% rename from docs/assets/installer-walkthrough/downloading-models.png rename to docs-old/assets/installer-walkthrough/downloading-models.png diff --git a/docs/assets/installer-walkthrough/installing-models.png b/docs-old/assets/installer-walkthrough/installing-models.png similarity index 100% rename from docs/assets/installer-walkthrough/installing-models.png rename to docs-old/assets/installer-walkthrough/installing-models.png diff --git a/docs/assets/installer-walkthrough/settings-form.png b/docs-old/assets/installer-walkthrough/settings-form.png similarity index 100% rename from docs/assets/installer-walkthrough/settings-form.png rename to docs-old/assets/installer-walkthrough/settings-form.png diff --git a/docs/assets/installer-walkthrough/unpacked-zipfile.png b/docs-old/assets/installer-walkthrough/unpacked-zipfile.png similarity index 100% rename from docs/assets/installer-walkthrough/unpacked-zipfile.png rename to docs-old/assets/installer-walkthrough/unpacked-zipfile.png diff --git a/docs/assets/installing-models/model-installer-controlnet.png b/docs-old/assets/installing-models/model-installer-controlnet.png similarity index 
100% rename from docs/assets/installing-models/model-installer-controlnet.png rename to docs-old/assets/installing-models/model-installer-controlnet.png diff --git a/docs/assets/installing-models/webui-models-1.png b/docs-old/assets/installing-models/webui-models-1.png similarity index 100% rename from docs/assets/installing-models/webui-models-1.png rename to docs-old/assets/installing-models/webui-models-1.png diff --git a/docs/assets/installing-models/webui-models-2.png b/docs-old/assets/installing-models/webui-models-2.png similarity index 100% rename from docs/assets/installing-models/webui-models-2.png rename to docs-old/assets/installing-models/webui-models-2.png diff --git a/docs/assets/installing-models/webui-models-3.png b/docs-old/assets/installing-models/webui-models-3.png similarity index 100% rename from docs/assets/installing-models/webui-models-3.png rename to docs-old/assets/installing-models/webui-models-3.png diff --git a/docs/assets/installing-models/webui-models-4.png b/docs-old/assets/installing-models/webui-models-4.png similarity index 100% rename from docs/assets/installing-models/webui-models-4.png rename to docs-old/assets/installing-models/webui-models-4.png diff --git a/docs/assets/invoke-control-panel-1.png b/docs-old/assets/invoke-control-panel-1.png similarity index 100% rename from docs/assets/invoke-control-panel-1.png rename to docs-old/assets/invoke-control-panel-1.png diff --git a/docs/assets/invoke-web-server-1.png b/docs-old/assets/invoke-web-server-1.png similarity index 100% rename from docs/assets/invoke-web-server-1.png rename to docs-old/assets/invoke-web-server-1.png diff --git a/docs/assets/invoke-web-server-2.png b/docs-old/assets/invoke-web-server-2.png similarity index 100% rename from docs/assets/invoke-web-server-2.png rename to docs-old/assets/invoke-web-server-2.png diff --git a/docs/assets/invoke-web-server-3.png b/docs-old/assets/invoke-web-server-3.png similarity index 100% rename from 
docs/assets/invoke-web-server-3.png rename to docs-old/assets/invoke-web-server-3.png diff --git a/docs/assets/invoke-web-server-4.png b/docs-old/assets/invoke-web-server-4.png similarity index 100% rename from docs/assets/invoke-web-server-4.png rename to docs-old/assets/invoke-web-server-4.png diff --git a/docs/assets/invoke-web-server-5.png b/docs-old/assets/invoke-web-server-5.png similarity index 100% rename from docs/assets/invoke-web-server-5.png rename to docs-old/assets/invoke-web-server-5.png diff --git a/docs/assets/invoke-web-server-6.png b/docs-old/assets/invoke-web-server-6.png similarity index 100% rename from docs/assets/invoke-web-server-6.png rename to docs-old/assets/invoke-web-server-6.png diff --git a/docs/assets/invoke-web-server-7.png b/docs-old/assets/invoke-web-server-7.png similarity index 100% rename from docs/assets/invoke-web-server-7.png rename to docs-old/assets/invoke-web-server-7.png diff --git a/docs/assets/invoke-web-server-8.png b/docs-old/assets/invoke-web-server-8.png similarity index 100% rename from docs/assets/invoke-web-server-8.png rename to docs-old/assets/invoke-web-server-8.png diff --git a/docs/assets/invoke-web-server-9.png b/docs-old/assets/invoke-web-server-9.png similarity index 100% rename from docs/assets/invoke-web-server-9.png rename to docs-old/assets/invoke-web-server-9.png diff --git a/docs/assets/invoke_ai_banner.png b/docs-old/assets/invoke_ai_banner.png similarity index 100% rename from docs/assets/invoke_ai_banner.png rename to docs-old/assets/invoke_ai_banner.png diff --git a/docs/assets/invoke_web_dark.png b/docs-old/assets/invoke_web_dark.png similarity index 100% rename from docs/assets/invoke_web_dark.png rename to docs-old/assets/invoke_web_dark.png diff --git a/docs/assets/invoke_web_light.png b/docs-old/assets/invoke_web_light.png similarity index 100% rename from docs/assets/invoke_web_light.png rename to docs-old/assets/invoke_web_light.png diff --git a/docs/assets/invoke_web_server.png 
b/docs-old/assets/invoke_web_server.png similarity index 100% rename from docs/assets/invoke_web_server.png rename to docs-old/assets/invoke_web_server.png diff --git a/docs/assets/join-us-on-discord-image.png b/docs-old/assets/join-us-on-discord-image.png similarity index 100% rename from docs/assets/join-us-on-discord-image.png rename to docs-old/assets/join-us-on-discord-image.png diff --git a/docs/assets/logo.png b/docs-old/assets/logo.png similarity index 100% rename from docs/assets/logo.png rename to docs-old/assets/logo.png diff --git a/docs/assets/lora-example-0.png b/docs-old/assets/lora-example-0.png similarity index 100% rename from docs/assets/lora-example-0.png rename to docs-old/assets/lora-example-0.png diff --git a/docs/assets/lora-example-1.png b/docs-old/assets/lora-example-1.png similarity index 100% rename from docs/assets/lora-example-1.png rename to docs-old/assets/lora-example-1.png diff --git a/docs/assets/lora-example-2.png b/docs-old/assets/lora-example-2.png similarity index 100% rename from docs/assets/lora-example-2.png rename to docs-old/assets/lora-example-2.png diff --git a/docs/assets/lora-example-3.png b/docs-old/assets/lora-example-3.png similarity index 100% rename from docs/assets/lora-example-3.png rename to docs-old/assets/lora-example-3.png diff --git a/docs/assets/negative_prompt_walkthru/step1.png b/docs-old/assets/negative_prompt_walkthru/step1.png similarity index 100% rename from docs/assets/negative_prompt_walkthru/step1.png rename to docs-old/assets/negative_prompt_walkthru/step1.png diff --git a/docs/assets/negative_prompt_walkthru/step2.png b/docs-old/assets/negative_prompt_walkthru/step2.png similarity index 100% rename from docs/assets/negative_prompt_walkthru/step2.png rename to docs-old/assets/negative_prompt_walkthru/step2.png diff --git a/docs/assets/negative_prompt_walkthru/step3.png b/docs-old/assets/negative_prompt_walkthru/step3.png similarity index 100% rename from 
docs/assets/negative_prompt_walkthru/step3.png rename to docs-old/assets/negative_prompt_walkthru/step3.png diff --git a/docs/assets/negative_prompt_walkthru/step4.png b/docs-old/assets/negative_prompt_walkthru/step4.png similarity index 100% rename from docs/assets/negative_prompt_walkthru/step4.png rename to docs-old/assets/negative_prompt_walkthru/step4.png diff --git a/docs/assets/nodes/groupsallscale.png b/docs-old/assets/nodes/groupsallscale.png similarity index 100% rename from docs/assets/nodes/groupsallscale.png rename to docs-old/assets/nodes/groupsallscale.png diff --git a/docs/assets/nodes/groupsconditioning.png b/docs-old/assets/nodes/groupsconditioning.png similarity index 100% rename from docs/assets/nodes/groupsconditioning.png rename to docs-old/assets/nodes/groupsconditioning.png diff --git a/docs/assets/nodes/groupscontrol.png b/docs-old/assets/nodes/groupscontrol.png similarity index 100% rename from docs/assets/nodes/groupscontrol.png rename to docs-old/assets/nodes/groupscontrol.png diff --git a/docs/assets/nodes/groupsimgvae.png b/docs-old/assets/nodes/groupsimgvae.png similarity index 100% rename from docs/assets/nodes/groupsimgvae.png rename to docs-old/assets/nodes/groupsimgvae.png diff --git a/docs/assets/nodes/groupsiterate.png b/docs-old/assets/nodes/groupsiterate.png similarity index 100% rename from docs/assets/nodes/groupsiterate.png rename to docs-old/assets/nodes/groupsiterate.png diff --git a/docs/assets/nodes/groupslora.png b/docs-old/assets/nodes/groupslora.png similarity index 100% rename from docs/assets/nodes/groupslora.png rename to docs-old/assets/nodes/groupslora.png diff --git a/docs/assets/nodes/groupsmultigenseeding.png b/docs-old/assets/nodes/groupsmultigenseeding.png similarity index 100% rename from docs/assets/nodes/groupsmultigenseeding.png rename to docs-old/assets/nodes/groupsmultigenseeding.png diff --git a/docs/assets/nodes/groupsnoise.png b/docs-old/assets/nodes/groupsnoise.png similarity index 100% rename 
from docs/assets/nodes/groupsnoise.png rename to docs-old/assets/nodes/groupsnoise.png diff --git a/docs/assets/nodes/linearview.png b/docs-old/assets/nodes/linearview.png similarity index 100% rename from docs/assets/nodes/linearview.png rename to docs-old/assets/nodes/linearview.png diff --git a/docs/assets/nodes/nodescontrol.png b/docs-old/assets/nodes/nodescontrol.png similarity index 100% rename from docs/assets/nodes/nodescontrol.png rename to docs-old/assets/nodes/nodescontrol.png diff --git a/docs/assets/nodes/nodesi2i.png b/docs-old/assets/nodes/nodesi2i.png similarity index 100% rename from docs/assets/nodes/nodesi2i.png rename to docs-old/assets/nodes/nodesi2i.png diff --git a/docs/assets/nodes/nodest2i.png b/docs-old/assets/nodes/nodest2i.png similarity index 100% rename from docs/assets/nodes/nodest2i.png rename to docs-old/assets/nodes/nodest2i.png diff --git a/docs/assets/nodes/workflow_library.png b/docs-old/assets/nodes/workflow_library.png similarity index 100% rename from docs/assets/nodes/workflow_library.png rename to docs-old/assets/nodes/workflow_library.png diff --git a/docs/assets/outpainting/curly-outcrop-2.png b/docs-old/assets/outpainting/curly-outcrop-2.png similarity index 100% rename from docs/assets/outpainting/curly-outcrop-2.png rename to docs-old/assets/outpainting/curly-outcrop-2.png diff --git a/docs/assets/outpainting/curly-outcrop.png b/docs-old/assets/outpainting/curly-outcrop.png similarity index 100% rename from docs/assets/outpainting/curly-outcrop.png rename to docs-old/assets/outpainting/curly-outcrop.png diff --git a/docs/assets/outpainting/curly-outpaint.png b/docs-old/assets/outpainting/curly-outpaint.png similarity index 100% rename from docs/assets/outpainting/curly-outpaint.png rename to docs-old/assets/outpainting/curly-outpaint.png diff --git a/docs/assets/outpainting/curly.png b/docs-old/assets/outpainting/curly.png similarity index 100% rename from docs/assets/outpainting/curly.png rename to 
docs-old/assets/outpainting/curly.png diff --git a/docs/assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png b/docs-old/assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png similarity index 100% rename from docs/assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png rename to docs-old/assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png diff --git a/docs/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png b/docs-old/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png similarity index 100% rename from docs/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png rename to docs-old/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png diff --git a/docs/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png b/docs-old/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png similarity index 100% rename from docs/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png rename to docs-old/assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png diff --git a/docs/assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png b/docs-old/assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png similarity index 100% rename from docs/assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png rename to docs-old/assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png diff --git a/docs/assets/prompt-blending/blue-sphere-red-cube-hybrid.png b/docs-old/assets/prompt-blending/blue-sphere-red-cube-hybrid.png similarity index 100% rename from docs/assets/prompt-blending/blue-sphere-red-cube-hybrid.png rename to docs-old/assets/prompt-blending/blue-sphere-red-cube-hybrid.png diff --git a/docs/assets/prompt_syntax/apricots--1.png b/docs-old/assets/prompt_syntax/apricots--1.png similarity index 100% rename from docs/assets/prompt_syntax/apricots--1.png rename to docs-old/assets/prompt_syntax/apricots--1.png diff --git a/docs/assets/prompt_syntax/apricots--2.png 
b/docs-old/assets/prompt_syntax/apricots--2.png similarity index 100% rename from docs/assets/prompt_syntax/apricots--2.png rename to docs-old/assets/prompt_syntax/apricots--2.png diff --git a/docs/assets/prompt_syntax/apricots--3.png b/docs-old/assets/prompt_syntax/apricots--3.png similarity index 100% rename from docs/assets/prompt_syntax/apricots--3.png rename to docs-old/assets/prompt_syntax/apricots--3.png diff --git a/docs/assets/prompt_syntax/apricots-0.png b/docs-old/assets/prompt_syntax/apricots-0.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-0.png rename to docs-old/assets/prompt_syntax/apricots-0.png diff --git a/docs/assets/prompt_syntax/apricots-1.png b/docs-old/assets/prompt_syntax/apricots-1.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-1.png rename to docs-old/assets/prompt_syntax/apricots-1.png diff --git a/docs/assets/prompt_syntax/apricots-2.png b/docs-old/assets/prompt_syntax/apricots-2.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-2.png rename to docs-old/assets/prompt_syntax/apricots-2.png diff --git a/docs/assets/prompt_syntax/apricots-3.png b/docs-old/assets/prompt_syntax/apricots-3.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-3.png rename to docs-old/assets/prompt_syntax/apricots-3.png diff --git a/docs/assets/prompt_syntax/apricots-4.png b/docs-old/assets/prompt_syntax/apricots-4.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-4.png rename to docs-old/assets/prompt_syntax/apricots-4.png diff --git a/docs/assets/prompt_syntax/apricots-5.png b/docs-old/assets/prompt_syntax/apricots-5.png similarity index 100% rename from docs/assets/prompt_syntax/apricots-5.png rename to docs-old/assets/prompt_syntax/apricots-5.png diff --git a/docs/assets/prompt_syntax/mountain-man.png b/docs-old/assets/prompt_syntax/mountain-man.png similarity index 100% rename from docs/assets/prompt_syntax/mountain-man.png rename to 
docs-old/assets/prompt_syntax/mountain-man.png diff --git a/docs/assets/prompt_syntax/mountain-man1.png b/docs-old/assets/prompt_syntax/mountain-man1.png similarity index 100% rename from docs/assets/prompt_syntax/mountain-man1.png rename to docs-old/assets/prompt_syntax/mountain-man1.png diff --git a/docs/assets/prompt_syntax/mountain-man2.png b/docs-old/assets/prompt_syntax/mountain-man2.png similarity index 100% rename from docs/assets/prompt_syntax/mountain-man2.png rename to docs-old/assets/prompt_syntax/mountain-man2.png diff --git a/docs/assets/prompt_syntax/mountain-man3.png b/docs-old/assets/prompt_syntax/mountain-man3.png similarity index 100% rename from docs/assets/prompt_syntax/mountain-man3.png rename to docs-old/assets/prompt_syntax/mountain-man3.png diff --git a/docs/assets/prompt_syntax/mountain-man4.png b/docs-old/assets/prompt_syntax/mountain-man4.png similarity index 100% rename from docs/assets/prompt_syntax/mountain-man4.png rename to docs-old/assets/prompt_syntax/mountain-man4.png diff --git a/docs/assets/prompt_syntax/mountain1-man.png b/docs-old/assets/prompt_syntax/mountain1-man.png similarity index 100% rename from docs/assets/prompt_syntax/mountain1-man.png rename to docs-old/assets/prompt_syntax/mountain1-man.png diff --git a/docs/assets/prompt_syntax/mountain2-man.png b/docs-old/assets/prompt_syntax/mountain2-man.png similarity index 100% rename from docs/assets/prompt_syntax/mountain2-man.png rename to docs-old/assets/prompt_syntax/mountain2-man.png diff --git a/docs/assets/prompt_syntax/mountain3-man.png b/docs-old/assets/prompt_syntax/mountain3-man.png similarity index 100% rename from docs/assets/prompt_syntax/mountain3-man.png rename to docs-old/assets/prompt_syntax/mountain3-man.png diff --git a/docs/assets/prompt_syntax/sdxl-prompt-concatenated.png b/docs-old/assets/prompt_syntax/sdxl-prompt-concatenated.png similarity index 100% rename from docs/assets/prompt_syntax/sdxl-prompt-concatenated.png rename to 
docs-old/assets/prompt_syntax/sdxl-prompt-concatenated.png diff --git a/docs/assets/prompt_syntax/sdxl-prompt.png b/docs-old/assets/prompt_syntax/sdxl-prompt.png similarity index 100% rename from docs/assets/prompt_syntax/sdxl-prompt.png rename to docs-old/assets/prompt_syntax/sdxl-prompt.png diff --git a/docs/assets/sdxl-graphs/sdxl-base-example1.json b/docs-old/assets/sdxl-graphs/sdxl-base-example1.json similarity index 100% rename from docs/assets/sdxl-graphs/sdxl-base-example1.json rename to docs-old/assets/sdxl-graphs/sdxl-base-example1.json diff --git a/docs/assets/sdxl-graphs/sdxl-base-refine-example1.json b/docs-old/assets/sdxl-graphs/sdxl-base-refine-example1.json similarity index 100% rename from docs/assets/sdxl-graphs/sdxl-base-refine-example1.json rename to docs-old/assets/sdxl-graphs/sdxl-base-refine-example1.json diff --git a/docs/assets/send-to-icon.png b/docs-old/assets/send-to-icon.png similarity index 100% rename from docs/assets/send-to-icon.png rename to docs-old/assets/send-to-icon.png diff --git a/docs/assets/stable-samples/img2img/mountains-2.png b/docs-old/assets/stable-samples/img2img/mountains-2.png similarity index 100% rename from docs/assets/stable-samples/img2img/mountains-2.png rename to docs-old/assets/stable-samples/img2img/mountains-2.png diff --git a/docs/assets/stable-samples/img2img/mountains-3.png b/docs-old/assets/stable-samples/img2img/mountains-3.png similarity index 100% rename from docs/assets/stable-samples/img2img/mountains-3.png rename to docs-old/assets/stable-samples/img2img/mountains-3.png diff --git a/docs/assets/stable-samples/img2img/sketch-mountains-input.jpg b/docs-old/assets/stable-samples/img2img/sketch-mountains-input.jpg similarity index 100% rename from docs/assets/stable-samples/img2img/sketch-mountains-input.jpg rename to docs-old/assets/stable-samples/img2img/sketch-mountains-input.jpg diff --git a/docs/assets/stable-samples/txt2img/merged-0005.png 
b/docs-old/assets/stable-samples/txt2img/merged-0005.png similarity index 100% rename from docs/assets/stable-samples/txt2img/merged-0005.png rename to docs-old/assets/stable-samples/txt2img/merged-0005.png diff --git a/docs/assets/stable-samples/txt2img/merged-0006.png b/docs-old/assets/stable-samples/txt2img/merged-0006.png similarity index 100% rename from docs/assets/stable-samples/txt2img/merged-0006.png rename to docs-old/assets/stable-samples/txt2img/merged-0006.png diff --git a/docs/assets/stable-samples/txt2img/merged-0007.png b/docs-old/assets/stable-samples/txt2img/merged-0007.png similarity index 100% rename from docs/assets/stable-samples/txt2img/merged-0007.png rename to docs-old/assets/stable-samples/txt2img/merged-0007.png diff --git a/docs/assets/step1.png b/docs-old/assets/step1.png similarity index 100% rename from docs/assets/step1.png rename to docs-old/assets/step1.png diff --git a/docs/assets/step2.png b/docs-old/assets/step2.png similarity index 100% rename from docs/assets/step2.png rename to docs-old/assets/step2.png diff --git a/docs/assets/step4.png b/docs-old/assets/step4.png similarity index 100% rename from docs/assets/step4.png rename to docs-old/assets/step4.png diff --git a/docs/assets/step5.png b/docs-old/assets/step5.png similarity index 100% rename from docs/assets/step5.png rename to docs-old/assets/step5.png diff --git a/docs/assets/step6.png b/docs-old/assets/step6.png similarity index 100% rename from docs/assets/step6.png rename to docs-old/assets/step6.png diff --git a/docs/assets/step7.png b/docs-old/assets/step7.png similarity index 100% rename from docs/assets/step7.png rename to docs-old/assets/step7.png diff --git a/docs/assets/still-life-inpainted.png b/docs-old/assets/still-life-inpainted.png similarity index 100% rename from docs/assets/still-life-inpainted.png rename to docs-old/assets/still-life-inpainted.png diff --git a/docs/assets/still-life-scaled.jpg b/docs-old/assets/still-life-scaled.jpg similarity index 
100% rename from docs/assets/still-life-scaled.jpg rename to docs-old/assets/still-life-scaled.jpg diff --git a/docs/assets/textual-inversion/ti-frontend.png b/docs-old/assets/textual-inversion/ti-frontend.png similarity index 100% rename from docs/assets/textual-inversion/ti-frontend.png rename to docs-old/assets/textual-inversion/ti-frontend.png diff --git a/docs/assets/troubleshooting/broken-dependency.png b/docs-old/assets/troubleshooting/broken-dependency.png similarity index 100% rename from docs/assets/troubleshooting/broken-dependency.png rename to docs-old/assets/troubleshooting/broken-dependency.png diff --git a/docs/assets/truncation_comparison.jpg b/docs-old/assets/truncation_comparison.jpg similarity index 100% rename from docs/assets/truncation_comparison.jpg rename to docs-old/assets/truncation_comparison.jpg diff --git a/docs/assets/upscaling.png b/docs-old/assets/upscaling.png similarity index 100% rename from docs/assets/upscaling.png rename to docs-old/assets/upscaling.png diff --git a/docs/assets/v1-variants-scores.jpg b/docs-old/assets/v1-variants-scores.jpg similarity index 100% rename from docs/assets/v1-variants-scores.jpg rename to docs-old/assets/v1-variants-scores.jpg diff --git a/docs/assets/variation_walkthru/000001.3357757885.png b/docs-old/assets/variation_walkthru/000001.3357757885.png similarity index 100% rename from docs/assets/variation_walkthru/000001.3357757885.png rename to docs-old/assets/variation_walkthru/000001.3357757885.png diff --git a/docs/assets/variation_walkthru/000002.1614299449.png b/docs-old/assets/variation_walkthru/000002.1614299449.png similarity index 100% rename from docs/assets/variation_walkthru/000002.1614299449.png rename to docs-old/assets/variation_walkthru/000002.1614299449.png diff --git a/docs/assets/variation_walkthru/000002.3647897225.png b/docs-old/assets/variation_walkthru/000002.3647897225.png similarity index 100% rename from docs/assets/variation_walkthru/000002.3647897225.png rename to 
docs-old/assets/variation_walkthru/000002.3647897225.png diff --git a/docs/assets/variation_walkthru/000003.1614299449.png b/docs-old/assets/variation_walkthru/000003.1614299449.png similarity index 100% rename from docs/assets/variation_walkthru/000003.1614299449.png rename to docs-old/assets/variation_walkthru/000003.1614299449.png diff --git a/docs/assets/variation_walkthru/000004.3747154981.png b/docs-old/assets/variation_walkthru/000004.3747154981.png similarity index 100% rename from docs/assets/variation_walkthru/000004.3747154981.png rename to docs-old/assets/variation_walkthru/000004.3747154981.png diff --git a/docs/configuration.md b/docs-old/configuration.md similarity index 100% rename from docs/configuration.md rename to docs-old/configuration.md diff --git a/docs/contributing/ARCHITECTURE.md b/docs-old/contributing/ARCHITECTURE.md similarity index 100% rename from docs/contributing/ARCHITECTURE.md rename to docs-old/contributing/ARCHITECTURE.md diff --git a/docs/contributing/DOWNLOAD_QUEUE.md b/docs-old/contributing/DOWNLOAD_QUEUE.md similarity index 100% rename from docs/contributing/DOWNLOAD_QUEUE.md rename to docs-old/contributing/DOWNLOAD_QUEUE.md diff --git a/docs/contributing/HOTKEYS.md b/docs-old/contributing/HOTKEYS.md similarity index 100% rename from docs/contributing/HOTKEYS.md rename to docs-old/contributing/HOTKEYS.md diff --git a/docs/contributing/INVOCATIONS.md b/docs-old/contributing/INVOCATIONS.md similarity index 100% rename from docs/contributing/INVOCATIONS.md rename to docs-old/contributing/INVOCATIONS.md diff --git a/docs/contributing/LOCAL_DEVELOPMENT.md b/docs-old/contributing/LOCAL_DEVELOPMENT.md similarity index 100% rename from docs/contributing/LOCAL_DEVELOPMENT.md rename to docs-old/contributing/LOCAL_DEVELOPMENT.md diff --git a/docs/contributing/MODEL_MANAGER.md b/docs-old/contributing/MODEL_MANAGER.md similarity index 100% rename from docs/contributing/MODEL_MANAGER.md rename to docs-old/contributing/MODEL_MANAGER.md 
diff --git a/docs/contributing/NEW_MODEL_INTEGRATION.md b/docs-old/contributing/NEW_MODEL_INTEGRATION.md similarity index 100% rename from docs/contributing/NEW_MODEL_INTEGRATION.md rename to docs-old/contributing/NEW_MODEL_INTEGRATION.md diff --git a/docs/contributing/PR-MERGE-POLICY.md b/docs-old/contributing/PR-MERGE-POLICY.md similarity index 100% rename from docs/contributing/PR-MERGE-POLICY.md rename to docs-old/contributing/PR-MERGE-POLICY.md diff --git a/docs/contributing/TESTS.md b/docs-old/contributing/TESTS.md similarity index 100% rename from docs/contributing/TESTS.md rename to docs-old/contributing/TESTS.md diff --git a/docs/contributing/contribution_guides/development.md b/docs-old/contributing/contribution_guides/development.md similarity index 100% rename from docs/contributing/contribution_guides/development.md rename to docs-old/contributing/contribution_guides/development.md diff --git a/docs/contributing/contribution_guides/documentation.md b/docs-old/contributing/contribution_guides/documentation.md similarity index 100% rename from docs/contributing/contribution_guides/documentation.md rename to docs-old/contributing/contribution_guides/documentation.md diff --git a/docs/contributing/contribution_guides/newContributorChecklist.md b/docs-old/contributing/contribution_guides/newContributorChecklist.md similarity index 100% rename from docs/contributing/contribution_guides/newContributorChecklist.md rename to docs-old/contributing/contribution_guides/newContributorChecklist.md diff --git a/docs/contributing/contribution_guides/translation.md b/docs-old/contributing/contribution_guides/translation.md similarity index 100% rename from docs/contributing/contribution_guides/translation.md rename to docs-old/contributing/contribution_guides/translation.md diff --git a/docs/contributing/contribution_guides/tutorials.md b/docs-old/contributing/contribution_guides/tutorials.md similarity index 100% rename from 
docs/contributing/contribution_guides/tutorials.md rename to docs-old/contributing/contribution_guides/tutorials.md diff --git a/docs/contributing/contributors.md b/docs-old/contributing/contributors.md similarity index 100% rename from docs/contributing/contributors.md rename to docs-old/contributing/contributors.md diff --git a/docs/contributing/dev-environment.md b/docs-old/contributing/dev-environment.md similarity index 100% rename from docs/contributing/dev-environment.md rename to docs-old/contributing/dev-environment.md diff --git a/docs/contributing/frontend/index.md b/docs-old/contributing/frontend/index.md similarity index 100% rename from docs/contributing/frontend/index.md rename to docs-old/contributing/frontend/index.md diff --git a/docs/contributing/frontend/state-management.md b/docs-old/contributing/frontend/state-management.md similarity index 100% rename from docs/contributing/frontend/state-management.md rename to docs-old/contributing/frontend/state-management.md diff --git a/docs/contributing/frontend/workflows.md b/docs-old/contributing/frontend/workflows.md similarity index 100% rename from docs/contributing/frontend/workflows.md rename to docs-old/contributing/frontend/workflows.md diff --git a/docs/contributing/index.md b/docs-old/contributing/index.md similarity index 100% rename from docs/contributing/index.md rename to docs-old/contributing/index.md diff --git a/docs/faq.md b/docs-old/faq.md similarity index 100% rename from docs/faq.md rename to docs-old/faq.md diff --git a/docs/features/cuda-sysmem-fallback.png b/docs-old/features/cuda-sysmem-fallback.png similarity index 100% rename from docs/features/cuda-sysmem-fallback.png rename to docs-old/features/cuda-sysmem-fallback.png diff --git a/docs/features/database.md b/docs-old/features/database.md similarity index 100% rename from docs/features/database.md rename to docs-old/features/database.md diff --git a/docs/features/gallery.md b/docs-old/features/gallery.md similarity index 
100% rename from docs/features/gallery.md rename to docs-old/features/gallery.md diff --git a/docs/features/hotkeys.md b/docs-old/features/hotkeys.md similarity index 100% rename from docs/features/hotkeys.md rename to docs-old/features/hotkeys.md diff --git a/docs/features/low-vram.md b/docs-old/features/low-vram.md similarity index 100% rename from docs/features/low-vram.md rename to docs-old/features/low-vram.md diff --git a/docs/features/orphaned_model_removal.md b/docs-old/features/orphaned_model_removal.md similarity index 100% rename from docs/features/orphaned_model_removal.md rename to docs-old/features/orphaned_model_removal.md diff --git a/docs/help/SAMPLER_CONVERGENCE.md b/docs-old/help/SAMPLER_CONVERGENCE.md similarity index 100% rename from docs/help/SAMPLER_CONVERGENCE.md rename to docs-old/help/SAMPLER_CONVERGENCE.md diff --git a/docs/help/diffusion.md b/docs-old/help/diffusion.md similarity index 100% rename from docs/help/diffusion.md rename to docs-old/help/diffusion.md diff --git a/docs/help/gettingStartedWithAI.md b/docs-old/help/gettingStartedWithAI.md similarity index 100% rename from docs/help/gettingStartedWithAI.md rename to docs-old/help/gettingStartedWithAI.md diff --git a/docs/img/favicon.ico b/docs-old/img/favicon.ico similarity index 100% rename from docs/img/favicon.ico rename to docs-old/img/favicon.ico diff --git a/docs/img/invoke-symbol-wht-lrg.svg b/docs-old/img/invoke-symbol-wht-lrg.svg similarity index 100% rename from docs/img/invoke-symbol-wht-lrg.svg rename to docs-old/img/invoke-symbol-wht-lrg.svg diff --git a/docs/index.md b/docs-old/index.md similarity index 100% rename from docs/index.md rename to docs-old/index.md diff --git a/docs/installation/docker.md b/docs-old/installation/docker.md similarity index 100% rename from docs/installation/docker.md rename to docs-old/installation/docker.md diff --git a/docs/installation/manual.md b/docs-old/installation/manual.md similarity index 100% rename from 
docs/installation/manual.md rename to docs-old/installation/manual.md diff --git a/docs/installation/models.md b/docs-old/installation/models.md similarity index 100% rename from docs/installation/models.md rename to docs-old/installation/models.md diff --git a/docs/installation/patchmatch.md b/docs-old/installation/patchmatch.md similarity index 100% rename from docs/installation/patchmatch.md rename to docs-old/installation/patchmatch.md diff --git a/docs/installation/quick_start.md b/docs-old/installation/quick_start.md similarity index 100% rename from docs/installation/quick_start.md rename to docs-old/installation/quick_start.md diff --git a/docs/installation/requirements.md b/docs-old/installation/requirements.md similarity index 100% rename from docs/installation/requirements.md rename to docs-old/installation/requirements.md diff --git a/docs/nodes/NODES.md b/docs-old/nodes/NODES.md similarity index 100% rename from docs/nodes/NODES.md rename to docs-old/nodes/NODES.md diff --git a/docs/nodes/NODES_MIGRATION_V3_V4.md b/docs-old/nodes/NODES_MIGRATION_V3_V4.md similarity index 100% rename from docs/nodes/NODES_MIGRATION_V3_V4.md rename to docs-old/nodes/NODES_MIGRATION_V3_V4.md diff --git a/docs/nodes/comfyToInvoke.md b/docs-old/nodes/comfyToInvoke.md similarity index 100% rename from docs/nodes/comfyToInvoke.md rename to docs-old/nodes/comfyToInvoke.md diff --git a/docs/nodes/communityNodes.md b/docs-old/nodes/communityNodes.md similarity index 100% rename from docs/nodes/communityNodes.md rename to docs-old/nodes/communityNodes.md diff --git a/docs/nodes/contributingNodes.md b/docs-old/nodes/contributingNodes.md similarity index 100% rename from docs/nodes/contributingNodes.md rename to docs-old/nodes/contributingNodes.md diff --git a/docs/nodes/defaultNodes.md b/docs-old/nodes/defaultNodes.md similarity index 100% rename from docs/nodes/defaultNodes.md rename to docs-old/nodes/defaultNodes.md diff --git a/docs/nodes/detailedNodes/faceTools.md 
b/docs-old/nodes/detailedNodes/faceTools.md similarity index 100% rename from docs/nodes/detailedNodes/faceTools.md rename to docs-old/nodes/detailedNodes/faceTools.md diff --git a/docs/nodes/invocation-api.md b/docs-old/nodes/invocation-api.md similarity index 100% rename from docs/nodes/invocation-api.md rename to docs-old/nodes/invocation-api.md diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 00000000000..6240da8b10b --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,21 @@ +# build output +dist/ +# generated types +.astro/ + +# dependencies +node_modules/ + +# logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + + +# environment variables +.env +.env.production + +# macOS-specific files +.DS_Store diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000000..591a5c353f9 --- /dev/null +++ b/docs/README.md @@ -0,0 +1 @@ +# Invoke AI Documentation diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs new file mode 100644 index 00000000000..2f9573905c8 --- /dev/null +++ b/docs/astro.config.mjs @@ -0,0 +1,103 @@ +// @ts-check +import { defineConfig } from 'astro/config'; +import starlight from '@astrojs/starlight'; + +// Plugins +import starlightLlmsText from 'starlight-llms-txt'; + +// https://astro.build/config +export default defineConfig({ + site: 'https://invoke-ai.github.io', + // base: '/InvokPeAI', + integrations: [ + starlight({ + // Content + title: { + en: 'InvokeAI Documentation', + }, + logo: { + src: './src/assets/invoke-icon-wide.svg', + alt: 'InvokeAI Logo', + replacesTitle: true, + }, + favicon: './src/assets/invoke-icon.svg', + editLink: { + baseUrl: 'https://github.com/invoke-ai/InvokeAI/edit/main/docs', + }, + defaultLocale: 'root', + locales: { + root: { + label: 'English', + lang: 'en', + }, + }, + social: [ + { + icon: 'github', + label: 'GitHub', + href: 'https://github.com/invoke-ai/InvokeAI', + }, + { + icon: 'discord', + label: 'Discord', + href: 
'https://discord.gg/ZmtBAhwWhy', + }, + { + icon: 'youtube', + label: 'YouTube', + href: 'https://www.youtube.com/@invokeai', + }, + ], + tableOfContents: { + maxHeadingLevel: 4, + }, + customCss: [ + '@fontsource-variable/inter', + '@fontsource-variable/roboto-mono', + './src/styles/custom.css', + ], + sidebar: [ + { + label: 'Start Here', + autogenerate: { directory: 'start-here' }, + }, + { + label: 'Configuration', + autogenerate: { directory: 'configuration' }, + }, + { + label: 'Concepts', + autogenerate: { directory: 'concepts' }, + }, + { + label: 'Features', + autogenerate: { directory: 'features' }, + }, + { + label: 'Workflows', + autogenerate: { directory: 'workflows' }, + }, + { + label: 'Development', + autogenerate: { directory: 'development', collapsed: true }, + }, + { + label: 'Contributing', + autogenerate: { directory: 'contributing' }, + collapsed: true, + }, + { + label: 'Troubleshooting', + autogenerate: { directory: 'troubleshooting' }, + }, + ], + components: { + ThemeProvider: './src/lib/components/ForceDarkTheme.astro', + ThemeSelect: './src/lib/components/EmptyComponent.astro', + Footer: './src/lib/components/Footer.astro', + PageFrame: './src/layouts/PageFrameExtended.astro', + }, + plugins: [starlightLlmsText()], + }), + ], +}); diff --git a/docs/invoke-config.json b/docs/invoke-config.json new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/nodes/overview.md b/docs/nodes/overview.md deleted file mode 100644 index 736aba9ef6c..00000000000 --- a/docs/nodes/overview.md +++ /dev/null @@ -1,23 +0,0 @@ -# Nodes - -## What are Nodes? - -An Node is simply a single operation that takes in inputs and returns -out outputs. Multiple nodes can be linked together to create more -complex functionality. All InvokeAI features are added through nodes. - -### Anatomy of a Node - -Individual nodes are made up of the following: - -- Inputs: Edge points on the left side of the node window where you connect outputs from other nodes. 
-- Outputs: Edge points on the right side of the node window where you connect to inputs on other nodes. -- Options: Various options which are either manually configured, or overridden by connecting an output from another node to the input. - -With nodes, you can can easily extend the image generation capabilities of InvokeAI, and allow you build workflows that suit your needs. - -You can read more about nodes and the node editor [here](../nodes/NODES.md). - -## Downloading New Nodes - -To download a new node, visit our list of [Community Nodes](../nodes/communityNodes.md). These are nodes that have been created by the community, for the community. diff --git a/docs/package.json b/docs/package.json new file mode 100644 index 00000000000..caf5939dca9 --- /dev/null +++ b/docs/package.json @@ -0,0 +1,28 @@ +{ + "name": "docs", + "type": "module", + "version": "0.0.1", + "scripts": { + "dev": "astro dev", + "start": "astro dev", + "build": "astro build", + "preview": "astro preview", + "astro": "astro" + }, + "dependencies": { + "@astrojs/markdoc": "^0.15.10", + "@astrojs/starlight": "^0.37.6", + "@astrojs/starlight-markdoc": "^0.5.1", + "@fontsource-variable/inter": "^5.2.8", + "@fontsource-variable/roboto-mono": "^5.2.8", + "astro": "^5.17.3", + "mermaid": "^11.12.3", + "rehype-external-links": "^3.0.0", + "sharp": "^0.34.2", + "starlight-llms-txt": "^0.7.0" + }, + "devDependencies": { + "node-addon-api": "^8.5.0", + "node-gyp": "^12.2.0" + } +} \ No newline at end of file diff --git a/docs/pnpm-lock.yaml b/docs/pnpm-lock.yaml new file mode 100644 index 00000000000..c45c14bca51 --- /dev/null +++ b/docs/pnpm-lock.yaml @@ -0,0 +1,6015 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@astrojs/markdoc': + specifier: ^0.15.10 + version: 0.15.10(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@astrojs/starlight': + specifier: ^0.37.6 + version: 
0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@astrojs/starlight-markdoc': + specifier: ^0.5.1 + version: 0.5.1(@astrojs/markdoc@0.15.10(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)))(@astrojs/starlight@0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3))) + '@fontsource-variable/inter': + specifier: ^5.2.8 + version: 5.2.8 + '@fontsource-variable/roboto-mono': + specifier: ^5.2.8 + version: 5.2.8 + astro: + specifier: ^5.17.3 + version: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + mermaid: + specifier: ^11.12.3 + version: 11.12.3 + rehype-external-links: + specifier: ^3.0.0 + version: 3.0.0 + sharp: + specifier: ^0.34.2 + version: 0.34.5 + starlight-llms-txt: + specifier: ^0.7.0 + version: 0.7.0(@astrojs/starlight@0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)))(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + devDependencies: + node-addon-api: + specifier: ^8.5.0 + version: 8.5.0 + node-gyp: + specifier: ^12.2.0 + version: 12.2.0 + +packages: + + '@antfu/install-pkg@1.1.0': + resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + + '@astrojs/compiler@2.13.1': + resolution: {integrity: sha512-f3FN83d2G/v32ipNClRKgYv30onQlMZX1vCeZMjPsMMPl1mDpmbl0+N5BYo4S/ofzqJyS5hvwacEo0CCVDn/Qg==} + + '@astrojs/internal-helpers@0.7.5': + resolution: {integrity: sha512-vreGnYSSKhAjFJCWAwe/CNhONvoc5lokxtRoZims+0wa3KbHBdPHSSthJsKxPd8d/aic6lWKpRTYGY/hsgK6EA==} + + '@astrojs/markdoc@0.15.10': + resolution: {integrity: sha512-05V4Vy4J14aDpBstKbuyO7ArY+VCF80SElWyx80ddeWkUiOERw4SjGbnzgAI4AQtDdJ+pJdlv/16TMM65+YafA==} + engines: {node: 18.20.8 || ^20.3.0 || >=22.0.0} + peerDependencies: + astro: ^5.0.0 + + '@astrojs/markdown-remark@6.3.10': + resolution: {integrity: sha512-kk4HeYR6AcnzC4QV8iSlOfh+N8TZ3MEStxPyenyCtemqn8IpEATBFMTJcfrNW32dgpt6MY3oCkMM/Tv3/I4G3A==} + + '@astrojs/mdx@4.3.13': + resolution: {integrity: 
sha512-IHDHVKz0JfKBy3//52JSiyWv089b7GVSChIXLrlUOoTLWowG3wr2/8hkaEgEyd/vysvNQvGk+QhysXpJW5ve6Q==} + engines: {node: 18.20.8 || ^20.3.0 || >=22.0.0} + peerDependencies: + astro: ^5.0.0 + + '@astrojs/prism@3.3.0': + resolution: {integrity: sha512-q8VwfU/fDZNoDOf+r7jUnMC2//H2l0TuQ6FkGJL8vD8nw/q5KiL3DS1KKBI3QhI9UQhpJ5dc7AtqfbXWuOgLCQ==} + engines: {node: 18.20.8 || ^20.3.0 || >=22.0.0} + + '@astrojs/sitemap@3.7.0': + resolution: {integrity: sha512-+qxjUrz6Jcgh+D5VE1gKUJTA3pSthuPHe6Ao5JCxok794Lewx8hBFaWHtOnN0ntb2lfOf7gvOi9TefUswQ/ZVA==} + + '@astrojs/starlight-markdoc@0.5.1': + resolution: {integrity: sha512-YxTvCHr/0ovDUhuRm0xeCjjGYtnqpa9EtTwc9fTbRO7Zpnsu1mDmm2L9HHSBWZ4YsFb005/GJYNDjk4WXNLoUg==} + peerDependencies: + '@astrojs/markdoc': '>=0.12.1' + '@astrojs/starlight': '>=0.35.0' + + '@astrojs/starlight@0.37.6': + resolution: {integrity: sha512-wQrKwH431q+8FsLBnNQeG+R36TMtEGxTQ2AuiVpcx9APcazvL3n7wVW8mMmYyxX0POjTnxlcWPkdMGR3Yj1L+w==} + peerDependencies: + astro: ^5.5.0 + + '@astrojs/telemetry@3.3.0': + resolution: {integrity: sha512-UFBgfeldP06qu6khs/yY+q1cDAaArM2/7AEIqQ9Cuvf7B1hNLq0xDrZkct+QoIGyjq56y8IaE2I3CTvG99mlhQ==} + engines: {node: 18.20.8 || ^20.3.0 || >=22.0.0} + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.29.0': + resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/runtime@7.28.6': + resolution: {integrity: sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.29.0': + resolution: 
{integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@braintree/sanitize-url@7.1.2': + resolution: {integrity: sha512-jigsZK+sMF/cuiB7sERuo9V7N9jx+dhmHHnQyDSVdpZwVutaBu7WvNYqMDLSgFgfB30n452TP3vjDAvFC973mA==} + + '@capsizecss/unpack@4.0.0': + resolution: {integrity: sha512-VERIM64vtTP1C4mxQ5thVT9fK0apjPFobqybMtA1UdUujWka24ERHbRHFGmpbbhp73MhV+KSsHQH9C6uOTdEQA==} + engines: {node: '>=18'} + + '@chevrotain/cst-dts-gen@11.1.1': + resolution: {integrity: sha512-fRHyv6/f542qQqiRGalrfJl/evD39mAvbJLCekPazhiextEatq1Jx1K/i9gSd5NNO0ds03ek0Cbo/4uVKmOBcw==} + + '@chevrotain/gast@11.1.1': + resolution: {integrity: sha512-Ko/5vPEYy1vn5CbCjjvnSO4U7GgxyGm+dfUZZJIWTlQFkXkyym0jFYrWEU10hyCjrA7rQtiHtBr0EaZqvHFZvg==} + + '@chevrotain/regexp-to-ast@11.1.1': + resolution: {integrity: sha512-ctRw1OKSXkOrR8VTvOxrQ5USEc4sNrfwXHa1NuTcR7wre4YbjPcKw+82C2uylg/TEwFRgwLmbhlln4qkmDyteg==} + + '@chevrotain/types@11.1.1': + resolution: {integrity: sha512-wb2ToxG8LkgPYnKe9FH8oGn3TMCBdnwiuNC5l5y+CtlaVRbCytU0kbVsk6CGrqTL4ZN4ksJa0TXOYbxpbthtqw==} + + '@chevrotain/utils@11.1.1': + resolution: {integrity: sha512-71eTYMzYXYSFPrbg/ZwftSaSDld7UYlS8OQa3lNnn9jzNtpFbaReRRyghzqS7rI3CDaorqpPJJcXGHK+FE1TVQ==} + + '@ctrl/tinycolor@4.2.0': + resolution: {integrity: sha512-kzyuwOAQnXJNLS9PSyrk0CWk35nWJW/zl/6KvnTBMFK65gm7U1/Z5BqjxeapjZCIhQcM/DsrEmcbRwDyXyXK4A==} + engines: {node: '>=14'} + + '@emnapi/runtime@1.8.1': + resolution: {integrity: sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==} + + '@esbuild/aix-ppc64@0.25.12': + resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/aix-ppc64@0.27.3': + resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} + engines: {node: '>=18'} + cpu: 
[ppc64] + os: [aix] + + '@esbuild/android-arm64@0.25.12': + resolution: {integrity: sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm64@0.27.3': + resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.25.12': + resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.25.12': + resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/android-x64@0.27.3': + resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.25.12': + resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-arm64@0.27.3': + resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.25.12': + resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: 
{integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.25.12': + resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.25.12': + resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.25.12': + resolution: {integrity: sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm64@0.27.3': + resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.25.12': + resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.25.12': + resolution: {integrity: 
sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.25.12': + resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.25.12': + resolution: {integrity: sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.25.12': + resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.25.12': + resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: {integrity: 
sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.25.12': + resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.25.12': + resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.25.12': + resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.25.12': + resolution: {integrity: sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.25.12': + resolution: {integrity: 
sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.25.12': + resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.25.12': + resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.25.12': + resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/sunos-x64@0.27.3': + resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.25.12': + resolution: {integrity: sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-arm64@0.27.3': + resolution: {integrity: 
sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.25.12': + resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.25.12': + resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@expressive-code/core@0.41.6': + resolution: {integrity: sha512-FvJQP+hG0jWi/FLBSmvHInDqWR7jNANp9PUDjdMqSshHb0y7sxx3vHuoOr6SgXjWw+MGLqorZyPQ0aAlHEok6g==} + + '@expressive-code/plugin-frames@0.41.6': + resolution: {integrity: sha512-d+hkSYXIQot6fmYnOmWAM+7TNWRv/dhfjMsNq+mIZz8Tb4mPHOcgcfZeEM5dV9TDL0ioQNvtcqQNuzA1sRPjxg==} + + '@expressive-code/plugin-shiki@0.41.6': + resolution: {integrity: sha512-Y6zmKBmsIUtWTzdefqlzm/h9Zz0Rc4gNdt2GTIH7fhHH2I9+lDYCa27BDwuBhjqcos6uK81Aca9dLUC4wzN+ng==} + + '@expressive-code/plugin-text-markers@0.41.6': + resolution: {integrity: sha512-PBFa1wGyYzRExMDzBmAWC6/kdfG1oLn4pLpBeTfIRrALPjcGA/59HP3e7q9J0Smk4pC7U+lWkA2LHR8FYV8U7Q==} + + '@fontsource-variable/inter@5.2.8': + resolution: {integrity: sha512-kOfP2D+ykbcX/P3IFnokOhVRNoTozo5/JxhAIVYLpea/UBmCQ/YWPBfWIDuBImXX/15KH+eKh4xpEUyS2sQQGQ==} + + '@fontsource-variable/roboto-mono@5.2.8': + resolution: {integrity: sha512-6M2U3wGIUxYNKRrUoKls8BRRIPDA57T8J0agqwyDkiEHrLEEAqptsxcUl3eTm6tnRNEn6yEm4pCefvtnujebDA==} + + 
'@iconify/types@2.0.0': + resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + + '@iconify/utils@3.1.0': + resolution: {integrity: sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==} + + '@img/colour@1.0.0': + resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==} + engines: {node: '>=18'} + + '@img/sharp-darwin-arm64@0.34.5': + resolution: {integrity: sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.34.5': + resolution: {integrity: sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.2.4': + resolution: {integrity: sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.2.4': + resolution: {integrity: sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-linux-arm64@1.2.4': + resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@img/sharp-libvips-linux-arm@1.2.4': + resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@img/sharp-libvips-linux-ppc64@1.2.4': + resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + 
'@img/sharp-libvips-linux-riscv64@1.2.4': + resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@img/sharp-libvips-linux-s390x@1.2.4': + resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@img/sharp-libvips-linux-x64@1.2.4': + resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': + resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@img/sharp-linux-arm64@0.34.5': + resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@img/sharp-linux-arm@0.34.5': + resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@img/sharp-linux-ppc64@0.34.5': + resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + '@img/sharp-linux-riscv64@0.34.5': + resolution: {integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==} + engines: 
{node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@img/sharp-linux-s390x@0.34.5': + resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@img/sharp-linux-x64@0.34.5': + resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@img/sharp-linuxmusl-arm64@0.34.5': + resolution: {integrity: sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@img/sharp-linuxmusl-x64@0.34.5': + resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + libc: [musl] + + '@img/sharp-wasm32@0.34.5': + resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-arm64@0.34.5': + resolution: {integrity: sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [win32] + + '@img/sharp-win32-ia32@0.34.5': + resolution: {integrity: sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.34.5': + resolution: {integrity: sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==} + engines: {node: ^18.17.0 || 
^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + + '@isaacs/cliui@9.0.0': + resolution: {integrity: sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg==} + engines: {node: '>=18'} + + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@markdoc/markdoc@0.5.4': + resolution: {integrity: sha512-36YFNlqFk//gVNGm5xZaTWVwbAVF2AOmVjf1tiUrS6tCoD/YSkVy2E3CkAfhc5MlKcjparL/QFHCopxL4zRyaQ==} + engines: {node: '>=14.7.0'} + peerDependencies: + '@types/react': '*' + react: '*' + peerDependenciesMeta: + '@types/react': + optional: true + react: + optional: true + + '@mdx-js/mdx@3.1.1': + resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==} + + '@mermaid-js/parser@1.0.0': + resolution: {integrity: sha512-vvK0Hi/VWndxoh03Mmz6wa1KDriSPjS2XMZL/1l19HFwygiObEEoEwSDxOqyLzzAI6J2PU3261JjTMTO7x+BPw==} + + '@npmcli/agent@4.0.0': + resolution: {integrity: sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA==} + engines: {node: ^20.17.0 || >=22.9.0} + + '@npmcli/fs@5.0.0': + resolution: {integrity: sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og==} + engines: {node: ^20.17.0 || >=22.9.0} + + '@oslojs/encoding@1.1.0': + resolution: {integrity: sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ==} + + '@pagefind/darwin-arm64@1.4.0': + resolution: {integrity: sha512-2vMqkbv3lbx1Awea90gTaBsvpzgRs7MuSgKDxW0m9oV1GPZCZbZBJg/qL83GIUEN2BFlY46dtUZi54pwH+/pTQ==} + cpu: [arm64] + os: [darwin] + + '@pagefind/darwin-x64@1.4.0': + resolution: {integrity: 
sha512-e7JPIS6L9/cJfow+/IAqknsGqEPjJnVXGjpGm25bnq+NPdoD3c/7fAwr1OXkG4Ocjx6ZGSCijXEV4ryMcH2E3A==} + cpu: [x64] + os: [darwin] + + '@pagefind/default-ui@1.4.0': + resolution: {integrity: sha512-wie82VWn3cnGEdIjh4YwNESyS1G6vRHwL6cNjy9CFgNnWW/PGRjsLq300xjVH5sfPFK3iK36UxvIBymtQIEiSQ==} + + '@pagefind/freebsd-x64@1.4.0': + resolution: {integrity: sha512-WcJVypXSZ+9HpiqZjFXMUobfFfZZ6NzIYtkhQ9eOhZrQpeY5uQFqNWLCk7w9RkMUwBv1HAMDW3YJQl/8OqsV0Q==} + cpu: [x64] + os: [freebsd] + + '@pagefind/linux-arm64@1.4.0': + resolution: {integrity: sha512-PIt8dkqt4W06KGmQjONw7EZbhDF+uXI7i0XtRLN1vjCUxM9vGPdtJc2mUyVPevjomrGz5M86M8bqTr6cgDp1Uw==} + cpu: [arm64] + os: [linux] + + '@pagefind/linux-x64@1.4.0': + resolution: {integrity: sha512-z4oddcWwQ0UHrTHR8psLnVlz6USGJ/eOlDPTDYZ4cI8TK8PgwRUPQZp9D2iJPNIPcS6Qx/E4TebjuGJOyK8Mmg==} + cpu: [x64] + os: [linux] + + '@pagefind/windows-x64@1.4.0': + resolution: {integrity: sha512-NkT+YAdgS2FPCn8mIA9bQhiBs+xmniMGq1LFPDhcFn0+2yIUEiIG06t7bsZlhdjknEQRTSdT7YitP6fC5qwP0g==} + cpu: [x64] + os: [win32] + + '@rollup/pluginutils@5.3.0': + resolution: {integrity: sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==} + engines: {node: '>=14.0.0'} + peerDependencies: + rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0 + peerDependenciesMeta: + rollup: + optional: true + + '@rollup/rollup-android-arm-eabi@4.57.1': + resolution: {integrity: sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.57.1': + resolution: {integrity: sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.57.1': + resolution: {integrity: sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.57.1': + resolution: {integrity: 
sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.57.1': + resolution: {integrity: sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.57.1': + resolution: {integrity: sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + resolution: {integrity: sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} + cpu: [arm] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm64-musl@4.57.1': + resolution: {integrity: sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} + cpu: [loong64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-loong64-musl@4.57.1': + resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} + cpu: [loong64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} + cpu: [ppc64] + os: 
[linux] + libc: [glibc] + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} + cpu: [ppc64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + resolution: {integrity: sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} + cpu: [riscv64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-gnu@4.57.1': + resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-musl@4.57.1': + resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@rollup/rollup-openbsd-x64@4.57.1': + resolution: {integrity: sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.57.1': + resolution: {integrity: sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.57.1': + resolution: {integrity: sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + resolution: {integrity: 
sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.57.1': + resolution: {integrity: sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.57.1': + resolution: {integrity: sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==} + cpu: [x64] + os: [win32] + + '@shikijs/core@3.22.0': + resolution: {integrity: sha512-iAlTtSDDbJiRpvgL5ugKEATDtHdUVkqgHDm/gbD2ZS9c88mx7G1zSYjjOxp5Qa0eaW0MAQosFRmJSk354PRoQA==} + + '@shikijs/engine-javascript@3.22.0': + resolution: {integrity: sha512-jdKhfgW9CRtj3Tor0L7+yPwdG3CgP7W+ZEqSsojrMzCjD1e0IxIbwUMDDpYlVBlC08TACg4puwFGkZfLS+56Tw==} + + '@shikijs/engine-oniguruma@3.22.0': + resolution: {integrity: sha512-DyXsOG0vGtNtl7ygvabHd7Mt5EY8gCNqR9Y7Lpbbd/PbJvgWrqaKzH1JW6H6qFkuUa8aCxoiYVv8/YfFljiQxA==} + + '@shikijs/langs@3.22.0': + resolution: {integrity: sha512-x/42TfhWmp6H00T6uwVrdTJGKgNdFbrEdhaDwSR5fd5zhQ1Q46bHq9EO61SCEWJR0HY7z2HNDMaBZp8JRmKiIA==} + + '@shikijs/themes@3.22.0': + resolution: {integrity: sha512-o+tlOKqsr6FE4+mYJG08tfCFDS+3CG20HbldXeVoyP+cYSUxDhrFf3GPjE60U55iOkkjbpY2uC3It/eeja35/g==} + + '@shikijs/types@3.22.0': + resolution: {integrity: sha512-491iAekgKDBFE67z70Ok5a8KBMsQ2IJwOWw3us/7ffQkIBCyOQfm/aNwVMBUriP02QshIfgHCBSIYAl3u2eWjg==} + + '@shikijs/vscode-textmate@10.0.2': + resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} + + '@types/braces@3.0.5': + resolution: {integrity: sha512-SQFof9H+LXeWNz8wDe7oN5zu7ket0qwMu5vZubW4GCJ8Kkeh6nBWUz87+KTz/G3Kqsrp0j/W253XJb3KMEeg3w==} + + '@types/d3-array@3.2.2': + resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} + + '@types/d3-axis@3.0.6': + resolution: {integrity: 
sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==} + + '@types/d3-brush@3.0.6': + resolution: {integrity: sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==} + + '@types/d3-chord@3.0.6': + resolution: {integrity: sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==} + + '@types/d3-color@3.1.3': + resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} + + '@types/d3-contour@3.0.6': + resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} + + '@types/d3-delaunay@6.0.4': + resolution: {integrity: sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==} + + '@types/d3-dispatch@3.0.7': + resolution: {integrity: sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==} + + '@types/d3-drag@3.0.7': + resolution: {integrity: sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==} + + '@types/d3-dsv@3.0.7': + resolution: {integrity: sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==} + + '@types/d3-ease@3.0.2': + resolution: {integrity: sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} + + '@types/d3-fetch@3.0.7': + resolution: {integrity: sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==} + + '@types/d3-force@3.0.10': + resolution: {integrity: sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==} + + '@types/d3-format@3.0.4': + resolution: {integrity: sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==} + + '@types/d3-geo@3.1.0': + resolution: {integrity: 
sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} + + '@types/d3-hierarchy@3.1.7': + resolution: {integrity: sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==} + + '@types/d3-interpolate@3.0.4': + resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} + + '@types/d3-path@3.1.1': + resolution: {integrity: sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} + + '@types/d3-polygon@3.0.2': + resolution: {integrity: sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==} + + '@types/d3-quadtree@3.0.6': + resolution: {integrity: sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==} + + '@types/d3-random@3.0.3': + resolution: {integrity: sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==} + + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + + '@types/d3-scale@4.0.9': + resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} + + '@types/d3-selection@3.0.11': + resolution: {integrity: sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==} + + '@types/d3-shape@3.1.8': + resolution: {integrity: sha512-lae0iWfcDeR7qt7rA88BNiqdvPS5pFVPpo5OfjElwNaT2yyekbM0C9vK+yqBqEmHr6lDkRnYNoTBYlAgJa7a4w==} + + '@types/d3-time-format@4.0.3': + resolution: {integrity: sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==} + + '@types/d3-time@3.0.4': + resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} + + '@types/d3-timer@3.0.2': + resolution: 
{integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==} + + '@types/d3-transition@3.0.9': + resolution: {integrity: sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==} + + '@types/d3-zoom@3.0.8': + resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + + '@types/d3@7.4.3': + resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} + + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + + '@types/estree-jsx@1.0.5': + resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/geojson@7946.0.16': + resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} + + '@types/hast@3.0.4': + resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} + + '@types/js-yaml@4.0.9': + resolution: {integrity: sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==} + + '@types/linkify-it@3.0.5': + resolution: {integrity: sha512-yg6E+u0/+Zjva+buc3EIb+29XEg4wltq7cSmd4Uc2EE/1nUVmxyzpX6gUXD0V8jIrG0r7YeOGVIbYRkxeooCtw==} + + '@types/markdown-it@12.2.3': + resolution: {integrity: sha512-GKMHFfv3458yYy+v/N8gjufHO6MSZKCOXpZc5GXIWWy8uldwfmPn98vp81gZ5f9SVw8YYBctgfJ22a2d7AOMeQ==} + + '@types/mdast@4.0.4': + resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} + + '@types/mdurl@2.0.0': + resolution: {integrity: 
sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==} + + '@types/mdx@2.0.13': + resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} + + '@types/micromatch@4.0.10': + resolution: {integrity: sha512-5jOhFDElqr4DKTrTEbnW8DZ4Hz5LRUEmyrGpCMrD/NphYv3nUnaF08xmSLx1rGGnyEs/kFnhiw6dCgcDqMr5PQ==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/nlcst@2.0.3': + resolution: {integrity: sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA==} + + '@types/node@17.0.45': + resolution: {integrity: sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==} + + '@types/sax@1.2.7': + resolution: {integrity: sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==} + + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + + '@types/unist@2.0.11': + resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} + + '@types/unist@3.0.3': + resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + abbrev@4.0.0: + resolution: {integrity: sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA==} + engines: {node: ^20.17.0 || >=22.9.0} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + 
acorn@8.15.0: + resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} + engines: {node: '>=0.4.0'} + hasBin: true + + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + + ansi-align@3.0.1: + resolution: {integrity: sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.2.2: + resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} + engines: {node: '>=12'} + + ansi-styles@6.2.3: + resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} + engines: {node: '>=12'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@5.0.2: + resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + aria-query@5.3.2: + resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==} + engines: {node: '>= 0.4'} + + array-iterate@2.0.1: + resolution: {integrity: sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg==} + + astring@1.9.0: + resolution: {integrity: sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==} + hasBin: true + + astro-expressive-code@0.41.6: + resolution: {integrity: 
sha512-l47tb1uhmVIebHUkw+HEPtU/av0G4O8Q34g2cbkPvC7/e9ZhANcjUUciKt9Hp6gSVDdIuXBBLwJQn2LkeGMOAw==} + peerDependencies: + astro: ^4.0.0-beta || ^5.0.0-beta || ^3.3.0 || ^6.0.0-beta + + astro@5.17.3: + resolution: {integrity: sha512-69dcfPe8LsHzklwj+hl+vunWUbpMB6pmg35mACjetxbJeUNNys90JaBM8ZiwsPK689SAj/4Zqb1ayaANls9/MA==} + engines: {node: 18.20.8 || ^20.3.0 || >=22.0.0, npm: '>=9.6.5', pnpm: '>=7.1.0'} + hasBin: true + + axobject-query@4.1.0: + resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} + engines: {node: '>= 0.4'} + + bail@2.0.2: + resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} + + balanced-match@4.0.2: + resolution: {integrity: sha512-x0K50QvKQ97fdEz2kPehIerj+YTeptKF9hyYkKf6egnwmMWAkADiO0QCzSp0R5xN8FTZgYaBfSaue46Ej62nMg==} + engines: {node: 20 || >=22} + + base-64@1.0.0: + resolution: {integrity: sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==} + + bcp-47-match@2.0.3: + resolution: {integrity: sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==} + + bcp-47@2.1.0: + resolution: {integrity: sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w==} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + boxen@8.0.1: + resolution: {integrity: sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw==} + engines: {node: '>=18'} + + brace-expansion@5.0.2: + resolution: {integrity: sha512-Pdk8c9poy+YhOgVWw1JNN22/HcivgKWwpxKq04M/jTmHyCZn12WPJebZxdjSa5TmBqISrUSgNYU3eRORljfCCw==} + engines: {node: 20 || >=22} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + 
+ cacache@20.0.3: + resolution: {integrity: sha512-3pUp4e8hv07k1QlijZu6Kn7c9+ZpWWk4j3F8N3xPuCExULobqJydKYOTj1FTq58srkJsXvO7LbGAH4C0ZU3WGw==} + engines: {node: ^20.17.0 || >=22.9.0} + + camelcase@8.0.0: + resolution: {integrity: sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==} + engines: {node: '>=16'} + + ccount@2.0.1: + resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} + + chalk@5.6.2: + resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + + character-entities-html4@2.1.0: + resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==} + + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} + + character-entities@2.0.2: + resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} + + character-reference-invalid@2.0.1: + resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==} + + chevrotain-allstar@0.3.1: + resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} + peerDependencies: + chevrotain: ^11.0.0 + + chevrotain@11.1.1: + resolution: {integrity: sha512-f0yv5CPKaFxfsPTBzX7vGuim4oIC1/gcS7LUGdBSwl2dU6+FON6LVUksdOo1qJjoUvXNn45urgh8C+0a24pACQ==} + + chokidar@5.0.0: + resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==} + engines: {node: '>= 20.19.0'} + + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} 
+ + ci-info@4.4.0: + resolution: {integrity: sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg==} + engines: {node: '>=8'} + + cli-boxes@3.0.0: + resolution: {integrity: sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==} + engines: {node: '>=10'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + collapse-white-space@2.1.0: + resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==} + + comma-separated-tokens@2.0.3: + resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + + commander@11.1.0: + resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==} + engines: {node: '>=16'} + + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + + commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + + common-ancestor-path@1.0.1: + resolution: {integrity: sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w==} + + confbox@0.1.8: + resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + + cookie-es@1.2.2: + resolution: {integrity: sha512-+W7VmiVINB+ywl1HGXJXmrqkOhpKrIiVZV6tQuV54ZyQC7MMuBt81Vc336GMLoHBq5hV/F9eXgt5Mnx0Rha5Fg==} + + cookie@1.1.1: + resolution: {integrity: sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==} + engines: {node: '>=18'} + + cose-base@1.0.3: + resolution: {integrity: 
sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + + cose-base@2.2.0: + resolution: {integrity: sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==} + + crossws@0.3.5: + resolution: {integrity: sha512-ojKiDvcmByhwa8YYqbQI/hg7MEU0NC03+pSdEq4ZUnZR9xXpwk7E43SMNGkn+JxJGPFtNvQ48+vV2p+P1ml5PA==} + + css-select@5.2.2: + resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==} + + css-selector-parser@3.3.0: + resolution: {integrity: sha512-Y2asgMGFqJKF4fq4xHDSlFYIkeVfRsm69lQC1q9kbEsH5XtnINTMrweLkjYMeaUgiXBy/uvKeO/a1JHTNnmB2g==} + + css-tree@2.2.1: + resolution: {integrity: sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0, npm: '>=7.0.0'} + + css-tree@3.1.0: + resolution: {integrity: sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + + css-what@6.2.2: + resolution: {integrity: sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==} + engines: {node: '>= 6'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csso@5.0.5: + resolution: {integrity: sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0, npm: '>=7.0.0'} + + cytoscape-cose-bilkent@4.1.0: + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape-fcose@2.2.0: + resolution: {integrity: 
sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + + d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + + d3-array@3.2.4: + resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} + engines: {node: '>=12'} + + d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + + d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + + d3-chord@3.0.1: + resolution: {integrity: sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + + d3-color@3.1.0: + resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} + engines: {node: '>=12'} + + d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + + d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + + d3-dispatch@3.0.1: + resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + + d3-drag@3.0.0: + resolution: {integrity: sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} + engines: {node: '>=12'} + + d3-dsv@3.0.1: 
+ resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + + d3-ease@3.0.1: + resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} + engines: {node: '>=12'} + + d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + + d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + + d3-format@3.1.2: + resolution: {integrity: sha512-AJDdYOdnyRDV5b6ArilzCPPwc1ejkHcoyFarqlPqT7zRYjhavcT3uSrqcMvsgh2CgoPbK3RCwyHaVyxYcP2Arg==} + engines: {node: '>=12'} + + d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + + d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + + d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + + d3-path@1.0.9: + resolution: {integrity: sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + + d3-path@3.1.0: + resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} + engines: {node: '>=12'} + + d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + + d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: 
'>=12'} + + d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + + d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + + d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + + d3-scale@4.0.2: + resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} + engines: {node: '>=12'} + + d3-selection@3.0.0: + resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} + engines: {node: '>=12'} + + d3-shape@1.3.7: + resolution: {integrity: sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + + d3-shape@3.2.0: + resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} + engines: {node: '>=12'} + + d3-time-format@4.1.0: + resolution: {integrity: sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==} + engines: {node: '>=12'} + + d3-time@3.1.0: + resolution: {integrity: sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==} + engines: {node: '>=12'} + + d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + + d3-transition@3.0.1: + resolution: {integrity: sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==} + engines: {node: '>=12'} + peerDependencies: + d3-selection: 2 - 3 + + d3-zoom@3.0.0: + resolution: {integrity: 
sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} + engines: {node: '>=12'} + + d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + + dagre-d3-es@7.0.13: + resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + + dayjs@1.11.19: + resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decode-named-character-reference@1.3.0: + resolution: {integrity: sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==} + + defu@6.1.4: + resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} + + delaunator@5.0.1: + resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} + + dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + destr@2.0.5: + resolution: {integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==} + + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + + deterministic-object-hash@2.0.2: + resolution: {integrity: sha512-KxektNH63SrbfUyDiwXqRb1rLwKt33AmMv+5Nhsw1kqZ13SJBRTgZHtGbE+hH3a1mVW1cz+4pqSWVPAtLVXTzQ==} + engines: {node: '>=18'} + + devalue@5.6.2: + resolution: 
{integrity: sha512-nPRkjWzzDQlsejL1WVifk5rvcFi/y1onBRxjaFMjZeR9mFpqu2gmAZ9xUB9/IEanEP/vBtGeGganC/GO1fmufg==} + + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} + + diff@8.0.3: + resolution: {integrity: sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ==} + engines: {node: '>=0.3.1'} + + direction@2.0.1: + resolution: {integrity: sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA==} + hasBin: true + + dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + + dom-serializer@2.0.0: + resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} + + domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + + domhandler@5.0.3: + resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} + engines: {node: '>= 4'} + + dompurify@3.3.1: + resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==} + + domutils@3.2.2: + resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} + + dset@3.1.4: + resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} + engines: {node: '>=4'} + + emoji-regex@10.6.0: + resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + encoding@0.1.13: + resolution: {integrity: 
sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} + engines: {node: '>=0.12'} + + entities@7.0.1: + resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} + engines: {node: '>=0.12'} + + env-paths@2.2.1: + resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} + engines: {node: '>=6'} + + err-code@2.0.3: + resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + esast-util-from-estree@2.0.0: + resolution: {integrity: sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==} + + esast-util-from-js@2.0.1: + resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==} + + esbuild@0.25.12: + resolution: {integrity: sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==} + engines: {node: '>=18'} + hasBin: true + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: '>=18'} + hasBin: true + + escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + engines: {node: '>=12'} + + estree-util-attach-comments@3.0.0: + resolution: {integrity: 
sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==} + + estree-util-build-jsx@3.0.1: + resolution: {integrity: sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==} + + estree-util-is-identifier-name@3.0.0: + resolution: {integrity: sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==} + + estree-util-scope@1.0.0: + resolution: {integrity: sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==} + + estree-util-to-js@2.0.0: + resolution: {integrity: sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==} + + estree-util-visit@2.0.0: + resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + eventemitter3@5.0.4: + resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==} + + exponential-backoff@3.1.3: + resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} + + expressive-code@0.41.6: + resolution: {integrity: sha512-W/5+IQbrpCIM5KGLjO35wlp1NCwDOOVQb+PAvzEoGkW1xjGM807ZGfBKptNWH6UECvt6qgmLyWolCMYKh7eQmA==} + + extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: 
+ picomatch: + optional: true + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + flattie@1.1.1: + resolution: {integrity: sha512-9UbaD6XdAL97+k/n+N7JwX46K/M6Zc6KcFYskrYL8wbBV/Uyk0CTAMY0VT+qiK5PM7AIc9aTWYtq65U7T+aCNQ==} + engines: {node: '>=8'} + + fontace@0.4.1: + resolution: {integrity: sha512-lDMvbAzSnHmbYMTEld5qdtvNH2/pWpICOqpean9IgC7vUbUJc3k+k5Dokp85CegamqQpFbXf0rAVkbzpyTA8aw==} + + fontkitten@1.0.2: + resolution: {integrity: sha512-piJxbLnkD9Xcyi7dWJRnqszEURixe7CrF/efBfbffe2DPyabmuIuqraruY8cXTs19QoM8VJzx47BDRVNXETM7Q==} + engines: {node: '>=20'} + + fs-minipass@3.0.3: + resolution: {integrity: sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + get-east-asian-width@1.4.0: + resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} + engines: {node: '>=18'} + + github-slugger@2.0.0: + resolution: {integrity: sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==} + + glob@13.0.3: + resolution: {integrity: sha512-/g3B0mC+4x724v1TgtBlBtt2hPi/EWptsIAmXUx9Z2rvBYleQcsrmaOzd5LyL50jf/Soi83ZDJmw2+XqvH/EeA==} + engines: {node: 20 || >=22} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + h3@1.15.5: + resolution: {integrity: sha512-xEyq3rSl+dhGX2Lm0+eFQIAzlDN6Fs0EcC4f7BNUmzaRX/PTzeuM+Tr2lHB8FoXggsQIeXLj8EDVgs5ywxyxmg==} + + hachure-fill@0.5.2: + resolution: {integrity: 
sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} + + hast-util-embedded@3.0.0: + resolution: {integrity: sha512-naH8sld4Pe2ep03qqULEtvYr7EjrLK2QHY8KJR6RJkTUjPGObe1vnx585uzem2hGra+s1q08DZZpfgDVYRbaXA==} + + hast-util-format@1.1.0: + resolution: {integrity: sha512-yY1UDz6bC9rDvCWHpx12aIBGRG7krurX0p0Fm6pT547LwDIZZiNr8a+IHDogorAdreULSEzP82Nlv5SZkHZcjA==} + + hast-util-from-html@2.0.3: + resolution: {integrity: sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==} + + hast-util-from-parse5@8.0.3: + resolution: {integrity: sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==} + + hast-util-has-property@3.0.0: + resolution: {integrity: sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA==} + + hast-util-is-body-ok-link@3.0.1: + resolution: {integrity: sha512-0qpnzOBLztXHbHQenVB8uNuxTnm/QBFUOmdOSsEn7GnBtyY07+ENTWVFBAnXd/zEgd9/SUG3lRY7hSIBWRgGpQ==} + + hast-util-is-element@3.0.0: + resolution: {integrity: sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==} + + hast-util-minify-whitespace@1.0.1: + resolution: {integrity: sha512-L96fPOVpnclQE0xzdWb/D12VT5FabA7SnZOUMtL1DbXmYiHJMXZvFkIZfiMmTCNJHUeO2K9UYNXoVyfz+QHuOw==} + + hast-util-parse-selector@4.0.0: + resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} + + hast-util-phrasing@3.0.1: + resolution: {integrity: sha512-6h60VfI3uBQUxHqTyMymMZnEbNl1XmEGtOxxKYL7stY2o601COo62AWAYBQR9lZbYXYSBoxag8UpPRXK+9fqSQ==} + + hast-util-raw@9.1.0: + resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + + hast-util-select@6.0.4: + resolution: {integrity: sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw==} + + hast-util-to-estree@3.1.3: + 
resolution: {integrity: sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==} + + hast-util-to-html@9.0.5: + resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} + + hast-util-to-jsx-runtime@2.3.6: + resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==} + + hast-util-to-mdast@10.1.2: + resolution: {integrity: sha512-FiCRI7NmOvM4y+f5w32jPRzcxDIz+PUqDwEqn1A+1q2cdp3B8Gx7aVrXORdOKjMNDQsD1ogOr896+0jJHW1EFQ==} + + hast-util-to-parse5@8.0.1: + resolution: {integrity: sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==} + + hast-util-to-string@3.0.1: + resolution: {integrity: sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==} + + hast-util-to-text@4.0.2: + resolution: {integrity: sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==} + + hast-util-whitespace@3.0.0: + resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==} + + hastscript@9.0.1: + resolution: {integrity: sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==} + + html-escaper@3.0.3: + resolution: {integrity: sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ==} + + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + + html-whitespace-sensitive-tag-names@3.0.1: + resolution: {integrity: sha512-q+310vW8zmymYHALr1da4HyXUQ0zgiIwIicEfotYPWGN0OJVEN/58IJ3A4GBYcEq3LGAZqKb+ugvP0GNB9CEAA==} + + htmlparser2@10.1.0: + resolution: {integrity: sha512-VTZkM9GWRAtEpveh7MSF6SjjrpNVNNVJfFup7xTY3UpFtm67foy9HDVXneLtFVt4pMz5kZtgNcvCniNFb1hlEQ==} + + http-cache-semantics@4.2.0: + 
resolution: {integrity: sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==} + + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + + i18next@23.16.8: + resolution: {integrity: sha512-06r/TitrM88Mg5FdUXAKL96dJMzgqLE5dv3ryBAra4KCwD9mJ4ndOTS95ZuymIGoE+2hzfdaMak2X11/es7ZWg==} + + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + import-meta-resolve@4.2.0: + resolution: {integrity: sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg==} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inline-style-parser@0.2.7: + resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} + + internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + + internmap@2.0.3: + resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} + engines: {node: '>=12'} + + ip-address@10.1.0: + resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==} + engines: {node: '>= 12'} + + iron-webcrypto@1.2.1: + resolution: {integrity: sha512-feOM6FaSr6rEABp/eDfVseKyTMDt+KGpeB35SkVn9Tyn0CqvVsY3EwI0v5i8nMHyJnzCIQf7nsy3p41TPkJZhg==} + + is-absolute-url@4.0.1: + resolution: {integrity: 
sha512-/51/TKE88Lmm7Gc4/8btclNXWS+g50wXhYJq8HWIBAGUBnoAdRu1aXeh364t/O7wXDAcTJDP8PNuNKWUDWie+A==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + is-alphabetical@2.0.1: + resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} + + is-alphanumerical@2.0.1: + resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} + + is-decimal@2.0.1: + resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} + + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-hexadecimal@2.0.1: + resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} + + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} + engines: {node: '>=12'} + + is-wsl@3.1.0: + resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} + engines: {node: '>=16'} + + isexe@4.0.0: + resolution: {integrity: sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw==} + 
engines: {node: '>=20'} + + jackspeak@4.2.3: + resolution: {integrity: sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg==} + engines: {node: 20 || >=22} + + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} + hasBin: true + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + katex@0.16.28: + resolution: {integrity: sha512-YHzO7721WbmAL6Ov1uzN/l5mY5WWWhJBSW+jq4tkfZfsxmo1hu6frS0EOswvjBUnWE6NtjEs48SFn5CQESRLZg==} + hasBin: true + + khroma@2.1.0: + resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + + kleur@3.0.3: + resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + + klona@2.0.6: + resolution: {integrity: sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==} + engines: {node: '>= 8'} + + langium@4.2.1: + resolution: {integrity: sha512-zu9QWmjpzJcomzdJQAHgDVhLGq5bLosVak1KVa40NzQHXfqr4eAHupvnPOVXEoLkg6Ocefvf/93d//SB7du4YQ==} + engines: {node: '>=20.10.0', npm: '>=10.2.3'} + + layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + + layout-base@2.0.1: + resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} + + lodash-es@4.17.23: + resolution: {integrity: sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==} + + longest-streak@3.1.0: + resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} + + lru-cache@11.2.6: + resolution: {integrity: 
sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==} + engines: {node: 20 || >=22} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + magicast@0.5.2: + resolution: {integrity: sha512-E3ZJh4J3S9KfwdjZhe2afj6R9lGIN5Pher1pF39UGrXRqq/VDaGVIGN13BjHd2u8B61hArAGOnso7nBOouW3TQ==} + + make-fetch-happen@15.0.3: + resolution: {integrity: sha512-iyyEpDty1mwW3dGlYXAJqC/azFn5PPvgKVwXayOGBSmKLxhKZ9fg4qIan2ePpp1vJIwfFiO34LAPZgq9SZW9Aw==} + engines: {node: ^20.17.0 || >=22.9.0} + + markdown-extensions@2.0.0: + resolution: {integrity: sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==} + engines: {node: '>=16'} + + markdown-table@3.0.4: + resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + + marked@16.4.2: + resolution: {integrity: sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==} + engines: {node: '>= 20'} + hasBin: true + + mdast-util-definitions@6.0.0: + resolution: {integrity: sha512-scTllyX6pnYNZH/AIp/0ePz6s4cZtARxImwoPJ7kS42n+MnVsI4XbnG6d4ibehRIldYMWM2LD7ImQblVhUejVQ==} + + mdast-util-directive@3.1.0: + resolution: {integrity: sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q==} + + mdast-util-find-and-replace@3.0.2: + resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==} + + mdast-util-from-markdown@2.0.2: + resolution: {integrity: sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==} + + mdast-util-gfm-autolink-literal@2.0.1: + resolution: {integrity: sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==} + + mdast-util-gfm-footnote@2.1.0: + resolution: {integrity: 
sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==} + + mdast-util-gfm-strikethrough@2.0.0: + resolution: {integrity: sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==} + + mdast-util-gfm-table@2.0.0: + resolution: {integrity: sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==} + + mdast-util-gfm-task-list-item@2.0.0: + resolution: {integrity: sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==} + + mdast-util-gfm@3.1.0: + resolution: {integrity: sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==} + + mdast-util-mdx-expression@2.0.1: + resolution: {integrity: sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==} + + mdast-util-mdx-jsx@3.2.0: + resolution: {integrity: sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==} + + mdast-util-mdx@3.0.0: + resolution: {integrity: sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==} + + mdast-util-mdxjs-esm@2.0.1: + resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==} + + mdast-util-phrasing@4.1.0: + resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==} + + mdast-util-to-hast@13.2.1: + resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==} + + mdast-util-to-markdown@2.1.2: + resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} + + mdast-util-to-string@4.0.0: + resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} + + mdn-data@2.0.28: + resolution: 
{integrity: sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==} + + mdn-data@2.12.2: + resolution: {integrity: sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==} + + mermaid@11.12.3: + resolution: {integrity: sha512-wN5ZSgJQIC+CHJut9xaKWsknLxaFBwCPwPkGTSUYrTiHORWvpT8RxGk849HPnpUAQ+/9BPRqYb80jTpearrHzQ==} + + micromark-core-commonmark@2.0.3: + resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} + + micromark-extension-directive@3.0.2: + resolution: {integrity: sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==} + + micromark-extension-gfm-autolink-literal@2.1.0: + resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} + + micromark-extension-gfm-footnote@2.1.0: + resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==} + + micromark-extension-gfm-strikethrough@2.1.0: + resolution: {integrity: sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==} + + micromark-extension-gfm-table@2.1.1: + resolution: {integrity: sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==} + + micromark-extension-gfm-tagfilter@2.0.0: + resolution: {integrity: sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==} + + micromark-extension-gfm-task-list-item@2.1.0: + resolution: {integrity: sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==} + + micromark-extension-gfm@3.0.0: + resolution: {integrity: sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==} + + micromark-extension-mdx-expression@3.0.1: + resolution: {integrity: 
sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==} + + micromark-extension-mdx-jsx@3.0.2: + resolution: {integrity: sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==} + + micromark-extension-mdx-md@2.0.0: + resolution: {integrity: sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==} + + micromark-extension-mdxjs-esm@3.0.0: + resolution: {integrity: sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==} + + micromark-extension-mdxjs@3.0.0: + resolution: {integrity: sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==} + + micromark-factory-destination@2.0.1: + resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} + + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} + + micromark-factory-mdx-expression@2.0.3: + resolution: {integrity: sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==} + + micromark-factory-space@2.0.1: + resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} + + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} + + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} + + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + + micromark-util-chunked@2.0.1: + resolution: {integrity: 
sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} + + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} + + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} + + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} + + micromark-util-decode-string@2.0.1: + resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==} + + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} + + micromark-util-events-to-acorn@2.0.3: + resolution: {integrity: sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==} + + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} + + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} + + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} + + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + + micromark-util-subtokenize@2.1.0: + resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} + + micromark-util-symbol@2.0.1: + resolution: {integrity: 
sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + + micromark-util-types@2.0.2: + resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==} + + micromark@4.0.2: + resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + minimatch@10.2.0: + resolution: {integrity: sha512-ugkC31VaVg9cF0DFVoADH12k6061zNZkZON+aX8AWsR9GhPcErkcMBceb6znR8wLERM2AkkOxy2nWRLpT9Jq5w==} + engines: {node: 20 || >=22} + + minipass-collect@2.0.1: + resolution: {integrity: sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==} + engines: {node: '>=16 || 14 >=14.17'} + + minipass-fetch@5.0.1: + resolution: {integrity: sha512-yHK8pb0iCGat0lDrs/D6RZmCdaBT64tULXjdxjSMAqoDi18Q3qKEUTHypHQZQd9+FYpIS+lkvpq6C/R6SbUeRw==} + engines: {node: ^20.17.0 || >=22.9.0} + + minipass-flush@1.0.5: + resolution: {integrity: sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==} + engines: {node: '>= 8'} + + minipass-pipeline@1.2.4: + resolution: {integrity: sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==} + engines: {node: '>=8'} + + minipass-sized@2.0.0: + resolution: {integrity: sha512-zSsHhto5BcUVM2m1LurnXY6M//cGhVaegT71OfOXoprxT6o780GZd792ea6FfrQkuU4usHZIUczAQMRUE2plzA==} + engines: {node: '>=8'} + + minipass@3.3.6: + resolution: {integrity: sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==} + engines: {node: '>=8'} + + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} 
+ + minizlib@3.1.0: + resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==} + engines: {node: '>= 18'} + + mlly@1.8.0: + resolution: {integrity: sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + + mrmime@2.0.1: + resolution: {integrity: sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==} + engines: {node: '>=10'} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + negotiator@1.0.0: + resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} + engines: {node: '>= 0.6'} + + neotraverse@0.6.18: + resolution: {integrity: sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA==} + engines: {node: '>= 10'} + + nlcst-to-string@4.0.0: + resolution: {integrity: sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA==} + + node-addon-api@8.5.0: + resolution: {integrity: sha512-/bRZty2mXUIFY/xU5HLvveNHlswNJej+RnxBjOMkidWfwZzgTbPG1E3K5TOxRLOR+5hX7bSofy8yf1hZevMS8A==} + engines: {node: ^18 || ^20 || >= 21} + + node-fetch-native@1.6.7: + resolution: {integrity: sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==} + + node-gyp@12.2.0: + resolution: {integrity: sha512-q23WdzrQv48KozXlr0U1v9dwO/k59NHeSzn6loGcasyf0UnSrtzs8kRxM+mfwJSf0DkX0s43hcqgnSO4/VNthQ==} + engines: {node: ^20.17.0 || >=22.9.0} + hasBin: true + + node-mock-http@1.0.4: + resolution: {integrity: 
sha512-8DY+kFsDkNXy1sJglUfuODx1/opAGJGyrTuFqEoN90oRc2Vk0ZbD4K2qmKXBBEhZQzdKHIVfEJpDU8Ak2NJEvQ==} + + nopt@9.0.0: + resolution: {integrity: sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw==} + engines: {node: ^20.17.0 || >=22.9.0} + hasBin: true + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + ofetch@1.5.1: + resolution: {integrity: sha512-2W4oUZlVaqAPAil6FUg/difl6YhqhUR7x2eZY4bQCko22UXg3hptq9KLQdqFClV+Wu85UX7hNtdGTngi/1BxcA==} + + ohash@2.0.11: + resolution: {integrity: sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==} + + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.4: + resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} + + p-limit@6.2.0: + resolution: {integrity: sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA==} + engines: {node: '>=18'} + + p-map@7.0.4: + resolution: {integrity: sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==} + engines: {node: '>=18'} + + p-queue@8.1.1: + resolution: {integrity: sha512-aNZ+VfjobsWryoiPnEApGGmf5WmNsCo9xu8dfaYamG5qaLP7ClhLN6NgsFe6SwJ2UbLEBK5dv9x8Mn5+RVhMWQ==} + engines: {node: '>=18'} + + p-timeout@6.1.4: + resolution: {integrity: sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg==} + engines: {node: '>=14.16'} + + package-manager-detector@1.6.0: + resolution: {integrity: 
sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} + + pagefind@1.4.0: + resolution: {integrity: sha512-z2kY1mQlL4J8q5EIsQkLzQjilovKzfNVhX8De6oyE6uHpfFtyBaqUpcl/XzJC/4fjD8vBDyh1zolimIcVrCn9g==} + hasBin: true + + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} + + parse-latin@7.0.0: + resolution: {integrity: sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ==} + + parse5@7.3.0: + resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} + + path-data-parser@0.1.0: + resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + + path-scurry@2.0.1: + resolution: {integrity: sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==} + engines: {node: 20 || >=22} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + piccolore@0.1.3: + resolution: {integrity: sha512-o8bTeDWjE086iwKrROaDf31K0qC/BENdm15/uH9usSC/uZjJOKb2YGiVHfLY4GhwsERiPI1jmwI2XrA7ACOxVw==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + + points-on-curve@0.2.0: + resolution: {integrity: 
sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} + + points-on-path@0.2.1: + resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + + postcss-nested@6.2.0: + resolution: {integrity: sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + + postcss-selector-parser@6.1.2: + resolution: {integrity: sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==} + engines: {node: '>=4'} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + prismjs@1.30.0: + resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==} + engines: {node: '>=6'} + + proc-log@6.1.0: + resolution: {integrity: sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ==} + engines: {node: ^20.17.0 || >=22.9.0} + + promise-retry@2.0.1: + resolution: {integrity: sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==} + engines: {node: '>=10'} + + prompts@2.4.2: + resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + + property-information@7.1.0: + resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + + radix3@1.1.2: + resolution: {integrity: sha512-b484I/7b8rDEdSDKckSSBA8knMpcdsXudlE/LNL639wFoHKwLbEkQFZHWEYwDC0wa0FKUcCY+GAF73Z7wxNVFA==} + + readdirp@5.0.0: + resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} + engines: {node: '>= 20.19.0'} + + 
recma-build-jsx@1.0.0: + resolution: {integrity: sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==} + + recma-jsx@1.0.1: + resolution: {integrity: sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + recma-parse@1.0.0: + resolution: {integrity: sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==} + + recma-stringify@1.0.0: + resolution: {integrity: sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==} + + regex-recursion@6.0.2: + resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} + + regex-utilities@2.3.0: + resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@6.1.0: + resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==} + + rehype-expressive-code@0.41.6: + resolution: {integrity: sha512-aBMX8kxPtjmDSFUdZlAWJkMvsQ4ZMASfee90JWIAV8tweltXLzkWC3q++43ToTelI8ac5iC0B3/S/Cl4Ql1y2g==} + + rehype-external-links@3.0.0: + resolution: {integrity: sha512-yp+e5N9V3C6bwBeAC4n796kc86M4gJCdlVhiMTxIrJG5UHDMh+PJANf9heqORJbt1nrCbDwIlAZKjANIaVBbvw==} + + rehype-format@5.0.1: + resolution: {integrity: sha512-zvmVru9uB0josBVpr946OR8ui7nJEdzZobwLOOqHb/OOD88W0Vk2SqLwoVOj0fM6IPCCO6TaV9CvQvJMWwukFQ==} + + rehype-minify-whitespace@6.0.2: + resolution: {integrity: sha512-Zk0pyQ06A3Lyxhe9vGtOtzz3Z0+qZ5+7icZ/PL/2x1SHPbKao5oB/g/rlc6BCTajqBb33JcOe71Ye1oFsuYbnw==} + + rehype-parse@9.0.1: + resolution: {integrity: sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==} + + rehype-raw@7.0.0: + resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + + 
rehype-recma@1.0.0: + resolution: {integrity: sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==} + + rehype-remark@10.0.1: + resolution: {integrity: sha512-EmDndlb5NVwXGfUa4c9GPK+lXeItTilLhE6ADSaQuHr4JUlKw9MidzGzx4HpqZrNCt6vnHmEifXQiiA+CEnjYQ==} + + rehype-stringify@10.0.1: + resolution: {integrity: sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA==} + + rehype@13.0.2: + resolution: {integrity: sha512-j31mdaRFrwFRUIlxGeuPXXKWQxet52RBQRvCmzl5eCefn/KGbomK5GMHNMsOJf55fgo3qw5tST5neDuarDYR2A==} + + remark-directive@3.0.1: + resolution: {integrity: sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A==} + + remark-gfm@4.0.1: + resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==} + + remark-mdx@3.1.1: + resolution: {integrity: sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==} + + remark-parse@11.0.0: + resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==} + + remark-rehype@11.1.2: + resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==} + + remark-smartypants@3.0.2: + resolution: {integrity: sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA==} + engines: {node: '>=16.0.0'} + + remark-stringify@11.0.0: + resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + + retext-latin@4.0.0: + resolution: {integrity: sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA==} + + retext-smartypants@6.2.0: + resolution: {integrity: sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ==} + + retext-stringify@4.0.0: + 
resolution: {integrity: sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA==} + + retext@9.0.0: + resolution: {integrity: sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA==} + + retry@0.12.0: + resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} + engines: {node: '>= 4'} + + robust-predicates@3.0.2: + resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + + rollup@4.57.1: + resolution: {integrity: sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + roughjs@4.6.6: + resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + + rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + sax@1.4.4: + resolution: {integrity: sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==} + engines: {node: '>=11.0.0'} + + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + sharp@0.34.5: + resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + + shiki@3.22.0: + resolution: {integrity: sha512-LBnhsoYEe0Eou4e1VgJACes+O6S6QC0w71fCSp5Oya79inkwkm15gQ1UF6VtQ8j/taMDh79hAB49WUk8ALQW3g==} + + sisteransi@1.0.5: + resolution: {integrity: 
sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + + sitemap@8.0.2: + resolution: {integrity: sha512-LwktpJcyZDoa0IL6KT++lQ53pbSrx2c9ge41/SeLTyqy2XUNA6uR4+P9u5IVo5lPeL2arAcOKn1aZAxoYbCKlQ==} + engines: {node: '>=14.0.0', npm: '>=6.0.0'} + hasBin: true + + smart-buffer@4.2.0: + resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} + engines: {node: '>= 6.0.0', npm: '>= 3.0.0'} + + smol-toml@1.6.0: + resolution: {integrity: sha512-4zemZi0HvTnYwLfrpk/CF9LOd9Lt87kAt50GnqhMpyF9U3poDAP2+iukq2bZsO/ufegbYehBkqINbsWxj4l4cw==} + engines: {node: '>= 18'} + + socks-proxy-agent@8.0.5: + resolution: {integrity: sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==} + engines: {node: '>= 14'} + + socks@2.8.7: + resolution: {integrity: sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==} + engines: {node: '>= 10.0.0', npm: '>= 3.0.0'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map@0.7.6: + resolution: {integrity: sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==} + engines: {node: '>= 12'} + + space-separated-tokens@2.0.2: + resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} + + ssri@13.0.1: + resolution: {integrity: sha512-QUiRf1+u9wPTL/76GTYlKttDEBWV1ga9ZXW8BG6kfdeyyM8LGPix9gROyg9V2+P0xNyF3X2Go526xKFdMZrHSQ==} + engines: {node: ^20.17.0 || >=22.9.0} + + starlight-llms-txt@0.7.0: + resolution: {integrity: sha512-KAay6JLXqB0GiNQ481z3Z/h/y4xeAU55TUGLz+npjxcRvN3h/7rDxjmyLiphZF8xfoqqSTduQPanl5Ct4Je6kA==} + engines: {node: ^18.17.1 || ^20.3.0 || >=21.0.0} + peerDependencies: + '@astrojs/starlight': '>=0.31' + astro: ^5.15.9 + + 
stream-replace-string@2.0.0: + resolution: {integrity: sha512-TlnjJ1C0QrmxRNrON00JvaFFlNh5TTG00APw23j74ET7gkQpTASi6/L2fuiav8pzK715HXtUeClpBTw2NPSn6w==} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string-width@7.2.0: + resolution: {integrity: sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==} + engines: {node: '>=18'} + + stringify-entities@4.0.4: + resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.1.2: + resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} + engines: {node: '>=12'} + + style-to-js@1.1.21: + resolution: {integrity: sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==} + + style-to-object@1.0.14: + resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==} + + stylis@4.3.6: + resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + + svgo@4.0.0: + resolution: {integrity: sha512-VvrHQ+9uniE+Mvx3+C9IEe/lWasXCU0nXMY2kZeLrHNICuRiC8uMPyM14UEaMOFA5mhyQqEkB02VoQ16n3DLaw==} + engines: {node: '>=16'} + hasBin: true + + tar@7.5.7: + resolution: {integrity: sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ==} + engines: {node: '>=18'} + + tiny-inflate@1.0.3: + resolution: {integrity: sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==} + + tinyexec@1.0.2: + resolution: {integrity: 
sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + trim-lines@3.0.1: + resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} + + trim-trailing-lines@2.1.0: + resolution: {integrity: sha512-5UR5Biq4VlVOtzqkm2AZlgvSlDJtME46uV0br0gENbwN4l5+mMKT4b9gJKqWtuL2zAIqajGJGuvbCbcAJUZqBg==} + + trough@2.2.0: + resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} + + ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + + tsconfck@3.1.6: + resolution: {integrity: sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==} + engines: {node: ^18 || >=20} + hasBin: true + peerDependencies: + typescript: ^5.0.0 + peerDependenciesMeta: + typescript: + optional: true + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + type-fest@4.41.0: + resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==} + engines: {node: '>=16'} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + ufo@1.6.3: + resolution: {integrity: 
sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==} + + ultrahtml@1.6.0: + resolution: {integrity: sha512-R9fBn90VTJrqqLDwyMph+HGne8eqY1iPfYhPzZrvKpIfwkWZbcYlfpsb8B9dTvBfpy1/hqAD7Wi8EKfP9e8zdw==} + + uncrypto@0.1.3: + resolution: {integrity: sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q==} + + unified@11.0.5: + resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==} + + unifont@0.7.3: + resolution: {integrity: sha512-b0GtQzKCyuSHGsfj5vyN8st7muZ6VCI4XD4vFlr7Uy1rlWVYxC3npnfk8MyreHxJYrz1ooLDqDzFe9XqQTlAhA==} + + unique-filename@5.0.0: + resolution: {integrity: sha512-2RaJTAvAb4owyjllTfXzFClJ7WsGxlykkPvCr9pA//LD9goVq+m4PPAeBgNodGZ7nSrntT/auWpJ6Y5IFXcfjg==} + engines: {node: ^20.17.0 || >=22.9.0} + + unique-slug@6.0.0: + resolution: {integrity: sha512-4Lup7Ezn8W3d52/xBhZBVdx323ckxa7DEvd9kPQHppTkLoJXw6ltrBCyj5pnrxj0qKDxYMJ56CoxNuFCscdTiw==} + engines: {node: ^20.17.0 || >=22.9.0} + + unist-util-find-after@5.0.0: + resolution: {integrity: sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==} + + unist-util-is@6.0.1: + resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==} + + unist-util-modify-children@4.0.0: + resolution: {integrity: sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw==} + + unist-util-position-from-estree@2.0.0: + resolution: {integrity: sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==} + + unist-util-position@5.0.0: + resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==} + + unist-util-remove-position@5.0.0: + resolution: {integrity: sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==} + + 
unist-util-remove@4.0.0: + resolution: {integrity: sha512-b4gokeGId57UVRX/eVKej5gXqGlc9+trkORhFJpu9raqZkZhU0zm8Doi05+HaiBsMEIJowL+2WtQ5ItjsngPXg==} + + unist-util-stringify-position@4.0.0: + resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} + + unist-util-visit-children@3.0.0: + resolution: {integrity: sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA==} + + unist-util-visit-parents@6.0.2: + resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==} + + unist-util-visit@5.1.0: + resolution: {integrity: sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==} + + unstorage@1.17.4: + resolution: {integrity: sha512-fHK0yNg38tBiJKp/Vgsq4j0JEsCmgqH58HAn707S7zGkArbZsVr/CwINoi+nh3h98BRCwKvx1K3Xg9u3VV83sw==} + peerDependencies: + '@azure/app-configuration': ^1.8.0 + '@azure/cosmos': ^4.2.0 + '@azure/data-tables': ^13.3.0 + '@azure/identity': ^4.6.0 + '@azure/keyvault-secrets': ^4.9.0 + '@azure/storage-blob': ^12.26.0 + '@capacitor/preferences': ^6 || ^7 || ^8 + '@deno/kv': '>=0.9.0' + '@netlify/blobs': ^6.5.0 || ^7.0.0 || ^8.1.0 || ^9.0.0 || ^10.0.0 + '@planetscale/database': ^1.19.0 + '@upstash/redis': ^1.34.3 + '@vercel/blob': '>=0.27.1' + '@vercel/functions': ^2.2.12 || ^3.0.0 + '@vercel/kv': ^1 || ^2 || ^3 + aws4fetch: ^1.0.20 + db0: '>=0.2.1' + idb-keyval: ^6.2.1 + ioredis: ^5.4.2 + uploadthing: ^7.4.4 + peerDependenciesMeta: + '@azure/app-configuration': + optional: true + '@azure/cosmos': + optional: true + '@azure/data-tables': + optional: true + '@azure/identity': + optional: true + '@azure/keyvault-secrets': + optional: true + '@azure/storage-blob': + optional: true + '@capacitor/preferences': + optional: true + '@deno/kv': + optional: true + '@netlify/blobs': + optional: true + '@planetscale/database': + optional: true + '@upstash/redis': + 
optional: true + '@vercel/blob': + optional: true + '@vercel/functions': + optional: true + '@vercel/kv': + optional: true + aws4fetch: + optional: true + db0: + optional: true + idb-keyval: + optional: true + ioredis: + optional: true + uploadthing: + optional: true + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + vfile-location@5.0.3: + resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==} + + vfile-message@4.0.3: + resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==} + + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} + + vite@6.4.1: + resolution: {integrity: sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: '>=1.21.0' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitefu@1.1.1: + resolution: {integrity: sha512-B/Fegf3i8zh0yFbpzZ21amWzHmuNlLlmJT6n7bu5e+pCHUKQIfXSYokrqOBGEMMe9UG2sostKQF9mml/vYaWJQ==} + peerDependencies: + vite: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0 
+ peerDependenciesMeta: + vite: + optional: true + + vscode-jsonrpc@8.2.0: + resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} + engines: {node: '>=14.0.0'} + + vscode-languageserver-protocol@3.17.5: + resolution: {integrity: sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==} + + vscode-languageserver-textdocument@1.0.12: + resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} + + vscode-languageserver-types@3.17.5: + resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==} + + vscode-languageserver@9.0.1: + resolution: {integrity: sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==} + hasBin: true + + vscode-uri@3.1.0: + resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} + + web-namespaces@2.0.1: + resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + + which-pm-runs@1.1.0: + resolution: {integrity: sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==} + engines: {node: '>=4'} + + which@6.0.1: + resolution: {integrity: sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg==} + engines: {node: ^20.17.0 || >=22.9.0} + hasBin: true + + widest-line@5.0.0: + resolution: {integrity: sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==} + engines: {node: '>=18'} + + wrap-ansi@9.0.2: + resolution: {integrity: sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==} + engines: {node: '>=18'} + + xxhash-wasm@1.1.0: + resolution: {integrity: 
sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA==} + + yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + + yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yocto-queue@1.2.2: + resolution: {integrity: sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==} + engines: {node: '>=12.20'} + + yocto-spinner@0.2.3: + resolution: {integrity: sha512-sqBChb33loEnkoXte1bLg45bEBsOP9N1kzQh5JZNKj/0rik4zAPTNSAVPj3uQAdc6slYJ0Ksc403G2XgxsJQFQ==} + engines: {node: '>=18.19'} + + yoctocolors@2.1.2: + resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==} + engines: {node: '>=18'} + + zod-to-json-schema@3.25.1: + resolution: {integrity: sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==} + peerDependencies: + zod: ^3.25 || ^4 + + zod-to-ts@1.2.0: + resolution: {integrity: sha512-x30XE43V+InwGpvTySRNz9kB7qFU8DlyEy7BsSTCHPH1R0QasMmHWZDCzYm6bVXtj/9NNJAZF3jW8rzFvH5OFA==} + peerDependencies: + typescript: ^4.9.4 || ^5.0.2 + zod: ^3 + + zod@3.25.76: + resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} + + zwitch@2.0.4: + resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} + +snapshots: + + '@antfu/install-pkg@1.1.0': + dependencies: + package-manager-detector: 1.6.0 + tinyexec: 1.0.2 + + '@astrojs/compiler@2.13.1': {} + + '@astrojs/internal-helpers@0.7.5': {} + + 
'@astrojs/markdoc@0.15.10(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3))': + dependencies: + '@astrojs/internal-helpers': 0.7.5 + '@astrojs/markdown-remark': 6.3.10 + '@astrojs/prism': 3.3.0 + '@markdoc/markdoc': 0.5.4 + astro: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + esbuild: 0.25.12 + github-slugger: 2.0.0 + htmlparser2: 10.1.0 + transitivePeerDependencies: + - '@types/react' + - react + - supports-color + + '@astrojs/markdown-remark@6.3.10': + dependencies: + '@astrojs/internal-helpers': 0.7.5 + '@astrojs/prism': 3.3.0 + github-slugger: 2.0.0 + hast-util-from-html: 2.0.3 + hast-util-to-text: 4.0.2 + import-meta-resolve: 4.2.0 + js-yaml: 4.1.1 + mdast-util-definitions: 6.0.0 + rehype-raw: 7.0.0 + rehype-stringify: 10.0.1 + remark-gfm: 4.0.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + remark-smartypants: 3.0.2 + shiki: 3.22.0 + smol-toml: 1.6.0 + unified: 11.0.5 + unist-util-remove-position: 5.0.0 + unist-util-visit: 5.1.0 + unist-util-visit-parents: 6.0.2 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@astrojs/mdx@4.3.13(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3))': + dependencies: + '@astrojs/markdown-remark': 6.3.10 + '@mdx-js/mdx': 3.1.1 + acorn: 8.15.0 + astro: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + es-module-lexer: 1.7.0 + estree-util-visit: 2.0.0 + hast-util-to-html: 9.0.5 + piccolore: 0.1.3 + rehype-raw: 7.0.0 + remark-gfm: 4.0.1 + remark-smartypants: 3.0.2 + source-map: 0.7.6 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@astrojs/prism@3.3.0': + dependencies: + prismjs: 1.30.0 + + '@astrojs/sitemap@3.7.0': + dependencies: + sitemap: 8.0.2 + stream-replace-string: 2.0.0 + zod: 3.25.76 + + '@astrojs/starlight-markdoc@0.5.1(@astrojs/markdoc@0.15.10(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)))(@astrojs/starlight@0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)))': + dependencies: + 
'@astrojs/markdoc': 0.15.10(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@astrojs/starlight': 0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + + '@astrojs/starlight@0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3))': + dependencies: + '@astrojs/markdown-remark': 6.3.10 + '@astrojs/mdx': 4.3.13(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@astrojs/sitemap': 3.7.0 + '@pagefind/default-ui': 1.4.0 + '@types/hast': 3.0.4 + '@types/js-yaml': 4.0.9 + '@types/mdast': 4.0.4 + astro: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + astro-expressive-code: 0.41.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + bcp-47: 2.1.0 + hast-util-from-html: 2.0.3 + hast-util-select: 6.0.4 + hast-util-to-string: 3.0.1 + hastscript: 9.0.1 + i18next: 23.16.8 + js-yaml: 4.1.1 + klona: 2.0.6 + magic-string: 0.30.21 + mdast-util-directive: 3.1.0 + mdast-util-to-markdown: 2.1.2 + mdast-util-to-string: 4.0.0 + pagefind: 1.4.0 + rehype: 13.0.2 + rehype-format: 5.0.1 + remark-directive: 3.0.1 + ultrahtml: 1.6.0 + unified: 11.0.5 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@astrojs/telemetry@3.3.0': + dependencies: + ci-info: 4.4.0 + debug: 4.4.3 + dlv: 1.1.3 + dset: 3.1.4 + is-docker: 3.0.0 + is-wsl: 3.1.0 + which-pm-runs: 1.1.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.0': + dependencies: + '@babel/types': 7.29.0 + + '@babel/runtime@7.28.6': {} + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@braintree/sanitize-url@7.1.2': {} + + '@capsizecss/unpack@4.0.0': + dependencies: + fontkitten: 1.0.2 + + '@chevrotain/cst-dts-gen@11.1.1': + dependencies: + '@chevrotain/gast': 11.1.1 + '@chevrotain/types': 11.1.1 + lodash-es: 4.17.23 + + 
'@chevrotain/gast@11.1.1': + dependencies: + '@chevrotain/types': 11.1.1 + lodash-es: 4.17.23 + + '@chevrotain/regexp-to-ast@11.1.1': {} + + '@chevrotain/types@11.1.1': {} + + '@chevrotain/utils@11.1.1': {} + + '@ctrl/tinycolor@4.2.0': {} + + '@emnapi/runtime@1.8.1': + dependencies: + tslib: 2.8.1 + optional: true + + '@esbuild/aix-ppc64@0.25.12': + optional: true + + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.25.12': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.25.12': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.25.12': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.25.12': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.25.12': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.25.12': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.25.12': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.25.12': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.25.12': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.25.12': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.25.12': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.25.12': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.25.12': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.25.12': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.25.12': + optional: true + + '@esbuild/linux-s390x@0.27.3': + 
optional: true + + '@esbuild/linux-x64@0.25.12': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.25.12': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.25.12': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.25.12': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.25.12': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.25.12': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.25.12': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.25.12': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + '@esbuild/win32-ia32@0.25.12': + optional: true + + '@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.25.12': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@expressive-code/core@0.41.6': + dependencies: + '@ctrl/tinycolor': 4.2.0 + hast-util-select: 6.0.4 + hast-util-to-html: 9.0.5 + hast-util-to-text: 4.0.2 + hastscript: 9.0.1 + postcss: 8.5.6 + postcss-nested: 6.2.0(postcss@8.5.6) + unist-util-visit: 5.1.0 + unist-util-visit-parents: 6.0.2 + + '@expressive-code/plugin-frames@0.41.6': + dependencies: + '@expressive-code/core': 0.41.6 + + '@expressive-code/plugin-shiki@0.41.6': + dependencies: + '@expressive-code/core': 0.41.6 + shiki: 3.22.0 + + '@expressive-code/plugin-text-markers@0.41.6': + dependencies: + '@expressive-code/core': 0.41.6 + + '@fontsource-variable/inter@5.2.8': {} + + '@fontsource-variable/roboto-mono@5.2.8': {} + + '@iconify/types@2.0.0': {} + + '@iconify/utils@3.1.0': + dependencies: + '@antfu/install-pkg': 1.1.0 + '@iconify/types': 2.0.0 + mlly: 1.8.0 + + '@img/colour@1.0.0': {} + + '@img/sharp-darwin-arm64@0.34.5': + optionalDependencies: + 
'@img/sharp-libvips-darwin-arm64': 1.2.4 + optional: true + + '@img/sharp-darwin-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.2.4 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-darwin-x64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-arm@1.2.4': + optional: true + + '@img/sharp-libvips-linux-ppc64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-riscv64@1.2.4': + optional: true + + '@img/sharp-libvips-linux-s390x@1.2.4': + optional: true + + '@img/sharp-libvips-linux-x64@1.2.4': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.2.4': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.2.4': + optional: true + + '@img/sharp-linux-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.2.4 + optional: true + + '@img/sharp-linux-arm@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.2.4 + optional: true + + '@img/sharp-linux-ppc64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-ppc64': 1.2.4 + optional: true + + '@img/sharp-linux-riscv64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-riscv64': 1.2.4 + optional: true + + '@img/sharp-linux-s390x@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.2.4 + optional: true + + '@img/sharp-linux-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.2.4 + optional: true + + '@img/sharp-linuxmusl-arm64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.2.4 + optional: true + + '@img/sharp-linuxmusl-x64@0.34.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.2.4 + optional: true + + '@img/sharp-wasm32@0.34.5': + dependencies: + '@emnapi/runtime': 1.8.1 + optional: true + + '@img/sharp-win32-arm64@0.34.5': + optional: true + + '@img/sharp-win32-ia32@0.34.5': + optional: true + + '@img/sharp-win32-x64@0.34.5': + 
optional: true + + '@isaacs/cliui@9.0.0': {} + + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@markdoc/markdoc@0.5.4': + optionalDependencies: + '@types/linkify-it': 3.0.5 + '@types/markdown-it': 12.2.3 + + '@mdx-js/mdx@3.1.1': + dependencies: + '@types/estree': 1.0.8 + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdx': 2.0.13 + acorn: 8.15.0 + collapse-white-space: 2.1.0 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + estree-util-scope: 1.0.0 + estree-walker: 3.0.3 + hast-util-to-jsx-runtime: 2.3.6 + markdown-extensions: 2.0.0 + recma-build-jsx: 1.0.0 + recma-jsx: 1.0.1(acorn@8.15.0) + recma-stringify: 1.0.0 + rehype-recma: 1.0.0 + remark-mdx: 3.1.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + source-map: 0.7.6 + unified: 11.0.5 + unist-util-position-from-estree: 2.0.0 + unist-util-stringify-position: 4.0.0 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@mermaid-js/parser@1.0.0': + dependencies: + langium: 4.2.1 + + '@npmcli/agent@4.0.0': + dependencies: + agent-base: 7.1.4 + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + lru-cache: 11.2.6 + socks-proxy-agent: 8.0.5 + transitivePeerDependencies: + - supports-color + + '@npmcli/fs@5.0.0': + dependencies: + semver: 7.7.4 + + '@oslojs/encoding@1.1.0': {} + + '@pagefind/darwin-arm64@1.4.0': + optional: true + + '@pagefind/darwin-x64@1.4.0': + optional: true + + '@pagefind/default-ui@1.4.0': {} + + '@pagefind/freebsd-x64@1.4.0': + optional: true + + '@pagefind/linux-arm64@1.4.0': + optional: true + + '@pagefind/linux-x64@1.4.0': + optional: true + + '@pagefind/windows-x64@1.4.0': + optional: true + + '@rollup/pluginutils@5.3.0(rollup@4.57.1)': + dependencies: + '@types/estree': 1.0.8 + estree-walker: 2.0.2 + picomatch: 4.0.3 + optionalDependencies: + rollup: 4.57.1 + + '@rollup/rollup-android-arm-eabi@4.57.1': + optional: true + + '@rollup/rollup-android-arm64@4.57.1': + 
optional: true + + '@rollup/rollup-darwin-arm64@4.57.1': + optional: true + + '@rollup/rollup-darwin-x64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-arm64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-x64@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-musl@4.57.1': + optional: true + + '@rollup/rollup-openbsd-x64@4.57.1': + optional: true + + '@rollup/rollup-openharmony-arm64@4.57.1': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.57.1': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.57.1': + optional: true + + '@shikijs/core@3.22.0': + dependencies: + '@shikijs/types': 3.22.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 + + '@shikijs/engine-javascript@3.22.0': + dependencies: + '@shikijs/types': 3.22.0 + '@shikijs/vscode-textmate': 10.0.2 + oniguruma-to-es: 4.3.4 + + '@shikijs/engine-oniguruma@3.22.0': + dependencies: + '@shikijs/types': 3.22.0 + '@shikijs/vscode-textmate': 10.0.2 + + '@shikijs/langs@3.22.0': + dependencies: + '@shikijs/types': 3.22.0 + + '@shikijs/themes@3.22.0': + dependencies: + 
'@shikijs/types': 3.22.0 + + '@shikijs/types@3.22.0': + dependencies: + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + + '@shikijs/vscode-textmate@10.0.2': {} + + '@types/braces@3.0.5': {} + + '@types/d3-array@3.2.2': {} + + '@types/d3-axis@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-brush@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-chord@3.0.6': {} + + '@types/d3-color@3.1.3': {} + + '@types/d3-contour@3.0.6': + dependencies: + '@types/d3-array': 3.2.2 + '@types/geojson': 7946.0.16 + + '@types/d3-delaunay@6.0.4': {} + + '@types/d3-dispatch@3.0.7': {} + + '@types/d3-drag@3.0.7': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-dsv@3.0.7': {} + + '@types/d3-ease@3.0.2': {} + + '@types/d3-fetch@3.0.7': + dependencies: + '@types/d3-dsv': 3.0.7 + + '@types/d3-force@3.0.10': {} + + '@types/d3-format@3.0.4': {} + + '@types/d3-geo@3.1.0': + dependencies: + '@types/geojson': 7946.0.16 + + '@types/d3-hierarchy@3.1.7': {} + + '@types/d3-interpolate@3.0.4': + dependencies: + '@types/d3-color': 3.1.3 + + '@types/d3-path@3.1.1': {} + + '@types/d3-polygon@3.0.2': {} + + '@types/d3-quadtree@3.0.6': {} + + '@types/d3-random@3.0.3': {} + + '@types/d3-scale-chromatic@3.1.0': {} + + '@types/d3-scale@4.0.9': + dependencies: + '@types/d3-time': 3.0.4 + + '@types/d3-selection@3.0.11': {} + + '@types/d3-shape@3.1.8': + dependencies: + '@types/d3-path': 3.1.1 + + '@types/d3-time-format@4.0.3': {} + + '@types/d3-time@3.0.4': {} + + '@types/d3-timer@3.0.2': {} + + '@types/d3-transition@3.0.9': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-zoom@3.0.8': + dependencies: + '@types/d3-interpolate': 3.0.4 + '@types/d3-selection': 3.0.11 + + '@types/d3@7.4.3': + dependencies: + '@types/d3-array': 3.2.2 + '@types/d3-axis': 3.0.6 + '@types/d3-brush': 3.0.6 + '@types/d3-chord': 3.0.6 + '@types/d3-color': 3.1.3 + '@types/d3-contour': 3.0.6 + '@types/d3-delaunay': 6.0.4 + '@types/d3-dispatch': 3.0.7 + 
'@types/d3-drag': 3.0.7 + '@types/d3-dsv': 3.0.7 + '@types/d3-ease': 3.0.2 + '@types/d3-fetch': 3.0.7 + '@types/d3-force': 3.0.10 + '@types/d3-format': 3.0.4 + '@types/d3-geo': 3.1.0 + '@types/d3-hierarchy': 3.1.7 + '@types/d3-interpolate': 3.0.4 + '@types/d3-path': 3.1.1 + '@types/d3-polygon': 3.0.2 + '@types/d3-quadtree': 3.0.6 + '@types/d3-random': 3.0.3 + '@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + '@types/d3-selection': 3.0.11 + '@types/d3-shape': 3.1.8 + '@types/d3-time': 3.0.4 + '@types/d3-time-format': 4.0.3 + '@types/d3-timer': 3.0.2 + '@types/d3-transition': 3.0.9 + '@types/d3-zoom': 3.0.8 + + '@types/debug@4.1.12': + dependencies: + '@types/ms': 2.1.0 + + '@types/estree-jsx@1.0.5': + dependencies: + '@types/estree': 1.0.8 + + '@types/estree@1.0.8': {} + + '@types/geojson@7946.0.16': {} + + '@types/hast@3.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/js-yaml@4.0.9': {} + + '@types/linkify-it@3.0.5': + optional: true + + '@types/markdown-it@12.2.3': + dependencies: + '@types/linkify-it': 3.0.5 + '@types/mdurl': 2.0.0 + optional: true + + '@types/mdast@4.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/mdurl@2.0.0': + optional: true + + '@types/mdx@2.0.13': {} + + '@types/micromatch@4.0.10': + dependencies: + '@types/braces': 3.0.5 + + '@types/ms@2.1.0': {} + + '@types/nlcst@2.0.3': + dependencies: + '@types/unist': 3.0.3 + + '@types/node@17.0.45': {} + + '@types/sax@1.2.7': + dependencies: + '@types/node': 17.0.45 + + '@types/trusted-types@2.0.7': + optional: true + + '@types/unist@2.0.11': {} + + '@types/unist@3.0.3': {} + + '@ungap/structured-clone@1.3.0': {} + + abbrev@4.0.0: {} + + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + agent-base@7.1.4: {} + + ansi-align@3.0.1: + dependencies: + string-width: 4.2.3 + + ansi-regex@5.0.1: {} + + ansi-regex@6.2.2: {} + + ansi-styles@6.2.3: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + 
arg@5.0.2: {} + + argparse@2.0.1: {} + + aria-query@5.3.2: {} + + array-iterate@2.0.1: {} + + astring@1.9.0: {} + + astro-expressive-code@0.41.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)): + dependencies: + astro: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + rehype-expressive-code: 0.41.6 + + astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3): + dependencies: + '@astrojs/compiler': 2.13.1 + '@astrojs/internal-helpers': 0.7.5 + '@astrojs/markdown-remark': 6.3.10 + '@astrojs/telemetry': 3.3.0 + '@capsizecss/unpack': 4.0.0 + '@oslojs/encoding': 1.1.0 + '@rollup/pluginutils': 5.3.0(rollup@4.57.1) + acorn: 8.15.0 + aria-query: 5.3.2 + axobject-query: 4.1.0 + boxen: 8.0.1 + ci-info: 4.4.0 + clsx: 2.1.1 + common-ancestor-path: 1.0.1 + cookie: 1.1.1 + cssesc: 3.0.0 + debug: 4.4.3 + deterministic-object-hash: 2.0.2 + devalue: 5.6.2 + diff: 8.0.3 + dlv: 1.1.3 + dset: 3.1.4 + es-module-lexer: 1.7.0 + esbuild: 0.27.3 + estree-walker: 3.0.3 + flattie: 1.1.1 + fontace: 0.4.1 + github-slugger: 2.0.0 + html-escaper: 3.0.3 + http-cache-semantics: 4.2.0 + import-meta-resolve: 4.2.0 + js-yaml: 4.1.1 + magic-string: 0.30.21 + magicast: 0.5.2 + mrmime: 2.0.1 + neotraverse: 0.6.18 + p-limit: 6.2.0 + p-queue: 8.1.1 + package-manager-detector: 1.6.0 + piccolore: 0.1.3 + picomatch: 4.0.3 + prompts: 2.4.2 + rehype: 13.0.2 + semver: 7.7.4 + shiki: 3.22.0 + smol-toml: 1.6.0 + svgo: 4.0.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tsconfck: 3.1.6(typescript@5.9.3) + ultrahtml: 1.6.0 + unifont: 0.7.3 + unist-util-visit: 5.1.0 + unstorage: 1.17.4 + vfile: 6.0.3 + vite: 6.4.1(jiti@2.6.1) + vitefu: 1.1.1(vite@6.4.1(jiti@2.6.1)) + xxhash-wasm: 1.1.0 + yargs-parser: 21.1.1 + yocto-spinner: 0.2.3 + zod: 3.25.76 + zod-to-json-schema: 3.25.1(zod@3.25.76) + zod-to-ts: 1.2.0(typescript@5.9.3)(zod@3.25.76) + optionalDependencies: + sharp: 0.34.5 + transitivePeerDependencies: + - '@azure/app-configuration' + - '@azure/cosmos' + - '@azure/data-tables' + - '@azure/identity' + - 
'@azure/keyvault-secrets' + - '@azure/storage-blob' + - '@capacitor/preferences' + - '@deno/kv' + - '@netlify/blobs' + - '@planetscale/database' + - '@types/node' + - '@upstash/redis' + - '@vercel/blob' + - '@vercel/functions' + - '@vercel/kv' + - aws4fetch + - db0 + - idb-keyval + - ioredis + - jiti + - less + - lightningcss + - rollup + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + - tsx + - typescript + - uploadthing + - yaml + + axobject-query@4.1.0: {} + + bail@2.0.2: {} + + balanced-match@4.0.2: + dependencies: + jackspeak: 4.2.3 + + base-64@1.0.0: {} + + bcp-47-match@2.0.3: {} + + bcp-47@2.1.0: + dependencies: + is-alphabetical: 2.0.1 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + + boolbase@1.0.0: {} + + boxen@8.0.1: + dependencies: + ansi-align: 3.0.1 + camelcase: 8.0.0 + chalk: 5.6.2 + cli-boxes: 3.0.0 + string-width: 7.2.0 + type-fest: 4.41.0 + widest-line: 5.0.0 + wrap-ansi: 9.0.2 + + brace-expansion@5.0.2: + dependencies: + balanced-match: 4.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + cacache@20.0.3: + dependencies: + '@npmcli/fs': 5.0.0 + fs-minipass: 3.0.3 + glob: 13.0.3 + lru-cache: 11.2.6 + minipass: 7.1.2 + minipass-collect: 2.0.1 + minipass-flush: 1.0.5 + minipass-pipeline: 1.2.4 + p-map: 7.0.4 + ssri: 13.0.1 + unique-filename: 5.0.0 + + camelcase@8.0.0: {} + + ccount@2.0.1: {} + + chalk@5.6.2: {} + + character-entities-html4@2.1.0: {} + + character-entities-legacy@3.0.0: {} + + character-entities@2.0.2: {} + + character-reference-invalid@2.0.1: {} + + chevrotain-allstar@0.3.1(chevrotain@11.1.1): + dependencies: + chevrotain: 11.1.1 + lodash-es: 4.17.23 + + chevrotain@11.1.1: + dependencies: + '@chevrotain/cst-dts-gen': 11.1.1 + '@chevrotain/gast': 11.1.1 + '@chevrotain/regexp-to-ast': 11.1.1 + '@chevrotain/types': 11.1.1 + '@chevrotain/utils': 11.1.1 + lodash-es: 4.17.23 + + chokidar@5.0.0: + dependencies: + readdirp: 5.0.0 + + chownr@3.0.0: {} + + ci-info@4.4.0: {} + + cli-boxes@3.0.0: {} + 
+ clsx@2.1.1: {} + + collapse-white-space@2.1.0: {} + + comma-separated-tokens@2.0.3: {} + + commander@11.1.0: {} + + commander@7.2.0: {} + + commander@8.3.0: {} + + common-ancestor-path@1.0.1: {} + + confbox@0.1.8: {} + + cookie-es@1.2.2: {} + + cookie@1.1.1: {} + + cose-base@1.0.3: + dependencies: + layout-base: 1.0.2 + + cose-base@2.2.0: + dependencies: + layout-base: 2.0.1 + + crossws@0.3.5: + dependencies: + uncrypto: 0.1.3 + + css-select@5.2.2: + dependencies: + boolbase: 1.0.0 + css-what: 6.2.2 + domhandler: 5.0.3 + domutils: 3.2.2 + nth-check: 2.1.1 + + css-selector-parser@3.3.0: {} + + css-tree@2.2.1: + dependencies: + mdn-data: 2.0.28 + source-map-js: 1.2.1 + + css-tree@3.1.0: + dependencies: + mdn-data: 2.12.2 + source-map-js: 1.2.1 + + css-what@6.2.2: {} + + cssesc@3.0.0: {} + + csso@5.0.5: + dependencies: + css-tree: 2.2.1 + + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + + cytoscape-fcose@2.2.0(cytoscape@3.33.1): + dependencies: + cose-base: 2.2.0 + cytoscape: 3.33.1 + + cytoscape@3.33.1: {} + + d3-array@2.12.1: + dependencies: + internmap: 1.0.1 + + d3-array@3.2.4: + dependencies: + internmap: 2.0.3 + + d3-axis@3.0.0: {} + + d3-brush@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3-chord@3.0.1: + dependencies: + d3-path: 3.1.0 + + d3-color@3.1.0: {} + + d3-contour@4.0.2: + dependencies: + d3-array: 3.2.4 + + d3-delaunay@6.0.4: + dependencies: + delaunator: 5.0.1 + + d3-dispatch@3.0.1: {} + + d3-drag@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-selection: 3.0.0 + + d3-dsv@3.0.1: + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + + d3-ease@3.0.1: {} + + d3-fetch@3.0.1: + dependencies: + d3-dsv: 3.0.1 + + d3-force@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + + d3-format@3.1.2: {} + + d3-geo@3.1.1: + dependencies: + d3-array: 
3.2.4 + + d3-hierarchy@3.1.2: {} + + d3-interpolate@3.0.1: + dependencies: + d3-color: 3.1.0 + + d3-path@1.0.9: {} + + d3-path@3.1.0: {} + + d3-polygon@3.0.1: {} + + d3-quadtree@3.0.1: {} + + d3-random@3.0.1: {} + + d3-sankey@0.12.3: + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + + d3-scale-chromatic@3.1.0: + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + + d3-scale@4.0.2: + dependencies: + d3-array: 3.2.4 + d3-format: 3.1.2 + d3-interpolate: 3.0.1 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + + d3-selection@3.0.0: {} + + d3-shape@1.3.7: + dependencies: + d3-path: 1.0.9 + + d3-shape@3.2.0: + dependencies: + d3-path: 3.1.0 + + d3-time-format@4.1.0: + dependencies: + d3-time: 3.1.0 + + d3-time@3.1.0: + dependencies: + d3-array: 3.2.4 + + d3-timer@3.0.1: {} + + d3-transition@3.0.1(d3-selection@3.0.0): + dependencies: + d3-color: 3.1.0 + d3-dispatch: 3.0.1 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-timer: 3.0.1 + + d3-zoom@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3@7.9.0: + dependencies: + d3-array: 3.2.4 + d3-axis: 3.0.0 + d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.2 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + + dagre-d3-es@7.0.13: + dependencies: + d3: 7.9.0 + lodash-es: 4.17.23 + + dayjs@1.11.19: {} + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decode-named-character-reference@1.3.0: + dependencies: + character-entities: 2.0.2 + + defu@6.1.4: 
{} + + delaunator@5.0.1: + dependencies: + robust-predicates: 3.0.2 + + dequal@2.0.3: {} + + destr@2.0.5: {} + + detect-libc@2.1.2: {} + + deterministic-object-hash@2.0.2: + dependencies: + base-64: 1.0.0 + + devalue@5.6.2: {} + + devlop@1.1.0: + dependencies: + dequal: 2.0.3 + + diff@8.0.3: {} + + direction@2.0.1: {} + + dlv@1.1.3: {} + + dom-serializer@2.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + entities: 4.5.0 + + domelementtype@2.3.0: {} + + domhandler@5.0.3: + dependencies: + domelementtype: 2.3.0 + + dompurify@3.3.1: + optionalDependencies: + '@types/trusted-types': 2.0.7 + + domutils@3.2.2: + dependencies: + dom-serializer: 2.0.0 + domelementtype: 2.3.0 + domhandler: 5.0.3 + + dset@3.1.4: {} + + emoji-regex@10.6.0: {} + + emoji-regex@8.0.0: {} + + encoding@0.1.13: + dependencies: + iconv-lite: 0.6.3 + optional: true + + entities@4.5.0: {} + + entities@6.0.1: {} + + entities@7.0.1: {} + + env-paths@2.2.1: {} + + err-code@2.0.3: {} + + es-module-lexer@1.7.0: {} + + esast-util-from-estree@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + devlop: 1.1.0 + estree-util-visit: 2.0.0 + unist-util-position-from-estree: 2.0.0 + + esast-util-from-js@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + acorn: 8.15.0 + esast-util-from-estree: 2.0.0 + vfile-message: 4.0.3 + + esbuild@0.25.12: + optionalDependencies: + '@esbuild/aix-ppc64': 0.25.12 + '@esbuild/android-arm': 0.25.12 + '@esbuild/android-arm64': 0.25.12 + '@esbuild/android-x64': 0.25.12 + '@esbuild/darwin-arm64': 0.25.12 + '@esbuild/darwin-x64': 0.25.12 + '@esbuild/freebsd-arm64': 0.25.12 + '@esbuild/freebsd-x64': 0.25.12 + '@esbuild/linux-arm': 0.25.12 + '@esbuild/linux-arm64': 0.25.12 + '@esbuild/linux-ia32': 0.25.12 + '@esbuild/linux-loong64': 0.25.12 + '@esbuild/linux-mips64el': 0.25.12 + '@esbuild/linux-ppc64': 0.25.12 + '@esbuild/linux-riscv64': 0.25.12 + '@esbuild/linux-s390x': 0.25.12 + '@esbuild/linux-x64': 0.25.12 + '@esbuild/netbsd-arm64': 0.25.12 + 
'@esbuild/netbsd-x64': 0.25.12 + '@esbuild/openbsd-arm64': 0.25.12 + '@esbuild/openbsd-x64': 0.25.12 + '@esbuild/openharmony-arm64': 0.25.12 + '@esbuild/sunos-x64': 0.25.12 + '@esbuild/win32-arm64': 0.25.12 + '@esbuild/win32-ia32': 0.25.12 + '@esbuild/win32-x64': 0.25.12 + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + '@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escape-string-regexp@5.0.0: {} + + estree-util-attach-comments@3.0.0: + dependencies: + '@types/estree': 1.0.8 + + estree-util-build-jsx@3.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + estree-walker: 3.0.3 + + estree-util-is-identifier-name@3.0.0: {} + + estree-util-scope@1.0.0: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + + estree-util-to-js@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + astring: 1.9.0 + source-map: 0.7.6 + + estree-util-visit@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/unist': 3.0.3 + + estree-walker@2.0.2: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + eventemitter3@5.0.4: {} + + exponential-backoff@3.1.3: {} + + expressive-code@0.41.6: + dependencies: + 
'@expressive-code/core': 0.41.6 + '@expressive-code/plugin-frames': 0.41.6 + '@expressive-code/plugin-shiki': 0.41.6 + '@expressive-code/plugin-text-markers': 0.41.6 + + extend@3.0.2: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + flattie@1.1.1: {} + + fontace@0.4.1: + dependencies: + fontkitten: 1.0.2 + + fontkitten@1.0.2: + dependencies: + tiny-inflate: 1.0.3 + + fs-minipass@3.0.3: + dependencies: + minipass: 7.1.2 + + fsevents@2.3.3: + optional: true + + get-east-asian-width@1.4.0: {} + + github-slugger@2.0.0: {} + + glob@13.0.3: + dependencies: + minimatch: 10.2.0 + minipass: 7.1.2 + path-scurry: 2.0.1 + + graceful-fs@4.2.11: {} + + h3@1.15.5: + dependencies: + cookie-es: 1.2.2 + crossws: 0.3.5 + defu: 6.1.4 + destr: 2.0.5 + iron-webcrypto: 1.2.1 + node-mock-http: 1.0.4 + radix3: 1.1.2 + ufo: 1.6.3 + uncrypto: 0.1.3 + + hachure-fill@0.5.2: {} + + hast-util-embedded@3.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-is-element: 3.0.0 + + hast-util-format@1.1.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-embedded: 3.0.0 + hast-util-minify-whitespace: 1.0.1 + hast-util-phrasing: 3.0.1 + hast-util-whitespace: 3.0.0 + html-whitespace-sensitive-tag-names: 3.0.1 + unist-util-visit-parents: 6.0.2 + + hast-util-from-html@2.0.3: + dependencies: + '@types/hast': 3.0.4 + devlop: 1.1.0 + hast-util-from-parse5: 8.0.3 + parse5: 7.3.0 + vfile: 6.0.3 + vfile-message: 4.0.3 + + hast-util-from-parse5@8.0.3: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + devlop: 1.1.0 + hastscript: 9.0.1 + property-information: 7.1.0 + vfile: 6.0.3 + vfile-location: 5.0.3 + web-namespaces: 2.0.1 + + hast-util-has-property@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + hast-util-is-body-ok-link@3.0.1: + dependencies: + '@types/hast': 3.0.4 + + hast-util-is-element@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + hast-util-minify-whitespace@1.0.1: + dependencies: + 
'@types/hast': 3.0.4 + hast-util-embedded: 3.0.0 + hast-util-is-element: 3.0.0 + hast-util-whitespace: 3.0.0 + unist-util-is: 6.0.1 + + hast-util-parse-selector@4.0.0: + dependencies: + '@types/hast': 3.0.4 + + hast-util-phrasing@3.0.1: + dependencies: + '@types/hast': 3.0.4 + hast-util-embedded: 3.0.0 + hast-util-has-property: 3.0.0 + hast-util-is-body-ok-link: 3.0.1 + hast-util-is-element: 3.0.0 + + hast-util-raw@9.1.0: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + '@ungap/structured-clone': 1.3.0 + hast-util-from-parse5: 8.0.3 + hast-util-to-parse5: 8.0.1 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + parse5: 7.3.0 + unist-util-position: 5.0.0 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-select@6.0.4: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + bcp-47-match: 2.0.3 + comma-separated-tokens: 2.0.3 + css-selector-parser: 3.3.0 + devlop: 1.1.0 + direction: 2.0.1 + hast-util-has-property: 3.0.0 + hast-util-to-string: 3.0.1 + hast-util-whitespace: 3.0.0 + nth-check: 2.1.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + unist-util-visit: 5.1.0 + zwitch: 2.0.4 + + hast-util-to-estree@3.1.3: + dependencies: + '@types/estree': 1.0.8 + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + estree-util-attach-comments: 3.0.0 + estree-util-is-identifier-name: 3.0.0 + hast-util-whitespace: 3.0.0 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + style-to-js: 1.1.21 + unist-util-position: 5.0.0 + zwitch: 2.0.4 + transitivePeerDependencies: + - supports-color + + hast-util-to-html@9.0.5: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + property-information: 7.1.0 + 
space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + + hast-util-to-jsx-runtime@2.3.6: + dependencies: + '@types/estree': 1.0.8 + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + hast-util-whitespace: 3.0.0 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + style-to-js: 1.1.21 + unist-util-position: 5.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + hast-util-to-mdast@10.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@ungap/structured-clone': 1.3.0 + hast-util-phrasing: 3.0.1 + hast-util-to-html: 9.0.5 + hast-util-to-text: 4.0.2 + hast-util-whitespace: 3.0.0 + mdast-util-phrasing: 4.1.0 + mdast-util-to-hast: 13.2.1 + mdast-util-to-string: 4.0.0 + rehype-minify-whitespace: 6.0.2 + trim-trailing-lines: 2.1.0 + unist-util-position: 5.0.0 + unist-util-visit: 5.1.0 + + hast-util-to-parse5@8.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-to-string@3.0.1: + dependencies: + '@types/hast': 3.0.4 + + hast-util-to-text@4.0.2: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + hast-util-is-element: 3.0.0 + unist-util-find-after: 5.0.0 + + hast-util-whitespace@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + hastscript@9.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + hast-util-parse-selector: 4.0.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + + html-escaper@3.0.3: {} + + html-void-elements@3.0.0: {} + + html-whitespace-sensitive-tag-names@3.0.1: {} + + htmlparser2@10.1.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.2.2 + entities: 7.0.1 + + 
http-cache-semantics@4.2.0: {} + + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + i18next@23.16.8: + dependencies: + '@babel/runtime': 7.28.6 + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + import-meta-resolve@4.2.0: {} + + imurmurhash@0.1.4: {} + + inline-style-parser@0.2.7: {} + + internmap@1.0.1: {} + + internmap@2.0.3: {} + + ip-address@10.1.0: {} + + iron-webcrypto@1.2.1: {} + + is-absolute-url@4.0.1: {} + + is-alphabetical@2.0.1: {} + + is-alphanumerical@2.0.1: + dependencies: + is-alphabetical: 2.0.1 + is-decimal: 2.0.1 + + is-decimal@2.0.1: {} + + is-docker@3.0.0: {} + + is-fullwidth-code-point@3.0.0: {} + + is-hexadecimal@2.0.1: {} + + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + + is-number@7.0.0: {} + + is-plain-obj@4.1.0: {} + + is-wsl@3.1.0: + dependencies: + is-inside-container: 1.0.0 + + isexe@4.0.0: {} + + jackspeak@4.2.3: + dependencies: + '@isaacs/cliui': 9.0.0 + + jiti@2.6.1: + optional: true + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + katex@0.16.28: + dependencies: + commander: 8.3.0 + + khroma@2.1.0: {} + + kleur@3.0.3: {} + + klona@2.0.6: {} + + langium@4.2.1: + dependencies: + chevrotain: 11.1.1 + chevrotain-allstar: 0.3.1(chevrotain@11.1.1) + vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.12 + vscode-uri: 3.1.0 + + layout-base@1.0.2: {} + + layout-base@2.0.1: {} + + lodash-es@4.17.23: {} + + longest-streak@3.1.0: {} + + lru-cache@11.2.6: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + magicast@0.5.2: + dependencies: + '@babel/parser': 7.29.0 + '@babel/types': 7.29.0 + source-map-js: 1.2.1 + + make-fetch-happen@15.0.3: + dependencies: + '@npmcli/agent': 4.0.0 + cacache: 20.0.3 + http-cache-semantics: 4.2.0 + minipass: 7.1.2 + 
minipass-fetch: 5.0.1 + minipass-flush: 1.0.5 + minipass-pipeline: 1.2.4 + negotiator: 1.0.0 + proc-log: 6.1.0 + promise-retry: 2.0.1 + ssri: 13.0.1 + transitivePeerDependencies: + - supports-color + + markdown-extensions@2.0.0: {} + + markdown-table@3.0.4: {} + + marked@16.4.2: {} + + mdast-util-definitions@6.0.0: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + unist-util-visit: 5.1.0 + + mdast-util-directive@3.1.0: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + parse-entities: 4.0.2 + stringify-entities: 4.0.4 + unist-util-visit-parents: 6.0.2 + transitivePeerDependencies: + - supports-color + + mdast-util-find-and-replace@3.0.2: + dependencies: + '@types/mdast': 4.0.4 + escape-string-regexp: 5.0.0 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + mdast-util-from-markdown@2.0.2: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + decode-named-character-reference: 1.3.0 + devlop: 1.1.0 + mdast-util-to-string: 4.0.0 + micromark: 4.0.2 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-decode-string: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-stringify-position: 4.0.0 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-autolink-literal@2.0.1: + dependencies: + '@types/mdast': 4.0.4 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-find-and-replace: 3.0.2 + micromark-util-character: 2.1.1 + + mdast-util-gfm-footnote@2.1.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + micromark-util-normalize-identifier: 2.0.1 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-strikethrough@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + 
transitivePeerDependencies: + - supports-color + + mdast-util-gfm-table@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + markdown-table: 3.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-task-list-item@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm@3.1.0: + dependencies: + mdast-util-from-markdown: 2.0.2 + mdast-util-gfm-autolink-literal: 2.0.1 + mdast-util-gfm-footnote: 2.1.0 + mdast-util-gfm-strikethrough: 2.0.0 + mdast-util-gfm-table: 2.0.0 + mdast-util-gfm-task-list-item: 2.0.0 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-expression@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-jsx@3.2.0: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + parse-entities: 4.0.2 + stringify-entities: 4.0.4 + unist-util-stringify-position: 4.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx@3.0.0: + dependencies: + mdast-util-from-markdown: 2.0.2 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdxjs-esm@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + 
mdast-util-phrasing@4.1.0: + dependencies: + '@types/mdast': 4.0.4 + unist-util-is: 6.0.1 + + mdast-util-to-hast@13.2.1: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@ungap/structured-clone': 1.3.0 + devlop: 1.1.0 + micromark-util-sanitize-uri: 2.0.1 + trim-lines: 3.0.1 + unist-util-position: 5.0.0 + unist-util-visit: 5.1.0 + vfile: 6.0.3 + + mdast-util-to-markdown@2.1.2: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + longest-streak: 3.1.0 + mdast-util-phrasing: 4.1.0 + mdast-util-to-string: 4.0.0 + micromark-util-classify-character: 2.0.1 + micromark-util-decode-string: 2.0.1 + unist-util-visit: 5.1.0 + zwitch: 2.0.4 + + mdast-util-to-string@4.0.0: + dependencies: + '@types/mdast': 4.0.4 + + mdn-data@2.0.28: {} + + mdn-data@2.12.2: {} + + mermaid@11.12.3: + dependencies: + '@braintree/sanitize-url': 7.1.2 + '@iconify/utils': 3.1.0 + '@mermaid-js/parser': 1.0.0 + '@types/d3': 7.4.3 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.13 + dayjs: 1.11.19 + dompurify: 3.3.1 + katex: 0.16.28 + khroma: 2.1.0 + lodash-es: 4.17.23 + marked: 16.4.2 + roughjs: 4.6.6 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 11.1.0 + + micromark-core-commonmark@2.0.3: + dependencies: + decode-named-character-reference: 1.3.0 + devlop: 1.1.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-directive@3.0.2: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + 
micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + parse-entities: 4.0.2 + + micromark-extension-gfm-autolink-literal@2.1.0: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-footnote@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-strikethrough@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-table@2.1.1: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-tagfilter@2.0.0: + dependencies: + micromark-util-types: 2.0.2 + + micromark-extension-gfm-task-list-item@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm@3.0.0: + dependencies: + micromark-extension-gfm-autolink-literal: 2.1.0 + micromark-extension-gfm-footnote: 2.1.0 + micromark-extension-gfm-strikethrough: 2.1.0 + micromark-extension-gfm-table: 2.1.1 + micromark-extension-gfm-tagfilter: 2.0.0 + micromark-extension-gfm-task-list-item: 2.1.0 + micromark-util-combine-extensions: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-mdx-expression@3.0.1: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-factory-mdx-expression: 2.0.3 + micromark-factory-space: 
2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-mdx-jsx@3.0.2: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + micromark-factory-mdx-expression: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + vfile-message: 4.0.3 + + micromark-extension-mdx-md@2.0.0: + dependencies: + micromark-util-types: 2.0.2 + + micromark-extension-mdxjs-esm@3.0.0: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-position-from-estree: 2.0.0 + vfile-message: 4.0.3 + + micromark-extension-mdxjs@3.0.0: + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + micromark-extension-mdx-expression: 3.0.1 + micromark-extension-mdx-jsx: 3.0.2 + micromark-extension-mdx-md: 2.0.0 + micromark-extension-mdxjs-esm: 3.0.0 + micromark-util-combine-extensions: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-destination@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-label@2.0.1: + dependencies: + devlop: 1.1.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-mdx-expression@2.0.3: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-position-from-estree: 2.0.0 + vfile-message: 4.0.3 + + micromark-factory-space@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + 
micromark-util-types: 2.0.2 + + micromark-factory-title@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-whitespace@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-character@2.1.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-chunked@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-classify-character@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-combine-extensions@2.0.1: + dependencies: + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-decode-numeric-character-reference@2.0.2: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-decode-string@2.0.1: + dependencies: + decode-named-character-reference: 1.3.0 + micromark-util-character: 2.1.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-symbol: 2.0.1 + + micromark-util-encode@2.0.1: {} + + micromark-util-events-to-acorn@2.0.3: + dependencies: + '@types/estree': 1.0.8 + '@types/unist': 3.0.3 + devlop: 1.1.0 + estree-util-visit: 2.0.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + vfile-message: 4.0.3 + + micromark-util-html-tag-name@2.0.1: {} + + micromark-util-normalize-identifier@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-resolve-all@2.0.1: + dependencies: + micromark-util-types: 2.0.2 + + micromark-util-sanitize-uri@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 + + micromark-util-subtokenize@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 
+ + micromark-util-symbol@2.0.1: {} + + micromark-util-types@2.0.2: {} + + micromark@4.0.2: + dependencies: + '@types/debug': 4.1.12 + debug: 4.4.3 + decode-named-character-reference: 1.3.0 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + transitivePeerDependencies: + - supports-color + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + minimatch@10.2.0: + dependencies: + brace-expansion: 5.0.2 + + minipass-collect@2.0.1: + dependencies: + minipass: 7.1.2 + + minipass-fetch@5.0.1: + dependencies: + minipass: 7.1.2 + minipass-sized: 2.0.0 + minizlib: 3.1.0 + optionalDependencies: + encoding: 0.1.13 + + minipass-flush@1.0.5: + dependencies: + minipass: 3.3.6 + + minipass-pipeline@1.2.4: + dependencies: + minipass: 3.3.6 + + minipass-sized@2.0.0: + dependencies: + minipass: 7.1.2 + + minipass@3.3.6: + dependencies: + yallist: 4.0.0 + + minipass@7.1.2: {} + + minizlib@3.1.0: + dependencies: + minipass: 7.1.2 + + mlly@1.8.0: + dependencies: + acorn: 8.15.0 + pathe: 2.0.3 + pkg-types: 1.3.1 + ufo: 1.6.3 + + mrmime@2.0.1: {} + + ms@2.1.3: {} + + nanoid@3.3.11: {} + + negotiator@1.0.0: {} + + neotraverse@0.6.18: {} + + nlcst-to-string@4.0.0: + dependencies: + '@types/nlcst': 2.0.3 + + node-addon-api@8.5.0: {} + + node-fetch-native@1.6.7: {} + + node-gyp@12.2.0: + dependencies: + env-paths: 2.2.1 + exponential-backoff: 3.1.3 + graceful-fs: 4.2.11 + make-fetch-happen: 15.0.3 + nopt: 9.0.0 + proc-log: 6.1.0 + semver: 7.7.4 + tar: 7.5.7 + tinyglobby: 0.2.15 + which: 6.0.1 + transitivePeerDependencies: + - 
supports-color + + node-mock-http@1.0.4: {} + + nopt@9.0.0: + dependencies: + abbrev: 4.0.0 + + normalize-path@3.0.0: {} + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + ofetch@1.5.1: + dependencies: + destr: 2.0.5 + node-fetch-native: 1.6.7 + ufo: 1.6.3 + + ohash@2.0.11: {} + + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.4: + dependencies: + oniguruma-parser: 0.12.1 + regex: 6.1.0 + regex-recursion: 6.0.2 + + p-limit@6.2.0: + dependencies: + yocto-queue: 1.2.2 + + p-map@7.0.4: {} + + p-queue@8.1.1: + dependencies: + eventemitter3: 5.0.4 + p-timeout: 6.1.4 + + p-timeout@6.1.4: {} + + package-manager-detector@1.6.0: {} + + pagefind@1.4.0: + optionalDependencies: + '@pagefind/darwin-arm64': 1.4.0 + '@pagefind/darwin-x64': 1.4.0 + '@pagefind/freebsd-x64': 1.4.0 + '@pagefind/linux-arm64': 1.4.0 + '@pagefind/linux-x64': 1.4.0 + '@pagefind/windows-x64': 1.4.0 + + parse-entities@4.0.2: + dependencies: + '@types/unist': 2.0.11 + character-entities-legacy: 3.0.0 + character-reference-invalid: 2.0.1 + decode-named-character-reference: 1.3.0 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + is-hexadecimal: 2.0.1 + + parse-latin@7.0.0: + dependencies: + '@types/nlcst': 2.0.3 + '@types/unist': 3.0.3 + nlcst-to-string: 4.0.0 + unist-util-modify-children: 4.0.0 + unist-util-visit-children: 3.0.0 + vfile: 6.0.3 + + parse5@7.3.0: + dependencies: + entities: 6.0.1 + + path-data-parser@0.1.0: {} + + path-scurry@2.0.1: + dependencies: + lru-cache: 11.2.6 + minipass: 7.1.2 + + pathe@2.0.3: {} + + piccolore@0.1.3: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + picomatch@4.0.3: {} + + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + + points-on-curve@0.2.0: {} + + points-on-path@0.2.1: + dependencies: + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + + postcss-nested@6.2.0(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + postcss-selector-parser: 6.1.2 + + postcss-selector-parser@6.1.2: + dependencies: + cssesc: 3.0.0 + 
util-deprecate: 1.0.2 + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prismjs@1.30.0: {} + + proc-log@6.1.0: {} + + promise-retry@2.0.1: + dependencies: + err-code: 2.0.3 + retry: 0.12.0 + + prompts@2.4.2: + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + + property-information@7.1.0: {} + + radix3@1.1.2: {} + + readdirp@5.0.0: {} + + recma-build-jsx@1.0.0: + dependencies: + '@types/estree': 1.0.8 + estree-util-build-jsx: 3.0.1 + vfile: 6.0.3 + + recma-jsx@1.0.1(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + estree-util-to-js: 2.0.0 + recma-parse: 1.0.0 + recma-stringify: 1.0.0 + unified: 11.0.5 + + recma-parse@1.0.0: + dependencies: + '@types/estree': 1.0.8 + esast-util-from-js: 2.0.1 + unified: 11.0.5 + vfile: 6.0.3 + + recma-stringify@1.0.0: + dependencies: + '@types/estree': 1.0.8 + estree-util-to-js: 2.0.0 + unified: 11.0.5 + vfile: 6.0.3 + + regex-recursion@6.0.2: + dependencies: + regex-utilities: 2.3.0 + + regex-utilities@2.3.0: {} + + regex@6.1.0: + dependencies: + regex-utilities: 2.3.0 + + rehype-expressive-code@0.41.6: + dependencies: + expressive-code: 0.41.6 + + rehype-external-links@3.0.0: + dependencies: + '@types/hast': 3.0.4 + '@ungap/structured-clone': 1.3.0 + hast-util-is-element: 3.0.0 + is-absolute-url: 4.0.1 + space-separated-tokens: 2.0.2 + unist-util-visit: 5.1.0 + + rehype-format@5.0.1: + dependencies: + '@types/hast': 3.0.4 + hast-util-format: 1.1.0 + + rehype-minify-whitespace@6.0.2: + dependencies: + '@types/hast': 3.0.4 + hast-util-minify-whitespace: 1.0.1 + + rehype-parse@9.0.1: + dependencies: + '@types/hast': 3.0.4 + hast-util-from-html: 2.0.3 + unified: 11.0.5 + + rehype-raw@7.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-raw: 9.1.0 + vfile: 6.0.3 + + rehype-recma@1.0.0: + dependencies: + '@types/estree': 1.0.8 + '@types/hast': 3.0.4 + hast-util-to-estree: 3.1.3 + transitivePeerDependencies: + - supports-color + + 
rehype-remark@10.0.1: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + hast-util-to-mdast: 10.1.2 + unified: 11.0.5 + vfile: 6.0.3 + + rehype-stringify@10.0.1: + dependencies: + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 + unified: 11.0.5 + + rehype@13.0.2: + dependencies: + '@types/hast': 3.0.4 + rehype-parse: 9.0.1 + rehype-stringify: 10.0.1 + unified: 11.0.5 + + remark-directive@3.0.1: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-directive: 3.1.0 + micromark-extension-directive: 3.0.2 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-gfm@4.0.1: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-gfm: 3.1.0 + micromark-extension-gfm: 3.0.0 + remark-parse: 11.0.0 + remark-stringify: 11.0.0 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-mdx@3.1.1: + dependencies: + mdast-util-mdx: 3.0.0 + micromark-extension-mdxjs: 3.0.0 + transitivePeerDependencies: + - supports-color + + remark-parse@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + micromark-util-types: 2.0.2 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-rehype@11.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + mdast-util-to-hast: 13.2.1 + unified: 11.0.5 + vfile: 6.0.3 + + remark-smartypants@3.0.2: + dependencies: + retext: 9.0.0 + retext-smartypants: 6.2.0 + unified: 11.0.5 + unist-util-visit: 5.1.0 + + remark-stringify@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-to-markdown: 2.1.2 + unified: 11.0.5 + + retext-latin@4.0.0: + dependencies: + '@types/nlcst': 2.0.3 + parse-latin: 7.0.0 + unified: 11.0.5 + + retext-smartypants@6.2.0: + dependencies: + '@types/nlcst': 2.0.3 + nlcst-to-string: 4.0.0 + unist-util-visit: 5.1.0 + + retext-stringify@4.0.0: + dependencies: + '@types/nlcst': 2.0.3 + nlcst-to-string: 4.0.0 + unified: 11.0.5 + + retext@9.0.0: + dependencies: + '@types/nlcst': 2.0.3 + retext-latin: 4.0.0 + 
retext-stringify: 4.0.0 + unified: 11.0.5 + + retry@0.12.0: {} + + robust-predicates@3.0.2: {} + + rollup@4.57.1: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.57.1 + '@rollup/rollup-android-arm64': 4.57.1 + '@rollup/rollup-darwin-arm64': 4.57.1 + '@rollup/rollup-darwin-x64': 4.57.1 + '@rollup/rollup-freebsd-arm64': 4.57.1 + '@rollup/rollup-freebsd-x64': 4.57.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.57.1 + '@rollup/rollup-linux-arm-musleabihf': 4.57.1 + '@rollup/rollup-linux-arm64-gnu': 4.57.1 + '@rollup/rollup-linux-arm64-musl': 4.57.1 + '@rollup/rollup-linux-loong64-gnu': 4.57.1 + '@rollup/rollup-linux-loong64-musl': 4.57.1 + '@rollup/rollup-linux-ppc64-gnu': 4.57.1 + '@rollup/rollup-linux-ppc64-musl': 4.57.1 + '@rollup/rollup-linux-riscv64-gnu': 4.57.1 + '@rollup/rollup-linux-riscv64-musl': 4.57.1 + '@rollup/rollup-linux-s390x-gnu': 4.57.1 + '@rollup/rollup-linux-x64-gnu': 4.57.1 + '@rollup/rollup-linux-x64-musl': 4.57.1 + '@rollup/rollup-openbsd-x64': 4.57.1 + '@rollup/rollup-openharmony-arm64': 4.57.1 + '@rollup/rollup-win32-arm64-msvc': 4.57.1 + '@rollup/rollup-win32-ia32-msvc': 4.57.1 + '@rollup/rollup-win32-x64-gnu': 4.57.1 + '@rollup/rollup-win32-x64-msvc': 4.57.1 + fsevents: 2.3.3 + + roughjs@4.6.6: + dependencies: + hachure-fill: 0.5.2 + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + points-on-path: 0.2.1 + + rw@1.3.3: {} + + safer-buffer@2.1.2: {} + + sax@1.4.4: {} + + semver@7.7.4: {} + + sharp@0.34.5: + dependencies: + '@img/colour': 1.0.0 + detect-libc: 2.1.2 + semver: 7.7.4 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.34.5 + '@img/sharp-darwin-x64': 0.34.5 + '@img/sharp-libvips-darwin-arm64': 1.2.4 + '@img/sharp-libvips-darwin-x64': 1.2.4 + '@img/sharp-libvips-linux-arm': 1.2.4 + '@img/sharp-libvips-linux-arm64': 1.2.4 + '@img/sharp-libvips-linux-ppc64': 1.2.4 + '@img/sharp-libvips-linux-riscv64': 1.2.4 + '@img/sharp-libvips-linux-s390x': 1.2.4 + 
'@img/sharp-libvips-linux-x64': 1.2.4 + '@img/sharp-libvips-linuxmusl-arm64': 1.2.4 + '@img/sharp-libvips-linuxmusl-x64': 1.2.4 + '@img/sharp-linux-arm': 0.34.5 + '@img/sharp-linux-arm64': 0.34.5 + '@img/sharp-linux-ppc64': 0.34.5 + '@img/sharp-linux-riscv64': 0.34.5 + '@img/sharp-linux-s390x': 0.34.5 + '@img/sharp-linux-x64': 0.34.5 + '@img/sharp-linuxmusl-arm64': 0.34.5 + '@img/sharp-linuxmusl-x64': 0.34.5 + '@img/sharp-wasm32': 0.34.5 + '@img/sharp-win32-arm64': 0.34.5 + '@img/sharp-win32-ia32': 0.34.5 + '@img/sharp-win32-x64': 0.34.5 + + shiki@3.22.0: + dependencies: + '@shikijs/core': 3.22.0 + '@shikijs/engine-javascript': 3.22.0 + '@shikijs/engine-oniguruma': 3.22.0 + '@shikijs/langs': 3.22.0 + '@shikijs/themes': 3.22.0 + '@shikijs/types': 3.22.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + + sisteransi@1.0.5: {} + + sitemap@8.0.2: + dependencies: + '@types/node': 17.0.45 + '@types/sax': 1.2.7 + arg: 5.0.2 + sax: 1.4.4 + + smart-buffer@4.2.0: {} + + smol-toml@1.6.0: {} + + socks-proxy-agent@8.0.5: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + socks: 2.8.7 + transitivePeerDependencies: + - supports-color + + socks@2.8.7: + dependencies: + ip-address: 10.1.0 + smart-buffer: 4.2.0 + + source-map-js@1.2.1: {} + + source-map@0.7.6: {} + + space-separated-tokens@2.0.2: {} + + ssri@13.0.1: + dependencies: + minipass: 7.1.2 + + starlight-llms-txt@0.7.0(@astrojs/starlight@0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)))(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)): + dependencies: + '@astrojs/mdx': 4.3.13(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@astrojs/starlight': 0.37.6(astro@5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3)) + '@types/hast': 3.0.4 + '@types/micromatch': 4.0.10 + astro: 5.17.3(jiti@2.6.1)(rollup@4.57.1)(typescript@5.9.3) + github-slugger: 2.0.0 + hast-util-select: 6.0.4 + micromatch: 4.0.8 + rehype-parse: 9.0.1 + rehype-remark: 10.0.1 + remark-gfm: 4.0.1 + 
remark-stringify: 11.0.0 + unified: 11.0.5 + unist-util-remove: 4.0.0 + transitivePeerDependencies: + - supports-color + + stream-replace-string@2.0.0: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string-width@7.2.0: + dependencies: + emoji-regex: 10.6.0 + get-east-asian-width: 1.4.0 + strip-ansi: 7.1.2 + + stringify-entities@4.0.4: + dependencies: + character-entities-html4: 2.1.0 + character-entities-legacy: 3.0.0 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.1.2: + dependencies: + ansi-regex: 6.2.2 + + style-to-js@1.1.21: + dependencies: + style-to-object: 1.0.14 + + style-to-object@1.0.14: + dependencies: + inline-style-parser: 0.2.7 + + stylis@4.3.6: {} + + svgo@4.0.0: + dependencies: + commander: 11.1.0 + css-select: 5.2.2 + css-tree: 3.1.0 + css-what: 6.2.2 + csso: 5.0.5 + picocolors: 1.1.1 + sax: 1.4.4 + + tar@7.5.7: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.1.0 + yallist: 5.0.0 + + tiny-inflate@1.0.3: {} + + tinyexec@1.0.2: {} + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + trim-lines@3.0.1: {} + + trim-trailing-lines@2.1.0: {} + + trough@2.2.0: {} + + ts-dedent@2.2.0: {} + + tsconfck@3.1.6(typescript@5.9.3): + optionalDependencies: + typescript: 5.9.3 + + tslib@2.8.1: + optional: true + + type-fest@4.41.0: {} + + typescript@5.9.3: {} + + ufo@1.6.3: {} + + ultrahtml@1.6.0: {} + + uncrypto@0.1.3: {} + + unified@11.0.5: + dependencies: + '@types/unist': 3.0.3 + bail: 2.0.2 + devlop: 1.1.0 + extend: 3.0.2 + is-plain-obj: 4.1.0 + trough: 2.2.0 + vfile: 6.0.3 + + unifont@0.7.3: + dependencies: + css-tree: 3.1.0 + ofetch: 1.5.1 + ohash: 2.0.11 + + unique-filename@5.0.0: + dependencies: + unique-slug: 6.0.0 + + unique-slug@6.0.0: + dependencies: + imurmurhash: 0.1.4 + + unist-util-find-after@5.0.0: + 
dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-is@6.0.1: + dependencies: + '@types/unist': 3.0.3 + + unist-util-modify-children@4.0.0: + dependencies: + '@types/unist': 3.0.3 + array-iterate: 2.0.1 + + unist-util-position-from-estree@2.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-remove-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-visit: 5.1.0 + + unist-util-remove@4.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + unist-util-stringify-position@4.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-visit-children@3.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-visit-parents@6.0.2: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-visit@5.1.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + unstorage@1.17.4: + dependencies: + anymatch: 3.1.3 + chokidar: 5.0.0 + destr: 2.0.5 + h3: 1.15.5 + lru-cache: 11.2.6 + node-fetch-native: 1.6.7 + ofetch: 1.5.1 + ufo: 1.6.3 + + util-deprecate@1.0.2: {} + + uuid@11.1.0: {} + + vfile-location@5.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile: 6.0.3 + + vfile-message@4.0.3: + dependencies: + '@types/unist': 3.0.3 + unist-util-stringify-position: 4.0.0 + + vfile@6.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile-message: 4.0.3 + + vite@6.4.1(jiti@2.6.1): + dependencies: + esbuild: 0.25.12 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.57.1 + tinyglobby: 0.2.15 + optionalDependencies: + fsevents: 2.3.3 + jiti: 2.6.1 + + vitefu@1.1.1(vite@6.4.1(jiti@2.6.1)): + optionalDependencies: + vite: 6.4.1(jiti@2.6.1) + + vscode-jsonrpc@8.2.0: {} + + vscode-languageserver-protocol@3.17.5: + dependencies: + vscode-jsonrpc: 8.2.0 + vscode-languageserver-types: 3.17.5 + + vscode-languageserver-textdocument@1.0.12: 
{} + + vscode-languageserver-types@3.17.5: {} + + vscode-languageserver@9.0.1: + dependencies: + vscode-languageserver-protocol: 3.17.5 + + vscode-uri@3.1.0: {} + + web-namespaces@2.0.1: {} + + which-pm-runs@1.1.0: {} + + which@6.0.1: + dependencies: + isexe: 4.0.0 + + widest-line@5.0.0: + dependencies: + string-width: 7.2.0 + + wrap-ansi@9.0.2: + dependencies: + ansi-styles: 6.2.3 + string-width: 7.2.0 + strip-ansi: 7.1.2 + + xxhash-wasm@1.1.0: {} + + yallist@4.0.0: {} + + yallist@5.0.0: {} + + yargs-parser@21.1.1: {} + + yocto-queue@1.2.2: {} + + yocto-spinner@0.2.3: + dependencies: + yoctocolors: 2.1.2 + + yoctocolors@2.1.2: {} + + zod-to-json-schema@3.25.1(zod@3.25.76): + dependencies: + zod: 3.25.76 + + zod-to-ts@1.2.0(typescript@5.9.3)(zod@3.25.76): + dependencies: + typescript: 5.9.3 + zod: 3.25.76 + + zod@3.25.76: {} + + zwitch@2.0.4: {} diff --git a/docs/pnpm-workspace.yaml b/docs/pnpm-workspace.yaml new file mode 100644 index 00000000000..d0b7dbe2294 --- /dev/null +++ b/docs/pnpm-workspace.yaml @@ -0,0 +1,3 @@ +onlyBuiltDependencies: + - esbuild + - sharp diff --git a/docs/public/favicon.svg b/docs/public/favicon.svg new file mode 100644 index 00000000000..cba5ac140a2 --- /dev/null +++ b/docs/public/favicon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/src/assets/invoke-icon-wide.svg b/docs/src/assets/invoke-icon-wide.svg new file mode 100644 index 00000000000..cfeff994147 --- /dev/null +++ b/docs/src/assets/invoke-icon-wide.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/docs/src/assets/invoke-icon.svg b/docs/src/assets/invoke-icon.svg new file mode 100644 index 00000000000..17cfdc77da7 --- /dev/null +++ b/docs/src/assets/invoke-icon.svg @@ -0,0 +1,3 @@ + + + diff --git a/docs/src/content.config.ts b/docs/src/content.config.ts new file mode 100644 index 00000000000..e54e72fa2e2 --- /dev/null +++ b/docs/src/content.config.ts @@ -0,0 +1,8 @@ +import { defineCollection } from 'astro:content'; +import { docsLoader, i18nLoader } 
from '@astrojs/starlight/loaders'; +import { docsSchema, i18nSchema } from '@astrojs/starlight/schema'; + +export const collections = { + docs: defineCollection({ loader: docsLoader(), schema: docsSchema() }), + i18n: defineCollection({ loader: i18nLoader(), schema: i18nSchema() }), +}; diff --git a/docs/src/content/docs/assets/invoke-web-server-1.png b/docs/src/content/docs/assets/invoke-web-server-1.png new file mode 100644 index 00000000000..e1cf27a2176 Binary files /dev/null and b/docs/src/content/docs/assets/invoke-web-server-1.png differ diff --git a/docs/src/content/docs/assets/splash-banner.png b/docs/src/content/docs/assets/splash-banner.png new file mode 100644 index 00000000000..74d23f514d9 Binary files /dev/null and b/docs/src/content/docs/assets/splash-banner.png differ diff --git a/docs/src/content/docs/concepts/diffusion.mdx b/docs/src/content/docs/concepts/diffusion.mdx new file mode 100644 index 00000000000..fc22e3b9328 --- /dev/null +++ b/docs/src/content/docs/concepts/diffusion.mdx @@ -0,0 +1,77 @@ +--- +title: Diffusion +lastUpdated: 2026-02-20 +sidebar: + order: 2 +--- + +import { Card, CardGrid, Steps, Tabs, TabItem } from '@astrojs/starlight/components'; + +Taking the time to understand the diffusion process will help you to understand how to more effectively use InvokeAI. + +## Image Space vs. Latent Space + +There are two main ways Stable Diffusion works — with images, and latents. + + + + Represents images in pixel form that you look at. This is the final visual output you see. + + + Represents compressed inputs. It's in latent space that Stable Diffusion processes images. + + + +:::note[What is a VAE?] + A **VAE (Variational Auto Encoder)** is responsible for compressing and encoding inputs into *latent space*, as well as decoding outputs back into *image space*. +::: + +## Core Components + +To fully understand the diffusion process, we need to understand a few more terms: **U-Net**, **CLIP**, and **conditioning**. 
+ + + + A model trained on a large number of latent images with known amounts of random noise added. The U-Net can be given a slightly noisy image and it will predict the pattern of noise needed to subtract from the image in order to recover the original. + + + **CLIP** is a model that tokenizes and encodes text into **conditioning**. This conditioning guides the model during the denoising steps to produce a new image. + + + +The U-Net and CLIP work together during the image generation process at each denoising step. The U-Net removes noise so that the result is similar to images in its training set, while CLIP guides the U-Net towards creating images that are most similar to your prompt. + +## The Generation Process + + + + When you generate an image using text-to-image, multiple steps occur in latent space: + + + 1. **Noise Generation:** Random noise is generated at the chosen height and width. The noise's characteristics are dictated by the seed. This noise tensor is passed into latent space. We'll call this *noise A*. + 2. **Noise Prediction:** Using a model's U-Net, a noise predictor examines *noise A* and the words tokenized by CLIP from your prompt (conditioning). It generates its own noise tensor to predict what the final image might look like in latent space. We'll call this *noise B*. + 3. **Subtraction:** *Noise B* is subtracted from *noise A* in an attempt to create a latent image consistent with the prompt. This step is repeated for the number of sampler steps chosen. + 4. **Decoding:** The VAE decodes the final latent image from latent space into image space. + + + + Image-to-image is a similar process, with only the first step being different: + + + 1. **Encoding & Adding Noise:** The input image is encoded from image space into latent space by the VAE. Noise is then added to the input latent image. + * **Denoising Strength** dictates how many noise steps are added, and the amount of noise added at each step. 
+ * A strength of `0` means there are 0 steps and no noise added, resulting in an unchanged image. + * A strength of `1` results in the image being completely replaced with noise and a full set of denoising steps are performed. + 2. **Noise Prediction:** Using a model's U-Net, a noise predictor examines the noisy latent image and the conditioning from your prompt. It generates its own noise tensor to predict the final image. + 3. **Subtraction:** The predicted noise is subtracted from the current noise in an attempt to create a latent image consistent with the prompt. This step is repeated for the remaining sampler steps. + 4. **Decoding:** The VAE decodes the final latent image from latent space into image space. + + + + +## Summary + + +- A **Model** provides the CLIP prompt tokenizer, the VAE, and a U-Net (where noise prediction occurs given a prompt and initial noise tensor). +- A **Noise Scheduler** (e.g. `DPM++ 2M Karras`) schedules the subtraction of noise from the latent image across the sampler steps chosen. Less noise is usually subtracted at higher sampler steps. + diff --git a/docs/src/content/docs/concepts/image-generation.mdx b/docs/src/content/docs/concepts/image-generation.mdx new file mode 100644 index 00000000000..3015b27f6db --- /dev/null +++ b/docs/src/content/docs/concepts/image-generation.mdx @@ -0,0 +1,133 @@ +--- +title: Image Generation +lastUpdated: 2026-02-20 +sidebar: + order: 1 +--- + +import { Card, CardGrid, Steps } from '@astrojs/starlight/components'; + +:::tip[New to image generation with AI?] + You're in the right place! This is a high-level walkthrough of some of the concepts and terms you'll see as you start using Invoke. Please note, this is not an exhaustive guide and may be out of date due to the rapidly changing nature of the space. +::: + +## Using InvokeAI + +### Prompt Crafting + +Prompts are the basis of using InvokeAI, providing the models directions on what to generate. 
As a general rule of thumb, the more detailed your prompt is, the better your result will be. + + + To get started, here's an easy template to use for structuring your prompts: + **Subject, Style, Quality, Aesthetic** + + - **Subject:** What your image will be about. E.g. “a futuristic city with trains”, “penguins floating on icebergs”, “friends sharing beers”. + - **Style:** The style or medium in which your image will be in. E.g. “photograph”, “pencil sketch”, “oil paints”, or “pop art”, “cubism”, “abstract”. + - **Quality:** A particular aspect or trait that you would like to see emphasized in your image. E.g. "award-winning", "featured in relevant set of high quality works", "professionally acclaimed". Many people often use "masterpiece". + - **Aesthetics:** The visual impact and design of the artwork. This can be colors, mood, lighting, setting, etc. + + +There are two prompt boxes: **Positive Prompt** & **Negative Prompt**. + +- A **Positive Prompt** includes words you want the model to reference when creating an image. +- A **Negative Prompt** is for anything you want the model to eliminate when creating an image. It doesn’t always interpret things exactly the way you would, but helps control the generation process. Always try to include a few terms - you can typically use lower quality image terms like “blurry” or “distorted” with good success. + +**Some example prompts you can try on your own:** + +- *A detailed oil painting of a tranquil forest at sunset with vibrant colors and soft, golden light filtering through the trees* +- *friends sharing beers in a busy city, realistic colored pencil sketch, twilight, masterpiece, bright, lively* + +### Generation Workflows + +Invoke offers a number of different workflows for interacting with models to produce images. Each is extremely powerful on its own, but together provide you an unparalleled way of producing high quality creative outputs that align with your vision. 
+ + + + Focuses on the key workflow of using a prompt to generate a new image. It includes other features that help control the generation process as well. + + + Provide an image as a reference (called the “initial image”), which provides more guidance around color and structure to the AI as it generates a new image. + + + An advanced AI-first image editing tool. Drag an image onto the canvas to regenerate elements, edit content or colors (**inpainting**), or extend the image with consistency and clarity (**outpainting**). + + + +### Improving Image Quality + + + 1. **Fine-tuning your prompt:** + + The more specific you are, the closer the image will turn out to what is in your head! Adding more details in the Positive or Negative Prompt can help add/remove pieces of your image to improve it. You can also use advanced techniques like upweighting and downweighting to control the influence of certain words. [Learn more here](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-syntax-features). + + :::tip + If you're seeing poor results, try adding the things you don't like about the image to your negative prompt. E.g. *distorted, low quality, unrealistic, etc.* + ::: + + 2. **Explore different models:** + + Other models can produce different results due to the data they've been trained on. Each model has specific language and settings it works best with; a model's documentation is your friend here. Play around with some and see what works best for you! + + 3. **Increasing Steps:** + + The number of steps used controls how much time the model is given to produce an image, and depends on the "Scheduler" used. More steps tends to mean better results, but will take longer. We recommend at least 30 steps for most. + + 4. **Tweak and Iterate:** + + Remember, it's best to change one thing at a time so you know what is working and what isn't. Sometimes you just need to try a new image, and other times using a new prompt might be the ticket. 
+ *For testing, consider turning off the "random" Seed. Using the same seed with the same settings will produce the same image, which makes it the perfect way to learn exactly what your changes are doing.* + + 5. **Explore Advanced Settings:** + + InvokeAI has a full suite of tools available to allow you complete control over your image creation process. Check out our [docs if you want to learn more](https://invoke-ai.github.io/InvokeAI/features/). + + +## Terms & Concepts + +:::note + If you're interested in learning more, check out [this presentation](https://docs.google.com/presentation/d/1IO78i8oEXFTZ5peuHHYkVF-Y3e2M6iM5tCnc-YBfcCM/edit?usp=sharing) from one of our maintainers (@lstein). +::: + +### Stable Diffusion + +Stable Diffusion is a deep learning, text-to-image model that is the foundation of the capabilities found in InvokeAI. Since the release of Stable Diffusion, there have been many subsequent models created based on Stable Diffusion that are designed to generate specific types of images. + +### Prompts + +Prompts provide the models directions on what to generate. As a general rule of thumb, the more detailed your prompt is, the better your result will be. + +### Models + +Models are the magic that power InvokeAI. These files represent the output of training a machine on understanding massive amounts of images - providing them with the capability to generate new images using just a text description of what you'd like to see. + +Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at [civitai.com](https://civitai.com). Each model can produce a unique style of output, based on the images it was trained on. + +:::note + Models that contain "inpainting" in the name are designed for use with the inpainting feature of the Unified Canvas. +::: + +### Schedulers & Steps + +**Schedulers** guide the process of removing noise (de-noising) from data. They determine: +1. 
The number of steps to take to remove the noise. +2. Whether the steps are random (stochastic) or predictable (deterministic). +3. The specific method (algorithm) used for de-noising. + +**Steps** represent the number of de-noising iterations each generation goes through. Schedulers can be intricate and there's often a balance to strike between how quickly they can de-noise data and how well they can do it. It's typically advised to experiment with different schedulers to see which one gives the best results. + +### Additional Concepts + + + + LoRAs are like a smaller, more focused version of models, intended to focus on training a better understanding of how a specific character, style, or concept looks. + + + Like LoRAs, embeddings assist with more easily prompting for certain characters, styles, or concepts. They are trained to update the relationship between a specific word (known as the "trigger") and the intended output. + + + ControlNets are neural network models that are able to extract key features from an existing image and use these features to guide the output of the image generation model. + + + A Variational Auto-Encoder (VAE) is an encode/decode model that translates the "latents" image produced during the image generation process to the large pixel images that we see. + + diff --git a/docs/src/content/docs/concepts/nodes-workflows.mdx b/docs/src/content/docs/concepts/nodes-workflows.mdx new file mode 100644 index 00000000000..cd810f845e6 --- /dev/null +++ b/docs/src/content/docs/concepts/nodes-workflows.mdx @@ -0,0 +1,29 @@ +--- +title: Nodes and Workflows +sidebar: + order: 4 +--- + +import { Card, CardGrid } from '@astrojs/starlight/components'; + +## What are Nodes? + +A **Node** is simply a single operation that takes in inputs and returns outputs. Multiple nodes can be linked together to create more complex functionality. All InvokeAI features are added through nodes. 
+ +With nodes, you can easily extend the image generation capabilities of InvokeAI and build workflows that suit your specific needs. + +### Anatomy of a Node + +Individual nodes are made up of the following: + + + + Edge points on the **left side** of the node window where you connect outputs from other nodes. + + + Edge points on the **right side** of the node window where you connect to inputs on other nodes. + + + Various options which are either manually configured, or overridden by connecting an output from another node to the input. + + \ No newline at end of file diff --git a/docs/src/content/docs/concepts/parameters.mdx b/docs/src/content/docs/concepts/parameters.mdx new file mode 100644 index 00000000000..3b466bd6478 --- /dev/null +++ b/docs/src/content/docs/concepts/parameters.mdx @@ -0,0 +1,143 @@ +--- +title: Generation Parameters +lastUpdated: 2026-02-20 +sidebar: + order: 3 +--- + +import { Card, CardGrid, Steps } from '@astrojs/starlight/components'; + +# Sampler Convergence + +As features keep increasing, making the right choices for your needs can become increasingly difficult. What sampler to use? And for how many steps? Do you change the CFG value? Do you use prompt weighting? Do you allow variations? + +Even once you have a result, do you blend it with other images? Pass it through `img2img`? With what strength? Do you use inpainting to correct small details? Outpainting to extend cropped sections? + +The purpose of this series of documents is to help you better understand these tools, so you can make the best out of them. Feel free to contribute with your own findings! + +In this document, we will talk about **sampler convergence**. + + + Looking for a short version? Here is the summary: + + - Results converge as steps (`-s`) are increased (except for `K_DPM_2_A` and `K_EULER_A`). Often at ≥ `-s100`, but may require ≥ `-s700`. + - Producing a batch of candidate images at low (`-s8` to `-s30`) step counts can save you hours of computation. 
+ - `K_HEUN` and `K_DPM_2` converge in fewer steps (but are slower per step). + - `K_DPM_2_A` and `K_EULER_A` incorporate a lot of creativity and variability. + + +## Sampler Performance Overview + + + + *(Tested on M1 Max 64GB, 512x512, 3 sample average)* + + | Sampler | it/s | + | :--- | :--- | + | `DDIM` | 1.89 | + | `PLMS` | 1.86 | + | `K_EULER` | 1.86 | + | `K_LMS` | **1.91** (Fastest) | + | `K_EULER_A` | 1.86 | + | `K_HEUN` | 0.95 *(Slower)* | + | `K_DPM_2` | 0.95 *(Slower)* | + | `K_DPM_2_A` | 0.95 *(Slower)* | + + + + For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices. + + While `K_HEUN` and `K_DPM_2` run half as fast, they tend to converge twice as quickly as `K_LMS`. + + At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead. + + For high variability between steps, use `K_EULER_A` (which runs twice as fast as `K_DPM_2_A`). + + + +--- + +## Sampler Results by Subject + +Let's start by choosing a prompt and using it with each of our 8 samplers, running it for 10, 20, 30, 40, 50 and 100 steps. + +### Anime +> `"an anime girl" -W512 -H512 -C7.5 -S3031912972` + +![Anime Comparison Grid](https://user-images.githubusercontent.com/50542132/191868725-7f7af991-e254-4c1f-83e7-bed8c9b2d34f.png) + +Immediately, you can notice results tend to converge — that is, as `-s` (step) values increase, images look more and more similar until there comes a point where the image no longer changes. + +You can also notice how `DDIM` and `PLMS` eventually tend to converge to K-sampler results as steps are increased. Among K-samplers, `K_HEUN` and `K_DPM_2` seem to require the fewest steps to converge, and even at low step counts they are good indicators of the final result. Finally, `K_DPM_2_A` and `K_EULER_A` seem to do a bit of their own thing and don't keep much similarity with the rest of the samplers. + +### Nature +Now, these results seem interesting, but do they hold for other topics? Let's try! 
+ +> `"valley landscape wallpaper, d&d art, fantasy, painted, 4k, high detail, sharp focus, washed colors, elaborate excellent painted illustration" -W512 -H512 -C7.5 -S1458228930` + +![Nature Comparison Grid](https://user-images.githubusercontent.com/50542132/191868763-b151c69e-0a72-4cf1-a151-5a64edd0c93e.png) + +With nature, you can see how initial results are even more indicative of the final result — more so than with characters/people. `K_HEUN` and `K_DPM_2` are again the quickest indicators, almost right from the start. Results also converge faster (e.g. `K_HEUN` converged at `-s21`). + +### Food +> `"a hamburger with a bowl of french fries" -W512 -H512 -C7.5 -S4053222918` + +![Food Comparison Grid](https://user-images.githubusercontent.com/50542132/191868898-98801a62-885f-4ea1-aee8-563503522aa9.png) + +Again, `K_HEUN` and `K_DPM_2` take the fewest number of steps to be good indicators of the final result. `K_DPM_2_A` and `K_EULER_A` seem to incorporate a lot of creativity/variability, capable of producing rotten hamburgers, but also of adding lettuce to the mix. And they're the only samplers that produced an actual 'bowl of fries'! + +### Animals +> `"grown tiger, full body" -W512 -H512 -C7.5 -S3721629802` + +![Animal Comparison Grid](https://user-images.githubusercontent.com/50542132/191868870-9e3b7d82-b909-429f-893a-13f6ec343454.png) + +`K_HEUN` and `K_DPM_2` once again require the least number of steps to be indicative of the final result (around `-s30`), while other samplers are still struggling with several tails or malformed back legs. + +It also takes longer to converge (for comparison, `K_HEUN` required around 150 steps to converge). This is normal, as producing human/animal faces/bodies is one of the things the model struggles the most with. For these topics, running for more steps will often increase coherence within the composition. 
+ +### People +> `"Ultra realistic photo, (Miranda Bloom-Kerr), young, stunning model, blue eyes, blond hair, beautiful face, intricate, highly detailed, smooth, art by artgerm and greg rutkowski and alphonse mucha, stained glass" -W512 -H512 -C7.5 -S2131956332`. *(This time, we will go up to 300 steps).* + +![People Comparison Grid 1](https://user-images.githubusercontent.com/50542132/191871743-6802f199-0ffd-4986-98c5-df2d8db30d18.png) + +Observing the results, it again takes longer for all samplers to converge (`K_HEUN` took around 150 steps), but we can observe good indicative results much earlier (see: `K_HEUN`). Conversely, `DDIM` and `PLMS` are still undergoing moderate changes (see: lace around her neck), even at `-s300`. + +In fact, as we can see in this other experiment, some samplers can take 700+ steps to converge when generating people. + +![People Comparison Grid 2](https://user-images.githubusercontent.com/50542132/191992123-7e0759d6-6220-42c4-a961-88c7071c5ee6.png) + +Note also the point of convergence may not be the most desirable state (e.g. you might prefer an earlier version of the face that is more rounded), but it will probably be the most coherent regarding arms/hands/face attributes. You can always merge different images with a photo editing tool and pass it through `img2img` to smoothen the composition. + +--- + +## Batch Generation Speedup + +This realization about convergence is very useful because it means you don't need to create a batch of 100 images (`-n100`) at `-s100` just to choose your favorite 2 or 3 images. + +You can produce the same 100 images at `-s10` to `-s30` using a K-sampler (since they converge faster), get a rough idea of the final result, choose your 2 or 3 favorite ones, and then run `-s100` on those specific images to polish details. This technique is **3-8x as quick**. 
+ +:::tip[Time Savings Example] + Assuming 60 seconds per 100 steps: + + - **Method A:** 60s * 100 images = **6000s** (100 images at `-s100`, manually picking 3 favorites). Total time: **1 hour and 40 minutes.** + - **Method B:** 6s * 100 images + 60s * 3 images = **780s** (100 images at `-s10`, manually picking 3 favorites, and running those 3 at `-s100` to polish details). Total time: **13 minutes.** +::: + +## Three Key Takeaways + +Finally, it is relevant to mention that, in general, there are 3 important moments in the process of image formation as steps increase: + + +1. **The Indicator Stage:** + The earliest point at which an image becomes a good indicator of the final result. This is useful for batch generation at low step values to preview outputs before committing to higher steps. +2. **The Coherence Stage:** + The point at which an image becomes coherent, even if different from the final converged result. This is useful for low-step batch generation where quality is improved via other techniques (like inpainting) rather than raw step count. +3. **The Convergence Stage:** + The point at which an image fully converges and stops changing. + + +:::note[Workflow Dictates Strategy] + Remember that your workflow/strategy should define your optimal number of steps, even for the same prompt and seed. For example, if you seek full convergence, you may run `K_LMS` for `-s200`. However, running `K_LMS` for `-s20` (taking one-tenth the time) may perform just as well if your workflow includes adding small missing details via `img2img`. 
+::: + +![Low Step Sampler Comparison](https://user-images.githubusercontent.com/50542132/192046823-2714cb29-bbf3-4eb1-9213-e27a0963905c.png) diff --git a/docs/src/content/docs/concepts/prompt-syntax.mdx b/docs/src/content/docs/concepts/prompt-syntax.mdx new file mode 100644 index 00000000000..4dddcda81c3 --- /dev/null +++ b/docs/src/content/docs/concepts/prompt-syntax.mdx @@ -0,0 +1,5 @@ +--- +title: Prompting Syntax +--- + +{/* TODO: Finish this page */} diff --git a/docs/src/content/docs/configuration/assets/cuda-sysmem-fallback.png b/docs/src/content/docs/configuration/assets/cuda-sysmem-fallback.png new file mode 100755 index 00000000000..f79e007f871 Binary files /dev/null and b/docs/src/content/docs/configuration/assets/cuda-sysmem-fallback.png differ diff --git a/docs/src/content/docs/configuration/docker.mdx b/docs/src/content/docs/configuration/docker.mdx new file mode 100644 index 00000000000..e8ec4f93c78 --- /dev/null +++ b/docs/src/content/docs/configuration/docker.mdx @@ -0,0 +1,95 @@ +--- +title: Docker +--- + +import { Aside, Tabs, TabItem } from '@astrojs/starlight/components' + +import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro' + + + +:::note[Operating Systems and GPU Support] + + + Docker Desktop on Windows [includes GPU support](https://www.docker.com/blog/wsl-2-gpu-support-for-docker-desktop-on-nvidia-gpus/). + + + Docker can not access the GPU on macOS, so your generation speeds will be slow. Use the [launcher](/getting-started/installation) instead. + + + Configure Docker to access your machine's GPU. + Follow the [NVIDIA](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) or [AMD](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html) documentation. + + +::: + +## TL;DR + +Ensure your Docker setup is able to use your GPU. 
Then:
+
+```bash
+docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
+```
+
+Once the container starts up, open [http://localhost:9090](http://localhost:9090) in your browser, install some models, and start generating.
+
+## Build-It-Yourself
+
+All the docker materials are located inside the [docker](https://github.com/invoke-ai/InvokeAI/tree/main/docker) directory in the Git repo.
+
+```bash
+cd docker
+cp .env.sample .env
+docker compose up
+```
+
+We also ship the `run.sh` convenience script. See the `docker/README.md` file for detailed instructions on how to customize the docker setup to your needs.
+
+### Prerequisites
+
+#### Install [Docker](https://github.com/santisbon/guides#docker)
+
+On the [Docker Desktop app](https://docs.docker.com/get-docker/), go to `Preferences` -> `Resources` -> `Advanced`. Increase the CPUs and Memory to avoid this [Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to increase Swap and Disk image size too.
+
+### Setup
+
+Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary.
+
+Any environment variables supported by InvokeAI can be set here - please see [Configurations](../configuration.md) for further detail.
+
+At the very least, you might want to set the `INVOKEAI_ROOT` environment variable
+to point to the location where you wish to store your InvokeAI models, configuration, and outputs.
+
+| Environment Variable | Default value | Description |
+| --- | --- | --- |
+| `INVOKEAI_ROOT` | `~/invokeai` | **Required** - the location of your InvokeAI root directory. It will be created if it does not exist. |
+| `HUGGING_FACE_HUB_TOKEN` | | InvokeAI will work without it, but some of the integrations with HuggingFace (like downloading models from private repositories) may not work |
+| `GPU_DRIVER` | `cuda` | Optionally change this to `rocm` to build the image for AMD GPUs. 
NOTE: Use the `build.sh` script to build the image for this to take effect. | + +#### Build the Image + +Use the standard `docker compose build` command from within the `docker` directory. + +If using an AMD GPU: +a: set the `GPU_DRIVER=rocm` environment variable in `docker-compose.yml` and continue using `docker compose build` as usual, or +b: set `GPU_DRIVER=rocm` in the `.env` file and use the `build.sh` script, provided for convenience + +#### Run the Container + +Use the standard `docker compose up` command, and generally the `docker compose` [CLI](https://docs.docker.com/compose/reference/) as usual. + +Once the container starts up (and configures the InvokeAI root directory if this is a new installation), you can access InvokeAI at [http://localhost:9090](http://localhost:9090) + +## Troubleshooting / FAQ + +
+    <summary>"I am running Windows under WSL2, and am seeing a 'no such file or directory' error."</summary>
+
+    Your `docker-entrypoint.sh` might have Windows (CRLF) line endings, depending how you cloned the repository.
+    To solve this, change the line endings in the `docker-entrypoint.sh` file to `LF`. You can do this in VSCode
+    (`Ctrl+P` and search for "line endings"), or by using the `dos2unix` utility in WSL.
+    Finally, you may delete `docker-entrypoint.sh` followed by `git pull; git checkout docker/docker-entrypoint.sh`
+    to reset the file to its most recent version.
+    For more information on this issue, see [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers)
+
diff --git a/docs/src/content/docs/configuration/invokeai-yaml.mdx b/docs/src/content/docs/configuration/invokeai-yaml.mdx new file mode 100644 index 00000000000..bb48c2175ed --- /dev/null +++ b/docs/src/content/docs/configuration/invokeai-yaml.mdx @@ -0,0 +1,84 @@ +--- +title: YAML Config +--- + +import { FileTree } from '@astrojs/starlight/components' + +Runtime settings, including the location of files and directories, memory usage, and performance, are managed via the `invokeai.yaml` config file or environment variables. A subset of settings may be set via commandline arguments. + +Settings sources are used in this order: + +- CLI args +- Environment variables +- `invokeai.yaml` settings +- Fallback: defaults + +### InvokeAI Root Directory + +On startup, InvokeAI searches for its "root" directory. This is the directory that contains models, images, the database, and so on. It also contains a configuration file called `invokeai.yaml`. + + + - models/ + - outputs/ + - databases/ + - workflow_thumbnails/ + - style_presets/ + - nodes/ + - configs/ + - invokeai.example.yaml + - **invokeai.yaml** + + +InvokeAI searches for the root directory in this order: + +1. The `--root ` CLI arg. +2. The environment variable INVOKEAI_ROOT. +3. The directory containing the currently active virtual environment. +4. Fallback: a directory in the current user's home directory named `invokeai`. + +### InvokeAI Configuration File + +Inside the root directory, we read settings from the `invokeai.yaml` file. + +It has two sections - one for internal use and one for user settings: + +```yaml +# Internal metadata - do not edit: +schema_version: 4.0.2 + +# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/: +host: 0.0.0.0 # serve the app on your local network +models_dir: D:\invokeai\models # store models on an external drive +precision: float16 # always use fp16 precision +``` + +The settings in this file will override the defaults. 
You only need +to change this file if the default for a particular setting doesn't +work for you. + +You'll find an example file next to `invokeai.yaml` that shows the default values. + +Some settings, like [Model Marketplace API Keys], require the YAML +to be formatted correctly. Here is a [basic guide to YAML files]. + +#### Custom Config File Location + +You can use any config file with the `--config` CLI arg. Pass in the path to the `invokeai.yaml` file you want to use. + +Note that environment variables will trump any settings in the config file. + +### Environment Variables + +All settings may be set via environment variables by prefixing `INVOKEAI_` +to the variable name. For example, `INVOKEAI_HOST` would set the `host` +setting. + +For non-primitive values, pass a JSON-encoded string: + +```sh +export INVOKEAI_REMOTE_API_TOKENS='[{"url_regex":"modelmarketplace", "token": "12345"}]' +``` + +We suggest using `invokeai.yaml`, as it is more user-friendly. + +{/* TODO: Insert auto-generated settings table here. */} diff --git a/docs/src/content/docs/configuration/low-vram-mode.mdx b/docs/src/content/docs/configuration/low-vram-mode.mdx new file mode 100644 index 00000000000..e2a7d9f7765 --- /dev/null +++ b/docs/src/content/docs/configuration/low-vram-mode.mdx @@ -0,0 +1,176 @@ +--- +title: Low-VRAM mode +--- + +As of v5.6.0, Invoke has a low-VRAM mode. It works on systems with dedicated GPUs (Nvidia GPUs on Windows/Linux and AMD GPUs on Linux). + +This allows you to generate even if your GPU doesn't have enough VRAM to hold full models. Most users should be able to run even the beefiest models - like the ~24GB unquantised FLUX dev model. + +## Enabling Low-VRAM mode + +To enable Low-VRAM mode, add this line to your `invokeai.yaml` configuration file, then restart Invoke: + +```yaml +enable_partial_loading: true +``` + +**Windows users should also [disable the Nvidia sysmem fallback](#disabling-nvidia-sysmem-fallback-windows-only)**. 
+
+It is possible to fine-tune the settings for best performance or if you still get out-of-memory errors (OOMs).
+
+:::tip[How to find `invokeai.yaml`]
+The `invokeai.yaml` configuration file lives in your install directory. To access it, run the **Invoke Community Edition** launcher and click the install location. This will open your install directory in a file explorer window.
+
+You'll see `invokeai.yaml` there and can edit it with any text editor. After making changes, restart Invoke.
+
+If you don't see `invokeai.yaml`, launch Invoke once. It will create the file on its first startup.
+:::
+
+## Details and fine-tuning
+
+Low-VRAM mode involves 5 features, each of which can be configured or fine-tuned:
+
+- Partial model loading (`enable_partial_loading`)
+- PyTorch CUDA allocator config (`pytorch_cuda_alloc_conf`)
+- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
+- Working memory (`device_working_mem_gb`)
+- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)
+
+Read on to learn about these features and understand how to fine-tune them for your system and use-cases.
+
+### Partial model loading
+
+Invoke's partial model loading works by streaming model "layers" between RAM and VRAM as they are needed.
+
+When an operation needs layers that are not in VRAM, but there isn't enough room to load them, inactive layers are offloaded to RAM to make room.
+
+#### Enabling partial model loading
+
+As described above, you can enable partial model loading by adding this line to `invokeai.yaml`:
+
+```yaml
+enable_partial_loading: true
+```
+
+### PyTorch CUDA allocator config
+
+The PyTorch CUDA allocator's behavior can be configured using the `pytorch_cuda_alloc_conf` config. Tuning the allocator configuration can help to reduce the peak reserved VRAM. The optimal configuration is dependent on many factors (e.g. 
device type, VRAM, CUDA driver version, etc.), but switching from PyTorch's native allocator to using CUDA's built-in allocator works well on many systems. To try this, add the following line to your `invokeai.yaml` file: + +```yaml +pytorch_cuda_alloc_conf: "backend:cudaMallocAsync" +``` + +A more complete explanation of the available configuration options is [here](https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf). + +### Dynamic RAM and VRAM cache sizes + +Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum. + +By default, Invoke manages these caches' sizes dynamically for best performance. + +#### Fine-tuning cache sizes + +Prior to v5.6.0, the cache sizes were static, and for best performance, many users needed to manually fine-tune the `ram` and `vram` settings in `invokeai.yaml`. + +As of v5.6.0, the caches are dynamically sized. The `ram` and `vram` settings are no longer used, and new settings are added to configure the cache. + +**Most users will not need to fine-tune the cache sizes.** + +But, if your GPU has enough VRAM to hold models fully, you might get a perf boost by manually setting the cache sizes in `invokeai.yaml`: + +```yaml +# The default max cache RAM size is logged on InvokeAI startup. It is determined based on your system RAM / VRAM. +# You can override the default value by setting `max_cache_ram_gb`. +# Increasing `max_cache_ram_gb` will increase the amount of RAM used to cache inactive models, resulting in faster model +# reloads for the cached models. +# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB +# might be a good value to achieve aggressive model caching. 
+max_cache_ram_gb: 28
+
+# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
+# consideration the VRAM used by other processes).
+# You can override the default value by setting `max_cache_vram_gb`.
+# CAUTION: Most users should not manually set this value. See warning below.
+max_cache_vram_gb: 16
+```
+
+:::caution[Max safe value for `max_cache_vram_gb`]
+Most users should not manually configure the `max_cache_vram_gb`. This configuration value takes precedence over the `device_working_mem_gb` and any operations that explicitly reserve additional working memory (e.g. VAE decode). As such, manually configuring it increases the likelihood of encountering out-of-memory errors.
+
+For users who wish to configure `max_cache_vram_gb`, the max safe value can be determined by subtracting `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.
+
+For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.
+
+If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.
+
+Most users who override `max_cache_vram_gb` are doing so because they wish to use significantly less VRAM, and should be setting `max_cache_vram_gb` to a value significantly less than the 'max safe value'.
+:::
+
+### Working memory
+
+Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.
+
+Invoke reserves 3GB VRAM as working memory by default, which is enough for most use-cases. However, it is possible to fine-tune this setting if you still get OOMs.
+
+#### Fine-tuning working memory
+
+You can increase the working memory size in `invokeai.yaml` to prevent OOMs:
+
+```yaml
+# The default is 3GB - bump it up to 4GB to prevent OOMs.
+device_working_mem_gb: 4
+```
+
+:::tip[Operations may request more working memory]
+For some operations, we can determine VRAM requirements in advance and allocate additional working memory to prevent OOMs.
+
+VAE decoding is one such operation. This operation converts the generation process's output into an image. For large image outputs, this might use more than the default working memory size of 3GB.
+
+During this decoding step, Invoke calculates how much VRAM will be required to decode and requests that much VRAM from the model manager. If the amount exceeds the working memory size, the model manager will offload cached model layers from VRAM until there's enough VRAM to decode.
+
+Once decoding completes, the model manager "reclaims" the extra VRAM allocated as working memory for future model loading operations.
+:::
+
+### Keeping a RAM weight copy
+
+Invoke has the option of keeping a RAM copy of all model weights, even when they are loaded onto the GPU. This optimization is _on_ by default, and enables faster model switching and LoRA patching. Disabling this feature will reduce the average RAM load while running Invoke (peak RAM likely won't change), at the cost of slower model switching and LoRA patching. If you have limited RAM, you can disable this optimization:
+
+```yaml
+# Set to false to reduce the average RAM usage at the cost of slower model switching and LoRA patching.
+keep_ram_copy_of_weights: false
+```
+
+### Disabling Nvidia sysmem fallback (Windows only)
+
+On Windows, Nvidia GPUs are able to use system RAM when their VRAM fills up via **sysmem fallback**. While it sounds like a good idea on the surface, in practice it causes massive slowdowns during generation.
+
+It is strongly suggested to disable this feature:
+
+- Open the **NVIDIA Control Panel** app.
+- Expand **3D Settings** on the left panel.
+- Click **Manage 3D Settings** in the left panel.
+- Find **CUDA - Sysmem Fallback Policy** in the right panel and set it to **Prefer No Sysmem Fallback**. 
+ +![cuda-sysmem-fallback](./assets/cuda-sysmem-fallback.png) + +:::tip[Invoke does the same thing, but better] + If the sysmem fallback feature sounds familiar, that's because Invoke's partial model loading strategy is conceptually very similar - use VRAM when there's room, else fall back to RAM. + + Unfortunately, the Nvidia implementation is not optimized for applications like Invoke and does more harm than good. +::: + +## Troubleshooting + +### Windows page file + +Invoke has high virtual memory (a.k.a. 'committed memory') requirements. This can cause issues on Windows if the page file size limits are hit. (See this issue for the technical details on why this happens: https://github.com/invoke-ai/InvokeAI/issues/7563). + +If you run out of page file space, InvokeAI may crash. Often, these crashes will happen with one of the following errors: + +- InvokeAI exits with Windows error code `3221225477` +- InvokeAI crashes without an error, but `eventvwr.msc` reveals an error with code `0xc0000005` (the hex equivalent of `3221225477`) + +If you are running out of page file space, try the following solutions: + +- Make sure that you have sufficient disk space for the page file to grow. Watch your disk usage as Invoke runs. If it climbs near 100% leading up to the crash, then this is very likely the source of the issue. Clear out some disk space to resolve the issue. +- Make sure that your page file is set to "System managed size" (this is the default) rather than a custom size. Under the "System managed size" policy, the page file will grow dynamically as needed. diff --git a/docs/src/content/docs/configuration/patchmatch.mdx b/docs/src/content/docs/configuration/patchmatch.mdx new file mode 100644 index 00000000000..a91e1a8f8b7 --- /dev/null +++ b/docs/src/content/docs/configuration/patchmatch.mdx @@ -0,0 +1,126 @@ +--- +title: Patchmatch +--- + +import { Tabs, TabItem, Steps } from '@astrojs/starlight/components' + +PatchMatch is an algorithm used to infill images. 
It can greatly improve outpainting results. PyPatchMatch is a python wrapper around a C++ implementation of the algorithm. + +It uses the image data around the target area as a reference to generate new image data of a similar character and quality. + +## Why Use PatchMatch + +In the context of image generation, "outpainting" refers to filling in a transparent area using AI-generated image data. But the AI can't generate without some initial data. We need to first fill in the transparent area with _something_. + +The first step in "outpainting" then, is to fill in the transparent area with something. Generally, you get better results when that initial infill resembles the rest of the image. + +Because PatchMatch generates image data so similar to the rest of the image, it works very well as the first step in outpainting, typically producing better results than other infill methods supported by Invoke (e.g. LaMA, cv2 infill, random tiles). + +### Performance Caveat + +PatchMatch is CPU-bound, and the amount of time it takes increases proportionally as the infill area increases. While the numbers certainly vary depending on system specs, you can expect a noticeable slowdown once you start infilling areas around 512x512 pixels. 1024x1024 pixels can take several seconds to infill. + +## Installation + +Unfortunately, installation can be somewhat challenging, as it requires some things that `pip` cannot install for you. + + + 1. Ensure you have the necessary dependencies installed for your system (see below). + + + + You're in luck! On Windows platforms PyPatchMatch will install automatically on Windows systems with no extra intervention. + + + You need to have opencv installed so that pypatchmatch can be built: + + ```bash + brew install opencv + ``` + + The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built. + + + Prior to installing PyPatchMatch, you need to take the following steps: + + + + + 1. 
Install the `build-essential` tools: + + ```sh + sudo apt update # Update package lists + sudo apt install build-essential + ``` + + 2. Install `opencv`: + + ```sh + sudo apt install python3-opencv libopencv-dev + ``` + + 3. Activate the environment you use for invokeai, either with `conda` or with a virtual environment. + + + + + 1. Install the `base-devel` package: + + ```sh + sudo pacman -Syu + sudo pacman -S --needed base-devel + ``` + + 2. Install `opencv`, `blas`, and required dependencies: + + ```sh + sudo pacman -S opencv blas fmt glew vtk hdf5 + ``` + + or for CUDA support + + ```sh + sudo pacman -S opencv-cuda blas fmt glew vtk hdf5 + ``` + + 3. Fix the naming of the `opencv` package configuration file: + + ```sh + cd /usr/lib/pkgconfig/ + ln -sf opencv4.pc opencv.pc + ``` + + + + + + + 2. Install pypatchmatch: + + ```sh + pip install pypatchmatch + ``` + + 3. Confirm that pypatchmatch is installed. At the command-line prompt enter `python`, and then at the `>>>` line type `from patchmatch import patch_match`: It should look like the following: + + ```py + Python 3.12.3 (main, Aug 14 2025, 17:47:21) [GCC 13.3.0] on linux + Type "help", "copyright", "credits" or "license" for more information. + >>> from patchmatch import patch_match + Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch". + rm -rf build/obj libpatchmatch.so + mkdir: created directory 'build/obj' + mkdir: created directory 'build/obj/csrc/' + [dep] csrc/masked_image.cpp ... + [dep] csrc/nnf.cpp ... + [dep] csrc/inpaint.cpp ... + [dep] csrc/pyinterface.cpp ... + [CC] csrc/pyinterface.cpp ... + [CC] csrc/inpaint.cpp ... + [CC] csrc/nnf.cpp ... + [CC] csrc/masked_image.cpp ... + [link] libpatchmatch.so ... + ``` + + If you're not seeing any errors, you're ready to go! 
+ diff --git a/docs/src/content/docs/contributing/code-of-conduct.md b/docs/src/content/docs/contributing/code-of-conduct.md new file mode 100644 index 00000000000..8ada3a81b9b --- /dev/null +++ b/docs/src/content/docs/contributing/code-of-conduct.md @@ -0,0 +1,130 @@ +--- +title: Code of Conduct +--- + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair 
corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported to the community leaders responsible for enforcement +at https://github.com/invoke-ai/InvokeAI/issues. All complaints will +be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. 
No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
diff --git a/docs/src/content/docs/contributing/contributors.md b/docs/src/content/docs/contributing/contributors.md new file mode 100644 index 00000000000..eb57feb295e --- /dev/null +++ b/docs/src/content/docs/contributing/contributors.md @@ -0,0 +1,54 @@ +--- +title: Contributors +--- + +We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contributors) for their time and hard work! + +## Original Author + +- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com) + +## Current Core Team + +- [@lstein](https://github.com/lstein) (Lincoln Stein) - Co-maintainer +- [@blessedcoolant](https://github.com/blessedcoolant) - Co-maintainer +- [@hipsterusername](https://github.com/hipsterusername) (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes +- [@psychedelicious](https://github.com/psychedelicious) (Spencer Mabrito) - Web Team Leader +- [@joshistoast](https://github.com/joshistoast) (Josh Corbett) - Web Development +- [@cheerio](https://github.com/cheerio) (Mary Rogers) - Lead Engineer & Web App Development +- [@ebr](https://github.com/ebr) (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler +- [@sunija](https://github.com/sunija) - Standalone version +- [@brandon](https://github.com/brandon) (Brandon Rising) - Platform, Infrastructure, Backend Systems +- [@ryanjdick](https://github.com/ryanjdick) (Ryan Dick) - Machine Learning & Training +- [@JPPhoto](https://github.com/JPPhoto) - Core image generation nodes +- [@dunkeroni](https://github.com/dunkeroni) - Image generation backend +- [@SkunkWorxDark](https://github.com/SkunkWorxDark) - Image generation backend +- [@glimmerleaf](https://github.com/glimmerleaf) (Devon Hopkins) - Community Wizard +- [@gogurt](https://github.com/gogurt) enjoyer - Discord moderator and end user support +- [@whosawhatsis](https://github.com/whosawhatsis) - Discord moderator and end user support +- [@dwringer](https://github.com/dwringer) - Discord moderator and end user support 
+- [@526christian](https://github.com/526christian) - Discord moderator and end user support +- [@harvester62](https://github.com/harvester62) - Discord moderator and end user support + +## Honored Team Alumni + +- [@StAlKeR7779](https://github.com/StAlKeR7779) (Sergey Borisov) - Torch stack, ONNX, model management, optimization +- [@damian0815](https://github.com/damian0815) - Attention Systems and Compel Maintainer +- [@netsvetaev](https://github.com/netsvetaev) (Artur) - Localization support +- [@Kyle0654](https://github.com/Kyle0654) (Kyle Schouviller) - Node Architect and General Backend Wizard +- [@tildebyte](https://github.com/tildebyte) - Installation and configuration +- [@mauwii](https://github.com/mauwii) (Matthias Wilde) - Installation, release, continuous integration +- [@chainchompa](https://github.com/chainchompa) (Jennifer Player) - Web Development & Chain-Chomping +- [@millu](https://github.com/millu) (Millun Atluri) - Community Wizard, Documentation, Node-wrangler, +- [@genomancer](https://github.com/genomancer) (Gregg Helt) - Controlnet support +- [@keturn](https://github.com/keturn) (Kevin Turner) - Diffusers + +## Original CompVis (Stable Diffusion) Authors + +- [Robin Rombach](https://github.com/rromb) +- [Patrick von Platen](https://github.com/patrickvonplaten) +- [ablattmann](https://github.com/ablattmann) +- [Patrick Esser](https://github.com/pesser) +- [owenvincent](https://github.com/owenvincent) +- [apolinario](https://github.com/apolinario) +- [Charles Packer](https://github.com/cpacker) diff --git a/docs/src/content/docs/contributing/index.md b/docs/src/content/docs/contributing/index.md new file mode 100644 index 00000000000..4a81b8ca125 --- /dev/null +++ b/docs/src/content/docs/contributing/index.md @@ -0,0 +1,56 @@ +--- +title: Contributing to InvokeAI +sidebar: + order: 1 +--- + +Invoke originated as a project built by the community, and that vision carries forward today as we aim to build the best pro-grade tools available. 
We work together to incorporate the latest in AI/ML research, making these tools available in over 20 languages to artists and creatives around the world as part of our fully permissive OSS project designed for individual users to self-host and use. + +We welcome contributions, whether features, bug fixes, code cleanup, testing, code reviews, documentation or translation. Please check in with us before diving in to code to ensure your work aligns with our vision. + +## Development + +If you’d like to help with development, please see our [development guide](contribution_guides/development.md). + +**New Contributors:** If you’re unfamiliar with contributing to open source projects, take a look at our [new contributor guide](contribution_guides/newContributorChecklist.md). + +## Nodes + +If you’d like to add a Node, please see our [nodes contribution guide](../nodes/contributingNodes.md). + +## Support and Triaging + +Helping support other users in [Discord](https://discord.gg/ZmtBAhwWhy) and on Github are valuable forms of contribution that we greatly appreciate. + +We receive many issues and requests for help from users. We're limited in bandwidth relative to our the user base, so providing answers to questions or helping identify causes of issues is very helpful. By doing this, you enable us to spend time on the highest priority work. + +## Documentation + +If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documentation.md). + +## Translation + +If you'd like to help with translation, please see our [translation guide](contribution_guides/translation.md). + +## Tutorials + +Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI. + +## Contributors + +This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort. 
+ +## Code of Conduct + +The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment. + +By making a contribution to this project, you certify that: + +1. The contribution was created in whole or in part by you and you have the right to submit it under the open-source license indicated in this project’s GitHub repository; or +2. The contribution is based upon previous work that, to the best of your knowledge, is covered under an appropriate open-source license and you have the right under that license to submit that work with modifications, whether created in whole or in part by you, under the same open-source license (unless you are permitted to submit under a different license); or +3. The contribution was provided directly to you by some other person who certified (1) or (2) and you have not modified it; or +4. You understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information you submit with it, including your sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open-source license(s) involved. + +This disclaimer is not a license and does not grant any rights or permissions. You must obtain necessary permissions and licenses, including from third parties, before contributing to this project. + +This disclaimer is provided "as is" without warranty of any kind, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, or non-infringement. 
In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the contribution or the use or other dealings in the contribution. diff --git a/docs/src/content/docs/contributing/new-contributor-guide.mdx b/docs/src/content/docs/contributing/new-contributor-guide.mdx new file mode 100644 index 00000000000..98de484fbd0 --- /dev/null +++ b/docs/src/content/docs/contributing/new-contributor-guide.mdx @@ -0,0 +1,97 @@ +--- +title: New Contributor Guide +lastUpdated: 2026-02-19 +--- + +import { Steps, LinkCard } from '@astrojs/starlight/components'; + +If you're a new contributor to InvokeAI or Open Source Projects, this is the guide for you. + +## New Contributor Checklist + +- [ ] Set up your local development environment & fork of InvokAI by following [the steps outlined here](/development/setup/dev-environment/#initial-setup) +- [ ] Set up your local tooling with [this guide](../LOCAL_DEVELOPMENT.md). Feel free to skip this step if you already have tooling you're comfortable with. +- [ ] Familiarize yourself with [Git](https://www.atlassian.com/git) & our project structure by reading through the [development documentation](development.md) +- [ ] Join the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord +- [ ] Choose an issue to work on! This can be achieved by asking in the #dev-chat channel, tackling a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) or finding an item on the [roadmap](https://github.com/orgs/invoke-ai/projects/7). If nothing in any of those places catches your eye, feel free to work on something of interest to you! +- [ ] Make your first Pull Request with the guide below +- [ ] Happy development! Don't be afraid to ask for help - we're happy to help you contribute! + +## How do I make a contribution? 
+ +Never made an open source contribution before? Wondering how contributions work in our project? Here's a quick rundown! + +Before starting these steps, ensure you have your local environment [configured for development](../LOCAL_DEVELOPMENT.md). + + + 1. Find a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) that you are interested in addressing or a feature that you would like to add. Then, reach out to our team in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord to ensure you are setup for success. + + 2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**. + + 3. Clone the repository to your local machine using: + + ```bash + git clone https://github.com/your-GitHub-username/InvokeAI.git + ``` + + If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is a easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface. 4. Create a new branch for your fix using: + + ```bash + git checkout -b branch-name-here + ``` + + 5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add. + + 6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index: + + ```bash + git add -A + ``` + + 7. Store the contents of the index with a descriptive message. + + ```bash + git commit -m "Insert a short message of the changes made here" + ``` + + 8. Push the changes to the remote repository using + + ```bash + git push origin branch-name-here + ``` + + 9. Submit a pull request to the **main** branch of the InvokeAI repository. 
If you're not sure how to, [follow this guide](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) + + 10. Title the pull request with a short description of the changes made and the issue or bug number associated with your change. For example, you can title an issue like so "Added more log outputting to resolve #1234". + + 11. In the description of the pull request, explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. It's OK if your pull request is not perfect (no pull request is), the reviewer will be able to help you fix any problems and improve it! + + 12. Wait for the pull request to be reviewed by other collaborators. + + 13. Make changes to the pull request if the reviewer(s) recommend them. + + 14. Celebrate your success after your pull request is merged! + + + + +:::tip[Best Practices] + +- Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged. +- Comments! Commenting your code helps reviewers easily understand your contribution. +- Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development. +- Make all communications public. This ensure knowledge is shared with the whole community. + +::: + +## **Where can I go for help?** + +If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord. + +For frontend related work, **@pyschedelicious** is the best person to reach out to. + +For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@pyschedelicious**. 
diff --git a/docs/src/content/docs/development/Architecture/assets/resize_invocation.png b/docs/src/content/docs/development/Architecture/assets/resize_invocation.png new file mode 100644 index 00000000000..a78f8eb86a3 Binary files /dev/null and b/docs/src/content/docs/development/Architecture/assets/resize_invocation.png differ diff --git a/docs/src/content/docs/development/Architecture/assets/resize_node_editor.png b/docs/src/content/docs/development/Architecture/assets/resize_node_editor.png new file mode 100644 index 00000000000..d121ba1aa6d Binary files /dev/null and b/docs/src/content/docs/development/Architecture/assets/resize_node_editor.png differ diff --git a/docs/src/content/docs/development/Architecture/invocations.mdx b/docs/src/content/docs/development/Architecture/invocations.mdx new file mode 100644 index 00000000000..440af048f53 --- /dev/null +++ b/docs/src/content/docs/development/Architecture/invocations.mdx @@ -0,0 +1,425 @@ +--- +title: Invocations +lastUpdated: 2026-02-18 +--- + +import { FileTree, Code, Steps } from '@astrojs/starlight/components' + +# Nodes + +Features in InvokeAI are added in the form of modular nodes systems called +**Invocations**. + +An Invocation is simply a single operation that takes in some inputs and gives +out some outputs. We can then chain multiple Invocations together to create more +complex functionality. + +## Invocations Directory + +InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These +can be used as examples to create your own nodes. + +New nodes should be added to a subfolder in `nodes` direction found at the root +level of the InvokeAI installation location. Nodes added to this folder will be +able to be used upon application startup. 
+ +Example `nodes` subfolder structure: + + + - nodes + - `__init__.py` Invoke-managed custom node loader + - cool_node + - `__init__.py` see example below + - cool_node.py + - my_node_pack + - `__init__.py` see example below + - tasty_node.py + - bodacious_node.py + - utils.py + - extra_nodes + - fancy_node.py + + +Each node folder must have an `__init__.py` file that imports its nodes. Only +nodes imported in the `__init__.py` file are loaded. See the README in the nodes +folder for more examples: + +```py title="__init__.py" +from .cool_node import ResizeInvocation +```` + +## Creating A New Invocation + +In order to understand the process of creating a new Invocation, let us actually +create one. + +In our example, let us create an Invocation that will take in an image, resize +it and output the resized image. + +The first set of things we need to do when creating a new Invocation are - + + + 1. Create a new class that derives from a predefined parent class called `BaseInvocation`. + 2. Every Invocation must have a `docstring` that describes what this Invocation does. + 3. While not strictly required, we suggest every invocation class name ends in "Invocation", eg "CropImageInvocation". + 4. Every Invocation must use the `@invocation` decorator to provide its unique invocation type. You may provide its title, tags and category using the decorator. + 5. Invocations are strictly typed. We make use of the native [typing](https://docs.python.org/3/library/typing.html) library and the installed [pydantic](https://pydantic-docs.helpmanual.io/) library for validation. + + +So let us do that. + +```py title="resize.py" +from invokeai.invocation_api import ( + BaseInvocation, + invocation, +) + +@invocation('resize') +class ResizeInvocation(BaseInvocation): + '''Resizes an image''' +``` + +That's great. + +Now we have setup the base of our new Invocation. Let us think about what inputs +our Invocation takes. + +- We need an `image` that we are going to resize. 
- We will need new `width` and `height` values to which we need to resize the
  image.
Now let us create our other inputs for `width` and `height` + +```py title="resize.py" +from invokeai.invocation_api import ( + BaseInvocation, + ImageField, + InputField, + invocation, +) + +@invocation('resize') +class ResizeInvocation(BaseInvocation): + + # Inputs + image: ImageField = InputField(description="The input image") + width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") + height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") +``` + +As you might have noticed, we added two new arguments to the `InputField` +definition for `width` and `height`, called `gt` and `le`. They stand for +_greater than or equal to_ and _less than or equal to_. + +These impose constraints on those fields, and will raise an exception if the +values do not meet the constraints. Field constraints are provided by +**pydantic**, so anything you see in the **pydantic docs** will work. + +**Note:** _Any time it is possible to define constraints for our field, we +should do it so the frontend has more information on how to parse this field._ + +Perfect. We now have our inputs. Let us do something with these. + +### Invoke Function + +The `invoke` function is where all the magic happens. This function provides you +the `context` parameter that is of the type `InvocationContext` which will give +you access to the current context of the generation and all the other services +that are provided by it by InvokeAI. + +Let us create this function first. 
+ +```py title="resize.py" +from invokeai.invocation_api import ( + BaseInvocation, + ImageField, + InputField, + InvocationContext, + invocation, +) + +@invocation('resize') +class ResizeInvocation(BaseInvocation): + '''Resizes an image''' + + image: ImageField = InputField(description="The input image") + width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") + height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") + + def invoke(self, context: InvocationContext): + pass +``` + +### Outputs + +The output of our Invocation will be whatever is returned by this `invoke` +function. Like with our inputs, we need to strongly type and define our outputs +too. + +What is our output going to be? Another image. Normally you'd have to create a +type for this but InvokeAI already offers you an `ImageOutput` type that handles +all the necessary info related to image outputs. So let us use that. + +We will cover how to create your own output types later in this guide. + +```py title="resize.py" +from invokeai.invocation_api import ( + BaseInvocation, + ImageField, + InputField, + InvocationContext, + invocation, +) + +from invokeai.app.invocations.image import ImageOutput + +@invocation('resize') +class ResizeInvocation(BaseInvocation): + '''Resizes an image''' + + image: ImageField = InputField(description="The input image") + width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") + height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") + + def invoke(self, context: InvocationContext) -> ImageOutput: + pass +``` + +Perfect. Now that we have our Invocation setup, let us do what we want to do. + +- We will first load the image using one of the services provided by InvokeAI to + load the image. +- We will resize the image using `PIL` to our input data. +- We will output this image in the format we set above. + +So let's do that. 
+ +```py title="resize.py" +from invokeai.invocation_api import ( + BaseInvocation, + ImageField, + InputField, + InvocationContext, + invocation, +) + +from invokeai.app.invocations.image import ImageOutput + +@invocation("resize") +class ResizeInvocation(BaseInvocation): + """Resizes an image""" + + image: ImageField = InputField(description="The input image") + width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") + height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") + + def invoke(self, context: InvocationContext) -> ImageOutput: + # Load the input image as a PIL image + image = context.images.get_pil(self.image.image_name) + + # Resize the image + resized_image = image.resize((self.width, self.height)) + + # Save the image + image_dto = context.images.save(image=resized_image) + + # Return an ImageOutput + return ImageOutput.build(image_dto) +``` + +**Note:** Do not be overwhelmed by the `ImageOutput` process. InvokeAI has a +certain way that the images need to be dispatched in order to be stored and read +correctly. In 99% of the cases when dealing with an image output, you can simply +copy-paste the template above. + +### Customization + +We can use the `@invocation` decorator to provide some additional info to the +UI, like a custom title, tags and category. + +We also encourage providing a version. This must be a +[semver](https://semver.org/) version string ("`$MAJOR`.`$MINOR`.`$PATCH`"). The UI +will let users know if their workflow is using a mismatched version of the node. + +```py title="resize.py" +@invocation("resize", title="My Resizer", tags=["resize", "image"], category="My Invocations", version="1.0.0") +class ResizeInvocation(BaseInvocation): + """Resizes an image""" + + image: ImageField = InputField(description="The input image") + + # Rest of the code +``` + +That's it. You made your own **Resize Invocation**. 
+ +## Result + +Once you make your Invocation correctly, the rest of the process is fully +automated for you. + +When you launch InvokeAI, you can go to `http://localhost:9090/docs` and see +your new Invocation show up there with all the relevant info. + +![resize invocation](./assets/resize_invocation.png) + +When you launch the frontend UI, you can go to the Node Editor tab and find your +new Invocation ready to be used. + +![resize node editor](./assets/resize_node_editor.png) + +## Contributing Nodes + +Once you've created a Node, the next step is to share it with the community! The +best way to do this is to submit a Pull Request to add the Node to the +[Community Nodes](../nodes/communityNodes.md) list. If you're not sure how to do that, +take a look a at our [contributing nodes overview](../nodes/contributingNodes.md). + +## Advanced + +### Custom Output Types + +Like with custom inputs, sometimes you might find yourself needing custom +outputs that InvokeAI does not provide. We can easily set one up. + +Now that you are familiar with Invocations and Inputs, let us use that knowledge +to create an output that has an `image` field, a `color` field and a `string` +field. + +- An invocation output is a class that derives from the parent class of + `BaseInvocationOutput`. +- All invocation outputs must use the `@invocation_output` decorator to provide + their unique output type. +- Output fields must use the provided `OutputField` function. This is very + similar to the `InputField` function described earlier - it's a wrapper around + `pydantic`'s `Field()`. +- It is not mandatory but we recommend using names ending with `Output` for + output types. +- It is not mandatory but we highly recommend adding a `docstring` to describe + what your output type is for. + +Now that we know the basic rules for creating a new output type, let us go ahead +and make it. 
+ +```py title="custom_output.py" +from .baseinvocation import BaseInvocationOutput, OutputField, invocation_output +from .primitives import ImageField, ColorField + +@invocation_output('image_color_string_output') +class ImageColorStringOutput(BaseInvocationOutput): + '''Base class for nodes that output a single image''' + + image: ImageField = OutputField(description="The image") + color: ColorField = OutputField(description="The color") + text: str = OutputField(description="The string") +``` + +That's all there is to it. + +### Custom Input Fields + +Now that you know how to create your own Invocations, let us dive into slightly +more advanced topics. + +While creating your own Invocations, you might run into a scenario where the +existing fields in InvokeAI do not meet your requirements. In such cases, you +can create your own fields. + +Let us create one as an example. Let us say we want to create a color input +field that represents a color code. But before we start on that here are some +general good practices to keep in mind. + +### Best Practices + +- There is no naming convention for input fields, but we highly recommend that + you name it something appropriate like `ColorField`. +- It is not mandatory but it is heavily recommended to add a relevant + `docstring` to describe your field. +- Keep your field in the same file as the Invocation that it is made for, or in + another file where it is relevant. + +All input types are a class that derive from the `BaseModel` type from `pydantic`. +So let's create one. + +```py title="color_field.py" + from pydantic import BaseModel + + class ColorField(BaseModel): + '''A field that holds the rgba values of a color''' + pass +``` + +Perfect. Now let us create the properties for our field. This is similar to how +you created input fields for your Invocation. All the same rules apply. Let us +create four fields representing the _red(r)_, _blue(b)_, _green(g)_ and +_alpha(a)_ channel of the color. 
+ +:::note + Technically, the properties are _also_ called fields - but in this case, it refers to a `pydantic` field. +::: + +```py title="color_field.py" +class ColorField(BaseModel): + '''A field that holds the rgba values of a color''' + r: int = Field(ge=0, le=255, description="The red channel") + g: int = Field(ge=0, le=255, description="The green channel") + b: int = Field(ge=0, le=255, description="The blue channel") + a: int = Field(ge=0, le=255, description="The alpha channel") +``` + +That's it. We now have a new input field type that we can use in our Invocations +like this. + +```python +color: ColorField = InputField(default=ColorField(r=0, g=0, b=0, a=0), description='Background color of an image') +``` + +### Using the custom field + +When you start the UI, your custom field will be automatically recognized. + +Custom fields only support connection inputs in the Workflow Editor. diff --git a/docs/src/content/docs/development/Architecture/model-manager.mdx b/docs/src/content/docs/development/Architecture/model-manager.mdx new file mode 100644 index 00000000000..ca7884482f6 --- /dev/null +++ b/docs/src/content/docs/development/Architecture/model-manager.mdx @@ -0,0 +1,1198 @@ +--- +title: Introduction to the Model Manager +sidebar: + label: Model Manager +lastUpdated: 2026-02-18 +--- + +import { FileTree, Code, Steps } from '@astrojs/starlight/components'; + +The Model Manager is responsible for organizing the various machine +learning models used by InvokeAI. It consists of a series of +interdependent services that together handle the full lifecycle of a +model. These are the: + +- **ModelRecordServiceBase:** Responsible for managing model metadata and configuration information. Among other things, the record service tracks the type of the model, its provenance, and where it can be found on disk. +- **ModelInstallServiceBase:** A service for installing models to disk. 
It uses `DownloadQueueServiceBase` to download models and their metadata, and `ModelRecordServiceBase` to store that information. It is also responsible for managing the InvokeAI `models` directory and its contents. +- **DownloadQueueServiceBase:** A multithreaded downloader responsible for downloading models from a remote source to disk. The download queue has special methods for downloading repo_id folders from Hugging Face, as well as discriminating among model versions in Civitai, but can be used for arbitrary content. + - **ModelLoadServiceBase** Responsible for loading a model from disk into RAM and VRAM and getting it ready for inference. + + + ## Location of the Code + + The four main services can be found in `invokeai/app/services` in the following directories: + + +- invokeai + - app + - services Model manager services + - model_records/ + - model_install/ + - downloads/ + - model_load/ + - api + - routers + - model_manager_v2.py FastAPI web API for model management + + +--- + +## What's in a Model? The ModelRecordService + +The `ModelRecordService` manages the model's metadata. It supports a hierarchy of pydantic metadata "config" objects, which become increasingly specialized to support particular model types. + +### ModelConfigBase + +All model metadata classes inherit from this pydantic class. it provides the following fields: + +| **Field Name** | **Type** | **Description** | +|----------------|-----------------|------------------| +| `key` | str | Unique identifier for the model | +| `name` | str | Name of the model (not unique) | +| `model_type` | ModelType | The type of the model | +| `model_format` | ModelFormat | The format of the model (e.g. 
"diffusers"); also used as a Union discriminator | +| `base_model` | BaseModelType | The base model that the model is compatible with | +| `path` | str | Location of model on disk | +| `hash` | str | Hash of the model | +| `description` | str | Human-readable description of the model (optional) | +| `source` | str | Model's source URL or repo id (optional) | + +The `key` is a unique 32-character random ID which was generated at install time. The `hash` field stores a hash of the model's contents at install time obtained by sampling several parts of the model's files using the `imohash` library. Over the course of the model's lifetime it may be transformed in various ways, such as changing its precision or converting it from a .safetensors to a diffusers model. + +The `path` field can be absolute or relative. If relative, it is taken to be relative to the `models_dir` setting in the user's `invokeai.yaml` file. + +`ModelType`, `ModelFormat` and `BaseModelType` are string enums that are defined in `invokeai.backend.model_manager.config`. They are also imported by, and can be reexported from, `invokeai.app.services.model_manager.model_records`: + +```py title="invokeai.backend.model_manager.config" +from invokeai.app.services.model_records import ModelType, ModelFormat, BaseModelType +```` + +### CheckpointConfig + +This adds support for checkpoint configurations, and adds the +following field: + +| **Field Name** | **Type** | **Description** | +|----------------|-----------------|------------------| +| `config` | str | Path to the checkpoint's config file | + +`config` is the path to the checkpoint's config file. If relative, it is taken to be relative to the InvokeAI root directory (e.g. 
`configs/stable-diffusion/v1-inference.yaml`) + +### MainConfig + +This adds support for "main" Stable Diffusion models, and adds these fields: + +| **Field Name** | **Type** | **Description** | +|----------------|-----------------|------------------| +| `vae` | str | Path to a VAE to use instead of the burnt-in one | +| `variant` | ModelVariantType| Model variant type, such as "inpainting" | + +`vae` can be an absolute or relative path. If relative, its base is taken to be the `models_dir` directory. + +`variant` is an enumerated string class with values `normal`, `inpaint` and `depth`. If needed, it can be imported if needed from either `invokeai.app.services.model_records` or `invokeai.backend.model_manager.config`. + +### ONNXSD2Config + +| **Field Name** | **Type** | **Description** | +|----------------|-----------------|------------------| +| `prediction_type` | SchedulerPredictionType | Scheduler prediction type to use, e.g. "epsilon" | +| `upcast_attention` | bool | Model requires its attention module to be upcast | + +The `SchedulerPredictionType` enum can be imported from either `invokeai.app.services.model_records` or `invokeai.backend.model_manager.config`. + +### Other config classes + +There are a series of such classes each discriminated by their `ModelFormat`, including `LoRAConfig`, `IPAdapterConfig`, and so forth. These are rarely needed outside the model manager's internal code, but available in `invokeai.backend.model_manager.config` if needed. There is also a Union of all ModelConfig classes, called `AnyModelConfig` that can be imported from the same file. + +### Limitations of the Data Model + +The config hierarchy has a major limitation in its handling of the base model type. Each model can only be compatible with one base model, which breaks down in the event of models that are compatible with two or more base models. For example, SD-1 VAEs also work with SD-2 models. 
A partial workaround is to use `BaseModelType.Any`, which indicates that the model is compatible with any of the base models. This works OK for some models, such as the IP Adapter image encoders, but is an all-or-nothing proposition.
+
+## Reading and Writing Model Configuration Records
+
+The `ModelRecordService` provides the ability to retrieve model configuration records from SQL or YAML databases, update them, and write them back.
+
+An application-wide `ModelRecordService` is created during API initialization and can be retrieved within an invocation from the `InvocationContext` object:
+
+```py
+store = context.services.model_manager.store
+```
+
+or from elsewhere in the code by accessing `ApiDependencies.invoker.services.model_manager.store`.
+
+### Creating a `ModelRecordService`
+
+To create a new `ModelRecordService` database or open an existing one, you can directly create either a `ModelRecordServiceSQL` or a `ModelRecordServiceFile` object:
+
+```py
+from invokeai.app.services.model_records import ModelRecordServiceSQL, ModelRecordServiceFile
+
+store = ModelRecordServiceSQL.from_connection(connection, lock)
+store = ModelRecordServiceSQL.from_db_file('/path/to/sqlite_database.db')
+store = ModelRecordServiceFile.from_db_file('/path/to/database.yaml')
+```
+
+The `from_connection()` form is only available from the `ModelRecordServiceSQL` class, and is used to manage records in a previously-opened SQLITE3 database using a `sqlite3.connection` object and a `threading.lock` object. It is intended for the specific use case of storing the record information in the main InvokeAI database, usually `databases/invokeai.db`.
+
+The `from_db_file()` methods can be used to open new connections to the named database files. If the file doesn't exist, it will be created and initialized.
+
+As a convenience, `ModelRecordServiceBase` offers two methods, `from_db_file` and `open`, which will return either a SQL or File implementation depending on the context. 
The former looks at the file extension to determine whether to open the file as a SQL database (".db") or as a file database (".yaml"). If the file exists, but is either the wrong type or does not contain the expected schema metainformation, then an appropriate `AssertionError` will be raised: + +```py +store = ModelRecordServiceBase.from_db_file('/path/to/a/file.{yaml,db}') +``` + +The `ModelRecordServiceBase.open()` method is specifically designed for use in the InvokeAI web server. Its signature is: + +```py +def open( + cls, + config: InvokeAIAppConfig, + conn: Optional[sqlite3.Connection] = None, + lock: Optional[threading.Lock] = None + ) -> Union[ModelRecordServiceSQL, ModelRecordServiceFile]: +``` + +The way it works is as follows: + +1. Retrieve the value of the `model_config_db` option from the user's `invokeai.yaml` config file. +2. If `model_config_db` is `auto` (the default), then: + - Use the values of `conn` and `lock` to return a `ModelRecordServiceSQL` object opened on the passed connection and lock. + - Open up a new connection to `databases/invokeai.db` if `conn` and/or `lock` are missing (see note below). +3. If `model_config_db` is a Path, then use `from_db_file` to return the appropriate type of ModelRecordService. +4. If `model_config_db` is None, then retrieve the legacy `conf_path` option from `invokeai.yaml` and use the Path indicated there. This will default to `configs/models.yaml`. + +So a typical startup pattern would be: + +```py +import sqlite3 +from invokeai.app.services.thread import lock +from invokeai.app.services.model_records import ModelRecordServiceBase +from invokeai.app.services.config import InvokeAIAppConfig + +config = InvokeAIAppConfig.get_config() +db_conn = sqlite3.connect(config.db_path.as_posix(), check_same_thread=False) +store = ModelRecordServiceBase.open(config, db_conn, lock) +``` + +### Fetching a Model's Configuration from `ModelRecordServiceBase` + +Configurations can be retrieved in several ways. 
+
+#### get_model(key) -> AnyModelConfig
+
+The basic functionality is to call the record store object's `get_model()` method with the desired model's unique key. It returns the appropriate subclass of ModelConfigBase:
+
+```py
+model_conf = store.get_model('f13dd932c0c35c22dcb8d6cda4203764')
+print(model_conf.path)
+
+>> '/tmp/models/ckpts/v1-5-pruned-emaonly.safetensors'
+
+```
+
+If the key is unrecognized, this call raises an `UnknownModelException`.
+
+#### exists(key) -> bool
+
+Returns True if a model with the given key exists in the database.
+
+#### search_by_path(path) -> AnyModelConfig
+
+Returns the configuration of the model whose path is `path`. The path is matched using a simple string comparison and won't correctly match models referred to by different paths (e.g. using symbolic links).
+
+#### search_by_name(name, base, type) -> List[AnyModelConfig]
+
+This method searches for models that match some combination of `name`, `BaseType` and `ModelType`. Calling without any arguments will return all the models in the database.
+
+#### all_models() -> List[AnyModelConfig]
+
+Return all the model configs in the database. Exactly equivalent to calling `search_by_name()` with no arguments.
+
+#### search_by_tag(tags) -> List[AnyModelConfig]
+
+`tags` is a list of strings. This method returns a list of model configs that contain all of the given tags. Examples:
+
+```py
+# find all models that are marked as both SFW and as generating
+# background scenery
+configs = store.search_by_tag(['sfw', 'scenery'])
+```
+
+Note that only tags are searchable in this way. 
Other fields can be searched using a filter: + +```py +commercializable_models = [x for x in store.all_models() \ + if x.license.contains('allowCommercialUse=Sell')] +``` + +#### version() -> str + +Returns the version of the database, currently at `3.2` + +#### model_info_by_name(name, base_model, model_type) -> ModelConfigBase + +This method exists to ease the transition from the previous version of the model manager, in which `get_model()` took the three arguments shown above. This looks for a unique model identified by name, base model and model type and returns it. + +The method will generate a `DuplicateModelException` if there are more than one models that share the same type, base and name. While unlikely, it is certainly possible to have a situation in which the user had added two models with the same name, base and type, one located at path `/foo/my_model` and the other at `/bar/my_model`. It is strongly recommended to search for models using `search_by_name()`, which can return multiple results, and then to select the desired model and pass its key to `get_model()`. + +### Writing model configs to the database + +Several methods allow you to create and update stored model config records. + +#### add_model(key, config) -> AnyModelConfig + +Given a key and a configuration, this will add the model's configuration record to the database. `config` can either be a subclass of `ModelConfigBase` (i.e. any class listed in `AnyModelConfig`), or a `dict` of key/value pairs. In the latter case, the correct configuration class will be picked by Pydantic's discriminated union mechanism. + +If successful, the method will return the appropriate subclass of `ModelConfigBase`. It will raise a `DuplicateModelException` if a model with the same key is already in the database, or an `InvalidModelConfigException` if a dict was passed and Pydantic experienced a parse or validation error. 
+
+#### update_model(key, config) -> AnyModelConfig
+
+Given a key and a configuration, this will update the model configuration record in the database. `config` can be either an instance of `ModelConfigBase`, or a sparse `dict` containing the fields to be updated. This will return an `AnyModelConfig` on success, or raise `InvalidModelConfigException` or `UnknownModelException` exceptions on failure.
+
+---
+
+## Model installation
+
+The `ModelInstallService` class implements the
+`ModelInstallServiceBase` abstract base class, and provides a one-stop
+shop for all your model install needs. It provides the following
+functionality:
+
+- Registering a model config record for a model already located on the local filesystem, without moving it or changing its path.
+
+- Installing a model already located on the local filesystem, by moving it into the InvokeAI root directory under the `models` folder (or wherever config parameter `models_dir` specifies).
+
+- Probing of models to determine their type, base type and other key information.
+
+- Interface with the InvokeAI event bus to provide status updates on the download, installation and registration process.
+
+- Downloading a model from an arbitrary URL and installing it in `models_dir`.
+
+- Special handling for HuggingFace repo_ids to recursively download the contents of the repository, paying attention to alternative variants such as fp16.
+
+- Saving tags and other metadata about the model into the invokeai database when fetching from a repo that provides that type of information (currently only HuggingFace).
+
+### Initializing the installer
+
+A default installer is created at InvokeAI api startup time and stored in `ApiDependencies.invoker.services.model_install` and can also be retrieved from an invocation's `context` argument with `context.services.model_install`. 
+ +In the event you wish to create a new installer, you may use the following initialization pattern: + +```py +from invokeai.app.services.config import get_config +from invokeai.app.services.model_records import ModelRecordServiceSQL +from invokeai.app.services.model_install import ModelInstallService +from invokeai.app.services.download import DownloadQueueService +from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase +from invokeai.backend.util.logging import InvokeAILogger + +config = get_config() + +logger = InvokeAILogger.get_logger(config=config) +db = SqliteDatabase(config.db_path, logger) +record_store = ModelRecordServiceSQL(db, logger) +queue = DownloadQueueService() +queue.start() + +installer = ModelInstallService(app_config=config, + record_store=record_store, + download_queue=queue + ) +installer.start() +``` + +The full form of `ModelInstallService()` takes the following required parameters: + +| **Argument** | **Type** | **Description** | +|------------------|------------------------------|------------------------------| +| `app_config` | InvokeAIAppConfig | InvokeAI app configuration object | +| `record_store` | ModelRecordServiceBase | Config record storage database | +| `download_queue` | DownloadQueueServiceBase | Download queue object | +|`session` | Optional[requests.Session] | Swap in a different Session object (usually for debugging) | + +Once initialized, the installer will provide the following methods: + +#### install_job = installer.heuristic_import(source, [config], [access_token]) + +This is a simplified interface to the installer which takes a source string, an optional model configuration dictionary and an optional access token. + +The `source` is a string that can be any of these forms + +1. A path on the local filesystem (`C:\\users\\fred\\model.safetensors`) +2. A Url pointing to a single downloadable model file (`https://civitai.com/models/58390/detail-tweaker-lora-lora`) +3. 
A HuggingFace repo_id with any of the following formats: + * `model/name` -- entire model + * `model/name:fp32` -- entire model, using the fp32 variant + * `model/name:fp16:vae` -- vae submodel, using the fp16 variant + * `model/name::vae` -- vae submodel, using default precision + * `model/name:fp16:path/to/model.safetensors` -- an individual model file, fp16 variant + * `model/name::path/to/model.safetensors` -- an individual model file, default variant + +Note that by specifying a relative path to the top of the HuggingFace repo, you can download and install arbitrary models files. + +The variant, if not provided, will be automatically filled in with `fp32` if the user has requested full precision, and `fp16` otherwise. If a variant that does not exist is requested, then the method will install whatever HuggingFace returns as its default revision. + +`config` is an optional dict of values that will override the autoprobed values for model type, base, scheduler prediction type, and so forth. See [Model configuration and probing](#model-configuration-and-probing) for details. + +`access_token` is an optional access token for accessing resources that need authentication. + +The method will return a `ModelInstallJob`. This object is discussed at length in the following section. + +#### install_job = installer.import_model() + +The `import_model()` method is the core of the installer. 
The following illustrates basic usage:
+
+```py
+from invokeai.app.services.model_install import (
+    LocalModelSource,
+    HFModelSource,
+    URLModelSource,
+)
+
+source1 = LocalModelSource(path='/opt/models/sushi.safetensors')   # a local safetensors file
+source2 = LocalModelSource(path='/opt/models/sushi_diffusers')     # a local diffusers folder
+
+source3 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5')  # a repo_id
+source4 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', subfolder='vae')  # a subfolder within a repo_id
+source5 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', variant='fp16')   # a named variant of a HF model
+source6 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', subfolder='OrangeMix/OrangeMix1.ckpt')  # path to an individual model file
+
+source7 = URLModelSource(url='https://civitai.com/api/download/models/63006')       # model located at a URL
+source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', access_token='letmein') # with an access token
+
+sources = [source1, source2, source3, source4, source5, source6, source7, source8]
+for source in sources:
+    install_job = installer.import_model(source)
+
+source2job = installer.wait_for_installs(timeout=120)
+for source in sources:
+    job = source2job[source]
+    if job.complete:
+        model_config = job.config_out
+        model_key = model_config.key
+        print(f"{source} installed as {model_key}")
+    elif job.errored:
+        print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
+
+```
+
+As shown here, the `import_model()` method accepts a variety of sources, including local safetensors files, local diffusers folders, HuggingFace repo_ids with and without a subfolder designation, Civitai model URLs and arbitrary URLs that point to checkpoint files (but not to folders).
+
+Each call to `import_model()` returns a `ModelInstallJob` job, an object which tracks the progress of the install. 
+ +If a remote model is requested, the model's files are downloaded in parallel across a multiple set of threads using the download queue. During the download process, the `ModelInstallJob` is updated to provide status and progress information. After the files (if any) are downloaded, the remainder of the installation runs in a single serialized background thread. These are the model probing, file copying, and config record database update steps. + +Multiple install jobs can be queued up. You may block until all install jobs are completed (or errored) by calling the `wait_for_installs()` method as shown in the code example. `wait_for_installs()` will return a `dict` that maps the requested source to its job. This object can be interrogated to determine its status. If the job errored out, then the error type and details can be recovered from `job.error_type` and `job.error`. + +The full list of arguments to `import_model()` is as follows: + +| Argument | Type | Default | Description | +|----------|-------|---------|-------------| +| `source` | ModelSource | None | The source of the model, Path, URL or repo_id | +| `config` | Dict[str, Any] | None | Override all or a portion of model's probed attributes | + +The next few sections describe the various types of ModelSource that can be passed to `import_model()`. + +`config` can be used to override all or a portion of the configuration attributes returned by the model prober. See the section below for details. + +#### LocalModelSource + +This is used for a model that is located on a locally-accessible Posix filesystem, such as a local disk or networked fileshare. 
+
+| Argument | Type | Default | Description |
+|----------|------|---------|-------------|
+| `path` | str \| Path | None | Path to the model file or directory |
+| `inplace` | bool | False | If set, the model file(s) will be left in their location; otherwise they will be copied into the InvokeAI root's `models` directory |
+
+#### URLModelSource
+
+This is used for a single-file model that is accessible via a URL. The
+fields are:
+
+| Argument | Type | Default | Description |
+|----------|------|---------|-------------|
+| `url` | AnyHttpUrl | None | The URL for the model file. |
+| `access_token` | str | None | An access token needed to gain access to this file. |
+
+The `AnyHttpUrl` class can be imported from `pydantic.networks`.
+
+Ordinarily, no metadata is retrieved from these sources. However, there is special-case code in the installer that looks for HuggingFace URLs and fetches the model metadata from the corresponding repo.
+
+#### HFModelSource
+
+HuggingFace has the most complicated `ModelSource` structure:
+
+| Argument | Type | Default | Description |
+|----------|------|---------|-------------|
+| `repo_id` | str | None | The ID of the desired model. |
+| `variant` | ModelRepoVariant | ModelRepoVariant('fp16') | The desired variant. |
+| `subfolder` | Path | None | Look for the model in a subfolder of the repo. |
+| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |
+
+The `repo_id` is the repository ID, such as `stabilityai/sdxl-turbo`.
+
+The `variant` is one of the various diffusers formats that HuggingFace supports and is used to pick out, from the hodgepodge of files in a typical HuggingFace repository, the particular components needed for a complete diffusers model. 
`ModelRepoVariant` is an enum that can be imported from `invokeai.backend.model_manager` and has the following values: + +| Name | String Value | +|------|--------------| +| ModelRepoVariant.DEFAULT | "default" | +| ModelRepoVariant.FP16 | "fp16" | +| ModelRepoVariant.FP32 | "fp32" | +| ModelRepoVariant.ONNX | "onnx" | +| ModelRepoVariant.OPENVINO | "openvino" | +| ModelRepoVariant.FLAX | "flax" | + +You can also pass the string forms to `variant` directly. Note that InvokeAI may not be able to load and run all variants. At the current time, specifying `ModelRepoVariant.DEFAULT` will retrieve model files that are unqualified, e.g. `pytorch_model.safetensors` rather than `pytorch_model.fp16.safetensors`. These are usually the 32-bit safetensors forms of the model. + +If `subfolder` is specified, then the requested model resides in a subfolder of the main model repository. This is typically used to fetch and install VAEs. + +Some models require you to be registered with HuggingFace and logged in. To download these files, you must provide an `access_token`. Internally, if no access token is provided, then `HfFolder.get_token()` will be called to fill it in with the cached one. + +#### Monitoring the install job process + +When you create an install job with `import_model()`, it launches the download and installation process in the background and returns a `ModelInstallJob` object for monitoring the process. 
+ +The `ModelInstallJob` class has the following structure: + +| Attribute | Type | Description | +|-----------|------|-------------| +| `id` | `int` | Integer ID for this job | +| `status` | `InstallStatus` | An enum of [`waiting`, `downloading`, `running`, `completed`, `error` and `cancelled`] | +| `config_in` | `dict` | Overriding configuration values provided by the caller | +| `config_out` | `AnyModelConfig` | After successful completion, contains the configuration record written to the database | +| `inplace` | `boolean` | True if the caller asked to install the model in place using its local path | +| `source` | `ModelSource` | The local path, remote URL or repo_id of the model to be installed | +| `local_path` | `Path` | If a remote model, holds the path of the model after it is downloaded; if a local model, same as `source` | +| `error_type` | `str` | Name of the exception that led to an error status | +| `error` | `str` | Traceback of the error | + +If the `event_bus` argument was provided, events will also be broadcast to the InvokeAI event bus. The events will appear on the bus as an event of type `EventServiceBase.model_event`, a timestamp and the following event names: + +##### `model_install_downloading` + +For remote models only, `model_install_downloading` events will be issued at regular intervals as the download progresses. The event's payload contains the following keys: + +| Key | Type | Description | +|-----|------|-------------| +|`source`|str|String representation of the requested source| +|`local_path`|str|String representation of the path to the downloading model (usually a temporary directory)| +|`bytes`|int|How many bytes downloaded so far| +|`total_bytes`|int|Total size of all the files that make up the model| +|`parts`|List[Dict]|Information on the progress of the individual files that make up the model| + +The parts is a list of dictionaries that give information on each of the components pieces of the download. 
The dictionary's keys are `source`, `local_path`, `bytes` and `total_bytes`, and correspond to the like-named keys in the main event. + +Note that downloading events will not be issued for local models, and that downloading events occur _before_ the running event. + +##### `model_install_running` + +`model_install_running` is issued when all the required downloads have completed (if applicable) and the model probing, copying and registration process has now started. + +The payload will contain the key `source`. + +##### `model_install_completed` + +`model_install_completed` is issued once at the end of a successful installation. The payload will contain the keys `source`, `total_bytes` and `key`, where `key` is the ID under which the model has been registered. + +##### `model_install_error` + +`model_install_error` is emitted if the installation process fails for some reason. The payload will contain the keys `source`, `error_type` and `error`. `error_type` is a short message indicating the nature of the error, and `error` is the long traceback to help debug the problem. + +##### `model_install_cancelled` + +`model_install_cancelled` is issued if the model installation is cancelled, or if one or more of its files' downloads are cancelled. The payload will contain `source`. + +##### Following the model status + +You may poll the `ModelInstallJob` object returned by `import_model()` to ascertain the state of the install. The job status can be read from the job's `status` attribute, an `InstallStatus` enum which has the enumerated values `WAITING`, `DOWNLOADING`, `RUNNING`, `COMPLETED`, `ERROR` and `CANCELLED`. + +For convenience, install jobs also provided the following boolean properties: `waiting`, `downloading`, `running`, `complete`, `errored` and `cancelled`, as well as `in_terminal_state`. The last will return True if the job is in the complete, errored or cancelled states. 
+
+#### Model configuration and probing
+
+The install service uses the `invokeai.backend.model_manager.probe` module during import to determine the model's type, base type, and other configuration parameters. Among other things, it assigns a default name and description for the model based on probed fields.
+
+When downloading remote models is implemented, additional configuration information, such as list of trigger terms, will be retrieved from the HuggingFace and Civitai model repositories.
+
+The probed values can be overridden by providing a dictionary in the optional `config` argument passed to `import_model()`. You may provide overriding values for any of the model's configuration attributes. Here is an example of setting the `SchedulerPredictionType` and `name` for an sd-2 model:
+
+```py
+install_job = installer.import_model(
+               source=HFModelSource(repo_id='stabilityai/stable-diffusion-2-1', variant='fp32'),
+               config=dict(
+                        prediction_type=SchedulerPredictionType('v_prediction'),
+                        name='stable diffusion 2 base model',
+               )
+           )
+```
+
+### Other installer methods
+
+This section describes additional methods provided by the installer class.
+
+#### jobs = installer.wait_for_installs([timeout])
+
+Block until all pending installs are completed or errored and then returns a list of completed jobs. The optional `timeout` argument will return from the call if jobs aren't completed in the specified time. An argument of 0 (the default) will block indefinitely.
+
+#### jobs = installer.wait_for_job(job, [timeout])
+
+Like `wait_for_installs()`, but block until a specific job has completed or errored, and then return the job. The optional `timeout` argument will return from the call if the job doesn't complete in the specified time. An argument of 0 (the default) will block indefinitely.
+
+#### jobs = installer.list_jobs()
+
+Return a list of all active and complete `ModelInstallJobs`. 
+ +#### jobs = installer.get_job_by_source(source) + +Return a list of `ModelInstallJob` corresponding to the indicated model source. + +#### jobs = installer.get_job_by_id(id) + +Return a list of `ModelInstallJob` corresponding to the indicated model id. + +#### jobs = installer.cancel_job(job) + +Cancel the indicated job. + +#### installer.prune_jobs + +Remove jobs that are in a terminal state (i.e. complete, errored or cancelled) from the job list returned by `list_jobs()` and `get_job()`. + +#### installer.app_config, installer.record_store, installer.event_bus + +Properties that provide access to the installer's `InvokeAIAppConfig`, `ModelRecordServiceBase` and `EventServiceBase` objects. + +#### key = installer.register_path(model_path, config), key = installer.install_path(model_path, config) + +These methods bypass the download queue and directly register or install the model at the indicated path, returning the unique ID for the installed model. + +Both methods accept a Path object corresponding to a checkpoint or diffusers folder, and an optional dict of config attributes to use to override the values derived from model probing. + +The difference between `register_path()` and `install_path()` is that the former creates a model configuration record without changing the location of the model in the filesystem. The latter makes a copy of the model inside the InvokeAI models directory before registering it. 
+ +#### installer.unregister(key) + +This will remove the model config record for the model at key, and is equivalent to `installer.record_store.del_model(key)` + +#### installer.delete(key) + +This is similar to `unregister()` but has the additional effect of conditionally deleting the underlying model file(s) if they reside within the InvokeAI models directory + +#### installer.unconditionally_delete(key) + +This method is similar to `unregister()`, but also unconditionally deletes the corresponding model weights file(s), regardless of whether they are inside or outside the InvokeAI models hierarchy. + +#### path = installer.download_and_cache(remote_source, [access_token], [timeout]) + +This utility routine will download the model file located at source, cache it, and return the path to the cached file. It does not attempt to determine the model type, probe its configuration values, or register it with the models database. + +You may provide an access token if the remote source requires authorization. The call will block indefinitely until the file is completely downloaded, cancelled or raises an error of some sort. If you provide a timeout (in seconds), the call will raise a `TimeoutError` exception if the download hasn't completed in the specified period. + +You may use this mechanism to request any type of file, not just a model. The file will be stored in a subdirectory of `INVOKEAI_ROOT/models/.cache`. If the requested file is found in the cache, its path will be returned without redownloading it. + +Be aware that the models cache is cleared of infrequently-used files and directories at regular intervals when the size of the cache exceeds the value specified in Invoke's `convert_cache` configuration variable. + +#### installer.start(invoker) + +The `start` method is called by the API initialization routines when the API starts up. Its effect is to call `sync_to_config()` to synchronize the model record store database with what's currently on disk. 
+ +--- + +## Get on line: The Download Queue + +InvokeAI can download arbitrary files using a multithreaded background download queue. Internally, the download queue is used for installing models located at remote locations. The queue is implemented by the `DownloadQueueService` defined in `invokeai.app.services.download_manager`. However, most of the implementation is spread out among several files in `invokeai/backend/model_manager/download/*` + +A default download queue is located in `ApiDependencies.invoker.services.download_queue`. However, you can create additional instances if you need to isolate your queue from the main one. + +### A job for every task + +The queue operates on a series of download job objects. These objects specify the source and destination of the download, and keep track of the progress of the download. Jobs come in a variety of shapes and colors as they are progressively specialized for particular download task. + +The basic job is the `DownloadJobBase`, a pydantic object with the following fields: + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `id` | int | | Job ID, an integer >= 0 | +| `priority` | int | 10 | Job priority. Lower priorities run before higher priorities | +| `source` | str | | Where to download from (specialized types used in subclasses) | +| `destination` | Path | | Where to download to | +| `status` | DownloadJobStatus | Idle | Job's status (see below) | +| `event_handlers` | List[DownloadEventHandler] | | Event handlers (see below) | +| `job_started` | float | | Timestamp for when the job started running | +| `job_ended` | float | | Timestamp for when the job completed or errored out | +| `job_sequence` | int | | A counter that is incremented each time a model is dequeued | +| `error` | Exception | | A copy of the Exception that caused an error during download | + +When you create a job, you can assign it a `priority`. 
If multiple jobs are queued, the job with the lowest priority runs first. (Don't blame us! The Unix developers came up with this convention.)
+
+Every job has a `source` and a `destination`. `source` is a string in the base class, but subclasses redefine it more specifically.
+
+The `destination` must be the Path to a file or directory on the local filesystem. If the Path points to a new or existing file, then the source will be stored under that filename. If the Path points to an existing directory, then the downloaded file will be stored inside the directory, usually using the name assigned to it at the remote site in the `content-disposition` http field.
+
+When the job is submitted, it is assigned a numeric `id`. The id can then be used to control the job, such as starting, stopping and cancelling its download.
+
+The `status` field is updated by the queue to indicate where the job is in its lifecycle. Values are defined in the string enum `DownloadJobStatus`, a symbol available from `invokeai.app.services.download_manager`. Possible values are:
+
+| Value | String Value | Description |
+|-------|--------------|-------------|
+| `IDLE` | idle | Job created, but not submitted to the queue |
+| `ENQUEUED` | enqueued | Job is patiently waiting on the queue |
+| `RUNNING` | running | Job is running! |
+| `PAUSED` | paused | Job was paused and can be restarted |
+| `COMPLETED` | completed | Job has finished its work without an error |
+| `ERROR` | error | Job encountered an error and will not run again |
+| `CANCELLED` | cancelled | Job was cancelled and will not run (again) |
+
+`job_started`, `job_ended` and `job_sequence` indicate when the job was started (using a python timestamp), when it completed, and the order in which it was taken off the queue. These are mostly used for debugging and performance testing. 
+ +In case of an error, the Exception that caused the error will be placed in the `error` field, and the job's status will be set to `DownloadJobStatus.ERROR`. + +After an error occurs, any partially downloaded files will be deleted from disk, unless `preserve_partial_downloads` was set to True at job creation time (or set to True any time before the error occurred). Note that since all InvokeAI model install operations involve downloading files to a temporary directory that has a limited lifetime, this flag is not used by the model installer. + +There are a series of subclasses of `DownloadJobBase` that provide support for specific types of downloads. These are: + +#### DownloadJobPath + +This subclass redefines `source` to be a filesystem Path. It is used to move a file or directory from the `source` to the `destination` paths in the background using a uniform event-based infrastructure. + +#### DownloadJobRemoteSource + +This subclass adds the following fields to the job: + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `bytes` | int | 0 | bytes downloaded so far | +| `total_bytes` | int | 0 | total size to download | +| `access_token` | Any | None | an authorization token to present to the remote source | + +The job will start out with 0/0 in its bytes/total_bytes fields. Once it starts running, `total_bytes` will be populated from information provided in the HTTP download header (if available), and the number of bytes downloaded so far will be progressively incremented. + +#### DownloadJobURL + +This is a subclass of `DownloadJobBase`. It redefines `source` to be a +Pydantic `AnyHttpUrl` object, which enforces URL validation checking +on the field. + +Note that the installer service defines an additional subclass of +`DownloadJobRemoteSource` that accepts HuggingFace repo_ids in +addition to URLs. This is discussed later in this document. 
+ +### Event handlers + +While a job is being downloaded, the queue will emit events at +periodic intervals. A typical series of events during a successful +download session will look like this: + + + 1. `enqueued` + 2. `running` + 3. `running` + 4. `running` + 5. `completed` + + +There will be a single enqueued event, followed by one or more running events, and finally one `completed`, `error` or `cancelled` events. + +It is possible for a caller to pause download temporarily, in which case the events may look something like this: + + + 1. `enqueued` + 2. `running` + 3. `running` + 4. `paused` user paused the download + 5. `running` + 6. `completed` + + +The download queue logs when downloads start and end (unless `quiet` is set to True at initialization time) but doesn't log any progress events. You will probably want to be alerted to events during the download job and provide more user feedback. In order to intercept and respond to events you may install a series of one or more event handlers in the job. Whenever the job's status changes, the chain of event handlers is traversed and executed in the same thread that the download job is running in. + +Event handlers have the signature `Callable[["DownloadJobBase"], None]`, i.e. + +```py +def handler(job: DownloadJobBase): + pass +``` + +A typical handler will examine `job.status` and decide if there's something to be done. This can include cancelling or erroring the job, but more typically is used to report on the job status to the user interface or to perform certain actions on successful completion of the job. + +Event handlers can be attached to a job at creation time. In addition, you can create a series of default handlers that are attached to the queue object itself. These handlers will be executed for each job after the job's own handlers (if any) have run. + +During a download, running events are issued every time roughly 1% of the file is transferred. 
This is to provide just enough granularity to update a tqdm progress bar smoothly. + +Handlers can be added to a job after the fact using the job's `add_event_handler` method: + +```py +job.add_event_handler(my_handler) +``` + +All handlers can be cleared using the job's `clear_event_handlers()` method. Note that it might be a good idea to pause the job before altering its handlers. + +### Creating a download queue object + +The `DownloadQueueService` constructor takes the following arguments: + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `event_handlers` | List[DownloadEventHandler] | [] | Event handlers | +| `max_parallel_dl` | int | 5 | Maximum number of simultaneous downloads allowed | +| `requests_session` | requests.sessions.Session | None | An alternative requests Session object to use for the download | +| `quiet` | bool | False | Do work quietly without issuing log messages | + +A typical initialization sequence will look like: + +```py +from invokeai.app.services.download_manager import DownloadQueueService + +def log_download_event(job: DownloadJobBase): + logger.info(f'job={job.id}: status={job.status}') + +queue = DownloadQueueService( + event_handlers=[log_download_event] + ) +``` + +Event handlers can be provided to the queue at initialization time as shown in the example. These will be automatically appended to the handler list for any job that is submitted to this queue. + +`max_parallel_dl` sets the number of simultaneous active downloads that are allowed. The default of five has not been benchmarked in any way, but seems to give acceptable performance. + +`requests_session` can be used to provide a `requests` module Session object that will be used to stream remote URLs to disk. This facility was added for use in the module's unit tests to simulate a remote web server, but may be useful in other contexts. + +`quiet` will prevent the queue from issuing any log messages at the INFO or higher levels. 
+
+### Submitting a download job
+
+You can submit a download job to the queue either by creating the job manually and passing it to the queue's `submit_download_job()` method, or using the `create_download_job()` method, which will do the same thing on your behalf.
+
+To use the former method, follow this example:
+
+```py
+job = DownloadJobRemoteSource(
+    source='http://www.civitai.com/models/13456',
+    destination='/tmp/models/',
+    event_handlers=[my_handler1, my_handler2], # if desired
+)
+queue.submit_download_job(job, start=True)
+```
+
+`submit_download_job()` takes just two arguments: the job to submit, and a flag indicating whether to immediately start the job (defaulting to True). If you choose not to start the job immediately, you can start it later by calling the queue's `start_job()` or `start_all_jobs()` methods, which are described later.
+
+To have the queue create the job for you, follow this example instead:
+
+```py
+job = queue.create_download_job(
+    source='http://www.civitai.com/models/13456',
+    destdir='/tmp/models/',
+    filename='my_model.safetensors',
+    event_handlers=[my_handler1, my_handler2], # if desired
+    start=True,
+    )
+```
+
+The `filename` argument forces the downloader to use the specified name for the file rather than the name provided by the remote source, and is equivalent to manually specifying a destination of `/tmp/models/my_model.safetensors` in the submitted job.
+ +Here is the full list of arguments that can be provided to `create_download_job()`: + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `source` | Union[str, Path, AnyHttpUrl] | | Download remote or local source | +| `destdir` | Path | | Destination directory for downloaded file | +| `filename` | Path | None | Filename for downloaded file | +| `start` | bool | True | Enqueue the job immediately | +| `priority` | int | 10 | Starting priority for this job | +| `access_token` | str | None | Authorization token for this resource | +| `event_handlers` | List[DownloadEventHandler] | [] | Event handlers for this job | + +Internally, `create_download_job()` has a little bit of internal logic that looks at the type of the source and selects the right subclass of `DownloadJobBase` to create and enqueue. + +**TODO**: move this logic into its own method for overriding in subclasses. + +### Job control + +Prior to completion, jobs can be controlled with a series of queue method calls. Do not attempt to modify jobs by directly writing to their fields, as this is likely to lead to unexpected results. + +Any method that accepts a job argument may raise an `UnknownJobIDException` if the job has not yet been submitted to the queue or was not created by this queue. + +#### queue.join() + +This method will block until all the active jobs in the queue have reached a terminal state (completed, errored or cancelled). + +#### queue.wait_for_job(job, [timeout]) + +This method will block until the indicated job has reached a terminal state (completed, errored or cancelled). If the optional timeout is provided, the call will block for at most timeout seconds, and raise a TimeoutError otherwise. + +#### jobs = queue.list_jobs() + +This will return a list of all jobs, including ones that have not yet been enqueued and those that have completed or errored out. 
+
+#### job = queue.id_to_job(int)
+
+This method allows you to recover a submitted job using its ID.
+
+#### queue.prune_jobs()
+
+Remove completed and errored jobs from the job list.
+
+#### queue.start_job(job)
+
+If the job was submitted with `start=False`, then it can be started using this method.
+
+#### queue.pause_job(job)
+
+This will temporarily pause the job, if possible. It can later be restarted and pick up where it left off using `queue.start_job()`.
+
+#### queue.cancel_job(job)
+
+This will cancel the job if possible and clean up temporary files and other resources that it might have been using.
+
+#### queue.start_all_jobs(), queue.pause_all_jobs(), queue.cancel_all_jobs()
+
+This will start/pause/cancel all jobs that have been submitted to the queue and have not yet reached a terminal state.
+
+---
+
+## This Meta be Good: Model Metadata Storage
+
+The modules found under `invokeai.backend.model_manager.metadata` provide a straightforward API for fetching model metadata from online repositories. Currently only HuggingFace is supported. However, the modules are easily extended for additional repos, provided that they have defined APIs for metadata access.
+
+Metadata comprises any descriptive information that is not essential for getting the model to run. For example "author" is metadata, while "type", "base" and "format" are not. The latter fields are part of the model's config, as defined in `invokeai.backend.model_manager.config`.
+
+### Example Usage
+
+```py
+from invokeai.backend.model_manager.metadata import (
+    AnyModelRepoMetadata,
+    HuggingFaceMetadata,
+    HuggingFaceMetadataFetch,
+)
+# to access the initialized sql database
+from invokeai.app.api.dependencies import ApiDependencies
+
+hf = HuggingFaceMetadataFetch()
+
+# fetch the metadata
+model_metadata = hf.from_id("")
+
+assert isinstance(model_metadata, HuggingFaceMetadata)
+```
+
+### Structure of the Metadata objects
+
+There is a short class hierarchy of Metadata objects, all of which descend from the Pydantic `BaseModel`.
+
+#### `ModelMetadataBase`
+
+This is the common base class for metadata:
+
+| Field Name | Type | Description |
+|------------|------|-------------|
+| `name` | str | Repository's name for the model |
+| `author` | str | Model's author |
+| `tags` | Set[str] | Model tags |
+
+Note that the model config record also has a `name` field. It is intended that the config record version be locally customizable, while the metadata version is read-only. However, enforcing this is expected to be part of the business logic.
+
+Descendants of the base add additional fields.
+
+#### `HuggingFaceMetadata`
+
+This descends from `ModelMetadataBase` and adds the following fields:
+
+| Field Name | Type | Description |
+|------------|------|-------------|
+| `type` | Literal["huggingface"] | Used for the discriminated union of metadata classes |
+| `id` | str | HuggingFace repo_id |
+| `tag_dict` | Dict[str, Any] | A dictionary of tag/value pairs provided in addition to `tags` |
+| `last_modified` | datetime | Date of last commit of this model to the repo |
+| `files` | List[Path] | List of the files in the model repo |
+
+#### `AnyModelRepoMetadata`
+
+This is a discriminated Union of `HuggingFaceMetadata`.
+
+### Fetching Metadata from Online Repos
+
+The `HuggingFaceMetadataFetch` class will retrieve metadata from its corresponding repository and return `AnyModelRepoMetadata` objects. Their base class `ModelMetadataFetchBase` is an abstract class that defines two methods: `from_url()` and `from_id()`. The former accepts the type of model URLs that the user will try to cut and paste into the model import form. The latter accepts a string ID in the format recognized by the repository of choice. Both methods return an `AnyModelRepoMetadata`.
+
+The base class also has a class method `from_json()` which will take the JSON representation of a `ModelMetadata` object, validate it, and return the corresponding `AnyModelRepoMetadata` object.
+
+When initializing one of the metadata fetching classes, you may provide a `requests.Session` argument. This allows you to customize the low-level HTTP fetch requests and is used, for instance, in the testing suite to avoid hitting the internet.
+
+The HuggingFace fetcher subclass adds additional repo-specific fetching methods:
+
+#### HuggingFaceMetadataFetch
+
+This overrides its base class `from_json()` method to return a `HuggingFaceMetadata` object directly.
+
+### Metadata Storage
+
+The `ModelConfigBase` stores this response in the `source_api_response` field as a JSON blob.
+
+---
+
+## The Lowdown on the ModelLoadService
+
+The `ModelLoadService` is responsible for loading a named model into memory so that it can be used for inference. Despite the fact that it does a lot under the covers, it is very straightforward to use.
+
+An application-wide model loader is created at API initialization time and stored in `ApiDependencies.invoker.services.model_loader`. However, you can create alternative instances if you wish.
+
+### Creating a ModelLoadService object
+
+The class is defined in `invokeai.app.services.model_load`. It is initialized with an InvokeAIAppConfig object, from which it gets configuration information such as the user's desired GPU and precision, and with a previously-created `ModelRecordServiceBase` object, from which it loads the requested model's configuration information.
+
+Here is a typical initialization pattern:
+
+```py
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.model_load import ModelLoadService, ModelLoaderRegistry
+
+config = InvokeAIAppConfig.get_config()
+
+ram_cache = ModelCache(
+    max_cache_size=config.ram_cache_size, max_vram_cache_size=config.vram_cache_size, logger=logger
+)
+
+convert_cache = ModelConvertCache(
+    cache_path=config.models_convert_cache_path, max_size=config.convert_cache_size
+)
+
+loader = ModelLoadService(
+    app_config=config,
+    ram_cache=ram_cache,
+    convert_cache=convert_cache,
+    registry=ModelLoaderRegistry
+)
+```
+
+### load_model(model_config, [submodel_type], [context]) -> LoadedModel
+
+The `load_model()` method takes an `AnyModelConfig` returned by `ModelRecordService.get_model()` and returns the corresponding loaded model. It loads the model into memory, gets the model ready for use, and returns a `LoadedModel` object.
+
+The optional second argument, `submodel_type`, is a `SubModelType` string enum, such as "vae". It is mandatory when used with a main model, and is used to select which part of the main model to load.
+
+The optional third argument, `context`, can be provided by an invocation to trigger model load event reporting. See below for details.
+
+The returned `LoadedModel` object contains a copy of the configuration record returned by the model record service's `get_model()` method, as well as the in-memory loaded model:
+
+| Attribute Name | Type | Description |
+|----------------|------|-------------|
+| `config` | AnyModelConfig | A copy of the model's configuration record for retrieving base type, etc. |
+| `model` | AnyModel | The instantiated model (details below) |
+
+### get_model_by_key(key, [submodel]) -> LoadedModel
+
+The `get_model_by_key()` method will retrieve the model using its unique database key.
For example:
+
+```py
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+```
+
+`get_model_by_key()` may raise any of the following exceptions:
+
+* `UnknownModelException`   -- key not in database
+* `ModelNotFoundException`  -- key in database but model not found at path
+* `NotImplementedException` -- the loader doesn't know how to load this type of model
+
+### Using the Loaded Model in Inference
+
+`LoadedModel` acts as a context manager. The context loads the model into the execution device (e.g. VRAM on CUDA systems), locks the model in the execution device for the duration of the context, and returns the model. Use it like this:
+
+```py
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+with loaded_model as vae:
+    image = vae.decode(latents)[0]
+```
+
+The object returned by the LoadedModel context manager is an `AnyModel`, which is a Union of `ModelMixin`, `torch.nn.Module`, `IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and `EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers models, `EmbeddingModelRaw` is used for LoRA and TextualInversion models. The others are obvious.
+
+In addition, you may call `LoadedModel.model_on_device()`, a context manager that returns a tuple of the model's state dict in CPU and the model itself in VRAM. It is used to optimize the LoRA patching and unpatching process:
+
+```py
+loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
+with loaded_model.model_on_device() as (state_dict, vae):
+    image = vae.decode(latents)[0]
+```
+
+Since not all models have state dicts, the `state_dict` return value can be None.
+
+### Emitting model loading events
+
+When the `context` argument is passed to `load_model_*()`, it will retrieve the invocation event bus from the passed `InvocationContext` object to emit events on the invocation bus.
The two events are "model_load_started" and "model_load_completed". Both carry the following payload:
+
+```py
+payload=dict(
+    queue_id=queue_id,
+    queue_item_id=queue_item_id,
+    queue_batch_id=queue_batch_id,
+    graph_execution_state_id=graph_execution_state_id,
+    model_key=model_key,
+    submodel_type=submodel,
+    hash=model_info.hash,
+    location=str(model_info.location),
+    precision=str(model_info.precision),
+)
+```
+
+### Adding Model Loaders
+
+Model loaders are small classes that inherit from the `ModelLoader` base class. They typically implement one method `_load_model()` whose signature is:
+
+```py
+def _load_model(
+    self,
+    model_path: Path,
+    model_variant: Optional[ModelRepoVariant] = None,
+    submodel_type: Optional[SubModelType] = None,
+) -> AnyModel:
+```
+
+`_load_model()` will be passed the path to the model on disk, an optional repository variant (used by the diffusers loaders to select, e.g., the `fp16` variant), and an optional submodel_type for main and onnx models.
+
+To install a new loader, place it in `invokeai/backend/model_manager/load/model_loaders`. Inherit from `ModelLoader` and use the `@ModelLoaderRegistry.register()` decorator to indicate what type of models the loader can handle.
+
+Here is a complete example from `generic_diffusers.py`, which is able to load several different diffusers types:
+
+```py
+from pathlib import Path
+from typing import Optional
+
+from invokeai.backend.model_manager import (
+    AnyModel,
+    BaseModelType,
+    ModelFormat,
+    ModelRepoVariant,
+    ModelType,
+    SubModelType,
+)
+from ..
import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T2IAdapter, format=ModelFormat.Diffusers) +class GenericDiffusersLoader(ModelLoader): + """Class to load simple diffusers models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + model_class = self._get_hf_load_class(model_path) + if submodel_type is not None: + raise Exception(f"There are no submodels in models of type {model_class}") + variant = model_variant.value if model_variant else None + result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) # type: ignore + return result +``` + +:::note + A loader can register itself to handle several different + model types. An exception will be raised if more than one loader tries + to register the same model type. +::: + +#### Conversion + +Some models require conversion to diffusers format before they can be loaded. These loaders should override two additional methods: + +```py +_needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool +_convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: +``` + +The first method accepts the model configuration, the path to where the unmodified model is currently installed, and a proposed destination for the converted model. This method returns True if the model needs to be converted. It typically does this by comparing the last modification time of the original model file to the modification time of the converted model. 
In some cases you will also want to check the modification date of the configuration record, in the event that the user has changed something like the scheduler prediction type that will require the model to be re-converted. See `controlnet.py` for an example of this logic.
+
+The second method accepts the model configuration, the path to the original model on disk, and the desired output path for the converted model. It does whatever it needs to do to get the model into diffusers format, and returns the Path of the resulting model. (The path should ordinarily be the same as `output_path`.)
+
+## The ModelManagerService object
+
+For convenience, the API provides a `ModelManagerService` object which gives a single point of access to the major model manager services. This object is created at initialization time and can be found in the global `ApiDependencies.invoker.services.model_manager` object, or in `context.services.model_manager` from within an invocation.
+
+In the examples below, we have retrieved the manager using:
+
+```py
+mm = ApiDependencies.invoker.services.model_manager
+```
+
+The following properties and methods will be available:
+
+### mm.store
+
+This retrieves the `ModelRecordService` associated with the manager. Example:
+
+```py
+configs = mm.store.get_model_by_attr(name='stable-diffusion-v1-5')
+```
+
+### mm.install
+
+This retrieves the `ModelInstallService` associated with the manager. Example:
+
+```py
+job = mm.install.heuristic_import('https://civitai.com/models/58390/detail-tweaker-lora-lora')
+```
+
+### mm.load
+
+This retrieves the `ModelLoaderService` associated with the manager.
Example: + +```py +configs = mm.store.get_model_by_attr(name='stable-diffusion-v1-5') +assert len(configs) > 0 + +loaded_model = mm.load.load_model(configs[0]) +``` + +The model manager also offers a few convenience shortcuts for loading models: + +### mm.load_model_by_config(model_config, [submodel], [context]) -> LoadedModel + +Same as `mm.load.load_model()`. + +### mm.load_model_by_attr(model_name, base_model, model_type, [submodel], [context]) -> LoadedModel + +This accepts the combination of the model's name, type and base, which it passes to the model record config store for retrieval. If a unique model config is found, this method returns a `LoadedModel`. It can raise the following exceptions: + +- `UnknownModelException` -- model with these attributes not known +- `NotImplementedException` -- the loader doesn't know how to load this type of model +- `ValueError` -- more than one model matches this combination of base/type/name + +### mm.load_model_by_key(key, [submodel], [context]) -> LoadedModel + +This method takes a model key, looks it up using the `ModelRecordServiceBase` object in `mm.store`, and passes the returned model configuration to `load_model_by_config()`. It may raise a `NotImplementedException`. + +## Invocation Context Model Manager API + +Within invocations, the following methods are available from the `InvocationContext` object: + +### context.download_and_cache_model(source) -> Path + +This method accepts a `source` of a remote model, downloads and caches it locally, and then returns a Path to the local model. The source can be a direct download URL or a HuggingFace repo_id. 
+ +In the case of HuggingFace repo_id, the following variants are recognized: + +* stabilityai/stable-diffusion-v4 -- default model +* stabilityai/stable-diffusion-v4:fp16 -- fp16 variant +* stabilityai/stable-diffusion-v4:fp16:vae -- the fp16 vae subfolder +* stabilityai/stable-diffusion-v4:onnx:vae -- the onnx variant vae subfolder + +You can also point at an arbitrary individual file within a repo_id directory using this syntax: + +* stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors + +### context.load_local_model(model_path, [loader]) -> LoadedModel + +This method loads a local model from the indicated path, returning a `LoadedModel`. The optional loader is a Callable that accepts a Path to the object, and returns a `AnyModel` object. If no loader is provided, then the method will use `torch.load()` for a .ckpt or .bin checkpoint file, `safetensors.torch.load_file()` for a safetensors checkpoint file, or `cls.from_pretrained()` for a directory that looks like a diffusers directory. + +### context.load_remote_model(source, [loader]) -> LoadedModel + +This method accepts a `source` of a remote model, downloads and caches it locally, loads it, and returns a `LoadedModel`. The source can be a direct download URL or a HuggingFace repo_id. 
+ +In the case of HuggingFace repo_id, the following variants are recognized: + +* stabilityai/stable-diffusion-v4 -- default model +* stabilityai/stable-diffusion-v4:fp16 -- fp16 variant +* stabilityai/stable-diffusion-v4:fp16:vae -- the fp16 vae subfolder +* stabilityai/stable-diffusion-v4:onnx:vae -- the onnx variant vae subfolder + +You can also point at an arbitrary individual file within a repo_id directory using this syntax: + +* stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors diff --git a/docs/src/content/docs/development/Architecture/overview.mdx b/docs/src/content/docs/development/Architecture/overview.mdx new file mode 100644 index 00000000000..1ab8ebdd3cc --- /dev/null +++ b/docs/src/content/docs/development/Architecture/overview.mdx @@ -0,0 +1,104 @@ +--- +title: Architecture Overview +sidebar: + order: 1 + label: Overview + +lastUpdated: 2026-02-18 +--- + +import Mermaid from '@components/Mermaid.astro' + + +```mermaid +flowchart TB + + subgraph apps[Applications] + webui[WebUI] + cli[CLI] + + subgraph webapi[Web API] + api[HTTP API] + sio[Socket.IO] + end + + end + + subgraph invoke[Invoke] + direction LR + invoker + services + sessions + invocations + end + + subgraph core[AI Core] + Generate + end + + webui --> webapi + webapi --> invoke + cli --> invoke + + invoker --> services & sessions + invocations --> services + sessions --> invocations + + services --> core + + %% Styles + classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A + classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A + + class apps,webapi,invoke,core sg + +``` + + +## Applications + +Applications are built on top of the invoke framework. They should construct `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations. 
+ +### Web UI + +The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/invokeai/frontend` and the backend code is found in `/invokeai/app/api_app.py` and `/invokeai/app/api/`. The code is further organized as such: + +| Component | Description | +| --- | --- | +| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API | +| dependencies | Creates all invoker services and the invoker, and provides them to the API | +| events | An eventing system that could in the future be adapted to support horizontal scale-out | +| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) | +| routers | API definitions for different areas of API functionality | + +### CLI + +The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/invokeai/frontend/cli`. + +## Invoke + +The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services. + +### Invoker + +The invoker (`/invokeai/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services: +- **invocation services**, which are used by invocations to interact with core functionality. +- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue. + +### Sessions + +Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). 
Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add an entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations cannot be deleted or modified once added.
+
+The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.
+
+### Invocations
+
+Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/invokeai/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
+
+### Services
+
+Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/invokeai/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
+
+## AI Core
+
+The AI Core is represented by the rest of the code base (i.e. the code outside of `/invokeai/app/`).
diff --git a/docs/src/content/docs/development/Front End/index.md b/docs/src/content/docs/development/Front End/index.md new file mode 100644 index 00000000000..52cb702dac0 --- /dev/null +++ b/docs/src/content/docs/development/Front End/index.md @@ -0,0 +1,131 @@ +--- +title: Frontend Development +lastUpdated: 2026-02-18 +--- + +Invoke's UI is made possible by many contributors and open-source libraries. Thank you! + +## Dev environment + +Follow the [dev environment](../dev-environment.md) guide to get set up. Run the UI using `pnpm dev`. + +## Package scripts + +- `dev`: run the frontend in dev mode, enabling hot reloading +- `build`: run all checks (dpdm, eslint, prettier, tsc, knip) and then build the frontend +- `lint:dpdm`: check circular dependencies +- `lint:eslint`: check code quality +- `lint:prettier`: check code formatting +- `lint:tsc`: check type issues +- `lint:knip`: check for unused exports or objects +- `lint`: run all checks concurrently +- `fix`: run `eslint` and `prettier`, fixing fixable issues +- `test:ui`: run `vitest` with the fancy web UI + +## Type generation + +We use [openapi-typescript] to generate types from the app's OpenAPI schema. The generated types are committed to the repo in [schema.ts]. + +If you make backend changes, it's important to regenerate the frontend types: + +```sh +cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen +``` + +On macOS and Linux, you can run `make frontend-typegen` as a shortcut for the above snippet. + +## Localization + +We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project. + +Only the English source strings (i.e. `en.json`) should be changed on this repo. 
+ +## VSCode + +### Example debugger config + +```jsonc +{ + "version": "0.2.0", + "configurations": [ + { + "type": "chrome", + "request": "launch", + "name": "Invoke UI", + "url": "http://localhost:5173", + "webRoot": "${workspaceFolder}/invokeai/frontend/web" + } + ] +} +``` + +### Remote dev + +We've noticed an intermittent timeout issue with the VSCode remote dev port forwarding. + +We suggest disabling the editor's port forwarding feature and doing it manually via SSH: + +```sh +ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host +``` + +## Contributing Guidelines + +Thanks for your interest in contributing to the Invoke Web UI! + +Please follow these guidelines when contributing. + +## Check in before investing your time + +Please check in before you invest your time on anything besides a trivial fix, in case it conflicts with ongoing work or isn't aligned with the vision for the app. + +If a feature request or issue doesn't already exist for the thing you want to work on, please create one. + +Ping `@psychedelicious` on [discord] in the `#frontend-dev` channel or in the feature request / issue you want to work on - we're happy to chat. + +## Code conventions + +- This is a fairly complex app with a deep component tree. Please use memoization (`useCallback`, `useMemo`, `memo`) with enthusiasm. +- If you need to add some global, ephemeral state, please use [nanostores] if possible. +- Be careful with your redux selectors. If they need to be parameterized, consider creating them inside a `useMemo`. +- Feel free to use `lodash` (via `lodash-es`) to make the intent of your code clear. +- Please add comments describing the "why", not the "how" (unless it is really arcane). 
+ +## Commit format + +Please use the [conventional commits] spec for the web UI, with a scope of "ui": + +- `chore(ui): bump deps` +- `chore(ui): lint` +- `feat(ui): add some cool new feature` +- `fix(ui): fix some bug` + +## Tests + +We don't do any UI testing at this time, but consider adding tests for sensitive logic. + +We use `vitest`, and tests should be next to the file they are testing. If the logic is in `something.ts`, the tests should be in `something.test.ts`. + +In some situations, we may want to test types. For example, if you use `zod` to create a schema that should match a generated type, it's best to add a test to confirm that the types match. Use `tsafe`'s assert for this. + +## Submitting a PR + +- Ensure your branch is tidy. Use an interactive rebase to clean up the commit history and reword the commit messages if they are not descriptive. +- Run `pnpm lint`. Some issues are auto-fixable with `pnpm fix`. +- Fill out the PR form when creating the PR. + - It doesn't need to be super detailed, but a screenshot or video is nice if you changed something visually. + - If a section isn't relevant, delete it. 
+ +## Other docs + +- [Workflows - Design and Implementation] +- [State Management] + +[discord]: https://discord.gg/ZmtBAhwWhy +[i18next]: https://github.com/i18next/react-i18next +[Weblate]: https://hosted.weblate.org/engage/invokeai/ +[openapi-typescript]: https://github.com/openapi-ts/openapi-typescript +[schema.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/services/api/schema.ts +[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0/ +[Workflows - Design and Implementation]: ./workflows.md +[State Management]: ./state-management.md diff --git a/docs/src/content/docs/development/Front End/state-management.mdx b/docs/src/content/docs/development/Front End/state-management.mdx new file mode 100644 index 00000000000..96fede7ba7f --- /dev/null +++ b/docs/src/content/docs/development/Front End/state-management.mdx @@ -0,0 +1,41 @@ +--- +title: State Management +lastUpdated: 2026-02-18 +--- + +The app makes heavy use of Redux Toolkit, its Query library, and `nanostores`. + +## Redux + +We use RTK extensively - slices, entity adapters, queries, reselect, the whole 9 yards. Their [docs](https://redux-toolkit.js.org/) are excellent. + +## `nanostores` + +[nanostores] is a tiny state management library. It provides both imperative and declarative APIs. + +### Example + +```ts +export const $myStringOption = atom(null); + +// Outside a component, or within a callback for performance-critical logic +$myStringOption.get(); +$myStringOption.set('new value'); + +// Inside a component +const myStringOption = useStore($myStringOption); +``` + +### Where to put nanostores + +- For global application state, export your stores from `invokeai/frontend/web/src/app/store/nanostores/`. +- For feature state, create a file for the stores next to the redux slice definition (e.g. `invokeai/frontend/web/src/features/myFeature/myFeatureNanostores.ts`). 
+- For hooks with global state, export the store from the same file the hook is in, or put it next to the hook. + +### When to use nanostores + +- For non-serializable data that needs to be available throughout the app, use `nanostores` instead of a global. +- For ephemeral global state (i.e. state that does not need to be persisted), use `nanostores` instead of redux. +- For performance-critical code and in callbacks, redux selectors can be problematic due to the declarative reactivity system. Consider refactoring to use `nanostores` if there's a **measurable** performance issue. + +[nanostores]: https://github.com/nanostores/nanostores/ diff --git a/docs/src/content/docs/development/Front End/workflows.mdx b/docs/src/content/docs/development/Front End/workflows.mdx new file mode 100644 index 00000000000..d083bb8df1e --- /dev/null +++ b/docs/src/content/docs/development/Front End/workflows.mdx @@ -0,0 +1,315 @@ +--- +title: Workflows +lastUpdated: 2026-02-18 +--- + +This document describes, at a high level, the design and implementation of workflows in the InvokeAI frontend. There are a substantial number of implementation details not included, but which are hopefully clear from the code. + +InvokeAI's backend uses graphs, composed of **nodes** and **edges**, to process data and generate images. + +Nodes have any number of **input fields** and **output fields**. Edges connect nodes together via their inputs and outputs. Fields have data types which dictate how they may be connected. + +During execution, a node's outputs may be passed along to any number of other nodes' inputs. + +Workflows are an enriched abstraction over a graph. + +## Design + +InvokeAI provides two ways to build graphs in the frontend: the [Linear UI](#linear-ui) and [Workflow Editor](#workflow-editor). + +To better understand the use case and challenges related to workflows, we will review both of these modes. 
+ +### Linear UI + +This includes the **Text to Image**, **Image to Image** and **Unified Canvas** tabs. + +The user-managed parameters on these tabs are stored as simple objects in the application state. When the user invokes, adding a generation to the queue, we internally build a graph from these parameters. + +This logic can be fairly complex due to the range of features available and their interactions. Depending on the parameters selected, the graph may be very different. Building graphs in code can be challenging - you are trying to construct a non-linear structure in a linear context. + +The simplest graph building logic is for **Text to Image** with an SD1.5 model: [buildLinearTextToImageGraph.ts] + +There are many other graph builders in the same directory for different tabs or base models (e.g. SDXL). Some are pretty hairy. + +In the Linear UI, we go straight from **simple application state** to **graph** via these builders. + +### Workflow Editor + +The Workflow Editor is a visual graph editor, allowing users to draw edges from node to node to construct a graph. This is a _far_ more approachable way to create complex graphs. + +InvokeAI uses the [reactflow] library to power the Workflow Editor. It provides both a graph editor UI and manages its own internal graph state. + +#### Workflows + +A workflow is a representation of a graph plus additional metadata: + +- Name +- Description +- Version +- Notes +- [Exposed fields](#workflow-linear-view) +- Author, tags, category, etc. + +Workflows should have other qualities: + +- Portable: you should be able to load a workflow created by another person. +- Resilient: you should be able to "upgrade" a workflow as the application changes. +- Abstract: as much as is possible, workflows should not be married to the specific implementation details of the application. + +To support these qualities, workflows are serializable, have versioned schemas, and represent graphs as minimally as possible. 
Fortunately, the reactflow state for nodes and edges works perfectly for this. + +##### Workflow -> reactflow state -> InvokeAI graph + +Given a workflow, we need to be able to derive reactflow state and/or an InvokeAI graph from it. + +The first step - workflow to reactflow state - is very simple. The logic is in [nodesSlice.ts], in the `workflowLoaded` reducer. + +The reactflow state is, however, structurally incompatible with our backend's graph structure. When a user invokes on a Workflow, we need to convert the reactflow state into an InvokeAI graph. This is far simpler than the graph building logic from the Linear UI: +[buildNodesGraph.ts] + +##### Nodes vs Invocations + +We often use the terms "node" and "invocation" interchangeably, but they may refer to different things in the frontend. + +reactflow [has its own definitions][reactflow-concepts] of "node", "edge" and "handle" which are closely related to InvokeAI graph concepts. + +- A reactflow node is related to an InvokeAI invocation. It has a "data" property, which holds the InvokeAI-specific invocation data. +- A reactflow edge is roughly equivalent to an InvokeAI edge. +- A reactflow handle is roughly equivalent to an InvokeAI input or output field. + +##### Workflow Linear View + +Graphs are very capable data structures, but not everyone wants to work with them all the time. + +To allow less technical users - or anyone who wants a less visually noisy workspace - to benefit from the power of nodes, InvokeAI has a workflow feature called the Linear View. + +A workflow input field can be added to this Linear View, and its input component can be presented similarly to the Linear UI tabs. Internally, we add the field to the workflow's list of exposed fields. + +#### OpenAPI Schema + +OpenAPI is a schema specification that can represent complex data structures and relationships. The backend is capable of generating an OpenAPI schema for all invocations. 
+ +When the UI connects, it requests this schema and parses each invocation into an **invocation template**. Invocation templates have a number of properties, like title, description and type, but the most important ones are their input and output **field templates**. + +Invocation and field templates are the "source of truth" for graphs, because they indicate what the backend is able to process. + +When a user adds a new node to their workflow, these templates are used to instantiate a node with fields instantiated from the input and output field templates. + +##### Field Instances and Templates + +Field templates consist of: + +- Name: the identifier of the field, its variable name in python +- Type: derived from the field's type annotation in python (e.g. IntegerField, ImageField, MainModelField) +- Constraints: derived from the field's creation args in python (e.g. minimum value for an integer) +- Default value: optionally provided in the field's creation args (e.g. 42 for an integer) + +Field instances are created from the templates and have name, type and optionally a value. + +The type of the field determines the UI components that are rendered for it. + +A field instance's name associates it with its template. + +##### Stateful vs Stateless Fields + +**Stateful** fields store their value in the frontend graph. Think primitives, model identifiers, images, etc. Fields are only stateful if the frontend allows the user to directly input a value for them. + +Many field types, however, are **stateless**. An example is a `UNetField`, which contains some data describing a UNet. Users cannot directly provide this data - it is created and consumed in the backend. + +Stateless fields do not store their value in the node, so their field instances do not have values. + +"Custom" fields will always be treated as stateless fields. 
+ +##### Single and Collection Fields + +Field types have a name and cardinality property which may identify it as a **SINGLE**, **COLLECTION** or **SINGLE_OR_COLLECTION** field. + +- If a field is annotated in python as a singular value or class, its field type is parsed as a **SINGLE** type (e.g. `int`, `ImageField`, `str`). +- If a field is annotated in python as a list, its field type is parsed as a **COLLECTION** type (e.g. `list[int]`). +- If it is annotated as a union of a type and list, the type will be parsed as a **SINGLE_OR_COLLECTION** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed). + +## Implementation + +The majority of data structures in the backend are [pydantic] models. Pydantic provides OpenAPI schemas for all models and we then generate TypeScript types from those. + +The OpenAPI schema is parsed at runtime into our invocation templates. + +Workflows and all related data are modeled in the frontend using [zod]. Related types are inferred from the zod schemas. + +> In python, invocations are pydantic models with fields. These fields become node inputs. The invocation's `invoke()` function returns a pydantic model - its output. Like the invocation itself, the output model has any number of fields, which become node outputs. + +### zod Schemas and Types + +The zod schemas, inferred types, and type guards are in [types/]. + +Roughly ordered from lowest-level to highest: + +- `common.ts`: stateful field data, and a couple of other misc types +- `field.ts`: fields - types, values, instances, templates +- `invocation.ts`: invocations and other node types +- `workflow.ts`: workflows and constituents + +We customize the OpenAPI schema to include additional properties on invocation and field schemas. To facilitate parsing this schema into templates, we modify/wrap the types from [openapi-types] in `openapi.ts`. 
+ +### OpenAPI Schema Parsing + +The entrypoint for OpenAPI schema parsing is [parseSchema.ts]. + +General logic flow: + +- Iterate over all invocation schema objects + - Extract relevant invocation-level attributes (e.g. title, type, version, etc) + - Iterate over the invocation's input fields + - [Parse each field's type](#parsing-field-types) + - [Build a field input template](#building-field-input-templates) from the type - either a stateful template or "generic" stateless template + - Iterate over the invocation's output fields + - Parse the field's type (same as inputs) + - [Build a field output template](#building-field-output-templates) + - Assemble the attributes and fields into an invocation template + +Most of these involve very straightforward `reduce`s, but the less intuitive steps are detailed below. + +#### Parsing Field Types + +Field types are represented as structured objects: + +```ts +type FieldType = { + name: string; + cardinality: 'SINGLE' | 'COLLECTION' | 'SINGLE_OR_COLLECTION'; +}; +``` + +The parsing logic is in `parseFieldType.ts`. + +There are 4 general cases for field type parsing. + +##### Primitive Types + +When a field is annotated as a primitive value (e.g. `int`, `str`, `float`), the field type parsing is fairly straightforward. The field is represented by a simple OpenAPI **schema object**, which has a `type` property. + +We create a field type name from this `type` string (e.g. `string` -> `StringField`). The cardinality is `"SINGLE"`. + +##### Complex Types + +When a field is annotated as a pydantic model (e.g. `ImageField`, `MainModelField`, `ControlField`), it is represented as a **reference object**. Reference objects are pointers to another schema or reference object within the schema. + +We need to **dereference** the schema to pull these out. Dereferencing may require recursion. We use the reference object's name directly for the field type name. 
+ +> Unfortunately, at this time, we've had limited success using external libraries to dereference at runtime, so we do this ourselves. + +##### Collection Types + +When a field is annotated as a list of a single type, the schema object has an `items` property. It may be a schema object or a reference object and must be parsed to determine the item type. + +We use the item type for the field type name. The cardinality is `"COLLECTION"`. + +##### Single or Collection Types + +When a field is annotated as a union of a type and list of that type, the schema object has an `anyOf` property, which holds a list of valid types for the union. + +After verifying that the union has two members (a type and list of the same type), we use the type for the field type name, with cardinality `"SINGLE_OR_COLLECTION"`. + +##### Optional Fields + +In OpenAPI v3.1, when an object is optional, it is put into an `anyOf` along with a primitive schema object with `type: 'null'`. + +Handling this adds a fair bit of complexity, as we now must filter out the `'null'` types and work with the remaining types as described above. + +If there is a single remaining schema object, we must recursively call `parseFieldType()` to parse it. + +#### Building Field Input Templates + +Now that we have a field type, we can build an input template for the field. + +Stateful fields all get a function to build their template, while stateless fields are constructed directly. This is possible because stateless fields have no default value or constraints. + +See [buildFieldInputTemplate.ts]. + +#### Building Field Output Templates + +Field outputs are similar to stateless fields - they do not have any value in the frontend. When building their templates, we don't need a special function for each field type. + +See [buildFieldOutputTemplate.ts]. + +### Managing reactflow State + +As described above, the workflow editor state is essentially the reactflow state, plus some extra metadata. 
+ +We provide reactflow with an array of nodes and edges via redux, and a number of [event handlers][reactflow-events]. These handlers dispatch redux actions, managing nodes and edges. + +The pieces of redux state relevant to workflows are: + +- `state.nodes.nodes`: the reactflow nodes state +- `state.nodes.edges`: the reactflow edges state +- `state.nodes.workflow`: the workflow metadata + +#### Building Nodes and Edges + +A reactflow node has a few important top-level properties: + +- `id`: unique identifier +- `type`: a string that maps to a react component to render the node +- `position`: XY coordinates +- `data`: arbitrary data + +When the user adds a node, we build **invocation node data**, storing it in `data`. Invocation properties (e.g. type, version, label, etc.) are copied from the invocation template. Inputs and outputs are built from the invocation template's field templates. + +See [buildInvocationNode.ts]. + +Edges are managed by reactflow, but briefly, they consist of: + +- `source`: id of the source node +- `sourceHandle`: id of the source node handle (output field) +- `target`: id of the target node +- `targetHandle`: id of the target node handle (input field) + +> Edge creation is gated behind validation logic. This validation compares the input and output field types and overall graph state. + +#### Building a Workflow + +Building a workflow entity is as simple as dropping the nodes, edges and metadata into an object. + +Each node and edge is parsed with a zod schema, which serves to strip out any unneeded data. + +See [buildWorkflow.ts]. + +#### Loading a Workflow + +Workflows may be loaded from external sources or the user's local instance. In all cases, the workflow needs to be handled with care, as an untrusted object. 
+ +Loading has a few stages which may throw or warn if there are problems: + +- Parsing the workflow data structure itself, [migrating](#workflow-migrations) it if necessary (throws) +- Check for a template for each node (warns) +- Check each node's version against its template (warns) +- Validate the source and target of each edge (warns) + +This validation occurs in [validateWorkflow.ts]. + +If there are no fatal errors, the workflow is then stored in redux state. + +### Workflow Migrations + +When the workflow schema changes, we may need to perform some data migrations. This occurs as workflows are loaded. zod schemas for each workflow schema version are retained to facilitate migrations. + +Previous schemas are in folders in `invokeai/frontend/web/src/features/nodes/types/`, e.g. `v1/`. + +Migration logic is in [migrations.ts]. + +[pydantic]: https://github.com/pydantic/pydantic 'pydantic' +[zod]: https://github.com/colinhacks/zod 'zod' +[openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types' +[reactflow]: https://github.com/xyflow/xyflow 'reactflow' +[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions +[reactflow-events]: https://reactflow.dev/api-reference/react-flow#event-handlers +[buildWorkflow.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/buildWorkflow.ts +[nodesSlice.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts +[buildLinearTextToImageGraph.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts +[buildNodesGraph.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts +[buildInvocationNode.ts]: 
https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/node/buildInvocationNode.ts +[validateWorkflow.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/validateWorkflow.ts +[migrations.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/migrations.ts +[parseSchema.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts +[buildFieldInputTemplate.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts +[buildFieldOutputTemplate.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldOutputTemplate.ts diff --git a/docs/src/content/docs/development/Guides/api-development.mdx b/docs/src/content/docs/development/Guides/api-development.mdx new file mode 100644 index 00000000000..e1a9f3b8aea --- /dev/null +++ b/docs/src/content/docs/development/Guides/api-development.mdx @@ -0,0 +1,48 @@ +--- +title: API Development +--- + +Each invocation's `invoke` method is provided a single arg - the Invocation Context. + +This object provides an API the invocation can use to interact with application services, for example: + +- Saving images +- Logging messages +- Loading models + +```py +class MyInvocation(BaseInvocation): + ... + def invoke(self, context: InvocationContext) -> ImageOutput: + # Load an image + image_pil = context.images.get_pil(self.image.image_name) + # Do something to the image + output_image = do_something_cool(image_pil) + # Save the image + image_dto = context.images.save(output_image) + # Log a message + context.logger.info(f"Did something cool, image saved!") + # Return the output + return ImageOutput.build(image_dto) + ... +``` + +The full API is documented below. 
+ +## Mixins + +Two important mixins are provided to facilitate working with metadata and gallery boards. + +### `WithMetadata` + +Inherit from this class (in addition to `BaseInvocation`) to add a `metadata` input to your node. When you do this, you can access the metadata dict from `self.metadata` in the `invoke()` function. + +The dict will be populated via the node's input, and you can add any metadata you'd like to it. When you call `context.images.save()`, if the metadata dict has any data, it will be automatically embedded in the image. + +### `WithBoard` + +Inherit from this class (in addition to `BaseInvocation`) to add a `board` input to your node. This renders as a drop-down to select a board. The user's selection will be accessible from `self.board` in the `invoke()` function. + +When you call `context.images.save()`, if a board was selected, the image will be added to that board as it is saved. + +{/* TODO: Insert generated API documentation */} diff --git a/docs/src/content/docs/development/Guides/assets/html-detail.png b/docs/src/content/docs/development/Guides/assets/html-detail.png new file mode 100644 index 00000000000..055218002f7 Binary files /dev/null and b/docs/src/content/docs/development/Guides/assets/html-detail.png differ diff --git a/docs/src/content/docs/development/Guides/assets/html-overview.png b/docs/src/content/docs/development/Guides/assets/html-overview.png new file mode 100644 index 00000000000..1f288fde118 Binary files /dev/null and b/docs/src/content/docs/development/Guides/assets/html-overview.png differ diff --git a/docs/src/content/docs/development/Guides/creating-nodes.mdx b/docs/src/content/docs/development/Guides/creating-nodes.mdx new file mode 100644 index 00000000000..cf27f115672 --- /dev/null +++ b/docs/src/content/docs/development/Guides/creating-nodes.mdx @@ -0,0 +1,42 @@ +--- +title: Creating Nodes +--- + +import { Steps, LinkCard } from '@astrojs/starlight/components'; + + + 1. 
Learn about the specifics of creating a new node in our Node Creation Documentation. + + + + 2. Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the nodes usage & examples to help others more easily use your node. Including the tag "invokeai-node" in your repository's README can also help other users find it more easily. + + 3. Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list + + Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node. + + 4. A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project. + + +### Community Node Template + +Append the following template to your pull request and the [Community Nodes](/workflows/community-nodes) page when submitting a node to be added to the community nodes list: + +```md +--- + +### Super Cool Node Template + +**Description:** This node allows you to do super cool things with InvokeAI. + +**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py + +**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json + +**Output Examples** + +![InvokeAI](https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png) +``` diff --git a/docs/src/content/docs/development/Guides/models.mdx b/docs/src/content/docs/development/Guides/models.mdx new file mode 100644 index 00000000000..8ae8a9f7477 --- /dev/null +++ b/docs/src/content/docs/development/Guides/models.mdx @@ -0,0 +1,544 @@ +--- +title: Integrating a New Model Architecture +description: A comprehensive guide to integrating new foundational model architectures into InvokeAI. 
+lastUpdated: 2026-02-19 +--- + +import { Steps, FileTree } from '@astrojs/starlight/components'; + +This guide walks you through the end-to-end process of integrating a **new foundational model architecture** into InvokeAI. This is required when adding a completely new family of models (e.g., Stable Diffusion 3, FLUX, Hunyuan, etc.), rather than just adding a new checkpoint for an existing architecture. + +:::note +The code examples in this guide use a hypothetical `NewModel` architecture. The implementations of `FLUX`, `SD3`, and `SDXL` in the InvokeAI codebase serve as excellent real-world references. +::: + +## Architectural Overview + +Integrating a new model touches several parts of the InvokeAI stack, from the lowest-level PyTorch inference code up to the React frontend: + +1. **Taxonomy & Configuration (Backend)**: Declaring the model's existence and defining how to detect it from its weights on disk. +2. **Model Loading (Backend)**: Defining how to load the detected files into PyTorch models in memory. +3. **Sampling & Denoising (Backend)**: Implementing the core math for noise generation, scheduling, and the denoising loop. +4. **Invocations (Backend)**: Wrapping the PyTorch logic into isolated "nodes" that can be executed by InvokeAI's graph engine. +5. **Graph Building (Frontend)**: Instructing the UI on how to wire these nodes together based on user settings. +6. **State & UI (Frontend)**: Adding the necessary UI controls and state management for the new model's unique parameters. + +--- + +## 1. Taxonomy & Defaults + +The first step is to declare your model in the system's taxonomy and provide reasonable default settings. + + +1. **Add `BaseModelType`** + + Update the base model taxonomy to include your new model. 
+ + ```python title="invokeai/backend/model_manager/taxonomy.py" ins={7} + class BaseModelType(str, Enum): + # Existing types + StableDiffusion1 = "sd-1" + StableDiffusion2 = "sd-2" + StableDiffusionXL = "sdxl" + Flux = "flux" + NewModel = "newmodel" + ``` + +2. **Add Variant Type (if needed)** + + If your model comes in different structural variants (e.g., different parameter counts or distilled versions like `schnell` vs `dev`), define a variant enum. + + ```python title="invokeai/backend/model_manager/taxonomy.py" + class NewModelVariantType(str, Enum): + VariantA = "variant_a" + VariantB = "variant_b" + ``` + +3. **Define Default Settings** + + Provide default generation parameters (steps, CFG scale, etc.) for the UI to use when this model is selected. + + ```python title="invokeai/backend/model_manager/configs/main.py" ins={5-6} + class MainModelDefaultSettings: + @staticmethod + def from_base(base: BaseModelType, variant: AnyVariant | None = None): + match base: + case BaseModelType.NewModel: + return MainModelDefaultSettings(steps=20, cfg_scale=7.0) + ``` + + +:::tip[Checklist: Taxonomy]{icon="approve-check"} + - [ ] Extend `BaseModelType` enum in `taxonomy.py` + - [ ] Create variant enum if needed in `taxonomy.py` + - [ ] Update `AnyVariant` union in `taxonomy.py` + - [ ] Add default settings in `from_base()` in `configs/main.py` +::: + +--- + +## 2. Model Configs & Detection + +InvokeAI needs to know how to identify your model from a `.safetensors` file or a diffusers folder. + + +1. **Create Main Model Config** + + Define the configuration schemas for your model format(s). 
+ + ```python title="invokeai/backend/model_manager/configs/main.py" + # Checkpoint Format (Single File) + @ModelConfigFactory.register + class Main_Checkpoint_NewModel_Config(Checkpoint_Config_Base): + type: Literal[ModelType.Main] = ModelType.Main + base: Literal[BaseModelType.NewModel] = BaseModelType.NewModel + format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint + variant: NewModelVariantType = NewModelVariantType.VariantA + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict) -> Self: + if not cls._validate_is_newmodel(mod): + raise NotAMatchError("Not a NewModel") + variant = cls._get_variant_or_raise(mod) + return cls(..., variant=variant) + + # Diffusers Format (Folder) + @ModelConfigFactory.register + class Main_Diffusers_NewModel_Config(Diffusers_Config_Base): + type: Literal[ModelType.Main] = ModelType.Main + base: Literal[BaseModelType.NewModel] = BaseModelType.NewModel + format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers + ``` + +2. **Implement Detection Logic** + + Write helper functions to inspect the state dictionary keys and shape to uniquely identify your architecture. + + ```python title="invokeai/backend/model_manager/configs/main.py" + def _is_newmodel(state_dict: dict) -> bool: + """Detect if state dict belongs to NewModel architecture.""" + # Example: check for a highly specific layer name or shape + required_keys = ["transformer_blocks.0.attn.to_q.weight"] + return all(key in state_dict for key in required_keys) + + def _get_newmodel_variant(state_dict: dict) -> NewModelVariantType: + """Determine variant from state dict.""" + # Example: distinguish variants based on hidden dimension size + context_dim = state_dict["context_embedder.weight"].shape[1] + if context_dim == 7680: + return NewModelVariantType.VariantA + return NewModelVariantType.VariantB + ``` + +3. 
**Submodels (VAE & Text Encoder)** + + If your model uses a novel VAE or Text Encoder not already in InvokeAI, you must repeat this process to create configs for them (e.g., in `configs/vae.py` and `configs/[encoder_type].py`). + +4. **Update the Configuration Union** + + Register your new configs so the application knows to check them when scanning directories. + + ```python title="invokeai/backend/model_manager/configs/factory.py" ins={4-5} + AnyModelConfig = Annotated[ + # ... existing configs + Main_Checkpoint_NewModel_Config | + Main_Diffusers_NewModel_Config, + Discriminator(...) + ] + ``` + + +:::tip[Checklist: Configs]{icon="approve-check"} + - [ ] Create main checkpoint config (`configs/main.py`) + - [ ] Create main diffusers config (`configs/main.py`) + - [ ] Create detection helper functions (`_is_newmodel()`, `_get_variant()`) + - [ ] Create VAE and Text Encoder configs if they use novel architectures + - [ ] Update `AnyModelConfig` union (`configs/factory.py`) +::: + +--- + +## 3. Model Loaders + +Loaders are responsible for converting the files on disk (described by the config) into PyTorch models in memory. + + +1. **Create the Model Loader** + + ```python title="invokeai/backend/model_manager/load/model_loaders/[newmodel].py" + @ModelLoaderRegistry.register( + base=BaseModelType.NewModel, + type=ModelType.Main, + format=ModelFormat.Checkpoint + ) + class NewModelLoader(ModelLoader): + def _load_model(self, config: AnyModelConfig, submodel_type: SubModelType | None) -> AnyModel: + # 1. Load the raw weights from disk + state_dict = self._load_state_dict(config.path) + + # 2. Convert state dict keys if necessary (e.g. from original repo format to Diffusers) + if self._is_original_format(state_dict): + state_dict = self._convert_to_diffusers_format(state_dict) + + # 3. Instantiate the empty PyTorch model + model = NewModelTransformer(config=model_config) + + # 4. Load weights into the model + model.load_state_dict(state_dict) + return model + ``` + +2. 
**Custom VAE/Encoder Loaders (If Applicable)** + + If you created custom configs for the VAE or Text Encoder, you must also create loaders for them, registering them with the appropriate `ModelType`. + + +:::tip[Checklist: Loaders]{icon="approve-check"} +- [ ] Create and register the main model loader +- [ ] Create VAE/Encoder loaders if necessary +- [ ] Implement state dict conversion if supporting non-diffusers formats +::: + +--- + +## 4. Sampling and Denoising Core + +This is where the actual mathematical implementation of the model lives. + + + +1. **Sampling Utilities** + + Create utility functions specific to how your model handles noise, packing, and scheduling. + + ```python title="invokeai/backend/[newmodel]/sampling_utils.py" + def get_noise_newmodel(num_samples: int, height: int, width: int, seed: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor: + # Models often have different latent channel counts (e.g., SD1.5 has 4, FLUX has 16) + latent_channels = 32 + latent_h, latent_w = height // 8, width // 8 + generator = torch.Generator(device=device).manual_seed(seed) + return torch.randn((num_samples, latent_channels, latent_h, latent_w), generator=generator, device=device, dtype=dtype) + + def pack_newmodel(x: torch.Tensor) -> torch.Tensor: + # Some transformer-based models require packing latents into a sequence + return rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2) + ``` + +2. **The Denoising Loop** + + Implement the core sampling loop. This interacts with schedulers and handles classifier-free guidance (CFG). 
+ + ```python title="invokeai/backend/[newmodel]/denoise.py" + def denoise(model: nn.Module, img: torch.Tensor, txt: torch.Tensor, timesteps: list[float], cfg_scale: list[float], scheduler: Any = None) -> torch.Tensor: + """Main denoising loop.""" + total_steps = len(timesteps) - 1 + + for step_index in range(total_steps): + t_curr = timesteps[step_index] + + # Handle CFG (Classifier-Free Guidance) + if cfg_scale[step_index] > 1.0: + # Batch positive and negative prompts if applicable + pred_pos = model(img, t_curr, txt) + # ... + else: + pred = model(img, t_curr, txt) + + # Step the scheduler + img = scheduler.step(pred, t_curr, img).prev_sample + + return img + ``` + +3. **Schedulers** + + If your model requires a novel scheduler, add it to the scheduler mapping (e.g., `invokeai/backend/[newmodel]/schedulers.py`). + + +:::tip[Checklist: Core Inference]{icon="approve-check"} + - [ ] Noise generation (`get_noise_newmodel()`) + - [ ] Pack/unpack functions (if transformer-based) + - [ ] Timestep schedule generation + - [ ] Denoise loop implementation + - [ ] Map supported schedulers +::: + +--- + +## 5. Invocations + +Invocations expose your PyTorch functions as isolated execution nodes in InvokeAI's graph. + + +1. **Model Loader Invocation** + + Loads the components (Transformer, VAE, etc.) and provides them to downstream nodes. + + ```python title="invokeai/app/invocations/[newmodel]_model_loader.py" + @invocation("newmodel_model_loader", title="NewModel Loader", category="model_loader") + class NewModelModelLoaderInvocation(BaseInvocation): + model: ModelIdentifierField = InputField(description="Main model") + + def invoke(self, context: InvocationContext) -> NewModelLoaderOutput: + transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer}) + vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE}) + return NewModelLoaderOutput(transformer=transformer, vae=vae) + ``` + +2. 
**Text Encoder Invocation** + + Tokenizes the prompt and runs the text encoder(s). + + ```python title="invokeai/app/invocations/[newmodel]_text_encoder.py" + @invocation("newmodel_text_encode", title="NewModel Text Encoder", category="conditioning") + class NewModelTextEncoderInvocation(BaseInvocation): + prompt: str = InputField() + encoder: EncoderField = InputField() + + def invoke(self, context: InvocationContext) -> ConditioningOutput: + # 1. Tokenize prompt + # 2. Run encoder to get embeddings + # 3. Save to context and return + conditioning_name = context.conditioning.save(ConditioningFieldData(...)) + return ConditioningOutput(conditioning=ConditioningField(conditioning_name=conditioning_name)) + ``` + +3. **Denoise Invocation** + + Wraps the `denoise` loop you wrote in the previous section. + + ```python title="invokeai/app/invocations/[newmodel]_denoise.py" + @invocation("newmodel_denoise", title="NewModel Denoise", category="latents") + class NewModelDenoiseInvocation(BaseInvocation): + latents: LatentsField | None = InputField(default=None) + positive_conditioning: ConditioningField = InputField() + transformer: TransformerField = InputField() + steps: int = InputField(default=20) + cfg_scale: float = InputField(default=7.0) + + def invoke(self, context: InvocationContext) -> LatentsOutput: + # Generate noise, get schedule, and call your denoise() function + pass + ``` + +4. **VAE Encode / Decode Invocations** + + Create nodes to transition between pixel space (images) and latent space. + + +:::tip[Checklist: Invocations]{icon="approve-check"} + - [ ] Define output classes (e.g., `NewModelLoaderOutput`) + - [ ] Model loader invocation (`[newmodel]_model_loader.py`) + - [ ] Text encoder invocation (`[newmodel]_text_encoder.py`) + - [ ] Denoise invocation (`[newmodel]_denoise.py`) + - [ ] VAE encode/decode invocations (`[newmodel]_vae_encode.py`, `[newmodel]_vae_decode.py`) +::: + +--- + +## 6. 
Frontend: Graph Building + +The UI doesn't know about Python functions; it only knows how to build graphs of Invocations. + + +1. **Create the Graph Builder** + + Write a TypeScript function that constructs the node graph for your model. + + ```typescript title="invokeai/frontend/web/src/features/nodes/util/graph/generation/buildNewModelGraph.ts" + export const buildNewModelGraph = async (arg: GraphBuilderArg): Promise<GraphBuilderReturn> => { + const { state, manager } = arg; + const { model } = state.params; + const g = new Graph(); + + // 1. Add Loader + const modelLoader = g.addNode({ + id: NEWMODEL_MODEL_LOADER, + type: 'newmodel_model_loader', + model: Graph.getModelMetadataField(model), + }); + + // 2. Add Text Encoders + const positivePrompt = g.addNode({ + id: POSITIVE_CONDITIONING, + type: 'newmodel_text_encode', + prompt: state.params.positivePrompt, + }); + g.addEdge(modelLoader, 'encoder', positivePrompt, 'encoder'); + + // 3. Add Denoise + const denoise = g.addNode({ + id: NEWMODEL_DENOISE, + type: 'newmodel_denoise', + steps: state.params.steps, + cfg_scale: state.params.cfg, + }); + g.addEdge(modelLoader, 'transformer', denoise, 'transformer'); + g.addEdge(positivePrompt, 'conditioning', denoise, 'positive_conditioning'); + + // 4. Add VAE Decode + const l2i = g.addNode({ + id: NEWMODEL_VAE_DECODE, + type: 'newmodel_vae_decode', + }); + g.addEdge(modelLoader, 'vae', l2i, 'vae'); + g.addEdge(denoise, 'latents', l2i, 'latents'); + + return { g, denoise, posCond: positivePrompt }; + }; + ``` + +2. **Register the Graph Builder** + + Hook your graph builder into the main routing logic. + + ```typescript title="invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts" ins={5-6} + switch (base) { + case 'sdxl': + return buildSDXLGraph(arg); + case 'flux': + return buildFLUXGraph(arg); + case 'newmodel': + return buildNewModelGraph(arg); + } + ``` + +3. **Update Type Definitions** + + Add your new nodes to the strict frontend type unions. 
+ + ```typescript title="invokeai/frontend/web/src/features/nodes/util/graph/types.ts" ins="| 'newmodel_vae_decode'" + export type ImageOutputNodes = + | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'newmodel_vae_decode'; + ``` + +4. **Generation Modes** + + Update `invokeai/app/invocations/metadata.py` to include your new modes in `GENERATION_MODES` (e.g., `"newmodel_txt2img"`, `"newmodel_img2img"`). + + +:::tip[Checklist: Graph Building]{icon="approve-check"} + - [ ] Create graph builder (`buildNewModelGraph.ts`) + - [ ] Register graph builder in `useEnqueueCanvas.ts` + - [ ] Update node unions in `types.ts` + - [ ] Add generation modes to python `metadata.py` +::: + +--- + +## 7. Frontend: State & UI + +Finally, add any custom UI controls (like a specific scheduler dropdown) and manage their state. + + +1. **Add to Redux State** + + Update the parameters slice for your model-specific settings. + + ```typescript title="invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts" + interface ParamsState { + // ... + newmodelScheduler: 'euler' | 'heun'; + } + + const initialState: ParamsState = { + // ... + newmodelScheduler: 'euler', + }; + + // Add reducers and export selectors... + ``` + +2. **Parameter Recall** + + Ensure users can extract parameters from previously generated images by updating `invokeai/frontend/web/src/features/metadata/parsing.tsx`. + + ```typescript title="invokeai/frontend/web/src/features/metadata/parsing.tsx" + const recallNewmodelScheduler = (metadata: CoreMetadata) => { + if (metadata.scheduler) { + dispatch(setNewmodelScheduler(metadata.scheduler)); + } + }; + ``` + + +:::tip[Checklist: State & UI]{icon="approve-check"} + - [ ] Extend state interface for model-specific parameters + - [ ] Create reducers and selectors + - [ ] Add parameter recall handlers in `parsing.tsx` +::: + +--- + +## 8. Optional Features + +Depending on the model, you may want to support additional features. 
+ +### ControlNet Support +Requires backend configuration (`configs/controlnet.py`), a custom invocation (`[newmodel]_controlnet.py`), and frontend graph integration (`addControlNets`). + +### LoRA Support +Requires defining a LoRA config (`configs/lora.py`), updating the model loader to pass LoRA fields, and wiring `addLoRAs` in the frontend graph builder. + +### IP-Adapter +Requires a custom invocation for image prompting (`[newmodel]_ip_adapter.py`) and frontend integration via `addIPAdapters`. + +--- + +## 9. Starter Models + +To allow users to easily download your model from the Model Manager UI, add it to the starter models list. + +```python title="invokeai/backend/model_manager/starter_models.py" +newmodel_main = StarterModel( + name="NewModel Main", + base=BaseModelType.NewModel, + source="organization/newmodel-main", # HuggingFace repo + description="NewModel main transformer.", + type=ModelType.Main, +) + +STARTER_MODELS.append(newmodel_main) +``` + +:::tip[Checklist: Starter Models]{icon="approve-check"} +- [ ] Define main model StarterModel +- [ ] Define VAE/Encoder StarterModels if separate +- [ ] Set dependencies correctly if required +- [ ] Add to `STARTER_MODELS` list +::: + +--- + +## Summary of Integration Files + +A complete minimal `txt2img` integration touches the following areas: + + +- invokeai + - app/invocations + - metadata.py + - `[newmodel]_model_loader.py` + - `[newmodel]_text_encoder.py` + - `[newmodel]_denoise.py` + - `[newmodel]_vae_decode.py` + - backend + - model_manager + - taxonomy.py + - configs + - main.py + - factory.py + - load/model_loaders + - `[newmodel].py` + - starter_models.py + - `[newmodel]` + - sampling_utils.py + - denoise.py + - frontend/web/src/features + - nodes/util/graph + - generation/buildNewModelGraph.ts + - types.ts + - queue/hooks/useEnqueueCanvas.ts + - controlLayers/store/paramsSlice.ts + - metadata/parsing.tsx + diff --git a/docs/src/content/docs/development/Guides/tests.mdx 
b/docs/src/content/docs/development/Guides/tests.mdx new file mode 100644 index 00000000000..c2dfd52b98c --- /dev/null +++ b/docs/src/content/docs/development/Guides/tests.mdx @@ -0,0 +1,102 @@ +--- +title: Writing Tests +lastUpdated: 2026-02-20 +--- + +## Frontend Tests + +We use `vitest` to run the frontend tests. (See [vite.config.ts](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/vite.config.mts) for the default `vitest` options.) + +{/* TODO: Finish frontend tests docs */} + +## Backend Tests + +We use `pytest` to run the backend python tests. (See [pyproject.toml](https://github.com/invoke-ai/InvokeAI/blob/main/pyproject.toml) for the default `pytest` options.) + +### Fast vs. Slow +All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator). + +'Fast' tests are run to validate every PR, and are fast enough that they can be run routinely during development. + +'Slow' tests are currently only run manually on an ad-hoc basis. In the future, they may be automated to run nightly. Most developers are only expected to run the 'slow' tests that directly relate to the feature(s) that they are working on. + +As a rule of thumb, tests should be marked as 'slow' if there is a chance that they take >1s (e.g. on a CPU-only machine with slow internet connection). Common examples of slow tests are tests that depend on downloading a model, or running model inference. + +### Running Tests + +Below are some common test commands: + +```bash +# Run the fast tests. (This implicitly uses the configured default option: `-m "not slow"`.) +pytest tests/ + +# Equivalent command to run the fast tests. +pytest tests/ -m "not slow" + +# Run the slow tests. +pytest tests/ -m "slow" + +# Run the slow tests from a specific file. +pytest tests/path/to/slow_test.py -m "slow" + +# Run all tests (fast and slow). 
+pytest tests -m "" +``` + +### Test Organization + +All backend tests are in the [`tests/`](https://github.com/invoke-ai/InvokeAI/tree/main/tests) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`. + +TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true. + +### Tests that depend on models + +There are a few things to keep in mind when adding tests that depend on models. + +1. If a required model is not already present, it should automatically be downloaded as part of the test setup. +2. If a model is already downloaded, it should not be re-downloaded unnecessarily. +3. Take reasonable care to keep the total number of models required for the tests low. Whenever possible, re-use models that are already required for other tests. If you are adding a new model, consider including a comment to explain why it is required/unique. + +There are several utilities to help with model setup for tests. Here is a sample test that depends on a model: + +```python +import pytest +import torch + +from invokeai.backend.model_management.models.base import BaseModelType, ModelType +from invokeai.backend.util.test_utils import install_and_load_model + +@pytest.mark.slow +def test_model(model_installer, torch_device): + model_info = install_and_load_model( + model_installer=model_installer, + model_path_id_or_url="HF/dummy_model_id", + model_name="dummy_model", + base_model=BaseModelType.StableDiffusion1, + model_type=ModelType.Dummy, + ) + + dummy_input = build_dummy_input(torch_device) + + with torch.no_grad(), model_info as model: + model.to(torch_device, dtype=torch.float32) + output = model(dummy_input) + + # Validate output... 
+``` + +### Test Coverage + +To review test coverage, append `--cov` to your pytest command: + +```bash +pytest tests/ --cov +``` + +Test outcomes and coverage will be reported in the terminal. In addition, a more detailed report is created in both XML and HTML format in the `./coverage` folder. The HTML output is particularly helpful in identifying untested statements where coverage should be improved. The HTML report can be viewed by opening `./coverage/html/index.html`. + +:::note HTML coverage report output example + ![html-overview](./assets/html-overview.png) + + ![html-detail](./assets/html-detail.png) +::: diff --git a/docs/src/content/docs/development/Process/pr-merge-policy.mdx b/docs/src/content/docs/development/Process/pr-merge-policy.mdx new file mode 100644 index 00000000000..ebd08feaaf0 --- /dev/null +++ b/docs/src/content/docs/development/Process/pr-merge-policy.mdx @@ -0,0 +1,72 @@ +--- +title: PR Merge Policy +lastUpdated: 2026-02-19 +--- + +import { Steps } from '@astrojs/starlight/components'; + +This document outlines the process for reviewing and merging pull requests (PRs) into the InvokeAI repository. + +## Review Process + + + 1. Assignment + + One of the repository maintainers will assign collaborators to review a pull request. The assigned reviewer(s) will be responsible for conducting the code review. + + 2. Review and Iteration + + The assignee is responsible for: + - Reviewing the PR thoroughly + - Providing constructive feedback + - Iterating with the PR author until the assignee is satisfied that the PR is fit to merge + - Ensuring the PR meets code quality standards, follows project conventions, and doesn't introduce bugs or regressions + + 3. Approval and Notification + + Once the assignee is satisfied with the PR: + - The assignee approves the PR + - The assignee alerts one of the maintainers that the PR is ready for merge using the **#request-reviews Discord channel** + + 4. 
Final Merge + + One of the maintainers is responsible for: + - Performing a final check of the PR + - Merging the PR into the appropriate branch + + :::caution[Important] + Collaborators are strongly discouraged from merging PRs on their own, except in case of emergency (e.g., critical bug fix and no maintainer is available). + ::: + + 5. Release Policy + + Once a feature release candidate is published, no feature PRs are to + be merged into main. Only bugfixes are allowed until the final + release. + + +## Best Practices + +### Clean Commit History + +To encourage a clean development log, PR authors are encouraged to use `git rebase -i` to suppress trivial commit messages (e.g., `ruff` and `prettier` formatting fixes) after the PR is accepted but before it is merged. + +### Merge Strategy + +The maintainer will perform either a **3-way merge** or **squash merge** when merging a PR into the `main` branch. This approach helps avoid rebase conflict hell and maintains a cleaner project history. + +### Attribution + +The PR author should reference any papers, source code or +documentation that they used while creating the code both in the PR +and as comments in the code itself. If there are any licensing +restrictions, these should be linked to and/or reproduced in the repo +root. 
+ +## Summary + +This policy ensures that: +- All PRs receive proper review from assigned collaborators +- Maintainers have final oversight before code enters the main branch +- The commit history remains clean and meaningful +- Merge conflicts are minimized through appropriate merge strategies diff --git a/docs/src/content/docs/development/Process/release-process.mdx b/docs/src/content/docs/development/Process/release-process.mdx new file mode 100644 index 00000000000..74a967cbdf5 --- /dev/null +++ b/docs/src/content/docs/development/Process/release-process.mdx @@ -0,0 +1,157 @@ +--- +title: Release Process +lastUpdated: 2025-12-26 +--- + +The Invoke application is published as a python package on [PyPI]. This includes both a source distribution and built distribution (a wheel). + +Most users install it with the [Launcher](https://github.com/invoke-ai/launcher/), others with `pip`. + +The launcher uses GitHub as the source of truth for available releases. + +## Broad Strokes + +- Merge all changes and bump the version in the codebase. +- Tag the release commit. +- Wait for the release workflow to complete. +- Approve the PyPI publish jobs. +- Write GH release notes. + +## General Prep + +Make a developer call-out for PRs to merge. Merge and test things +out. Create a branch with a name like user/chore/vX.X.X-prep and bump the version by editing +`invokeai/version/invokeai_version.py` and commit locally. + +## Release Workflow + +The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI. + +It is triggered on **tag push**, when the tag matches `v*`. + +### Triggering the Workflow + +Ensure all commits that should be in the release are merged into this branch, and that you have pulled them locally. + +Run `make tag-release` to tag the current commit and kick off the workflow. You will be prompted to provide a message - use the version specifier. 
+ +If this version's tag already exists for some reason (maybe you had to make a last minute change), the script will overwrite it. + +Push the tag to trigger the workflow. + +> In case you cannot use the Make target, the release may also be dispatched [manually] via GH. + +### Workflow Jobs and Process + +The workflow consists of a number of concurrently-run checks and tests, then two final publish jobs. + +The publish jobs require manual approval and are only run if the other jobs succeed. + +#### `check-version` Job + +This job ensures that the `invokeai` python package version specifier matches the tag for the release. The version specifier is pulled from the `__version__` variable in `invokeai/version/invokeai_version.py`. + +This job uses [samuelcolvin/check-python-version]. + +> Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc. + +#### Check and Test Jobs + +Next, these jobs run and must pass. They are the same jobs that are run for every PR. + +- **`python-tests`**: runs `pytest` on matrix of platforms +- **`python-checks`**: runs `ruff` (format and lint) +- **`frontend-tests`**: runs `vitest` +- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports) +- **`typegen-checks`**: ensures the frontend and backend types are synced + +#### `build-wheel` Job + +This sets up both python and frontend dependencies and builds the python package. Internally, this runs `./scripts/build_wheel.sh` and uploads `dist.zip`, which contains the wheel and unarchived build. + +You don't need to download or test these artifacts. + +#### Sanity Check & Smoke Test + +At this point, the release workflow pauses as the remaining publish jobs require approval. + +It's possible to test the python package before it gets published to PyPI. 
We've never had problems with it, so it's not necessary to do this. + +But, if you want to be extra-super careful, here's how to test it: + +- Download the `dist.zip` build artifact from the `build-wheel` job +- Unzip it and find the wheel file +- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/) - but instead of installing from PyPI, install from the wheel +- Test the app + +##### Something isn't right + +If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?) and start over. + +#### PyPI Publish Jobs + +The publish jobs will not run if any of the previous jobs fail. + +They use [GitHub environments], which are configured as [trusted publishers] on PyPI. + +Both jobs require @lstein or @blessedcoolant to approve them from the workflow's **Summary** tab. + +- Click the **Review deployments** button +- Select the environment (either `testpypi` or `pypi` - typically you select both) +- Click **Approve and deploy** + +> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release. + +##### Failing PyPI Publish + +Check the [python infrastructure status page] for incidents. + +If there are no incidents, contact @lstein or @blessedcoolant, who have owner access to GH and PyPI, to see if access has expired or something like that. + +#### `publish-testpypi` Job + +Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment. 
+ +This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release for some reason: + +- Approve this publish job without approving the prod publish +- Let it finish +- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/), making sure to use the Test PyPI index URL: `https://test.pypi.org/simple/` +- Test the app + +#### `publish-pypi` Job + +Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment. + +It's a good idea to wait to approve and run this job until you have the release notes ready! + +## Prep and publish the GitHub Release + +1. [Draft a new release] on GitHub, choosing the tag that triggered the release. +2. The **Generate release notes** button automatically inserts the changelog and new contributors. Make sure to select the correct tags for this release and the last stable release. GH often selects the wrong tags - do this manually. +3. Write the release notes, describing important changes. Contributions from community members should be shouted out. Use the GH-generated changelog to see all contributors. If there are Weblate translation updates, open that PR and shout out every person who contributed a translation. +4. Check **Set as a pre-release** if it's a pre-release. +5. Approve and wait for the `publish-pypi` job to finish if you haven't already. +6. Publish the GH release. +7. Post the release in Discord in the [releases](https://discord.com/channels/1020123559063990373/1149260708098359327) channel with abbreviated notes. For example: + > Invoke v5.7.0 (stable): [https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0](https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0) + > + > It's a pretty big one - Form Builder, Metadata Nodes (thanks @SkunkWorxDark!), and much more. +8. Right click the message in releases and copy the link to it. 
Then, post that link in the [new-release-discussion](https://discord.com/channels/1020123559063990373/1149506274971631688) channel. For example: + > Invoke v5.7.0 (stable): [https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248](https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248) + +## Manual Release + +The `release` workflow can be dispatched manually. You must dispatch the workflow from the right tag, else it will fail the version check. + +This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above. + +[PyPI]: https://pypi.org/ +[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new +[Test PyPI]: https://test.pypi.org/ +[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/ +[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment +[trusted publishers]: https://docs.pypi.org/trusted-publishers/ +[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version +[manually]: #manual-release +[python infrastructure status page]: https://status.python.org/ diff --git a/docs/src/content/docs/development/Setup/dev-environment.mdx b/docs/src/content/docs/development/Setup/dev-environment.mdx new file mode 100644 index 00000000000..5b93d053f71 --- /dev/null +++ b/docs/src/content/docs/development/Setup/dev-environment.mdx @@ -0,0 +1,267 @@ +--- +title: Development Environment +lastUpdated: 2026-02-19 +--- + +import { LinkCard, Steps, Tabs, TabItem, FileTree, LinkButton } from '@astrojs/starlight/components' +import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro' + +:::caution + Invoke uses a SQLite database. When you run the application as a dev install, you accept responsibility for your database. 
This means making regular backups (especially before pulling) and/or fixing it yourself in the event that a PR introduces a schema change. + + If you don't need to persist your db, you can use an ephemeral in-memory database by setting `use_memory_db: true` in your `invokeai.yaml` file. You'll also want to set `scan_models_on_startup: true` so that your models are registered on startup. +::: + +## Initial Setup + + + 1. Refer to the system requirements. + + + + 2. Fork and clone the InvokeAI git repository. + + + Fork Repository + + + Next, clone your fork to your local machine. You can use either HTTPS or SSH, depending on your git configuration. + + 3. This repository uses Git LFS to manage large files. To ensure all assets are downloaded: + - Install git-lfs → [Download here](https://git-lfs.com/) + - Enable automatic LFS fetching for this repository: + ```shell + git config lfs.fetchinclude "*" + ``` + - Fetch files from LFS (only needs to be done once; subsequent `git pull` will fetch changes automatically): + ```shell + git lfs pull + ``` + 4. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install. + 5. Follow the [manual install](../installation/manual) guide, with some modifications to the install command: + + - Use `.` instead of `invokeai` to install from the current directory. You don't need to specify the version. + - Add `-e` after the `install` operation to make this an [editable install](https://pip.pypa.io/en/latest/cli/pip_install/#cmdoption-e). That means your changes to the python code will be reflected when you restart the Invoke server. + - When installing the `invokeai` package, add the `dev`, `test` and `docs` package options to the package specifier. You may or may not need the `xformers` option - follow the manual install guide to figure that out. 
So, your package specifier will be either `".[dev,test,docs]"` or `".[dev,test,docs,xformers]"`. Note the quotes! + + With the modifications made, the install command should look something like this: + + ```sh + uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall + ``` + 6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI. + + This is because the UI build is not distributed with the source code. You need to build it manually. End the running server instance. + + *(If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.)* + + 7. Install the frontend dev toolchain, paying attention to versions: + + - [`nodejs`](https://nodejs.org/) (tested on LTS, v22) + - [`pnpm`](https://pnpm.io/installation) (tested on v10) + + 8. Do a production build of the frontend: + + ```sh + cd invokeai/frontend/web + pnpm i + pnpm build + ``` + + 9. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes. + + +## Backend Development + +Experimenting with changes to the Python source code is a drag if you have to re-start the server and re-load multi-gigabyte models after every change. + +For a faster development workflow, add the `--dev_reload` flag when starting the server. The server will watch for changes to all the Python files in the `invokeai` directory and apply those changes to the running server on the fly. + +This will allow you to avoid restarting the server (and reloading models) in most cases, but there are some caveats; see the [jurigged documentation](https://github.com/breuleux/jurigged#caveats) for details. 
+ +### Testing + +The backend tests require the `test` dependency group, which you installed during the initial setup. + +See the [Tests](../tests) documentation for information about running and writing tests. + +## Frontend Development + +You'll need to run `pnpm build` every time you pull in new changes to the frontend. + +Another option is to skip the build and instead run the UI in dev mode: + +```sh +cd invokeai/frontend/web +pnpm dev +``` + +This starts a vite dev server for the UI at `127.0.0.1:5173`, which you will use instead of `127.0.0.1:9090`. + +The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out. It will hot-reload the UI as you make changes to the frontend code. Sometimes the hot-reload doesn't work, and you need to manually refresh the browser tab. + +## Documentation + +This documentation is built on [Astro Starlight](https://starlight.astro.build/). It provides a pleasant developer environment for writing engaging documentation, and is built on top of the Astro static site generator, which provides a powerful and flexible framework for building fast, modern websites. + +To contribute to the documentation, simply edit the markdown files in the `./docs` directory. You can run a local dev server with hot-reloading for changes made to the docs. + + + - **docs** + - public/ + - src + - content docs content lives here + - docs + - lib + - components/ + - utils/ + - content.config.ts + - scripts/ + - tests/ + - invokeai/ + - docker/ + - coverage/ + + + + 1. Navigate to the `docs` directory and install the dependencies: + + ```sh + cd docs + pnpm install + ``` + 2. Start the dev server: + + ```sh + pnpm run dev + ``` + + +## VSCode Setup + +VSCode offers excellent tools for InvokeAI development, including a python debugger, automatic virtual environment activation, and remote development capabilities. 
+ +### Prerequisites + +First, ensure you have the following extensions installed: +- [Python](https://marketplace.visualstudio.com/items?itemName=ms-python.python) +- [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) + +It's also highly recommended to install the Jupyter extensions if you plan on working with notebooks: +- [Jupyter](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.jupyter) +- [Jupyter Cell Tags](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.vscode-jupyter-cell-tags) +- [Jupyter Notebook Renderers](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.jupyter-renderers) +- [Jupyter Slide Show](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.vscode-jupyter-slideshow) + +### Configuration + + + + Creating a VSCode workspace for working on InvokeAI is highly recommended to hold InvokeAI-specific settings and configs. + + 1. Open the InvokeAI repository directory in VSCode + 2. Go to `File` > `Save Workspace As` and save it *outside* the repository + + **Default Python Interpreter** + + To enable automatic virtual environment activation: + 1. Open the command palette (`Ctrl+Shift+P` / `Cmd+Shift+P`) and run `Preferences: Open Workspace Settings (JSON)` + 2. Add `python.defaultInterpreterPath` to your settings, pointing to your virtual environment's python executable: + + ```jsonc + { + "folders": [ + { "path": "InvokeAI" }, + { "path": "/path/to/invokeai_root" } + ], + "settings": { + "python.defaultInterpreterPath": "/path/to/invokeai_root/.venv/bin/python" + } + } + ``` + Now, opening the integrated terminal or running python will automatically use your InvokeAI virtual environment. + + + + We use Python's typing system in InvokeAI. PR reviews will include checking that types are present and correct. + + Pylance provides type checking in the editor. To enable it: + + 1. Open a Python file + 2. Look along the status bar in VSCode for `{ } Python` + 3. 
Click the `{ }` + 4. Turn type checking on (Basic is fine) + + You'll now see red squiggly lines where type issues are detected. Hover your cursor over the indicated symbols to see what's wrong. + + + + Debugging configs are managed in a `launch.json` file. Follow the [official guide](https://code.visualstudio.com/docs/python/debugging) to set up your `launch.json` and try it out. + + Add these InvokeAI debugging configurations to your `launch.json`: + + ```jsonc + { + "version": "0.2.0", + "configurations": [ + { + "name": "InvokeAI Web", + "type": "python", + "request": "launch", + "program": "scripts/invokeai-web.py", + "args": [ + "--root", "/path/to/invokeai_root", + "--host", "0.0.0.0" + ], + "justMyCode": true + }, + { + "name": "InvokeAI CLI", + "type": "python", + "request": "launch", + "program": "scripts/invokeai-cli.py", + "justMyCode": true + }, + { + "name": "InvokeAI Test", + "type": "python", + "request": "launch", + "module": "pytest", + "args": ["--capture=no"], + "justMyCode": true + }, + { + "name": "InvokeAI Single Test", + "type": "python", + "request": "launch", + "module": "pytest", + "args": ["tests/nodes/test_invoker.py"], + "justMyCode": true + } + ] + } + ``` + + + + This provides a smooth experience for running the backend on a powerful Linux machine while developing on another device. + + Consult the [official guide](https://code.visualstudio.com/docs/remote/remote-overview) to get it set up. We suggest using VSCode's included settings sync so that your remote dev host has all the same app settings and extensions automatically. + + :::tip[Port Forwarding] + Automatic port forwarding can be flaky. You can disable it in `Preferences: Open Remote Settings (ssh: hostname)` by unticking `remote.autoForwardPorts`. 
+ + To forward ports reliably, use SSH on the remote dev client: + ```bash + ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@remote-dev-host + ``` + Run this outside the VSCode integrated terminal so it persists across VSCode restarts. + ::: + + diff --git a/docs/src/content/docs/development/index.mdx b/docs/src/content/docs/development/index.mdx new file mode 100644 index 00000000000..7c611a4ab66 --- /dev/null +++ b/docs/src/content/docs/development/index.mdx @@ -0,0 +1,48 @@ +--- +title: InvokeAI Development +sidebar: + order: 1 +lastUpdated: 2026-02-19 +--- + +import { Card, CardGrid, LinkButton } from '@astrojs/starlight/components'; + +This section of the documentation is for developers interested in contributing to the InvokeAI codebase, or building on top of it. It includes guides for setting up your development environment, understanding the project structure, and making your first contribution. + + + + Instructions for setting up your local development environment, including how to run the project locally and how to set up your tooling. + + + Learn more + + + + An introduction to the front end codebase, including the technologies used and how to get started. + + + Learn more + + + + A collection of guides for common development tasks, such as adding new model architectures, making tests, and more. + + + Learn more + + + + An overview of the InvokeAI architecture, including the major components and how they interact. + + + Learn more + + + + An overview of the development processes we follow, including our pull request merge policy and release process. 
+ + + Learn more + + + diff --git a/docs/src/content/docs/features/assets/board_settings.png b/docs/src/content/docs/features/assets/board_settings.png new file mode 100644 index 00000000000..44c4ef240bd Binary files /dev/null and b/docs/src/content/docs/features/assets/board_settings.png differ diff --git a/docs/src/content/docs/features/assets/board_tabs.png b/docs/src/content/docs/features/assets/board_tabs.png new file mode 100644 index 00000000000..23e5f8a91cf Binary files /dev/null and b/docs/src/content/docs/features/assets/board_tabs.png differ diff --git a/docs/src/content/docs/features/assets/board_thumbnails.png b/docs/src/content/docs/features/assets/board_thumbnails.png new file mode 100644 index 00000000000..1c739d48546 Binary files /dev/null and b/docs/src/content/docs/features/assets/board_thumbnails.png differ diff --git a/docs/src/content/docs/features/assets/gallery.png b/docs/src/content/docs/features/assets/gallery.png new file mode 100644 index 00000000000..89f2dd1b463 Binary files /dev/null and b/docs/src/content/docs/features/assets/gallery.png differ diff --git a/docs/src/content/docs/features/assets/image_menu.png b/docs/src/content/docs/features/assets/image_menu.png new file mode 100644 index 00000000000..2f10f280acf Binary files /dev/null and b/docs/src/content/docs/features/assets/image_menu.png differ diff --git a/docs/src/content/docs/features/assets/info_button.png b/docs/src/content/docs/features/assets/info_button.png new file mode 100644 index 00000000000..539cd6252e0 Binary files /dev/null and b/docs/src/content/docs/features/assets/info_button.png differ diff --git a/docs/src/content/docs/features/assets/thumbnail_menu.png b/docs/src/content/docs/features/assets/thumbnail_menu.png new file mode 100644 index 00000000000..a56caadbd8e Binary files /dev/null and b/docs/src/content/docs/features/assets/thumbnail_menu.png differ diff --git a/docs/src/content/docs/features/assets/top_controls.png 
b/docs/src/content/docs/features/assets/top_controls.png new file mode 100644 index 00000000000..c5d3cdc854b Binary files /dev/null and b/docs/src/content/docs/features/assets/top_controls.png differ diff --git a/docs/src/content/docs/features/gallery.mdx b/docs/src/content/docs/features/gallery.mdx new file mode 100644 index 00000000000..79da4501eb9 --- /dev/null +++ b/docs/src/content/docs/features/gallery.mdx @@ -0,0 +1,114 @@ +--- +title: Gallery Panel +description: Learn how to manage, organize, and use your generated images and assets with the Gallery Panel in InvokeAI. +lastUpdated: 2026-02-19 +--- + +import { Card, CardGrid, Steps } from '@astrojs/starlight/components'; + +The Gallery Panel is a fast way to review, find, and make use of images you've generated and loaded. The Gallery is divided into **Boards**. The *Uncategorized* board is always present, but you can create your own for better organization. + +![Gallery Panel Overview](./assets/gallery.png) + +--- + +## Board Display and Settings + +At the very top of the Gallery Panel, you will find the board disclosure and settings buttons. + +![Top Controls](./assets/top_controls.png) + +The **disclosure button** shows the name of the currently selected board and allows you to toggle the visibility of the board thumbnails. + +![Board Thumbnails](./assets/board_thumbnails.png) + +The **settings button** opens a list of customization options: + +![Board Settings](./assets/board_settings.png) + +- **Image Size:** A slider that lets you control the size of the image previews in the gallery. +- **Auto-Switch to New Images:** When enabled, newly generated images will automatically load into the current image panel (on the Text to Image tab) or the result panel (on the Image to Image tab). This happens invisibly even if you are on a different tab during generation. +- **Auto-Assign Board on Click:** Whenever an image is generated or saved, it is placed into a board. 
The destination board is marked with an `AUTO` badge. + - *When enabled:* The board selected at the moment you click **Invoke** becomes the destination. This allows you to queue multiple generations into different boards without waiting for them to finish. + - *When disabled:* An **Auto-Add Board** dropdown appears, allowing you to set one specific board as the permanent destination for all new images. +- **Always Show Image Size Badge:** Toggles whether the resolution (e.g., 512x512) is displayed on each image preview thumbnail. + +Below these buttons is the **Search Boards** text entry area, allowing you to quickly find specific boards by name. Next to it is the **Add Board (+)** button for creating new boards. + +:::tip +You can rename any board by simply clicking on its name under the thumbnail and typing the new name. +::: + +--- + +## Board Management + +Each board has a context menu accessible via right-click (or Ctrl+click). + +![Thumbnail Menu](./assets/thumbnail_menu.png) + +- **Auto-add to this Board:** If *Auto-Assign Board on Click* is disabled in settings, use this option to quickly set the selected board as the default destination for new images. +- **Download Board:** Packages all images within the board into a `.zip` file. A notification link will be provided when the download is ready. +- **Delete Board:** Permanently removes the board and all of its contents. + +:::danger +Deleting a board will **permanently delete all images** contained within it. Proceed with caution! +::: + +### Board Contents + +Every board is organized into two distinct tabs: + +![Board Tabs](./assets/board_tabs.png) + +1. **Images:** Images generated directly within InvokeAI. +2. **Assets:** External images you have uploaded to use as an [Image Prompt](https://support.invoke.ai/support/solutions/articles/151000159340-using-the-image-prompt-adapter-ip-adapter-) or within the Image to Image tab. 
+ +--- + +## Image Interaction + +Every image generated by InvokeAI stores its generation metadata (prompt, seed, models, etc.) directly inside the file. You can read this data by selecting the image and clicking the **Info button** ![Info Button](./assets/info_button.png) in any result panel. + +Additionally, each image has a context menu (right-click or Ctrl+click) with powerful workflow actions: + +![Image Menu](./assets/image_menu.png) + +*Options marked with an asterisk (\*) require the image to have generation metadata.* + + + + - **Open in New Tab:** Opens the image in a separate browser tab. + - **Download Image:** Saves the image to your local device. + - **Star Image:** Pins the image to the top of the gallery. *(Also available by clicking the star icon on hover).* + + + - **Load Workflow*:** Loads the saved workflow settings into the Workflow tab and opens it. + - **Remix Image*:** Loads all generation settings (**excluding** the Seed) into the control panel. + - **Use Prompt*:** Loads only the text prompts. + - **Use Seed*:** Loads only the Seed. + - **Use All*:** Loads all generation settings into the control panel. + + + - **Send to Image to Image:** Moves the image to the left-hand panel of the Image to Image tab. + - **Send to Unified Canvas:** **Replaces** the current Unified Canvas contents with this image. + + + - **Change Board:** Opens a prompt to move the image. *(You can also drag and drop images onto board thumbnails).* + - **Delete Image:** Permanently deletes the image from InvokeAI. + + + +:::caution + Selecting **Delete Image** will remove the image entirely from your InvokeAI installation. This action cannot be undone. +::: + +--- + +## Summary + +This walkthrough covers the Gallery interface and Boards. For instructions on actually generating images, please refer to the documentation on [Prompts](PROMPTS.md), the [Image to Image](IMG2IMG.md) tab, and the [Unified Canvas](UNIFIED_CANVAS.md). 
+ +## Acknowledgements + +A huge shout-out to the core team working to make the Web GUI a reality, including [psychedelicious](https://github.com/psychedelicious), [Kyle0654](https://github.com/Kyle0654), and [blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername) was the team's unofficial cheerleader and added tooltips/docs. diff --git a/docs/src/content/docs/features/hotkeys.mdx b/docs/src/content/docs/features/hotkeys.mdx new file mode 100644 index 00000000000..0c74b556b78 --- /dev/null +++ b/docs/src/content/docs/features/hotkeys.mdx @@ -0,0 +1,200 @@ +--- +title: Hotkeys +description: Learn how to use and customize hotkeys in InvokeAI, and how developers can interact with the hotkey system. +lastUpdated: 2026-02-19 +--- + +import { Tabs, TabItem, Steps, Card, CardGrid, Icon } from '@astrojs/starlight/components'; + +InvokeAI allows you to customize all keyboard shortcuts (hotkeys) to match your workflow preferences. This guide covers how to use and customize hotkeys as a user, as well as providing technical documentation for developers. + +## User Guide + + + + See all available keyboard shortcuts organized by category in one place. + + + Change any shortcut to your preference, or assign multiple key combinations to the same action. + + + Built-in validation prevents invalid combinations. + + + Your custom hotkeys are safely stored and restored across sessions. + + + +### Opening the Hotkeys Modal + +Press Shift + ? or click the **keyboard icon** in the application to open the Hotkeys Modal. + +### Managing Hotkeys + + + + - Browse all available hotkeys organized by category (App, Canvas, Gallery, Workflows, etc.). + - Search for specific hotkeys using the search bar. + - See the current key combination for each action. + + + + 1. Click the **pencil** button by the hotkey you want to change, or click the **plus** button to add a new one. + 2. Enter your new hotkey combination in the editor. 
+ - Use modifier buttons for quick-insert (Mod, Ctrl, Shift, Alt). + - Check the live preview to see how your hotkey will look. + 3. Click the **checkmark** or press Enter to save. + + + + - **Reset a single hotkey:** Click the counter-clockwise arrow icon next to customized hotkeys. + - **Reset all hotkeys:** In Edit Mode, click the **Reset All to Default** button at the bottom. + + + +### Hotkey Format Reference + +When customizing hotkeys, use the following formats: + +- **Valid Modifiers:** `mod` (Ctrl on Windows/Linux, Cmd on Mac), `ctrl`, `shift`, `alt` +- **Valid Keys:** Letters (`a-z`), Numbers (`0-9`), Function keys (`f1-f12`), Special keys (`enter`, `space`, `tab`, `backspace`, `delete`, `escape`), Arrow keys (`up`, `down`, `left`, `right`) +- **Multiple alternatives:** Separate with commas (e.g., `mod+enter, ctrl+enter`) + +:::tip + **Valid Hotkeys:** `mod+s`, `ctrl+shift+p`, `f5, mod+r`
+ **Invalid Hotkeys:** `mod+` (no key after modifier), `shift+ctrl+` (ends with modifier) +::: + +--- + +## Developer Guide + +The hotkeys system allows developers to centrally define, manage, and validate hotkeys throughout the application. It is built on top of `react-hotkeys-hook`. + +### Architecture + +The customizable hotkeys feature comprises the following components: + + + + **Hotkeys State Slice (`hotkeysSlice.ts`)** + - Stores custom hotkey mappings in Redux state. + - Persisted to IndexedDB using `redux-remember`. + - Provides actions to change, reset individual, or reset all hotkeys. + + + **`useHotkeyData` Hook (`useHotkeyData.ts`)** + - Defines all default hotkeys and merges them with custom hotkeys from the store. + - Returns the effective hotkeys to be used. + - Provides platform-specific key translations. + + + - **`HotkeyEditor.tsx`**: Inline editor with live preview, validation, and modifier insertion. + - **`HotkeysModal.tsx`**: The modal interface supporting View/Edit modes, searching, and categories. + + + +### Adding New Hotkeys + +To add a new hotkey to the system, follow these steps: + + +1. **Add Translation Strings** + In `invokeai/frontend/web/public/locales/en.json`: + ```json + { + "hotkeys": { + "app": { + "myAction": { + "title": "My Action", + "desc": "Description of what this hotkey does" + } + } + } + } + ``` + +2. **Register the Hotkey** + In `invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts`: + ```typescript + // Inside the appropriate category builder function + addHotkey('app', 'myAction', ['mod+k']); // Default binding + ``` + +3. 
**Use the Hotkey in Components** + ```tsx + import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData'; + + const MyComponent = () => { + const handleAction = useCallback(() => { + // Your action here + }, []); + + // Automatically uses custom hotkeys if configured + useRegisteredHotkeys({ + id: 'myAction', + category: 'app', // 'app', 'canvas', 'viewer', 'gallery', 'workflows' + callback: handleAction, + options: { enabled: true, preventDefault: true }, + dependencies: [handleAction] + }); + + // ... + }; + ``` + + +### Common Patterns + + + + Only enable hotkeys when certain conditions are met: + ```typescript + useRegisteredHotkeys({ + id: 'save', + category: 'app', + callback: handleSave, + options: { + enabled: hasUnsavedChanges && !isLoading, // Only when valid + preventDefault: true + }, + dependencies: [hasUnsavedChanges, isLoading, handleSave] + }); + ``` + + + Ensure hotkeys are only active when a specific region is focused: + ```tsx + import { useFocusRegion } from 'common/hooks/focus'; + + const MyComponent = () => { + const focusRegionRef = useFocusRegion('myRegion'); + + // Hotkey only works when this region has focus + useRegisteredHotkeys({ + id: 'myAction', + category: 'app', + callback: handleAction, + options: { enabled: true } + }); + + return
...
; + }; + ``` +
+ + Provide multiple alternatives for the same action: + ```typescript + // In useHotkeyData.ts + addHotkey('canvas', 'redo', ['mod+shift+z', 'mod+y']); // Two alternatives + ``` + +
+ +:::caution[Best Practices] +- **Use `mod` instead of `ctrl`**: Automatically maps to Cmd on Mac, Ctrl elsewhere. +- **Provide descriptive translations**: Help users understand what each hotkey does. +- **Avoid conflicts**: Check existing hotkeys before adding new ones. +- **Check enabled state**: Only activate hotkeys when the action is available. +- **Use dependencies correctly**: Ensure callbacks are stable with `useCallback`. +::: diff --git a/docs/src/content/docs/index.mdx b/docs/src/content/docs/index.mdx new file mode 100644 index 00000000000..169e27dcb31 --- /dev/null +++ b/docs/src/content/docs/index.mdx @@ -0,0 +1,117 @@ +--- +title: AI Image Generation
for Creatives +description: A leading creative engine built to empower professionals and enthusiasts alike. +template: splash +hero: + tagline: Invoke is a free and open-source creative engine for AI-powered image generation. Built by creatives, for creatives. Self-hosted, fully customizable, and Apache 2.0 licensed. + actions: + - text: Get Started + link: start-here/installation + icon: right-arrow + variant: primary + - text: View on GitHub + link: https://github.com/invoke-ai/InvokeAI + icon: github + variant: minimal +--- + +import { Card, CardGrid, LinkButton } from '@astrojs/starlight/components'; +import DownloadOptions from '@components/DownloadOptions.astro'; + +![Invoke Web UI](./assets/invoke-web-server-1.png) + +## The Creative Engine + +Invoke provides the most feature-complete and professional toolkit for AI image generation, built with production workflows in mind. + + + + Experience true **Layer-based Canvas Editing**. Invoke's powerful canvas allows you to draw, paint, sketch, and edit your creations with unhindered precision. Each layer can be independently manipulated—giving you the freedom to compose intricate scenes seamlessly without breaking a sweat. + + + Unlock limitless possibilities with **Advanced Node-based Workflows**. Build your own complex, reproducible pipelines via a completely visual graph backend. Expose custom UI parameters to share and update values easily without diving deep into the graph. + + + Stay on the cutting edge with out-of-the-box support for the latest foundational models, including **Flux, SDXL, SD 1.5**, and more. Manage checkpoints, LoRAs, Textual Inversions, and ControlNets with an intuitive Model Manager. + + + **Completely Local & Self-hosted**. Invoke runs locally on your own hardware. Your data, prompts, and creations belong entirely to you. Say goodbye to restrictive cloud services and privacy concerns—maintain absolute control over your art. 
+ + + +--- + +## Built for Production + +Invoke is designed to keep your creative flow moving. Unlike other tools that feel like engineering experiments, Invoke is a polished, professional-grade application. + + + + A beautiful, clean interface that prioritizes your artwork. No cluttered menus—just the tools you need right where you expect them. + + + Extensive ControlNet implementation allows you to guide generations with depth maps, edges, poses, and more for exact composition control. + + + Rapidly iterate on concepts with batch generation, prompt wildcards, and high-resolution upscaling, all without leaving the app. + + + Actively developed by a passionate open-source community. Jump into the conversation, report bugs, or request features directly. + + + +--- + +## Join the Ecosystem + +Whether you are looking to install the app, get support, train your own models, or contribute to the project, the Invoke community has you covered. + + + + Ready to dive in? The [Invoke Launcher](installation/quick_start.md) is the fastest way to get up and running on Windows, macOS, and Linux. For advanced setups, try [Docker](installation/docker.md) or a [manual Python installation](installation/manual.md). + + + Get Invoke + + + + + Want to train models on your own style? Invoke Training provides a dedicated UI for **Textual Inversion** and **LoRA training**. + + + Explore Invoke Training + + + + + Stuck? Check out our comprehensive [FAQ](./faq.md) for quick answers. If you still need a hand, our community is incredibly active and helpful. + + Join our Discord + + + + Invoke is open-source software made possible by [people across the world](contributing/contributors.md). We welcome code, documentation, and design contributions of any size! Read our [contributing guide](contributing/index.md) to start. + + + Read Contribution Guide + + + + +--- + +## Download Invoke + +Ready to unleash your creativity? Invoke is available for Windows, macOS, and Linux. 
Self-hosted, fully customizable, and Apache 2.0 licensed. + + + +--- + +:::note[About the Hosted Version] +The Invoke hosted platform has been shut down as the founding team joined Adobe. However, Invoke lives on as a thriving open-source project maintained by the community. + +The open-source version offers the same powerful features you may have used in the hosted service, with the added benefit of complete control and privacy through self-hosting. + +Stewardship of the project has been passed to Lincoln Stein (lstein) and Vic (Blessedcoolant), who have been core maintainers since the project's inception and continue to drive development forward with the community. +::: diff --git a/docs/src/content/docs/start-here/installation.mdx b/docs/src/content/docs/start-here/installation.mdx new file mode 100644 index 00000000000..4ffe37fb5fb --- /dev/null +++ b/docs/src/content/docs/start-here/installation.mdx @@ -0,0 +1,87 @@ +--- +title: Simple Installation +lastUpdated: 2026-02-18 +--- + +import { LinkCard, Tabs, TabItem, Steps } from '@astrojs/starlight/components' +import { externalLinks } from '@lib/utils/links' +import Link from '@components/Link.astro' +import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro' + +export const alternateLaunchers = [ + { + title: 'Stability Matrix', + description: 'Get the latest version of Stability Matrix for your platform.', + href: 'https://github.com/LykosAI/StabilityMatrix' + }, + { + title: 'LynxHub', + description: 'Get the latest version of LynxHub for your platform.', + href: 'https://github.com/KindaBrazy/LynxHub' + }, +] + + + +## Invoke Launcher + +The Invoke launcher is the official launcher to install, update and manage your invoke installation. + +### Download and Set Up the Launcher + +The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher. + + + + + 1. + 2. Run the `EXE` to install the Launcher and start it. + 3. 
A desktop shortcut will be created; use this to run the Launcher in the future. + 4. You can delete the `EXE` file you downloaded. + + + + + 1. + 2. Open the `DMG` and drag the app into `Applications`. + 3. Run the launcher from `Applications`. + 4. You can delete the `DMG` file you downloaded. + + + + + 1. + 2. You may need to edit the `AppImage` file properties and make it executable. + 3. Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it. + 4. Run the Launcher by double-clicking the `AppImage` or the shortcut you made. + + + + +### Install Invoke + +Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke. + +If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation. + +### Updating + +The Launcher will check for updates for itself _and_ Invoke. + +When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself. + +When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke. + +## Alternative Launchers + +:::caution + Installations from alternate launchers are not managed by Invoke, so we cannot guarantee it will work correctly. If you want a more stable experience, we recommend using the [official Invoke Launcher](#invoke-launcher). 
+::: + +{alternateLaunchers.map(({title, description, href}) => ( + +))} diff --git a/docs/src/content/docs/start-here/manual.mdx b/docs/src/content/docs/start-here/manual.mdx new file mode 100644 index 00000000000..418a4a73bee --- /dev/null +++ b/docs/src/content/docs/start-here/manual.mdx @@ -0,0 +1,193 @@ +--- +title: Manual Installation +lastUpdated: 2026-02-18 +--- + +import { LinkCard, Tabs, TabItem, Steps, LinkButton } from '@astrojs/starlight/components' +import SystemRequirementsLink from '@components/SystemRequirmentsLink.astro' + + + +## Are you in the right place? + + + + + +## Walkthrough + +We'll use [`uv`](https://github.com/astral-sh/uv) to install python and create a virtual environment, then install the `invokeai` package. `uv` is a modern, very fast alternative to `pip`. + +The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed. + + + + 1. Install `uv` as described in its [docs](https://docs.astral.sh/uv/getting-started/installation/#standalone-installer). We suggest using the standalone installer method. + + Run `uv --version` to confirm that `uv` is installed and working. After installation, you may need to restart your terminal to get access to `uv`. + + 2. Create a directory for your installation, typically in your home directory (e.g. `~/invokeai` or `$Home/invokeai`): + + + + ```ps + mkdir $Home/invokeai + cd $Home/invokeai + ``` + + + ```bash + mkdir ~/invokeai + cd ~/invokeai + ``` + + + + 3. Create a virtual environment in that directory: + + ```sh + uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv + ``` + + This command creates a portable virtual environment at `.venv` complete with a portable python 3.12. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything. + + 4. 
Activate the virtual environment: + + + + ```ps + .venv\Scripts\activate + ``` + + + ```bash + source .venv/bin/activate + ``` + + + + 5. Choose a version to install. + + + View Releases + + + 6. Determine the package specifier to use when installing. This is a performance optimization. + + - If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`. + - If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`. + + 7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is achieved by using [UV's built-in torch support.](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection) + + :::note[Torch Backend Selection] + Pick a torch backend only when it applies to your system. In all other cases, do not use a torch backend. + ::: + + + + + + Use: + ```sh + --torch-backend=cu128 + ``` + + + Do not use a torch backend. + + + + + + + + Use: + ```sh + --torch-backend=cu128 + ``` + + + Use: + ```sh + --torch-backend=cpu + ``` + + + Use: + ```sh + --torch-backend=rocm6.3 + ``` + + + Do not use a torch backend. + + + + + + 8. Install the `invokeai` package. Substitute the package specifier and version. + + + + ```sh + uv pip install == --python 3.12 --python-preference only-managed --force-reinstall + ``` + + + ```sh + uv pip install == --python 3.12 --python-preference only-managed --torch-backend= --force-reinstall + ``` + + + + + 9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment: + + + + ```ps + deactivate + .venv\Scripts\activate + ``` + + + ```bash + deactivate && source .venv/bin/activate + ``` + + + + 10. 
Run the application, specifying the directory you created earlier as the root directory: + + + + ```ps + invokeai-web --root ~/invokeai + ``` + + + ```bash + invokeai-web --root $Home/invokeai + ``` + + + + +If you run Invoke on a headless server, you might want to install and run Invoke on the command line. + +We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](/getting-started/installation). + +You can create your own scripts for this by copying the handful of commands in this guide. `uv`'s [`pip` interface docs](https://docs.astral.sh/uv/reference/cli/#uv-pip-install) may be useful. diff --git a/docs/src/content/docs/start-here/system-requirements.mdx b/docs/src/content/docs/start-here/system-requirements.mdx new file mode 100644 index 00000000000..5e2194ee563 --- /dev/null +++ b/docs/src/content/docs/start-here/system-requirements.mdx @@ -0,0 +1,131 @@ +--- +title: Hardware Requirements +sidebar: + order: 1 +lastUpdated: 2026-02-18 +--- + +import { Tabs, TabItem, Steps } from '@astrojs/starlight/components' + +import { externalLinks, internalLinks } from '@utils/links' + +import Link from '@components/Link.astro' + +Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested). + +## Hardware + +Hardware requirements vary significantly depending on model and image output size. + +The requirements below are rough guidelines for best performance. GPUs with less VRAM typically still work, if a bit slower. Follow the Low-VRAM mode guide to optimize performance. + +- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended. +- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs. 
+ +### Windows/Linux + +| Model Family | Best resolution | GPU (series) | VRAM (min) | RAM (min) | Notes | +|---|---:|---|---:|---:|---| +| SD1.5 | 512x512 | Nvidia 10xx+ | 4GB | 8GB | | +| SDXL | 1024x1024 | Nvidia 20xx+ | 8GB | 16GB | | +| FLUX.1 | 1024x1024 | Nvidia 20xx+ | 10GB | 32GB | | +| FLUX.2 Klein 4B | 1024x1024 | Nvidia 30xx+ | 12GB | 16GB | FP8 works with 8GB+; Diffusers + encoder | +| FLUX.2 Klein 9B | 1024x1024 | Nvidia 40xx | 24GB | 32GB | FP8 works with 12GB+; Diffusers + encoder | +| Z-Image Turbo | 1024x1024 | Nvidia 20xx+ | 8GB | 16GB | Q4_K 8GB; Q8/BF16 16GB+ | + +:::tip[`tmpfs` on Linux] + If your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space. +::: + +## Python + +:::tip[The launcher installs python for you] + You don't need to do this if you are installing with the . +::: + +Invoke requires python `3.11` through `3.12`. If you don't already have one of these versions installed, we suggest installing `3.12`, as it will be supported for longer. + +Check that your system has an up-to-date Python installed by running `python3 --version` in the terminal (Linux, macOS) or cmd/powershell (Windows). + +:::tip[Installing Python]{icon="seti:python"} + + + + 1. Install python with an official installer. + 2. The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox. + 3. You may need to install Microsoft Visual C++ Redistributable. + + + + + 1. Install python with an official installer. + 2. If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.11/Install\ Certificates.command` + 3. If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal. + + + + + 1. Installing python varies depending on your system. We recommend . 
+ 2. You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx` + + + +::: + +## Drivers + +If you have an Nvidia or AMD GPU, you may need to manually install drivers or other support packages for things to work well or at all. + +### Nvidia + +Run `nvidia-smi` on your system's command line to verify that drivers and CUDA are installed. If this command fails, or doesn't report versions, you will need to install drivers. + +Go to the and carefully follow the instructions for your system to get everything installed. + +Confirm that `nvidia-smi` displays driver and CUDA versions after installation. + +#### Linux - via Nvidia Container Runtime + +An alternative to installing CUDA locally is to use the to run the application in a container. + +#### Windows - Nvidia cuDNN DLLs + +An out-of-date cuDNN library can greatly hamper performance on 30-series and 40-series cards. Check with the community on discord to compare your `it/s` if you think you may need this fix. + +First, locate the destination for the DLL files and make a quick back up: + +1. Find your InvokeAI installation folder, e.g. `C:\Users\Username\InvokeAI\`. +1. Open the `.venv` folder, e.g. `C:\Users\Username\InvokeAI\.venv` (you may need to show hidden files to see it). +1. Navigate deeper to the `torch` package, e.g. `C:\Users\Username\InvokeAI\.venv\Lib\site-packages\torch`. +1. Copy the `lib` folder inside `torch` and back it up somewhere. + +Next, download and copy the updated cuDNN DLLs: + +1. Go to the Cuda Docs. +1. Create an account if needed and log in. +1. Choose the newest version of cuDNN that works with your GPU architecture. Consult the to determine the correct version for your GPU. +1. Download the latest version and extract it. +1. Find the `bin` folder, e.g. `cudnn-windows-x86_64-SOME_VERSION\bin`. +1. 
Copy and paste the `.dll` files into the `lib` folder you located earlier. Replace files when prompted. + +If, after restarting the app, this doesn't improve your performance, either restore your back up or re-run the installer to reset `torch` back to its original state. + +### AMD + +:::tip[Linux Only]{icon="linux"} + AMD GPUs are supported on Linux only, due to ROCm (the AMD equivalent of CUDA) support being Linux only. +::: + +:::caution[Bumps Ahead] + While the application does run on AMD GPUs, there are occasional bumps related to spotty torch support. +::: + +Run `rocm-smi` on your system's command line verify that drivers and ROCm are installed. If this command fails, or doesn't report versions, you will need to install them. + +Go to the and carefully follow the instructions for your system to get everything installed. + +Confirm that `rocm-smi` displays driver and CUDA versions after installation. + +#### Linux - via Docker Container + +An alternative to installing ROCm locally is to use a [ROCm docker container] to run the application in a container. diff --git a/docs/src/content/docs/troubleshooting/faq.mdx b/docs/src/content/docs/troubleshooting/faq.mdx new file mode 100644 index 00000000000..d710ae4b16b --- /dev/null +++ b/docs/src/content/docs/troubleshooting/faq.mdx @@ -0,0 +1,110 @@ +--- +title: FAQ +lastUpdated: 2026-02-19 +--- + +If the troubleshooting steps on this page don't get you up and running, please either [create an issue] or hop on [discord] for help. + +## How to Install + +Follow the [Installation Guide](/start-here/installation) to install Invoke. + +## Downloading models and using existing models + +The Model Manager tab in the UI provides a few ways to install models, including using your already-downloaded models. You'll see a popup directing you there on first startup. For more information, see the [model install docs]. 
+ +## Missing models after updating from v3 + +If you find some models are missing after updating from v3, it's likely they weren't correctly registered before the update and didn't get picked up in the migration. + +You can use the `Scan Folder` tab in the Model Manager UI to fix this. The models will either be in the old, now-unused `autoimport` folder, or your `models` folder. + +- Find and copy your install's old `autoimport` folder path, inside the main install folder. +- Go to the Model Manager and click `Scan Folder`. +- Paste the path and scan. +- IMPORTANT: Uncheck `Inplace install`. +- Click `Install All` to install all found models, or just install the models you want. + +Next, find and copy your install's `models` folder path (this could be your custom models folder path, or the `models` folder inside the main install folder). + +Follow the same steps to scan and import the missing models. + +## Slow generation + +- Check the [system requirements] to ensure that your system is capable of generating images. +- Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance. +- Check that your generations are happening on your GPU (if you have one). Invoke will log what is being used for generation upon startup. If your GPU isn't used, re-install and ensure you select the appropriate GPU option. +- If you are on Windows with an Nvidia GPU, you may have exceeded your GPU's VRAM capacity and are triggering Nvidia's "sysmem fallback". There's a guide to opt out of this behaviour in the [Low-VRAM mode guide](./features/low-vram.md). + +## Triton error on startup + +This can be safely ignored. Invoke doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton. + +## Unable to Copy on Firefox + +Firefox does not allow Invoke to directly access the clipboard by default. As a result, you may be unable to use certain copy functions. 
You can fix this by configuring Firefox to allow access to write to the clipboard: + +- Go to `about:config` and click the Accept button +- Search for `dom.events.asyncClipboard.clipboardItem` +- Set it to `true` by clicking the toggle button +- Restart Firefox + +## Replicate image found online + +Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc. + +## Invalid configuration file + +Everything seems to install ok, but you get a `ValidationError` when starting up the app. + +This is caused by an invalid setting in the `invokeai.yaml` configuration file. The error message should tell you what is wrong. + +Check the [configuration docs] for more detail about the settings and how to specify them. + +## Out of Memory Errors + +The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Follow our [Low-VRAM mode guide](./features/low-vram.md) to configure Invoke to prevent these. + +## Memory Leak (Linux) + +If you notice a memory leak, it could be caused by memory fragmentation as models are loaded and/or moved from CPU to GPU. + +A workaround is to tune memory allocation with an environment variable: + +```bash +# Force blocks >1MB to be allocated with `mmap` so that they are released to the system immediately when they are freed. +MALLOC_MMAP_THRESHOLD_=1048576 +``` + +:::caution[Speed vs Memory Tradeoff] + Your generations may be slower overall when setting this environment variable. +::: + +:::note[Possibly dependent on libc implementation] + It's not known if this issue occurs with other `libc` implementations such as `musl`. + + If you encounter this issue and your system uses a different implementation, please try this environment variable and let us know if it fixes the issue. +::: +

Detailed Discussion

+ +Python (and PyTorch) relies on the memory allocator from the C Standard Library (`libc`). On Linux, with the GNU C Standard Library implementation (`glibc`), our memory access patterns have been observed to cause severe memory fragmentation. + +This fragmentation results in large amounts of memory that has been freed but can't be released back to the OS. Loading models from disk and moving them between CPU/CUDA seem to be the operations that contribute most to the fragmentation. + +This memory fragmentation issue can result in OOM crashes during frequent model switching, even if `ram` (the max RAM cache size) is set to a reasonable value (e.g. an OOM crash with `ram=16` on a system with 32GB of RAM). + +This problem may also exist on other OSes, and other `libc` implementations. But, at the time of writing, it has only been investigated on Linux with `glibc`. + +To better understand how the `glibc` memory allocator works, see these references: + +- Basics: [The GNU Allocator](https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html) +- Details: [Malloc Internals](https://sourceware.org/glibc/wiki/MallocInternals) + +Note the differences between memory allocated as chunks in an arena vs. memory allocated with `mmap`. Under `glibc`'s default configuration, most model tensors get allocated as chunks in an arena, making them vulnerable to the problem of fragmentation. 
+ +[model install docs]: ./installation/models.md +[system requirements]: ./installation/requirements.md +[create an issue]: https://github.com/invoke-ai/InvokeAI/issues +[discord]: https://discord.gg/ZmtBAhwWhy +[configuration docs]: ./configuration.md diff --git a/docs/src/content/docs/workflows/adding-nodes.mdx b/docs/src/content/docs/workflows/adding-nodes.mdx new file mode 100644 index 00000000000..e5e884cf7fd --- /dev/null +++ b/docs/src/content/docs/workflows/adding-nodes.mdx @@ -0,0 +1,5 @@ +--- +title: Adding Nodes +--- + +{/* TODO: Finish this page */} diff --git a/docs/src/content/docs/workflows/assets/groupsallscale.png b/docs/src/content/docs/workflows/assets/groupsallscale.png new file mode 100644 index 00000000000..5a12fe9e131 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsallscale.png differ diff --git a/docs/src/content/docs/workflows/assets/groupsconditioning.png b/docs/src/content/docs/workflows/assets/groupsconditioning.png new file mode 100644 index 00000000000..baaf2b44e0e Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsconditioning.png differ diff --git a/docs/src/content/docs/workflows/assets/groupscontrol.png b/docs/src/content/docs/workflows/assets/groupscontrol.png new file mode 100644 index 00000000000..a38e4e4bbaa Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupscontrol.png differ diff --git a/docs/src/content/docs/workflows/assets/groupsimgvae.png b/docs/src/content/docs/workflows/assets/groupsimgvae.png new file mode 100644 index 00000000000..03ac8d1f4aa Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsimgvae.png differ diff --git a/docs/src/content/docs/workflows/assets/groupsiterate.png b/docs/src/content/docs/workflows/assets/groupsiterate.png new file mode 100644 index 00000000000..50b762099a8 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsiterate.png differ diff --git 
a/docs/src/content/docs/workflows/assets/groupslora.png b/docs/src/content/docs/workflows/assets/groupslora.png new file mode 100644 index 00000000000..74ae8a70736 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupslora.png differ diff --git a/docs/src/content/docs/workflows/assets/groupsmultigenseeding.png b/docs/src/content/docs/workflows/assets/groupsmultigenseeding.png new file mode 100644 index 00000000000..dcd64c77581 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsmultigenseeding.png differ diff --git a/docs/src/content/docs/workflows/assets/groupsnoise.png b/docs/src/content/docs/workflows/assets/groupsnoise.png new file mode 100644 index 00000000000..d95b7ba3073 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/groupsnoise.png differ diff --git a/docs/src/content/docs/workflows/assets/linearview.png b/docs/src/content/docs/workflows/assets/linearview.png new file mode 100644 index 00000000000..fb6b3efca0e Binary files /dev/null and b/docs/src/content/docs/workflows/assets/linearview.png differ diff --git a/docs/src/content/docs/workflows/assets/nodescontrol.png b/docs/src/content/docs/workflows/assets/nodescontrol.png new file mode 100644 index 00000000000..8b179e43acd Binary files /dev/null and b/docs/src/content/docs/workflows/assets/nodescontrol.png differ diff --git a/docs/src/content/docs/workflows/assets/nodesi2i.png b/docs/src/content/docs/workflows/assets/nodesi2i.png new file mode 100644 index 00000000000..99088338042 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/nodesi2i.png differ diff --git a/docs/src/content/docs/workflows/assets/nodest2i.png b/docs/src/content/docs/workflows/assets/nodest2i.png new file mode 100644 index 00000000000..7e882dbf1b6 Binary files /dev/null and b/docs/src/content/docs/workflows/assets/nodest2i.png differ diff --git a/docs/src/content/docs/workflows/assets/workflow_library.png 
b/docs/src/content/docs/workflows/assets/workflow_library.png new file mode 100644 index 00000000000..a17593d3b6b Binary files /dev/null and b/docs/src/content/docs/workflows/assets/workflow_library.png differ diff --git a/docs/src/content/docs/workflows/comfyui-migration.mdx b/docs/src/content/docs/workflows/comfyui-migration.mdx new file mode 100644 index 00000000000..cc78e3de7b9 --- /dev/null +++ b/docs/src/content/docs/workflows/comfyui-migration.mdx @@ -0,0 +1,120 @@ +--- +title: ComfyUI Migration Guide +sidebar: + label: ComfyUI Migration +--- + +import { Card, CardGrid } from '@astrojs/starlight/components'; + +If you're coming to InvokeAI from ComfyUI, welcome! You'll find things are similar but different - the good news is that you already know how things should work, and it's just a matter of wiring them up! + + + InvokeAI's nodes tend to be more granular than default nodes in Comfy. This means each node in Invoke will do a specific task, and you might need to use multiple nodes to achieve the same result. The added granularity improves the control you have over your workflows. + + + InvokeAI's backend and ComfyUI's backend are very different, which means Comfy workflows are not able to be imported directly into InvokeAI. However, we have created a [list of popular workflows](/workflows/community-nodes) for you to get started with Nodes in InvokeAI! + + +## Node Equivalents + +Finding the right node is the hardest part of switching. Use the categories below to find the InvokeAI equivalents for the ComfyUI nodes you are used to. 
+ +### Sampling + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| KSampler | Denoise Latents | +| Ksampler Advanced | Denoise Latents | + +### Loaders + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| Load Checkpoint | Main Model Loader _or_ SDXL Main Model Loader | +| Load VAE | VAE Loader | +| Load Lora | LoRA Loader _or_ SDXL Lora Loader | +| Load ControlNet Model | ControlNet | +| Load ControlNet Model (diff) | ControlNet | +| Load Style Model | Reference Only ControlNet will be coming in a future version of InvokeAI | +| unCLIPCheckpointLoader | N/A | +| GLIGENLoader | N/A | +| Hypernetwork Loader | N/A | +| Load Upscale Model | Occurs within "Upscale (RealESRGAN)" | + +### Conditioning + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| CLIP Text Encode (Prompt) | Compel (Prompt) or SDXL Compel (Prompt) | +| CLIP Set Last Layer | CLIP Skip | +| Conditioning (Average) | Use the .blend() feature of prompts | +| Conditioning (Combine) | N/A | +| Conditioning (Concat) | See the Prompt Tools Community Node | +| Conditioning (Set Area) | N/A | +| Conditioning (Set Mask) | Mask Edge | +| CLIP Vision Encode | N/A | +| unCLIPConditioning | N/A | +| Apply ControlNet | ControlNet | +| Apply ControlNet (Advanced) | ControlNet | + +### Latent + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| VAE Decode | Latents to Image | +| VAE Encode | Image to Latents | +| Empty Latent Image | Noise | +| Upscale Latent | Resize Latents | +| Upscale Latent By | Scale Latents | +| Latent Composite | Blend Latents | +| LatentCompositeMasked | N/A | + +### Image + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| Save Image | Image | +| Preview Image | Current | +| Load Image | Image | +| Empty Image | Blank Image | +| Invert Image | Invert Lerp Image | +| Batch Images | Link "Image" nodes into an "Image Collection" node | +| Pad Image for Outpainting | Outpainting is easily accomplished in the Unified Canvas | +| ImageCompositeMasked | Paste 
Image | +| Upscale Image | Resize Image | +| Upscale Image By | Upscale Image | +| Upscale Image (using Model) | Upscale Image | +| ImageBlur | Blur Image | +| ImageQuantize | N/A | +| ImageSharpen | N/A | +| Canny | Canny Processor | + +### Mask + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| Load Image (as Mask) | Image | +| Convert Mask to Image | Image | +| Convert Image to Mask | Image | +| SolidMask | N/A | +| InvertMask | Invert Lerp Image | +| CropMask | Crop Image | +| MaskComposite | Combine Mask | +| FeatherMask | Blur Image | + +### Advanced + +| ComfyUI Node | Invoke Equivalent | +| :--- | :--- | +| Load CLIP | Main Model Loader _or_ SDXL Main Model Loader | +| UNETLoader | Main Model Loader _or_ SDXL Main Model Loader | +| DualCLIPLoader | Main Model Loader _or_ SDXL Main Model Loader | +| Load Checkpoint | Main Model Loader _or_ SDXL Main Model Loader | +| ConditioningZeroOut | N/A | +| ConditioningSetTimestepRange | N/A | +| CLIPTextEncodeSDXLRefiner | Compel (Prompt) or SDXL Compel (Prompt) | +| CLIPTextEncodeSDXL | Compel (Prompt) or SDXL Compel (Prompt) | +| ModelMergeSimple | Model Merging is available in the Model Manager | +| ModelMergeBlocks | Model Merging is available in the Model Manager | +| CheckpointSave | Model saving is available in the Model Manager | +| CLIPMergeSimple | N/A | diff --git a/docs/src/content/docs/workflows/community-nodes.mdx b/docs/src/content/docs/workflows/community-nodes.mdx new file mode 100644 index 00000000000..8990d34bf5c --- /dev/null +++ b/docs/src/content/docs/workflows/community-nodes.mdx @@ -0,0 +1,731 @@ +--- +title: Community Nodes +--- + +These are nodes that have been developed by the community, for the community. If you're not sure what a node is, you can learn more about nodes [here](overview.md). + +If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md). 
+ +To use a node, add the node to the `nodes` folder found in your InvokeAI install location. + +The suggested method is to use `git clone` to clone the repository the node is found in. This allows for easy updates of the node in the future. + +If you'd prefer, you can also just download the whole node folder from the linked repository and add it to the `nodes` folder. + +To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor. + +--- + +### Anamorphic Tools + +**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting. + +**Node Link:** https://github.com/JPPhoto/anamorphic-tools + +--- + +### Adapters Linked Nodes + +**Description:** A set of nodes for linked adapters (ControlNet, IP-Adaptor & T2I-Adapter). This allows multiple adapters to be chained together without using a `collect` node which means it can be used inside an `iterate` node without any collecting on every iteration issues. + +- `ControlNet-Linked` - Collects ControlNet info to pass to other nodes. +- `IP-Adapter-Linked` - Collects IP-Adapter info to pass to other nodes. +- `T2I-Adapter-Linked` - Collects T2I-Adapter info to pass to other nodes. + +Note: These are inherited from the core nodes so any update to the core nodes should be reflected in these. + +**Node Link:** https://github.com/skunkworxdark/adapters-linked-nodes + +--- + +### Autostereogram Nodes + +**Description:** Generate autostereogram images from a depth map. This is not a very practically useful node but more a 90s nostalgic indulgence as I used to love these images as a kid. + +**Node Link:** https://github.com/skunkworxdark/autostereogram_nodes + +**Example Usage:** + + -> -> + +--- + +### Average Images + +**Description:** This node takes in a collection of images of the same size and averages them as output. It converts everything to RGB mode first. 
+ +**Node Link:** https://github.com/JPPhoto/average-images-node + +--- + +### BiRefNet Background Removal + +**Description:** Remove image backgrounds using BiRefNet (Bilateral Reference Network), a high-quality segmentation model. Supports multiple model variants including standard, high-resolution, matting, portrait, and specialized models for different use cases. + +**Node Link:** https://github.com/veeliks/invoke_birefnet + +**Output Examples** + +
+ Before background removal + After background removal +
+ +--- + +### Clean Image Artifacts After Cut + +Description: Removes residual artifacts after an image is separated from its background. + +Node Link: https://github.com/VeyDlin/clean-artifact-after-cut-node + +View: + + + +--- + +### Close Color Mask + +Description: Generates a mask for images based on a closely matching color, useful for color-based selections. + +Node Link: https://github.com/VeyDlin/close-color-mask-node + +View: + + + +--- + +### Clothing Mask + +Description: Employs a U2NET neural network trained for the segmentation of clothing items in images. + +Node Link: https://github.com/VeyDlin/clothing-mask-node + +View: + + + +--- + +### Contrast Limited Adaptive Histogram Equalization + +Description: Enhances local image contrast using adaptive histogram equalization with contrast limiting. + +Node Link: https://github.com/VeyDlin/clahe-node + +View: + + + +--- + +### Curves + +**Description:** Adjust an image's curve based on a user-defined string. + +**Node Link:** https://github.com/JPPhoto/curves-node + +--- + +### Depth Map from Wavefront OBJ + +**Description:** Render depth maps from Wavefront .obj files (triangulated) using this simple 3D renderer utilizing numpy and matplotlib to compute and color the scene. There are simple parameters to change the FOV, camera position, and model orientation. + +To be imported, an .obj must use triangulated meshes, so make sure to enable that option if exporting from a 3D modeling program. This renderer makes each triangle a solid color based on its average depth, so it will cause anomalies if your .obj has large triangles. In Blender, the Remesh modifier can be helpful to subdivide a mesh into small pieces that work well given these limitations. + +**Node Link:** https://github.com/dwringer/depth-from-obj-node + +**Example Usage:** + + + +--- + +### Enhance Detail + +**Description:** A single node that can enhance the detail in an image. 
Increase or decrease details in an image using a guided filter (as opposed to the typical Gaussian blur used by most sharpening filters.) Based on the `Enhance Detail` ComfyUI node from https://github.com/spacepxl/ComfyUI-Image-Filters + +**Node Link:** https://github.com/skunkworxdark/enhance-detail-node + +**Example Usage:** + + + +--- + +### Film Grain + +**Description:** This node adds a film grain effect to the input image based on the weights, seeds, and blur radii parameters. It works with RGB input images only. + +**Node Link:** https://github.com/JPPhoto/film-grain-node + +--- + +### Flip Pose + +**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands. + +**Node Link:** https://github.com/JPPhoto/flip-pose-node + +--- + +### Flux Ideal Size + +**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement. + +**Node Link:** https://github.com/JPPhoto/flux-ideal-size + +--- + +### Generative Grammar-Based Prompt Nodes + +**Description:** This set of 3 nodes generates prompts from simple user-defined grammar rules (loaded from custom files - examples provided below). The prompts are made by recursively expanding a special template string, replacing nonterminal "parts-of-speech" until no nonterminal terms remain in the string. + +This includes 3 Nodes: +- *Lookup Table from File* - loads a YAML file "prompt" section (or of a whole folder of YAML's) into a JSON-ified dictionary (Lookups output) +- *Lookups Entry from Prompt* - places a single entry in a new Lookups output under the specified heading +- *Prompt from Lookup Table* - uses a Collection of Lookups as grammar rules from which to randomly generate prompts. 
+ +**Node Link:** https://github.com/dwringer/generative-grammar-prompt-nodes + +**Example Usage:** + + + +--- + +### GPT2RandomPromptMaker + +**Description:** A node for InvokeAI utilizes the GPT-2 language model to generate random prompts based on a provided seed and context. + +**Node Link:** https://github.com/mickr777/GPT2RandomPromptMaker + +**Output Examples** + +Generated Prompt: An enchanted weapon will be usable by any character regardless of their alignment. + + + +--- + +### Grid to Gif + +**Description:** One node that turns a grid image into an image collection, one node that turns an image collection into a gif. + +**Node Link:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/GridToGif.py + +**Example Node Graph:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/Grid%20to%20Gif%20Example%20Workflow.json + +**Output Examples** + + + + +--- + +### Halftone + +**Description**: Halftone converts the source image to grayscale and then performs halftoning. CMYK Halftone converts the image to CMYK and applies a per-channel halftoning to make the source image look like a magazine or newspaper. For both nodes, you can specify angles and halftone dot spacing. + +**Node Link:** https://github.com/JPPhoto/halftone-node + +**Example** + +Input: + + + +Halftone Output: + + + +CMYK Halftone Output: + + + +--- + +### Hand Refiner with MeshGraphormer + +**Description**: Hand Refiner takes in your image and automatically generates a fixed depth map for the hands along with a mask of the hands region that will conveniently allow you to use them along with ControlNet to fix the wonky hands generated by Stable Diffusion + +**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer + +**View** + + +--- + +### Image and Mask Composition Pack + +**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. 
The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling. + +This includes 15 Nodes: + +- *Adjust Image Hue Plus* - Rotate the hue of an image in one of several different color spaces. +- *Blend Latents/Noise (Masked)* - Use a mask to blend part of one latents tensor [including Noise outputs] into another. Can be used to "renoise" sections during a multi-stage [masked] denoising process. +- *Enhance Image* - Boost or reduce color saturation, contrast, brightness, sharpness, or invert colors of any image at any stage with this simple wrapper for pillow [PIL]'s ImageEnhance module. +- *Equivalent Achromatic Lightness* - Calculates image lightness accounting for Helmholtz-Kohlrausch effect based on a method described by High, Green, and Nussbaum (2023). +- *Text to Mask (Clipseg)* - Input a prompt and an image to generate a mask representing areas of the image matched by the prompt. +- *Text to Mask Advanced (Clipseg)* - Output up to four prompt masks combined with logical "and", logical "or", or as separate channels of an RGBA image. +- *Image Layer Blend* - Perform a layered blend of two images using alpha compositing. Opacity of top layer is selectable, with optional mask and several different blend modes/color spaces. +- *Image Compositor* - Take a subject from an image with a flat backdrop and layer it on another image using a chroma key or flood select background removal. +- *Image Dilate or Erode* - Dilate or expand a mask (or any image!). This is equivalent to an expand/contract operation. +- *Image Value Thresholds* - Clip an image to pure black/white beyond specified thresholds. +- *Offset Latents* - Offset a latents tensor in the vertical and/or horizontal dimensions, wrapping it around. +- *Offset Image* - Offset an image in the vertical and/or horizontal dimensions, wrapping it around. 
+- *Rotate/Flip Image* - Rotate an image in degrees clockwise/counterclockwise about its center, optionally resizing the image boundaries to fit, or flipping it about the vertical and/or horizontal axes. +- *Shadows/Highlights/Midtones* - Extract three masks (with adjustable hard or soft thresholds) representing shadows, midtones, and highlights regions of an image. +- *Text Mask (simple 2D)* - create and position a white on black (or black on white) line of text using any font locally available to Invoke. + +**Node Link:** https://github.com/dwringer/composition-nodes + + + +--- + +### Image Dominant Color + +Description: Identifies and extracts the dominant color from an image using k-means clustering. + +Node Link: https://github.com/VeyDlin/image-dominant-color-node + +View: + + + +--- + +### Image Export + +**Description:** Export images in multiple formats (AVIF, JPEG, PNG, TIFF, WebP) with format-specific compression and quality options. + +**Node Link:** https://github.com/veeliks/invoke_image_export + +**Nodes:** + +
+ Save Image as AVIF + Save Image as JPEG + Save Image as PNG + Save Image as TIFF + Save Image as WebP +
+ +--- + +### Image to Character Art Image Nodes + +**Description:** Group of nodes to convert an input image into ascii/unicode art Image + +**Node Link:** https://github.com/mickr777/imagetoasciiimage + +**Output Examples** + + + + + + + + + +--- + +### Image Picker + +**Description:** This InvokeAI node takes in a collection of images and randomly chooses one. This can be useful when you have a number of poses to choose from for a ControlNet node, or a number of input images for another purpose. + +**Node Link:** https://github.com/JPPhoto/image-picker-node + +--- + +### Image Resize Plus + +Description: Provides various image resizing options such as fill, stretch, fit, center, and crop. + +Node Link: https://github.com/VeyDlin/image-resize-plus-node + +View: + + + +--- + +### Latent Upscale + +**Description:** This node uses a small (~2.4mb) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of the latent upscale technique. + +**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale) + +--- + +### Load Video Frame + +**Description:** This is a video frame image provider + indexer/video creation nodes for hooking up to iterators and ranges and ControlNets and such for invokeAI node experimentation. Think animation + ControlNet outputs. + +**Node Link:** https://github.com/helix4u/load_video_frame + +**Output Example:** + + + +--- +### Make 3D + +**Description:** Create compelling 3D stereo images from 2D originals. 
+ +**Node Link:** [https://gitlab.com/srcrr/shift3d/-/raw/main/make3d.py](https://gitlab.com/srcrr/shift3d) + +**Example Node Graph:** https://gitlab.com/srcrr/shift3d/-/raw/main/example-workflow.json?ref_type=heads&inline=false + +**Output Examples** + + + + +--- + +### Mask Operations + +Description: Offers logical operations (OR, SUB, AND) for combining and manipulating image masks. + +Node Link: https://github.com/VeyDlin/mask-operations-node + +View: + + + +--- + +### Match Histogram + +**Description:** An InvokeAI node to match a histogram from one image to another. This is a bit like the `color correct` node in the main InvokeAI but this works in the YCbCr colourspace and can handle images of different sizes. Also does not require a mask input. +- Option to only transfer luminance channel. +- Option to save output as grayscale + +A good use case for this node is to normalize the colors of an image that has been through the tiled scaling workflow of my XYGrid Nodes. + +See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/blob/main/README.md + +**Node Link:** https://github.com/skunkworxdark/match_histogram + +**Output Examples** + + + +--- + +### Metadata Linked Nodes + +**Description:** A set of nodes for Metadata. Collect Metadata from within an `iterate` node & extract metadata from an image. 
+ +- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node +- `Metadata From Image` - Provides Metadata from an image +- `Metadata To String` - Extracts a String value of a label from metadata +- `Metadata To Integer` - Extracts an Integer value of a label from metadata +- `Metadata To Float` - Extracts a Float value of a label from metadata +- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata +- `Metadata To Bool` - Extracts Bool types from metadata +- `Metadata To Model` - Extracts model types from metadata +- `Metadata To SDXL Model` - Extracts SDXL model types from metadata +- `Metadata To LoRAs` - Extracts Loras from metadata. +- `Metadata To SDXL LoRAs` - Extracts SDXL Loras from metadata +- `Metadata To ControlNets` - Extracts ControlNets from metadata +- `Metadata To IP-Adapters` - Extracts IP-Adapters from metadata +- `Metadata To T2I-Adapters` - Extracts T2I-Adapters from metadata +- `Denoise Latents + Metadata` - This is an inherited version of the existing `Denoise Latents` node but with a metadata input and output. + +**Node Link:** https://github.com/skunkworxdark/metadata-linked-nodes + +--- + +### Negative Image + +Description: Creates a negative version of an image, effective for visual effects and mask inversion. + +Node Link: https://github.com/VeyDlin/negative-image-node + +View: + + + +--- + +### Nightmare Promptgen + +**Description:** Nightmare Prompt Generator - Uses a local text generation model to create unique imaginative (but usually nightmarish) prompts for InvokeAI. By default, it allows you to choose from some gpt-neo models I finetuned on over 2500 of my own InvokeAI prompts in Compel format, but you're able to add your own, as well. Offers support for replacing any troublesome words with a random choice from a list you can also define. 
+ +**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen) + +--- + +### Ollama Node + +**Description:** Uses Ollama API to expand text prompts for text-to-image generation using local LLMs. Works great for expanding basic prompts into detailed natural language prompts for Flux. Also provides a toggle to unload the LLM model immediately after expanding, to free up VRAM for Invoke to continue the image generation workflow. + +**Node Link:** https://github.com/Jonseed/Ollama-Node + +**Example Node Graph:** https://github.com/Jonseed/Ollama-Node/blob/main/Ollama-Node-Flux-example.json + +**View:** + +![ollama node](https://raw.githubusercontent.com/Jonseed/Ollama-Node/a3e7cdc55e394cb89c1ea7ed54e106c212c85e8c/ollama-node-screenshot.png) + +--- + +### One Button Prompt + + + +**Description:** an extensive suite of auto prompt generation and prompt helper nodes based on extensive logic. Get creative with the best prompt generator in the world. + +The main node generates interesting prompts based on a set of parameters. There are also some additional nodes such as Auto Negative Prompt, One Button Artify, Create Prompt Variant and other cool prompt toys to play around with. + +**Node Link:** [https://github.com/AIrjen/OneButtonPrompt_X_InvokeAI](https://github.com/AIrjen/OneButtonPrompt_X_InvokeAI) + +**Nodes:** + + + +--- + +### Oobabooga + +**Description:** asks a local LLM running in Oobabooga's Text-Generation-Webui to write a prompt based on the user input. + +**Link:** https://github.com/sammyf/oobabooga-node + +**Example:** + +"describe a new mystical creature in its natural environment" + +*can return* + +"The mystical creature I am describing to you is called the "Glimmerwing". It is a majestic, iridescent being that inhabits the depths of the most enchanted forests and glimmering lakes. 
Its body is covered in shimmering scales that reflect every color of the rainbow, and it has delicate, translucent wings that sparkle like diamonds in the sunlight. The Glimmerwing's home is a crystal-clear lake, surrounded by towering trees with leaves that shimmer like jewels. In this serene environment, the Glimmerwing spends its days swimming gracefully through the water, chasing schools of glittering fish and playing with the gentle ripples of the lake's surface. +As the sun sets, the Glimmerwing perches on a branch of one of the trees, spreading its wings to catch the last rays of light. The creature's scales glow softly, casting a rainbow of colors across the forest floor. The Glimmerwing sings a haunting melody, its voice echoing through the stillness of the night air. Its song is said to have the power to heal the sick and bring peace to troubled souls. Those who are lucky enough to hear the Glimmerwing's song are forever changed by its beauty and grace." + + + +**Requirement** + +a Text-Generation-Webui instance (might work remotely too, but I never tried it) and obviously InvokeAI 3.x + +**Note** + +This node works best with SDXL models, especially as the style can be described independently of the LLM's output. + +--- + +### Prompt Tools + +**Description:** A set of InvokeAI nodes that add general prompt (string) manipulation tools. Designed to accompany the `Prompts From File` node and other prompt generation nodes. + +1. `Prompt To File` - Saves a prompt or collection of prompts to a file, one per line. There is an append/overwrite option. +2. `PTFields Collect` - Converts image generation fields into a Json format string that can be passed to Prompt to file. +3. `PTFields Expand` - Takes Json string and converts it to individual generation parameters. This can be fed from the Prompts to file node. +4. `Prompt Strength` - Formats prompt with strength like the weighted format of compel +5. 
`Prompt Strength Combine` - Combines weighted prompts for .and()/.blend() +6. `CSV To Index String` - Gets a string from a CSV by index. Includes a Random index option + +The following Nodes are now included in v3.2 of Invoke and are no longer in this set of tools. + +- `Prompt Join` -> `String Join` +- `Prompt Join Three` -> `String Join Three` +- `Prompt Replace` -> `String Replace` +- `Prompt Split Neg` -> `String Split Neg` + + +See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/blob/main/README.md + +**Node Link:** https://github.com/skunkworxdark/Prompt-tools-nodes + +**Workflow Examples** + + + +--- + +### Remote Image + +**Description:** This is a pack of nodes to interoperate with other services, be they public websites or bespoke local servers. The pack consists of these nodes: + +- *Load Remote Image* - Lets you load remote images such as a realtime webcam image, an image of the day, or dynamically created images. +- *Post Image to Remote Server* - Lets you upload an image to a remote server using an HTTP POST request, e.g. for storage, display or further processing. + +**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image + +--- + +### BriaAI Remove Background + +**Description**: Implements one click background removal with BriaAI's new version 1.4 model which seems to be producing better results than any other previous background removal tool. + +**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg + +**View** + + +--- + +### Remove Background + +Description: An integration of the rembg package to remove backgrounds from images using multiple U2NET models. + +Node Link: https://github.com/VeyDlin/remove-background-node + +View: + + + +--- + +### Retroize + +**Description:** Retroize is a collection of nodes for InvokeAI to "Retroize" images. Any image can be given a fresh coat of retro paint with these nodes, either from your gallery or from within the graph itself. 
It includes nodes to pixelize, quantize, palettize, and ditherize images; as well as to retrieve palettes from existing images. + +**Node Link:** https://github.com/Ar7ific1al/invokeai-retroizeinode/ + +**Retroize Output Examples** + + + +--- + +### Stereogram Nodes + +**Description:** A set of custom nodes for InvokeAI to create cross-view or parallel-view stereograms. Stereograms are 2D images that, when viewed properly, reveal a 3D scene. Check out [r/crossview](https://www.reddit.com/r/CrossView/) for tutorials. + +**Node Link:** https://github.com/simonfuhrmann/invokeai-stereo + +**Example Workflow and Output** + + + +--- + +### Simple Skin Detection + +Description: Detects skin in images based on predefined color thresholds. + +Node Link: https://github.com/VeyDlin/simple-skin-detection-node + +View: + + + +--- + +### Size Stepper Nodes + +**Description:** This is a set of nodes for calculating the necessary size increments for doing upscaling workflows. Use the *Final Size & Orientation* node to enter your full size dimensions and orientation (portrait/landscape/random), then plug that and your initial generation dimensions into the *Ideal Size Stepper* and get 1, 2, or 3 intermediate pairs of dimensions for upscaling. Note this does not output the initial size or full size dimensions: the 1, 2, or 3 outputs of this node are only the intermediate sizes. + +A third node is included, *Random Switch (Integers)*, which is just a generic version of Final Size with no orientation selection. 
+ +**Node Link:** https://github.com/dwringer/size-stepper-nodes + +**Example Usage:** + + + +--- + +### Text font to Image + +**Description:** text font to text image node for InvokeAI, download a font to use (or if in font cache uses it from there), the text is always resized to the image size, but can control that with padding, optional 2nd line + +**Node Link:** https://github.com/mickr777/textfontimage + +**Output Examples** + + + +Results after using the depth controlnet + + + + + +--- + +### Thresholding + +**Description:** This node generates masks for highlights, midtones, and shadows given an input image. You can optionally specify a blur for the lookup table used in making those masks from the source image. + +**Node Link:** https://github.com/JPPhoto/thresholding-node + +**Examples** + +Input: + + + +Highlights/Midtones/Shadows: + + + + + +Highlights/Midtones/Shadows (with LUT blur enabled): + + + + + +--- + +### Unsharp Mask + +**Description:** Applies an unsharp mask filter to an image, preserving its alpha channel in the process. + +**Node Link:** https://github.com/JPPhoto/unsharp-mask-node + +--- + +### XY Image to Grid and Images to Grids nodes + +**Description:** These nodes add the following to InvokeAI: +- Generate grids of images from multiple input images +- Create XY grid images with labels from parameters +- Split images into overlapping tiles for processing (for super-resolution workflows) +- Recombine image tiles into a single output image blending the seams + +The nodes include: +1. `Images To Grids` - Combine multiple images into a grid of images +2. `XYImage To Grid` - Take X & Y params and creates a labeled image grid. +3. `XYImage Tiles` - Super-resolution (embiggen) style tiled resizing +4. `Image Tot XYImages` - Takes an image and cuts it up into a number of columns and rows. +5. 
Multiple supporting nodes - Helper nodes for data wrangling and building `XYImage` collections + +See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/blob/main/README.md + +**Node Link:** https://github.com/skunkworxdark/XYGrid_nodes + +**Output Examples** + + + +--- + +### Example Node Template + +**Description:** This node allows you to do super cool things with InvokeAI. + +**Node Link:** https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/app/invocations/prompt.py + +**Example Workflow:** https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json + +**Output Examples** + + + + +## Disclaimer + +The nodes linked have been developed and contributed by members of the Invoke AI community. While we strive to ensure the quality and safety of these contributions, we do not guarantee the reliability or security of the nodes. If you have issues or concerns with any of these nodes, please raise an issue on GitHub or in the Discord. + + +## Help +If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy). diff --git a/docs/src/content/docs/workflows/editor-interface.mdx b/docs/src/content/docs/workflows/editor-interface.mdx new file mode 100644 index 00000000000..f4c50c7bc32 --- /dev/null +++ b/docs/src/content/docs/workflows/editor-interface.mdx @@ -0,0 +1,128 @@ +--- +title: Editor Interface +description: Learn how to use the Workflow Editor in InvokeAI. +lastUpdated: 2026-02-20 +--- + +import { Card, CardGrid, Steps, Tabs, TabItem } from '@astrojs/starlight/components'; + +The workflow editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. Nodes take in inputs on the left side of the node, and return an output on the right side of the node. + +A node graph is composed of multiple nodes that are connected together to create a workflow. 
Nodes' inputs and outputs are connected by dragging connectors from node to node. Inputs and outputs are color-coded for ease of use. + +:::tip[New to Diffusion?] +If you're not familiar with Diffusion, take a look at our [Diffusion Overview](/concepts/diffusion). Understanding how diffusion works will enable you to more easily use the Workflow Editor and build workflows to suit your needs. +::: + +## Features + + + Save workflows to the Invoke database, allowing you to easily create, modify, and share workflows as needed. A curated set of default workflows is provided to help explain important node usage. + + ![Workflow Library](./assets/workflow_library.png) + + + Create a custom UI for your workflow, making it easier to iterate on your generations. The Linear UI View is saved alongside the workflow, allowing you to share workflows and enable others to use them. + + + 1. Right-click on any **input label** on a node. + 2. Select **"Add to Linear View"**. + 3. The input will now appear in your Linear View panel! + + + ![Linear View](./assets/linearview.png) + + + Any node or input field can be renamed in the workflow editor. If the input field you have renamed has been added to the Linear View, the changed name will be reflected in both places. + + + Nodes have a **"Use Cache"** option in their footer. This allows for performance improvements by reusing previously cached values during workflow processing. + + +### Managing Nodes + +Use these quick keyboard shortcuts to navigate and manage your workflow efficiently: + + + + Ctrl + C (or Cmd + C) + + + Ctrl + V (or Cmd + V) + + + Shift + Click & Drag + + + Backspace / Delete + + + +## Important Nodes & Concepts + +There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. 
+ +:::note +The screenshots below aren't examples of complete functioning node graphs, but rather snippets demonstrating specific concepts. +::: + + + + ### Create Latent Noise + An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input. + + ![Create Latent Noise](./assets/groupsnoise.png) + + ### Text Prompt Conditioning + Conditioning is necessary for the latent diffusion process, whether empty or not. As a result, the Denoising node requires positive and negative conditioning inputs. Conditioning is reliant on a CLIP text encoder provided by the Model Loader node. + + ![Text Prompt Conditioning](./assets/groupsconditioning.png) + + + + ### Image to Latents & VAE + The **ImageToLatents** node takes in a pixel image and a VAE and outputs latents. The **LatentsToImage** node does the opposite, taking in latents and a VAE and outputs a pixel image. + + ![Image to Latents & VAE](./assets/groupsimgvae.png) + + ### Scaling + Use the **ImageScale**, **ScaleLatents**, and **Upscale** nodes to upscale images and/or latent images. Upscaling is the process of enlarging an image and adding more detail. + + The chosen method differs across contexts. However, be aware that latents are already noisy and compressed at their original resolution; scaling an image could produce more detailed results. + + ![Scaling Nodes](./assets/groupsallscale.png) + + + + ### ControlNet + The **ControlNet** node outputs a Control, which can be provided as input to a Denoise Latents node. Depending on the type of ControlNet desired, ControlNet nodes usually require an image processor node, such as a Canny Processor or Depth Processor, which prepares an input image for use with ControlNet. + + ![ControlNet Setup](./assets/groupscontrol.png) + + ### LoRA + The **Lora Loader** node lets you load a LoRA and pass it as output. 
A LoRA provides fine-tunes to the UNet and text encoder weights that augment the base model’s image and text vocabularies. + + ![LoRA Setup](./assets/groupslora.png) + + + + ### Defined & Random Seeds + It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the **'Seed'** field on a noise node. Conversely, the **RandomInt** node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed. + + ![Defined & Random Seeds](./assets/groupsnoise.png) + + ### Iteration + Multiple Images as Input + Iteration is a common concept in any processing, and means to repeat a process with given input. In nodes, you're able to use the **Iterate** node to iterate through collections usually gathered by the **Collect** node. + + The Iterate node has many potential uses, from processing a collection of images one after another, to varying seeds across multiple image generations and more. This screenshot demonstrates how to collect several images and use them in an image generation workflow. + + ![Iteration](./assets/groupsiterate.png) + + ### Batch / Multiple Image Generation + Batch or multiple image generation in the workflow editor is done using the **RandomRange** node. In this case, the 'Size' field represents the number of images to generate, meaning this example will generate 4 images. + + As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection. This noise can then be fed to the Denoise Latents node for it to iterate through the denoising process with the different seeds provided. 
+ + ![Batch Generation](./assets/groupsmultigenseeding.png) + + diff --git a/docs/src/content/docs/workflows/index.mdx b/docs/src/content/docs/workflows/index.mdx new file mode 100644 index 00000000000..46299ae454c --- /dev/null +++ b/docs/src/content/docs/workflows/index.mdx @@ -0,0 +1,31 @@ +--- +title: Using Workflows +sidebar: + order: 1 +--- + +import { LinkCard, CardGrid } from '@astrojs/starlight/components'; + +Workflows allow you to link multiple **Nodes** together to create custom, repeatable image generation processes. By connecting the outputs of some nodes to the inputs of others, you can build complex functionality tailored to your specific needs. + +## The Node Editor + +With nodes, you can easily extend the image generation capabilities of InvokeAI. All InvokeAI features are added through nodes. + +You can read more about nodes and how to use the node editor by checking out the detailed node documentation: + + + +## Downloading New Nodes + +To download a new node and enhance your workflows with new features, visit our list of Community Nodes. These are nodes that have been created by the community, for the community. + + diff --git a/docs/src/content/i18n/en.json b/docs/src/content/i18n/en.json new file mode 100644 index 00000000000..69333e3a0b2 --- /dev/null +++ b/docs/src/content/i18n/en.json @@ -0,0 +1,45 @@ +{ + "skipLink.label": "Skip to content", + "search.label": "Search", + "search.ctrlKey": "Ctrl", + "search.cancelLabel": "Cancel", + "search.devWarning": "Search is only available in production builds. 
\nTry building and previewing the site to test it out locally.", + "themeSelect.accessibleLabel": "Select theme", + "themeSelect.dark": "Dark", + "themeSelect.light": "Light", + "themeSelect.auto": "Auto", + "languageSelect.accessibleLabel": "Select language", + "menuButton.accessibleLabel": "Menu", + "sidebarNav.accessibleLabel": "Main", + "tableOfContents.onThisPage": "On this page", + "tableOfContents.overview": "Overview", + "i18n.untranslatedContent": "This content is not available in your language yet.", + "page.editLink": "Edit page", + "page.lastUpdated": "Last updated:", + "page.previousLink": "Previous", + "page.nextLink": "Next", + "page.draft": "This content is a draft and will not be included in production builds.", + "404.text": "Page not found. Check the URL or try using the search bar.", + "aside.note": "Note", + "aside.tip": "Tip", + "aside.caution": "Caution", + "aside.danger": "Danger", + "fileTree.directory": "Directory", + "builtWithStarlight.label": "Built with Starlight", + "heading.anchorLabel": "Section titled “{{title}}”", + + "expressiveCode.copyButtonCopied": "Copied!", + "expressiveCode.copyButtonTooltip": "Copy to clipboard", + "expressiveCode.terminalWindowFallbackTitle": "Terminal window", + + "pagefind.clear_search": "Clear", + "pagefind.load_more": "Load more results", + "pagefind.search_label": "Search this site", + "pagefind.filters_label": "Filters", + "pagefind.zero_results": "No results for [SEARCH_TERM]", + "pagefind.many_results": "[COUNT] results for [SEARCH_TERM]", + "pagefind.one_result": "[COUNT] result for [SEARCH_TERM]", + "pagefind.alt_search": "No results for [SEARCH_TERM]. Showing results for [DIFFERENT_TERM] instead", + "pagefind.search_suggestion": "No results for [SEARCH_TERM]. Try one of the following searches:", + "pagefind.searching": "Searching for [SEARCH_TERM]..." 
+} diff --git a/docs/src/layouts/PageFrameExtended.astro b/docs/src/layouts/PageFrameExtended.astro new file mode 100644 index 00000000000..9287376a4b9 --- /dev/null +++ b/docs/src/layouts/PageFrameExtended.astro @@ -0,0 +1,9 @@ +--- +import PageFrame from '@astrojs/starlight/components/PageFrame.astro'; +--- + + + + + + diff --git a/docs/src/lib/components/DownloadOptions.astro b/docs/src/lib/components/DownloadOptions.astro new file mode 100644 index 00000000000..a9f036395ff --- /dev/null +++ b/docs/src/lib/components/DownloadOptions.astro @@ -0,0 +1,196 @@ +--- +import { LinkCard, Icon, LinkButton } from '@astrojs/starlight/components'; +import { type StarlightIcon } from '@astrojs/starlight/types'; + +type LauncherDownloadOption = { + icon: StarlightIcon; + headline: string; + note: string; + launcherDownloadLink: string; + launcherDownloadLabel?: string; +}; +const launcherDownloadOptions: Record = { + windows: { + icon: 'seti:windows', + headline: 'Download for Windows', + note: 'Requires Windows 10 or later, and NVIDIA or AMD GPU.', + launcherDownloadLink: + 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe', + launcherDownloadLabel: 'Download EXE', + }, + macos: { + icon: 'apple', + headline: 'Download for MacOS', + note: 'Requires Apple Silicon (M-Series). Not compatible with Intel.', + launcherDownloadLink: + 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg', + launcherDownloadLabel: 'Download DMG', + }, + linux: { + icon: 'linux', + headline: 'Download for Linux', + note: 'Requires NVIDIA or AMD GPU. 
Compatible with most distributions.', + launcherDownloadLink: + 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage', + launcherDownloadLabel: 'Download AppImage', + }, +}; + +const manualDownloadOptions = { + github: { + headline: 'Download from GitHub', + description: 'For advanced users who want to set up Invoke manually or contribute to the project.', + href: 'https://github.com/invoke-ai/InvokeAI/releases', + }, + docker: { + headline: 'Run with Docker', + description: 'For users who want to run Invoke without installing dependencies directly on their system.', + href: '/configuration/docker/', + }, +}; +--- + +
+ +
+ { + Object.entries(launcherDownloadOptions).map( + ([key, { icon, headline, note, launcherDownloadLink, launcherDownloadLabel }]) => ( +
+ +

{headline}

+

{note}

+ +
+ + {launcherDownloadLabel} + +
+
+ ), + ) + } +
+ + +
+ OR +
+ + +
+ { + Object.entries(manualDownloadOptions).map(([key, { headline, href, description }]) => ( + + )) + } +
+
+ + + + diff --git a/docs/src/lib/components/EmptyComponent.astro b/docs/src/lib/components/EmptyComponent.astro new file mode 100644 index 00000000000..a04846e64d7 --- /dev/null +++ b/docs/src/lib/components/EmptyComponent.astro @@ -0,0 +1,3 @@ +--- +// This is used to override starlight components we don't want to use +--- diff --git a/docs/src/lib/components/Footer.astro b/docs/src/lib/components/Footer.astro new file mode 100644 index 00000000000..65137dec3b9 --- /dev/null +++ b/docs/src/lib/components/Footer.astro @@ -0,0 +1,28 @@ +--- +import PageFooter from '@astrojs/starlight/components/Footer.astro'; +--- + + + +
+ This site was designed and developed by Aether Fox Studio. +
+ + diff --git a/docs/src/lib/components/ForceDarkTheme.astro b/docs/src/lib/components/ForceDarkTheme.astro new file mode 100644 index 00000000000..f9c102a1ce8 --- /dev/null +++ b/docs/src/lib/components/ForceDarkTheme.astro @@ -0,0 +1,12 @@ +--- + +--- + + + diff --git a/docs/src/lib/components/Link.astro b/docs/src/lib/components/Link.astro new file mode 100644 index 00000000000..cca88478340 --- /dev/null +++ b/docs/src/lib/components/Link.astro @@ -0,0 +1,23 @@ +--- +type Props = { + href: string; + label?: string; + [key: string]: any; +}; + +const { href, label, ...rest } = Astro.props as Props; + +const useSlot = !!Astro.slots.has('default'); +const isExternal = /^https?:\/\//.test(href); +--- + + + {useSlot ? : label} + diff --git a/docs/src/lib/components/Mermaid.astro b/docs/src/lib/components/Mermaid.astro new file mode 100644 index 00000000000..18a59eba203 --- /dev/null +++ b/docs/src/lib/components/Mermaid.astro @@ -0,0 +1,58 @@ +--- +type Props = { + title?: string; +}; + +const { title = '' } = Astro.props as Props; +--- + + + +
+
{title}
+ +
Loading diagram...
+ +
+ Source + +
+
diff --git a/docs/src/lib/components/SystemRequirmentsLink.astro b/docs/src/lib/components/SystemRequirmentsLink.astro new file mode 100644 index 00000000000..80c05e7fcfb --- /dev/null +++ b/docs/src/lib/components/SystemRequirmentsLink.astro @@ -0,0 +1,9 @@ +--- +import { LinkCard } from '@astrojs/starlight/components'; +--- + + diff --git a/docs/src/lib/utils/links.ts b/docs/src/lib/utils/links.ts new file mode 100644 index 00000000000..3f6d67d910c --- /dev/null +++ b/docs/src/lib/utils/links.ts @@ -0,0 +1,34 @@ +export const externalLinks = { + // Nvidia + cudaDownloads: 'https://developer.nvidia.com/cuda-downloads', + nvidiaRuntime: 'https://developer.nvidia.com/container-runtime', + cudnnDocs: 'https://developer.nvidia.com/cudnn', + cudnnSupport: 'https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html', + cudnnDownload: 'https://developer.nvidia.com/cudnn', + + // AMD + rocmDocs: 'https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html', + rocmDocker: 'https://github.com/ROCm/ROCm-docker', + + // Python + pythonDownload: 'https://www.python.org/downloads/', + msvcRedist: 'https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist', + uvDocs: 'https://docs.astral.sh/uv/concepts/python-versions/#installing-a-python-version', + + // InvokeAI + discord: 'https://discord.gg/ZmtBAhwWhy', + github: 'https://github.com/invoke-ai/InvokeAI', + launcher: 'https://github.com/invoke-ai/launcher/releases/latest', + support: 'https://support.invoke.ai', + + // Launcher + launcherWindows: 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe', + launcherMacOS: 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg', + launcherLinux: 'https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage', +} as const; + +export const internalLinks = { + quickStart: 
'/getting-started/quick_start', + lowVram: '/configuration/low-vram-mode', + // ... etc +} as const; diff --git a/docs/src/pages/download.astro b/docs/src/pages/download.astro new file mode 100644 index 00000000000..1160566dadc --- /dev/null +++ b/docs/src/pages/download.astro @@ -0,0 +1,17 @@ +--- +import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro'; +import DownloadOptions from '@components/DownloadOptions.astro'; +--- + + + + diff --git a/docs/src/styles/custom.css b/docs/src/styles/custom.css new file mode 100644 index 00000000000..7d824457371 --- /dev/null +++ b/docs/src/styles/custom.css @@ -0,0 +1,315 @@ +:root { + /* Typography */ + --__sl-font: 'Inter', sans-serif; + --__sl-font-mono: + 'Roboto Mono', SFMono-Regular, Menlo, Monaco, Consolas, 'Liberation Mono', 'Courier New', monospace; + + --radius: 0.35rem; + + /* Colors */ + --sl-color-bg: #1c1f23; + --sl-color-bg-nav: #31343b; + --sl-color-bg-sidebar: #272a2f; + + --sl-color-gray-7: #272a2f; + + --sl-color-hairline: rgba(255, 255, 255, 0.08); + --sl-color-hairline-light: rgba(255, 255, 255, 0.16); + + --sl-color-text-accent: #97d2ee; + --sl-color-text-accent-2: #e4fd1d; +} + +html, +body { + scroll-behavior: smooth; +} + +[data-has-hero] { + header { + background-color: var(--sl-color-bg); + border-color: transparent; + + .header { + max-width: calc(var(--sl-content-width) + 8rem); + margin-inline: auto; + } + } +} + +.site-title { + transition: transform 100ms ease-in-out; + + &:hover { + transform: scale(1.02); + } + &:active { + transform: scale(0.98); + } +} + +.hero { + padding-top: clamp(2.5rem, calc(1rem + 10vmin), 5rem); + padding-bottom: clamp(2.5rem, calc(1rem + 10vmin), 10rem); + + &:has(> :only-child) { + grid-template-columns: 1fr; + gap: 0; + + .sl-flex { + align-items: center; + text-align: center; + } + } +} + +.header { + display: flex; + align-items: center; + justify-content: space-between; + width: 100%; + + .title--wrapper { + flex-shrink: 0; + } + + /* 
Site Search Container */ + .sl-flex:has(site-search) { + order: 2; + justify-content: end; + width: 100%; + max-width: 22rem; + + @media (max-width: 799px) { + width: auto; + } + } + + /* Social Items */ + > :last-child:has(> .social-icons) { + order: 1; + margin-left: auto; + justify-content: end; + } +} + +.page { + background-image: radial-gradient(circle, var(--sl-color-hairline) 1px, transparent 1px); + background-size: 20px 20px; +} + +.right-sidebar-container { + background: var(--sl-color-bg); +} + +#starlight__sidebar .sidebar-content { + a { + transition: + background 50ms ease-in-out, + color 50ms ease-in-out; + + &:not([aria-current='page']):hover { + background: var(--sl-color-hairline); + } + &:not([aria-current='page']):active { + background: var(--sl-color-hairline-light); + } + } +} + +site-search > button { + transition: border-color 100ms ease-in-out; +} + +.sl-link-button { + border-radius: 0.5rem; +} + +.sl-link-card { + background: var(--sl-color-bg); + transition: border-color 100ms ease-in-out; + + &:active { + border-color: var(--sl-color-hairline-light); + } + + svg { + transition: color 100ms ease-in-out; + } +} + +.expressive-code .frame pre { + background: var(--sl-color-bg-sidebar); +} + +ul[role='tablist'] { + border-bottom: 1px solid var(--sl-color-hairline); +} + +a[role='tab'] { + border: none; + padding: 0.275rem 0.5rem; + transition: all 100ms ease; + border-radius: var(--radius); + border-bottom-left-radius: 0; + border-bottom-right-radius: 0; + border-bottom-width: 2px; + border-bottom-style: solid; + box-shadow: none; + + &:not([aria-selected='true']) { + border-color: transparent; + color: var(--sl-color-text); + } + + &:not([aria-selected='true']):hover { + background: var(--sl-color-hairline); + } + + &:not([aria-selected='true']):active { + background: var(--sl-color-hairline-light); + } + + &[aria-selected='true'] { + font-weight: normal; + --sl-tab-color-border: var(--sl-color-text-accent); + color: 
var(--sl-color-text-accent); + } +} + +/* Decorate tabs with parent aside colors */ +aside a[role='tab'] { + &[aria-selected='true'] { + --sl-tab-color-border: var(--sl-color-asides-border); + color: var(--sl-color-asides-text-accent); + } +} + +a[rel='next'], +a[rel='prev'] { + background: var(--sl-color-bg); + transition: border-color 100ms ease-in-out; +} + +.sl-steps { + & > li::before { + font-family: var(--__sl-font-mono); + } +} + +article.card { + border-radius: var(--radius); + + padding: clamp(1rem, calc(0.125rem + 3vw), 1.5rem); +} + +.starlight-aside { + border-radius: var(--radius); + border: none; + position: relative; + padding: 0.75rem; + padding-left: 1.5rem; + + &::before { + content: ''; + position: absolute; + left: 0.35rem; + width: 0.25rem; + inset-block: 0.35rem; + border-radius: 999px; + background: var(--sl-color-asides-border); + } +} + +.expressive-code .has-title { + .header { + border-bottom: var(--ec-brdWd) solid var(--ec-brdCol); + } + .header .title { + border-inline: var(--ec-brdWd) solid var(--ec-brdCol); + border-top: var(--ec-brdWd) solid var(--ec-brdCol); + background: var(--sl-color-bg-sidebar); + font-family: var(--__sl-font-mono); + font-size: var(--sl-text-xs); + padding: calc(var(--ec-uiPadBlk) + var(--ec-frm-edActTabIndHt)) var(--ec-uiPadInl); + cursor: pointer; + + &::after { + display: none; + } + + &::before { + position: absolute; + content: ''; + inset: 0; + background: var(--sl-color-hairline); + opacity: 0; + transition: opacity 75ms ease-in-out; + } + + &:hover::before { + opacity: 1; + } + } +} + +.hero .actions { + gap: 1.5rem; +} + +.card .sl-link-button { + margin-bottom: 0; +} + +.sl-link-button { + position: relative; + overflow: hidden; + transition: transform 100ms ease-in-out; + + &:not(.minimal) { + padding: 0.65rem 0.85rem; + + &::before { + content: ''; + position: absolute; + inset: 0; + background: var(--sl-color-hairline); + opacity: 0; + transition: opacity 75ms ease-in-out; + pointer-events: none; 
+ } + } + + &:hover::before { + opacity: 1; + } + + &:hover, + &:focus-visible { + transform: scale(1.02); + } + + &:active { + transform: scale(0.98); + } + + &.primary::before { + background: rgba(0, 0, 0, 0.12); + } + + &.secondary { + border-color: var(--sl-color-gray-5); + } +} + +/* TODO: Custom markdown content styles */ +.sl-markdown-content { + table { + :is(th:first-child, td:first-child):not(:where(.not-content *)) { + padding: 0.5rem 1rem; + } + tr:hover { + background-color: var(--sl-color-bg-sidebar); + } + } +} diff --git a/docs/tsconfig.json b/docs/tsconfig.json new file mode 100644 index 00000000000..7e676e80c64 --- /dev/null +++ b/docs/tsconfig.json @@ -0,0 +1,14 @@ +{ + "extends": "astro/tsconfigs/strict", + "include": [".astro/types.d.ts", "**/*"], + "exclude": ["dist"], + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@/*": ["./*"], + "@lib/*": ["./src/lib/*"], + "@utils/*": ["./src/lib/utils/*"], + "@components/*": ["./src/lib/components/*"], + }, + }, +}