diff --git a/invokeai/app/api/routers/node_docs.py b/invokeai/app/api/routers/node_docs.py new file mode 100644 index 00000000000..af16c27e979 --- /dev/null +++ b/invokeai/app/api/routers/node_docs.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import importlib.resources as pkg_resources +import mimetypes +import re +from importlib import import_module + +from fastapi import APIRouter, HTTPException +from fastapi.responses import PlainTextResponse, Response + +from invokeai.backend.util.logging import InvokeAILogger + +logger = InvokeAILogger.get_logger(__name__) + +router = APIRouter() + +# validation regexes +VALID_SEGMENT = re.compile(r"^[A-Za-z0-9_-]+$") +IMAGE_SEGMENT = re.compile(r"^[A-Za-z0-9_.-]+$") + + +@router.get("/nodeDocs/{lang}/{name}.md") +def get_node_doc(lang: str, name: str) -> PlainTextResponse: + """Return packaged markdown for a node. + + This endpoint reads packaged resources from the installed `invokeai.resources` + package via importlib. + """ + # Basic validation + if not VALID_SEGMENT.match(lang) or not VALID_SEGMENT.match(name): + raise HTTPException(status_code=400, detail="Invalid path segment") + + try: + res_pkg = import_module("invokeai.resources") + pkg_path = pkg_resources.files(res_pkg).joinpath("node_docs", lang, f"{name}.md") + except Exception as e: + logger.warning(f"node_docs: unable to import packaged resources: {e}") + raise HTTPException(status_code=404, detail="Not found") + + # Ensure resource exists in the package + try: + if not pkg_path.is_file(): + logger.debug(f"node_docs: resource not found in package: {pkg_path}") + raise FileNotFoundError + text = pkg_path.read_text(encoding="utf-8") + return PlainTextResponse(content=text, media_type="text/markdown") + except FileNotFoundError: + raise HTTPException(status_code=404, detail="Not found") + except Exception as e: + logger.warning(f"node_docs: failed reading resource {pkg_path}: {e}") + raise HTTPException(status_code=404, detail="Not found") + + 
+@router.get("/nodeDocs/{lang}/images/{image_name}") +def get_node_doc_image(lang: str, image_name: str) -> Response: + """Return packaged image resource for node docs. + + Only reads from `invokeai.resources` packaged data. Adds `X-Content-Type-Options` + and a conservative Cache-Control header. + """ + # Validate inputs + if not IMAGE_SEGMENT.match(image_name) or not VALID_SEGMENT.match(lang): + raise HTTPException(status_code=400, detail="Invalid path segment") + + try: + res_pkg = import_module("invokeai.resources") + pkg_path = pkg_resources.files(res_pkg).joinpath("node_docs", lang, "images", image_name) + except Exception as e: + logger.warning(f"node_docs: unable to import packaged resources for image: {e}") + raise HTTPException(status_code=404, detail="Not found") + + try: + if not pkg_path.is_file(): + logger.debug(f"node_docs: image resource not found in package: {pkg_path}") + raise FileNotFoundError + data = pkg_path.read_bytes() + mime_type, _ = mimetypes.guess_type(image_name) + headers = { + "X-Content-Type-Options": "nosniff", + "Cache-Control": "public, max-age=86400", + } + return Response(content=data, media_type=mime_type or "application/octet-stream", headers=headers) + except FileNotFoundError: + raise HTTPException(status_code=404, detail="Not found") + except Exception as e: + logger.warning(f"node_docs: failed reading image resource {pkg_path}: {e}") + raise HTTPException(status_code=404, detail="Not found") + + +# Expose the router +node_docs_router = router diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 335327f532b..205abb7351a 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -24,6 +24,8 @@ images, model_manager, model_relationships, + # node_docs router serves packaged node documentation + node_docs, session_queue, style_presets, utilities, @@ -133,6 +135,8 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint): app.include_router(workflows.workflows_router, 
prefix="/api") app.include_router(style_presets.style_presets_router, prefix="/api") app.include_router(client_state.client_state_router, prefix="/api") +# Include node docs router (serves packaged node docs) +app.include_router(node_docs.node_docs_router, prefix="") app.openapi = get_openapi_func(app) diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index 118fd330d07..a9c8d89b564 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -57,6 +57,7 @@ "cmdk": "^1.1.1", "compare-versions": "^6.1.1", "dockview": "^4.7.1", + "dompurify": "^2.4.0", "es-toolkit": "^1.39.7", "filesize": "^10.1.6", "fracturedjsonjs": "^4.1.0", @@ -69,6 +70,7 @@ "linkify-react": "^4.3.1", "linkifyjs": "^4.3.1", "lru-cache": "^11.1.0", + "marked": "^17.0.1", "mtwist": "^1.0.2", "nanoid": "^5.1.5", "nanostores": "^1.0.1", @@ -114,6 +116,7 @@ "@storybook/addon-docs": "^9.0.17", "@storybook/addon-links": "^9.0.17", "@storybook/react-vite": "^9.0.17", + "@types/dompurify": "^3.2.0", "@types/node": "^22.15.1", "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index bc37d622178..5a6188e26b6 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -62,6 +62,9 @@ importers: dockview: specifier: ^4.7.1 version: 4.7.1(react@18.3.1) + dompurify: + specifier: ^2.4.0 + version: 2.5.8 es-toolkit: specifier: ^1.39.7 version: 1.39.7 @@ -98,6 +101,9 @@ importers: lru-cache: specifier: ^11.1.0 version: 11.1.0 + marked: + specifier: ^17.0.1 + version: 17.0.1 mtwist: specifier: ^1.0.2 version: 1.0.2 @@ -216,6 +222,9 @@ importers: '@storybook/react-vite': specifier: ^9.0.17 version: 9.0.17(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(rollup@4.45.1)(storybook@9.0.17(@testing-library/dom@10.4.0)(prettier@3.6.2))(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)) + '@types/dompurify': + specifier: 
^3.2.0 + version: 3.2.0 '@types/node': specifier: ^22.15.1 version: 22.16.0 @@ -326,7 +335,7 @@ importers: version: 5.1.4(typescript@5.8.3)(vite@7.0.5(@types/node@22.16.0)(jiti@2.4.2)) vitest: specifier: ^3.1.2 - version: 3.2.4(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) + version: 3.2.4(@types/debug@4.1.12)(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) packages: @@ -1531,12 +1540,19 @@ packages: '@types/d3-zoom@3.0.8': resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + '@types/deep-eql@4.0.2': resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} '@types/doctrine@0.0.9': resolution: {integrity: sha512-eOIHzCUSH7SMfonMG1LsC2f8vxBFtho6NGBznK41R84YzPuvSBzrhEps33IsQiOW9+VL6NQ9DbjQJznk/S4uRA==} + '@types/dompurify@3.2.0': + resolution: {integrity: sha512-Fgg31wv9QbLDA0SpTOXO3MaxySc4DKGLi8sna4/Utjo4r3ZRPdCt4UQee8BWr+Q5z21yifghREPJGYaEOEIACg==} + deprecated: This is a stub types definition. dompurify provides its own type definitions, so you do not need this installed. 
+ '@types/eslint@8.56.12': resolution: {integrity: sha512-03ruubjWyOHlmljCVoxSuNDdmfZDzsrrz0P2LeJsOXr+ZwFQ+0yQIwNCwt/GYhV7Z31fgtXJTAEs+FYlEL851g==} @@ -1564,6 +1580,9 @@ packages: '@types/mdx@2.0.13': resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + '@types/node@22.16.0': resolution: {integrity: sha512-B2egV9wALML1JCpv3VQoQ+yesQKAmNMBIAY7OteVrikcOcAkWm+dGL6qpeCktPjAv6N1JLnhbNiqS35UpFyBsQ==} @@ -2160,6 +2179,9 @@ packages: dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} + dompurify@2.5.8: + resolution: {integrity: sha512-o1vSNgrmYMQObbSSvF/1brBYEQPHhV1+gsmrusO7/GXtp1T9rCS8cXFqVxK/9crT1jA6Ccv+5MTSjBNqr7Sovw==} + dpdm@3.14.0: resolution: {integrity: sha512-YJzsFSyEtj88q5eTELg3UWU7TVZkG1dpbF4JDQ3t1b07xuzXmdoGeSz9TKOke1mUuOpWlk4q+pBh+aHzD6GBTg==} hasBin: true @@ -2999,6 +3021,11 @@ packages: resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} engines: {node: '>=10'} + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + math-expression-evaluator@2.0.7: resolution: {integrity: sha512-uwliJZ6BPHRq4eiqNWxZBDzKUiS5RIynFFcgchqhBOloVLVBpZpNG8jRYkedLcBvhph8TnRyWEuxPqiQcwIdog==} @@ -5550,10 +5577,19 @@ snapshots: '@types/d3-interpolate': 3.0.4 '@types/d3-selection': 3.0.11 + '@types/debug@4.1.12': + dependencies: + '@types/ms': 2.1.0 + optional: true + '@types/deep-eql@4.0.2': {} '@types/doctrine@0.0.9': {} + '@types/dompurify@3.2.0': + dependencies: + dompurify: 2.5.8 + '@types/eslint@8.56.12': dependencies: '@types/estree': 1.0.8 @@ -5579,6 +5615,9 @@ snapshots: 
'@types/mdx@2.0.13': {} + '@types/ms@2.1.0': + optional: true + '@types/node@22.16.0': dependencies: undici-types: 6.21.0 @@ -5722,7 +5761,7 @@ snapshots: std-env: 3.9.0 test-exclude: 7.0.1 tinyrainbow: 2.0.0 - vitest: 3.2.4(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) transitivePeerDependencies: - supports-color @@ -5771,7 +5810,7 @@ snapshots: sirv: 3.0.1 tinyglobby: 0.2.14 tinyrainbow: 2.0.0 - vitest: 3.2.4(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2) '@vitest/utils@3.2.4': dependencies: @@ -6283,6 +6322,8 @@ snapshots: '@babel/runtime': 7.27.6 csstype: 3.1.3 + dompurify@2.5.8: {} + dpdm@3.14.0: dependencies: chalk: 4.1.2 @@ -7257,6 +7298,8 @@ snapshots: dependencies: semver: 7.7.2 + marked@17.0.1: {} + math-expression-evaluator@2.0.7: {} math-intrinsics@1.1.0: {} @@ -8481,7 +8524,7 @@ snapshots: fsevents: 2.3.3 jiti: 2.4.2 - vitest@3.2.4(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2): + vitest@3.2.4(@types/debug@4.1.12)(@types/node@22.16.0)(@vitest/ui@3.2.4)(jiti@2.4.2): dependencies: '@types/chai': 5.2.2 '@vitest/expect': 3.2.4 @@ -8507,6 +8550,7 @@ snapshots: vite-node: 3.2.4(@types/node@22.16.0)(jiti@2.4.2) why-is-node-running: 2.3.0 optionalDependencies: + '@types/debug': 4.1.12 '@types/node': 22.16.0 '@vitest/ui': 3.2.4(vitest@3.2.4) transitivePeerDependencies: diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 881d7253270..40db38c9a3f 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1274,7 +1274,9 @@ "alignmentDL": "Bottom Left", "alignmentUR": "Top Right", "alignmentDR": "Bottom Right" - } + }, + "help": "Help", + "noDocsAvailable": "No documentation available for this node." 
}, "parameters": { "aspect": "Aspect", @@ -1561,8 +1563,6 @@ "problemUnpublishingWorkflow": "Problem Unpublishing Workflow", "problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.", "workflowUnpublished": "Workflow Unpublished", - "sentToCanvas": "Sent to Canvas", - "sentToUpscale": "Sent to Upscale", "promptGenerationStarted": "Prompt generation started", "uploadAndPromptGenerationFailed": "Failed to upload image and generate prompt", "promptExpansionFailed": "We ran into an issue. Please try prompt expansion again.", @@ -2685,7 +2685,6 @@ "selectPreset": "Select Style Preset", "noMatchingPresets": "No matching presets" }, - "ui": { "tabs": { "generate": "Generate", diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx index 63a86479b2c..a60ca776572 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHeader.tsx @@ -7,6 +7,7 @@ import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid'; import { memo } from 'react'; import InvocationNodeCollapsedHandles from './InvocationNodeCollapsedHandles'; +import { InvocationNodeHelpButton } from './InvocationNodeHelpButton'; import { InvocationNodeInfoIcon } from './InvocationNodeInfoIcon'; import InvocationNodeStatusIndicator from './InvocationNodeStatusIndicator'; @@ -38,6 +39,7 @@ const InvocationNodeHeader = ({ nodeId, isOpen }: Props) => { + {!isOpen && } diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpButton.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpButton.tsx new file mode 100644 index 00000000000..b34f03ae023 --- /dev/null +++ 
b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpButton.tsx @@ -0,0 +1,31 @@ +import { Icon, useDisclosure } from '@invoke-ai/ui-library'; +import { memo } from 'react'; +import { PiQuestionBold } from 'react-icons/pi'; + +import { InvocationNodeHelpModal } from './InvocationNodeHelpModal'; + +interface Props { + nodeId: string; +} + +export const InvocationNodeHelpButton = memo(({ nodeId: _nodeId }: Props) => { + const { isOpen, onOpen, onClose } = useDisclosure(); + + return ( + <> + + + + ); +}); + +InvocationNodeHelpButton.displayName = 'InvocationNodeHelpButton'; diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpModal.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpModal.tsx new file mode 100644 index 00000000000..52530c06f80 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeHelpModal.tsx @@ -0,0 +1,252 @@ +import { + Box, + Image, + Modal, + ModalBody, + ModalCloseButton, + ModalContent, + ModalHeader, + ModalOverlay, + Spinner, + Text, +} from '@invoke-ai/ui-library'; +import { logger } from 'app/logging/logger'; +import DOMPurify from 'dompurify'; +import { useNodeTemplateOrThrow } from 'features/nodes/hooks/useNodeTemplateOrThrow'; +import { marked } from 'marked'; +import { memo, type ReactElement, useEffect, useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +const log = logger('system'); + +interface NodeDocsContent { + markdown: string; + basePath: string; +} + +interface Props { + isOpen: boolean; + onClose: () => void; +} + +/** + * Resolves a potentially relative image path to an absolute path based on the docs base path. + * Handles paths starting with './' or not starting with '/' or 'http'. 
+ */ +const resolveImagePath = (src: string | undefined, basePath: string): string => { + if (!src) { + return ''; + } + // If it's already an absolute URL or data URL, return as-is + if (src.startsWith('http://') || src.startsWith('https://') || src.startsWith('data:') || src.startsWith('/')) { + return src; + } + // Handle relative paths like './images/...' or 'images/...' + const relativePath = src.startsWith('./') ? src.slice(2) : src; + // Normalize path to avoid double slashes + const normalizedBasePath = basePath.endsWith('/') ? basePath.slice(0, -1) : basePath; + const normalizedRelativePath = relativePath.startsWith('/') ? relativePath.slice(1) : relativePath; + return `${normalizedBasePath}/${normalizedRelativePath}`; +}; + +/** + * Rewrite relative image paths in markdown to be absolute based on basePath + */ +const rewriteRelativeImagePaths = (markdown: string, basePath: string): string => { + return markdown.replace(/!\[([^\]]*)\]\((?!\s*(?:https?:\/\/|\/|data:))([^)]+)\)/g, (_match, alt, src) => { + const cleaned = src.startsWith('./') ? src.slice(2) : src; + const normalized = cleaned.startsWith('/') ? cleaned.slice(1) : cleaned; + return `![${alt}](${basePath}/${normalized})`; + }); +}; + +/** + * Creates markdown components with proper image path resolution. 
+ */ +// We will not use react-markdown components anymore; keep resolveImagePath for potential future work +const _createMarkdownComponents = (basePath: string) => ({ + img: ({ src, alt }: { src?: string; alt?: string }) => ( + {alt + ), +}); + +export const InvocationNodeHelpModal = memo(({ isOpen, onClose }: Props): ReactElement => { + const nodeTemplate = useNodeTemplateOrThrow(); + const { t, i18n } = useTranslation(); + const [docsContent, setDocsContent] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + const [sanitizedHtml, setSanitizedHtml] = useState(null); + + useEffect(() => { + if (!isOpen) { + // Reset state when modal closes to prevent stale data + setDocsContent(null); + setError(null); + return; + } + + const loadDocs = async () => { + setIsLoading(true); + setError(null); + + const nodeType = nodeTemplate.type; + // Sanitize nodeType to prevent path traversal - only allow alphanumeric, underscore, and hyphen + const sanitizedNodeType = nodeType.replace(/[^a-zA-Z0-9_-]/g, ''); + if (sanitizedNodeType !== nodeType) { + log.warn({ nodeType }, 'Node type contains invalid characters for docs path'); + } + + const currentLanguage = i18n.language; + const fallbackLanguage = 'en'; + // Sanitize language code as well + const sanitizedLanguage = currentLanguage.replace(/[^a-zA-Z-]/g, ''); + + // Try to load docs for current language first, then fallback to English + const languagesToTry = + sanitizedLanguage !== fallbackLanguage ? 
[sanitizedLanguage, fallbackLanguage] : [fallbackLanguage]; + + for (const lang of languagesToTry) { + try { + const basePath = `/nodeDocs/${lang}`; + const response = await fetch(`${basePath}/${sanitizedNodeType}.md`); + if (response.ok) { + const markdown = await response.text(); + setDocsContent({ markdown, basePath }); + setIsLoading(false); + return; + } + } catch { + // Log error but continue to next language + log.debug(`Failed to fetch node docs for ${sanitizedNodeType} (${lang})`); + } + } + + // No docs found for any language + setError(t('nodes.noDocsAvailable')); + setIsLoading(false); + }; + + loadDocs(); + }, [isOpen, nodeTemplate.type, i18n.language, t]); + + useEffect(() => { + if (!docsContent) { + setSanitizedHtml(null); + return; + } + + let mounted = true; + (async () => { + const htmlOrPromise = marked.parse(rewriteRelativeImagePaths(docsContent.markdown, docsContent.basePath)); + const html = typeof htmlOrPromise === 'string' ? htmlOrPromise : await htmlOrPromise; + if (!mounted) { + return; + } + setSanitizedHtml(DOMPurify.sanitize(html)); + })(); + + return () => { + mounted = false; + }; + }, [docsContent]); + + return ( + + + + + {nodeTemplate.title} - {t('nodes.help')} + + + + {isLoading && } + {error && {error}} + {sanitizedHtml && ( + + )} + + + + ); +}); + +InvocationNodeHelpModal.displayName = 'InvocationNodeHelpModal'; diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 1f0464d1cc4..f967ea82743 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1958,6 +1958,52 @@ export type paths = { patch?: never; trace?: never; }; + "/nodeDocs/{lang}/{name}.md": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Node Doc + * @description Return packaged markdown for a node. 
+ * + * This endpoint reads packaged resources from the installed `invokeai.resources` + * package via importlib. + */ + get: operations["get_node_doc_nodeDocs__lang___name__md_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/nodeDocs/{lang}/images/{image_name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Node Doc Image + * @description Return packaged image resource for node docs. + * + * Only reads from `invokeai.resources` packaged data. Adds `X-Content-Type-Options` + * and a conservative Cache-Control header. + */ + get: operations["get_node_doc_image_nodeDocs__lang__images__image_name__get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; }; export type webhooks = Record; export type components = { @@ -30429,4 +30475,68 @@ export interface operations { }; }; }; + get_node_doc_nodeDocs__lang___name__md_get: { + parameters: { + query?: never; + header?: never; + path: { + lang: string; + name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + get_node_doc_image_nodeDocs__lang__images__image_name__get: { + parameters: { + query?: never; + header?: never; + path: { + lang: string; + image_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + 
[name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; } diff --git a/invokeai/frontend/web/src/types/dompurify.d.ts b/invokeai/frontend/web/src/types/dompurify.d.ts new file mode 100644 index 00000000000..5a8f523aab7 --- /dev/null +++ b/invokeai/frontend/web/src/types/dompurify.d.ts @@ -0,0 +1,7 @@ +declare module 'dompurify' { + interface DOMPurifyInstance { + sanitize: (dirty: string) => string; + } + const DOMPurify: DOMPurifyInstance; + export default DOMPurify; +} diff --git a/invokeai/resources/__init__.py b/invokeai/resources/__init__.py new file mode 100644 index 00000000000..f3ad163ee77 --- /dev/null +++ b/invokeai/resources/__init__.py @@ -0,0 +1 @@ +# package to hold static resources used by the InvokeAI package diff --git a/invokeai/resources/node_docs/_INFO_.md b/invokeai/resources/node_docs/_INFO_.md new file mode 100644 index 00000000000..be3042c7d2b --- /dev/null +++ b/invokeai/resources/node_docs/_INFO_.md @@ -0,0 +1,69 @@ +This folder contains developer-authored node documentation to be displayed in the Workflow Editor. + +## Naming: + +- One Markdown file per invocation, named exactly after its invocation_type with a .md suffix (e.g., "img_crop.md" for the "img_crop" invocation). +- Files live in this folder (and in language subfolders such as en/). + +## Authoring: + +- Description: Explain the intended use case(s) for the node, and any important details about its behavior. The intention here is to explain to the user **why** and **how** they would use this node. The description should not be a repeat of the node's technical specification, but rather a user-focused explanation of its purpose and functionality, written in clear, non-technical language. + +- Inputs: List and describe each input port, including expected data types and any important details about how the input affects the node's behavior. 
Use code formatting for input names (e.g., `Input Name`) when listing and referring to them. + +- Outputs: (if applicable) List and describe each output port, including data types and any important details about the output data. If the node has a single output that is already explained in the description, this section can be omitted. + +- Examples: Provide one or more example usages of the node, including images where applicable. Each example should include a brief description of the scenario being demonstrated. + +## Images: + +- Place image files in the `images/` subfolder next to the markdown file. Reference them using relative paths in the markdown. +- Ensure image names are unique and descriptive. Images can be reused across multiple docs if applicable, e.g., SD1.5 denoise and prompt nodes can all be shown in a single image since their usage is tied together. +- Images can be screenshots of the node in use, example outputs, or diagrams illustrating concepts. +- When displaying node usage examples, keep the example focused on the node and its immediate upstream/downstream connections. For best readability, keep the image width approximately two or three nodes wide. +- To reduce space requirements, use JPG format for all example images. + +[Use IMAGE_PLACEHOLDER for any images at this time. We will replace these with actual images later.] + +## Submitting: + +- Check that your markdown renders correctly in a markdown viewer in the UI. +- Because these docs are included as an installed module and served through API, new folders will only be included after a `uv pip install`. This ensures parity between dev and user installs. Just adding new files to existing folders *shouldn't* require installing again. + +## Doc Template: + +# [NODE NAME] + +[Node description goes here.] + +## Inputs + +- [Input 1 Name]: [Description of input 1] +- [Input 2 Name]: [Description of input 2] +- ... 
+ +## Outputs + +- [Output 1 Name]: [Description of output 1] +- [Output 2 Name]: [Description of output 2] +- ... + +--- + +## Example Usage + +### [Example Scenario 1] + +![Alt Text for Example 1](./images/[image_file_name_1].png) +[Brief description of Example Scenario 1.] + +### [Example Scenario 2] + +![Alt Text for Example 2](./images/[image_file_name_2].png) +[Brief description of Example Scenario 2.] + +--- + +## Notes: + +- [Any additional notes or important details about the node's behavior.] diff --git a/invokeai/resources/node_docs/en/apply_mask_to_image.md b/invokeai/resources/node_docs/en/apply_mask_to_image.md new file mode 100644 index 00000000000..4414b5afe45 --- /dev/null +++ b/invokeai/resources/node_docs/en/apply_mask_to_image.md @@ -0,0 +1,26 @@ +# Apply Mask to Image + +Extracts a region from an image defined by a mask (black=keep, white=discard) and uses the mask as the alpha channel so the extracted region can be composited elsewhere. + +## Inputs + +- `image`: The source image to extract from (RGBA expected). +- `mask`: The mask defining the region (black=keep, white=discard). +- `invert_mask`: Whether to invert the mask before applying it. + +## Outputs + +- `image`: The resulting image where the mask has been applied as the alpha channel. + +## Example Usage + +### Extract masked region + +Use `Apply Mask to Image` to produce an RGBA image where only the masked area remains visible for pasting into another composition. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Turn a mask into an image alpha to extract a region. + +## Notes + +- The mask is used directly as the alpha channel; black areas become opaque. 
\ No newline at end of file diff --git a/invokeai/resources/node_docs/en/blank_image.md new file mode 100644 index 00000000000..595d693df2f --- /dev/null +++ b/invokeai/resources/node_docs/en/blank_image.md @@ -0,0 +1,28 @@ +# Blank Image + +Creates a new blank image with the specified dimensions, color, and mode, then forwards it downstream. Useful for creating a canvas for compositing, drawing, or testing. + +## Inputs + +- `width`: The width of the image in pixels (default `512`). +- `height`: The height of the image in pixels (default `512`). +- `mode`: The image mode (`RGB` or `RGBA`) determining channels and transparency. +- `color`: The background color to fill the image. Supports RGBA values. + +--- + +## Example Usage + +### Pasting images onto a solid background border + +![image](images/paste_images_blank_background.jpg) + +In this example, the `Blank Image` node creates a solid color background. Multiple images are then pasted onto this background to create a composite image with a border effect. + +![image](images/paste_images_blank_background_result.jpg) + +### Generating random color noise for Text2Img workflows + +![image](images/blank_image_random_color_noise.jpg) + +In this example, a blank image is created with middle gray before being adjusted for saturation, hue, and brightness to create a random color input for a Text2Img workflow. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/canvas_paste_back.md new file mode 100644 index 00000000000..0caaee9e81b --- /dev/null +++ b/invokeai/resources/node_docs/en/canvas_paste_back.md @@ -0,0 +1,10 @@ +# Canvas Paste Back + +Combines two images using a mask, used in several of the Unified Canvas workflows. This pastes the `target_image` onto the `source_image` using a prepared mask. The node dilates (expands) and then blurs the mask before using it to blend the images. 
This reduces the visible seam when pasting inpaint results back into the original canvas. + +## Inputs + +- `source_image`: The image onto which the target will be pasted. +- `target_image`: The image to paste into the source. +- `mask`: The mask controlling where the paste occurs. +- `mask_blur`: Amount of Gaussian blur to apply to the mask (default `0`). \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/color_correct.md b/invokeai/resources/node_docs/en/color_correct.md new file mode 100644 index 00000000000..224c40c2e30 --- /dev/null +++ b/invokeai/resources/node_docs/en/color_correct.md @@ -0,0 +1,32 @@ +# Color Correct + +Matches the color histogram of a base image to a reference image. You can limit correction to specific channels or colorspaces and optionally apply a mask to restrict where correction occurs. + +## Inputs + +- `base_image`: The image to color-correct. +- `color_reference`: Reference image whose colors should be matched. +- `mask` (optional): Mask limiting correction (white=use original, black=apply correction). +- `colorspace`: Colorspace to operate in (`RGB`, `YCbCr`, `YCbCr-Chroma`, `YCbCr-Luma`). Default `RGB`. + +## Outputs + +- `image`: The color-corrected image with original alpha preserved. + +--- + +## Example Usage + +### Fix color drift from high-res passes + +Depending on upscaling, manipulations, controlnets, loras, or other inputs, the colors of a high-resolution output from a multi-stage generation might drift from the original low-res image. This node can be used to restore the original color balance by matching the high-res output back to the low-res input, restoring saturation or brightness depending on the colorspace used. + +![image](images/highres_color_correction.jpg) + +![image](images/highres_color_correction_result.jpg) + +--- + +## Notes + +- The node performs histogram matching per-channel using cumulative distribution functions. 
This can lead to colors (combinations of channels) that were not present in the reference image. This is especially true when operating in `RGB` colorspace. Using `YCbCr` or its variants can help preserve color relationships. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/crop_image_to_bounding_box.md b/invokeai/resources/node_docs/en/crop_image_to_bounding_box.md new file mode 100644 index 00000000000..bff3bfe6695 --- /dev/null +++ b/invokeai/resources/node_docs/en/crop_image_to_bounding_box.md @@ -0,0 +1,25 @@ +# Crop Image to Bounding Box + +Crops an image to a provided bounding box. If no bounding box is supplied, the node crops to the image's non-transparent bounding box. + +## Inputs + +- `image`: The image to crop. +- `bounding_box` (optional): The bounding box to crop to; if omitted, the image's non-transparent extents are used. + +## Outputs + +- `image`: The cropped image. + +## Example Usage + +### Trim transparent edges + +Use `Crop Image to Bounding Box` to remove surrounding transparent pixels or to crop to a specific box. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Crop to the non-transparent content of an image. + +## Notes + +- If a bounding box is provided it must be compatible with the image dimensions. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/expand_mask_with_fade.md b/invokeai/resources/node_docs/en/expand_mask_with_fade.md new file mode 100644 index 00000000000..8d4eda199e9 --- /dev/null +++ b/invokeai/resources/node_docs/en/expand_mask_with_fade.md @@ -0,0 +1,9 @@ +# Expand Mask with Fade + +Expands a binary mask outward by a specified fade distance and applies a smooth fade from black to white. Black indicates areas to keep from the generated image and white indicates areas to discard. + +## Inputs + +- `mask`: The mask to expand (grayscale). +- `threshold`: Threshold used to binarize the input mask (default `0`). +- `fade_size_px`: Fade distance in pixels (default `32`). 
If `0`, the mask is returned unchanged. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/float_math.md b/invokeai/resources/node_docs/en/float_math.md new file mode 100644 index 00000000000..6a25494d5b9 --- /dev/null +++ b/invokeai/resources/node_docs/en/float_math.md @@ -0,0 +1,34 @@ +# Float Math + +The Float Math node performs common floating-point operations on two inputs. Use it when you need precise decimal arithmetic, roots, exponentiation, or min/max comparisons with float inputs. + +## Inputs + +- `Operation`: The operation to perform. Choices: + - Add A+B: Adds `A` and `B`. + - Subtract A-B: Subtracts `B` from `A`. + - Multiply A\*B: Multiplies `A` by `B`. + - Divide A/B: Floating-point division. + - Exponentiate A^B: Raises `A` to the power `B` (watch out for zero-to-negative exponents). + - Absolute Value of A: Absolute value of `A` (ignores `B`). + - Square Root of A: Square root of `A` (ignores `B`; result invalid for negative `A`). + - Minimum(A,B): The smaller of `A` and `B`. + - Maximum(A,B): The larger of `A` and `B`. +- `A`: First float input. +- `B`: Second float input. + +## Outputs + +- Result: Float — the result of the selected operation. + +## Example Usage + +![Example](./images/IMAGE_PLACEHOLDER.png) +Use Float Math to compute a square root or fractional power for precise scaling. + +## Notes: + +- Division by zero is invalid and will produce a validation error; ensure `B` is not zero for DIV. +- Exponentiation will error if raising zero to a negative exponent. Root operations that produce complex numbers are rejected. +- Square Root operates on `A` only; negative `A` will be rejected because it would produce a complex result. +- For integer-only operations, use the Integer Math node. 
diff --git a/invokeai/resources/node_docs/en/float_to_int.md b/invokeai/resources/node_docs/en/float_to_int.md new file mode 100644 index 00000000000..0f70376aa4b --- /dev/null +++ b/invokeai/resources/node_docs/en/float_to_int.md @@ -0,0 +1,17 @@ +# Float to Integer + +The Float to Integer node rounds floating-point numbers to integers. At "Multiple of" 1, it performs standard rounding to the nearest integer. By adjusting the "Multiple of" parameter, users can round to the nearest specified multiple. Multiples of 64 are helpful for image dimensions that are more compatible with denoising models, 2 will return the nearest even number, etc. The "Method" parameter allows you to choose the rounding direction. + +## Inputs + +- Value: The floating-point number to be converted to an integer. +- Multiple of: The multiple to which the value should be rounded. Leave at 1 for rounding to the nearest integer. +- Method: The direction to apply rounding: + - Nearest: Rounds to the nearest multiple. + - Floor: Rounds down to the nearest multiple. + - Ceil: Rounds up to the nearest multiple. + - Truncate: Rounds towards zero to the nearest multiple. + +## Notes: + +- This node uses numpy floor/ceiling operations, so direction is consistent for both positive and negative values. For example, flooring 3.7 results in 3, but flooring -3.7 results in -4, and not -3. To round towards the next lesser magnitude (i.e., -3), use the Truncate method. 
diff --git a/invokeai/resources/node_docs/en/images/blank_image_random_color_noise.jpg b/invokeai/resources/node_docs/en/images/blank_image_random_color_noise.jpg new file mode 100644 index 00000000000..e4b5519dbde Binary files /dev/null and b/invokeai/resources/node_docs/en/images/blank_image_random_color_noise.jpg differ diff --git a/invokeai/resources/node_docs/en/images/highres_color_correction.jpg b/invokeai/resources/node_docs/en/images/highres_color_correction.jpg new file mode 100644 index 00000000000..981eac4706d Binary files /dev/null and b/invokeai/resources/node_docs/en/images/highres_color_correction.jpg differ diff --git a/invokeai/resources/node_docs/en/images/highres_color_correction_result.jpg b/invokeai/resources/node_docs/en/images/highres_color_correction_result.jpg new file mode 100644 index 00000000000..e7fe4c835b1 Binary files /dev/null and b/invokeai/resources/node_docs/en/images/highres_color_correction_result.jpg differ diff --git a/invokeai/resources/node_docs/en/images/paste_images_blank_background.jpg b/invokeai/resources/node_docs/en/images/paste_images_blank_background.jpg new file mode 100644 index 00000000000..6dc465ceb64 Binary files /dev/null and b/invokeai/resources/node_docs/en/images/paste_images_blank_background.jpg differ diff --git a/invokeai/resources/node_docs/en/images/paste_images_blank_background_result.jpg b/invokeai/resources/node_docs/en/images/paste_images_blank_background_result.jpg new file mode 100644 index 00000000000..1211bd51ff6 Binary files /dev/null and b/invokeai/resources/node_docs/en/images/paste_images_blank_background_result.jpg differ diff --git a/invokeai/resources/node_docs/en/images/z_image_example_t2i.jpg b/invokeai/resources/node_docs/en/images/z_image_example_t2i.jpg new file mode 100644 index 00000000000..0e421f37ec2 Binary files /dev/null and b/invokeai/resources/node_docs/en/images/z_image_example_t2i.jpg differ diff --git a/invokeai/resources/node_docs/en/img_blur.md 
b/invokeai/resources/node_docs/en/img_blur.md new file mode 100644 index 00000000000..c53f85c9faf --- /dev/null +++ b/invokeai/resources/node_docs/en/img_blur.md @@ -0,0 +1,9 @@ +# Blur Image + +Applies a blur to an image while correctly handling premultiplied alpha so transparent edges don't darken. Supports Gaussian and Box blur modes. + +## Inputs + +- `image`: The image to blur. +- `radius`: The blur radius (default `8.0`). +- `blur_type`: The blur algorithm to use: `gaussian` (default) or `box`. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_chan.md b/invokeai/resources/node_docs/en/img_chan.md new file mode 100644 index 00000000000..d0534bdf4c6 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_chan.md @@ -0,0 +1,12 @@ +# Extract Image Channel + +Extracts a single channel (A, R, G, or B) from an image and returns it as a grayscale image. Use this node to inspect or work with individual channels. + +## Inputs + +- `image`: The image to extract the channel from. +- `channel`: The channel to extract (`A`, `R`, `G`, or `B`). Default is `A`. + +## Outputs + +- `image`: A single-channel grayscale image representing the requested channel. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_channel_multiply.md b/invokeai/resources/node_docs/en/img_channel_multiply.md new file mode 100644 index 00000000000..c47cf0bc044 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_channel_multiply.md @@ -0,0 +1,27 @@ +# Multiply Image Channel + +Scales a particular color channel by a factor and optionally inverts it. Works across multiple colorspaces and restores original alpha. + +## Inputs + +- `image`: The image to adjust. +- `channel`: Which channel to scale (e.g., `Green (RGBA)`, `Cb (YCbCr)`). +- `scale`: Multiplicative factor to apply (default `1.0`). +- `invert_channel`: If `true`, the channel is inverted after scaling. + +## Outputs + +- `image`: The image after channel scaling. 
+ +## Example Usage + +### Desaturate by scaling saturation + +Use `Multiply Image Channel` to reduce the saturation channel in `HSV` by scaling it below 1.0. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Scale a single channel to affect color/contrast. + +## Notes + +- The node clips values to the valid 0–255 range and restores the original alpha channel when appropriate. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_channel_offset.md b/invokeai/resources/node_docs/en/img_channel_offset.md new file mode 100644 index 00000000000..5e39f0986ea --- /dev/null +++ b/invokeai/resources/node_docs/en/img_channel_offset.md @@ -0,0 +1,26 @@ +# Offset Image Channel + +Adds or subtracts a value from a chosen color channel in various colorspaces (RGBA, CMYK, HSV, LAB, YCbCr). Useful for fine-tuning specific channels like red, hue, or luminance. + +## Inputs + +- `image`: The image to adjust. +- `channel`: Which channel to modify (e.g., `Red (RGBA)`, `Hue (HSV)`, `Luminosity (LAB)`). +- `offset`: Integer amount to add (or subtract if negative) to the channel (range `-255..255`). + +## Outputs + +- `image`: The adjusted image with the channel offset applied. + +## Example Usage + +### Boost red channel + +Use `Offset Image Channel` to increase or decrease a specific channel, such as warming an image by boosting red. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Adjust a single channel without affecting others. + +## Notes + +- When adjusting the hue channel, values wrap around rather than clamp. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_conv.md b/invokeai/resources/node_docs/en/img_conv.md new file mode 100644 index 00000000000..072ee31e5c6 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_conv.md @@ -0,0 +1,8 @@ +# Convert Image Mode + +Converts an image to a different mode (for example, `L`, `RGB`, `RGBA`, etc.). 
Use this node when a downstream operation requires a specific image mode or to change color/alpha representation. + +## Inputs + +- `image`: The image to convert. +- `mode`: The mode to convert the image to (e.g., `L`, `RGB`, `RGBA`). Default is `L`. diff --git a/invokeai/resources/node_docs/en/img_crop.md b/invokeai/resources/node_docs/en/img_crop.md new file mode 100644 index 00000000000..10228154919 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_crop.md @@ -0,0 +1,15 @@ +# Crop Image + +Crops an input image to a rectangular region. The crop rectangle can extend outside the boundaries of the original image; areas outside the image will be transparent. + +## Inputs + +- `image`: The image to crop. +- `x`: Left x coordinate of the crop rectangle (default `0`). +- `y`: Top y coordinate of the crop rectangle (default `0`). +- `width`: Width of the crop rectangle in pixels (default `512`). +- `height`: Height of the crop rectangle in pixels (default `512`). + +## Notes + +- If the crop rectangle extends beyond the original image, transparent padding is added to reach the requested size. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_hue_adjust.md b/invokeai/resources/node_docs/en/img_hue_adjust.md new file mode 100644 index 00000000000..de18d95e7da --- /dev/null +++ b/invokeai/resources/node_docs/en/img_hue_adjust.md @@ -0,0 +1,25 @@ +# Adjust Image Hue + +Rotates the hue of an image by a specified number of degrees. Useful for creative color shifts or quick recoloring. + +## Inputs + +- `image`: The image to adjust. +- `hue`: Degrees to rotate hue (0–360). Positive values rotate the hue forward. + +## Outputs + +- `image`: The hue-adjusted image (converted back to RGBA and saved). + +## Example Usage + +### Recolor elements + +Use `Adjust Image Hue` to quickly shift the overall color palette of an image or to create variations. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Shift hues for stylistic variation. 
+ +## Notes + +- The node internally converts the image to HSV, adjusts the hue channel, and converts back to RGBA. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_ilerp.md b/invokeai/resources/node_docs/en/img_ilerp.md new file mode 100644 index 00000000000..f69aa1c6c47 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_ilerp.md @@ -0,0 +1,26 @@ +# Inverse Lerp Image + +Inverse-linear remaps pixel values by mapping an input range `[min..max]` back to `[0..255]`. Helpful for normalizing or preparing images where a known intensity window should be stretched to full range. + +## Inputs + +- `image`: The image to remap. +- `min`: Input minimum value (default `0`). +- `max`: Input maximum value (default `255`). + +## Outputs + +- `image`: The remapped image. + +## Example Usage + +### Normalize window + +Use `Inverse Lerp Image` when you want to stretch a specific intensity window to the full 0-255 range for visualization or further processing. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Stretch a specific intensity window to full range. + +## Notes + +- Values outside the `[min..max]` input range are clipped to 0 or 255 respectively. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_lerp.md b/invokeai/resources/node_docs/en/img_lerp.md new file mode 100644 index 00000000000..03d52338ab0 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_lerp.md @@ -0,0 +1,26 @@ +# Lerp Image + +Linearly remaps pixel values of an image from [0..255] to a specified `[min..max]` range. Useful for adjusting contrast or re-normalizing image data. + +## Inputs + +- `image`: The image to remap. +- `min`: Output minimum value (default `0`). +- `max`: Output maximum value (default `255`). + +## Outputs + +- `image`: The remapped image. + +## Example Usage + +### Adjust output range + +Use `Lerp Image` to expand or compress the numeric range of pixel values before further processing. 
+ +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Remap pixel intensities to a new range. + +## Notes + +- Works on all channels and preserves image dimensions. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_mul.md b/invokeai/resources/node_docs/en/img_mul.md new file mode 100644 index 00000000000..02062d8ff61 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_mul.md @@ -0,0 +1,25 @@ +# Multiply Images + +Multiplies two images together using pixel-wise multiplication. This is useful for combining masks, applying multiply blend modes, or modulating brightness. + +## Inputs + +- `image1`: The first image to multiply. +- `image2`: The second image to multiply. + +## Outputs + +- `image`: The resulting image after multiplication. + +## Example Usage + +### Combine masks + +Use `Multiply Images` to combine two masks so that only regions present in both remain. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Multiply two masks to intersect their regions. + +## Notes + +- Uses `PIL.ImageChops.multiply()` under the hood. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_noise.md b/invokeai/resources/node_docs/en/img_noise.md new file mode 100644 index 00000000000..b6f2f66018e --- /dev/null +++ b/invokeai/resources/node_docs/en/img_noise.md @@ -0,0 +1,31 @@ +# Add Image Noise + +Adds noise to an image, either Gaussian or Salt-and-Pepper. Optionally restrict noise to regions using a mask. Alpha is preserved. + +## Inputs + +- `image`: The image to add noise to. +- `mask` (optional): Grayscale mask determining where to apply noise (black=noise, white=no noise). +- `seed`: Random seed for reproducible noise. +- `noise_type`: `gaussian` (default) or `salt_and_pepper`. +- `amount`: Strength of the noise (0–1, default `0.1`). +- `noise_color`: If `true`, produce colored noise; otherwise use monochrome noise. +- `size`: Size of noise points (default `1`). 
+ +## Outputs + +- `image`: The noisy image with original alpha restored. + +## Example Usage + +### Add film grain + +Use `Add Image Noise` to add subtle grain for realism or to break up banding artifacts. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Add subtle Gaussian noise for natural texture. + +## Notes + +- For `salt_and_pepper`, noise is applied probabilistically per-pixel according to `amount`. +- The node respects the provided `mask` by inverting it internally before compositing the noisy region. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_nsfw.md b/invokeai/resources/node_docs/en/img_nsfw.md new file mode 100644 index 00000000000..a9fb3a05149 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_nsfw.md @@ -0,0 +1,24 @@ +# Blur NSFW Image + +Checks an image for NSFW content and applies a blur if the image is considered NSFW. Use this node to automatically obfuscate images that may violate content policies. + +## Inputs + +- `image`: The image to check and blur if needed. + +## Outputs + +- `image`: The possibly blurred image. If the image is not flagged, it is returned unchanged. + +## Example Usage + +### Protect displaying content + +Place `Blur NSFW Image` before nodes that display or export images to ensure potentially sensitive content is safely blurred. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Automatically blur potential NSFW content. + +## Notes + +- Uses an internal safety checker to determine whether to blur; behavior depends on the configured safety model. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_pad_crop.md b/invokeai/resources/node_docs/en/img_pad_crop.md new file mode 100644 index 00000000000..a689c8d8c3a --- /dev/null +++ b/invokeai/resources/node_docs/en/img_pad_crop.md @@ -0,0 +1,28 @@ +# Center Pad or Crop Image + +Pad or crop an image from the center by specifying pixel amounts for each side. 
Positive values add padding (transparent area) outward; negative values crop inward. + +## Inputs + +- `image`: The image to modify. +- `left`: Pixels to pad/crop on the left (positive pads, negative crops). +- `right`: Pixels to pad/crop on the right. +- `top`: Pixels to pad/crop on the top. +- `bottom`: Pixels to pad/crop on the bottom. + +## Outputs + +- `image`: The padded or cropped image. + +## Example Usage + +### Expand canvas for composition + +Use `Center Pad or Crop Image` to add border space around an image for compositing or to crop equally from both sides. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Center-pad an image to prepare space for new elements. + +## Notes + +- The operation centers the original image within the new dimensions, so padding/cropping applies equally relative to the original center. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_paste.md b/invokeai/resources/node_docs/en/img_paste.md new file mode 100644 index 00000000000..e26286207e6 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_paste.md @@ -0,0 +1,30 @@ +# Paste Image + +Pastes one image into another at a specified offset, optionally using a mask to control transparency. The base image can be cropped to its original size after pasting. + +## Inputs + +- `base_image`: The target image to paste into. +- `image`: The image to paste on top of the base image. +- `mask` (optional): A mask controlling the paste (white=keep/paste areas). If supplied, the mask is inverted internally before use. +- `x`: Left x coordinate where the image is pasted (default `0`). +- `y`: Top y coordinate where the image is pasted (default `0`). +- `crop`: If `true`, the resulting composite will be cropped to the dimensions of the base image in case the pasted image extends beyond its borders (default `false`). 
+ +--- + +## Example Usage + +### Composite elements + +![image](images/paste_images_blank_background.jpg) + +In this example, the `Blank Image` node creates a solid color background. Multiple images are then pasted onto this background to create a composite image with a border effect. + +![image](images/paste_images_blank_background_result.jpg) + +--- + +## Notes + +- The coordinates `x` and `y` determine where the top-left corner of the pasted image will be placed on the base image. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_resize.md b/invokeai/resources/node_docs/en/img_resize.md new file mode 100644 index 00000000000..26e12e5bd89 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_resize.md @@ -0,0 +1,27 @@ +# Resize Image + +Resize an image to exact pixel dimensions using configurable resampling modes. Choose the resampling filter for quality or speed trade-offs. + +## Inputs + +- `image`: The image to resize. +- `width`: Destination width in pixels (default `512`). +- `height`: Destination height in pixels (default `512`). +- `resample_mode`: Resampling filter to use (`nearest`, `box`, `bilinear`, `hamming`, `bicubic`, `lanczos`). Default is `bicubic`. + +## Outputs + +- `image`: The resized image. + +## Example Usage + +### Prepare a model input + +Use `Resize Image` to make sure images are the correct size before passing to models or other nodes that require specific dimensions. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Resize for model inputs or exports. + +## Notes + +- Choosing `lanczos` or `bicubic` yields higher-quality downsamples, while `nearest` is fastest. 
\ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_scale.md b/invokeai/resources/node_docs/en/img_scale.md new file mode 100644 index 00000000000..71df1a4143a --- /dev/null +++ b/invokeai/resources/node_docs/en/img_scale.md @@ -0,0 +1,26 @@ +# Scale Image + +Scale an image by a multiplicative factor, preserving the original aspect ratio. Useful for quick upscales or downscales where only a factor is known. + +## Inputs + +- `image`: The image to scale. +- `scale_factor`: Multiplicative factor to scale dimensions by (default `2.0`). +- `resample_mode`: Resampling filter to use (`nearest`, `box`, `bilinear`, `hamming`, `bicubic`, `lanczos`). Default is `bicubic`. + +## Outputs + +- `image`: The scaled image. + +## Example Usage + +### Quick upscale + +Use `Scale Image` to double the size of an image before further processing or for preview purposes. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Double the size of an image while preserving sharpness. + +## Notes + +- The node multiplies both width and height by the same factor so aspect ratio is preserved. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/img_watermark.md b/invokeai/resources/node_docs/en/img_watermark.md new file mode 100644 index 00000000000..be9980c1bd8 --- /dev/null +++ b/invokeai/resources/node_docs/en/img_watermark.md @@ -0,0 +1,25 @@ +# Add Invisible Watermark + +Embeds an invisible watermark into an image using a text key. This watermark is not visible but can be detected by compatible tools to assert provenance. + +## Inputs + +- `image`: The image to watermark. +- `text`: Watermark text/key (default `InvokeAI`). + +## Outputs + +- `image`: The watermarked image. + +## Example Usage + +### Mark generated images + +Use `Add Invisible Watermark` to embed a hidden provenance tag into images before saving or sharing. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Embed an invisible watermark for provenance. 
+ +## Notes + +- The watermark is not visible in the pixel data but can be detected by compatible watermark readers. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/integer_math.md b/invokeai/resources/node_docs/en/integer_math.md new file mode 100644 index 00000000000..b9f05a5ce88 --- /dev/null +++ b/invokeai/resources/node_docs/en/integer_math.md @@ -0,0 +1,34 @@ +# Integer Math + +The Integer Math node performs a range of integer operations (add, subtract, multiply, divide, modulus, power, absolute, min, max) on two integer inputs. Use it when you need a single node to perform common integer arithmetic with built-in validation for operations that would produce invalid integer results. + +## Inputs + +- operation: The operation to perform. Choices: + - Add A+B: Adds `A` and `B`. + - Subtract A-B: Subtracts `B` from `A`. + - Multiply A\*B: Multiplies `A` by `B`. + - Divide A/B: Integer division; fractional part discarded. + - Exponentiate A^B: Raises `A` to the power `B` (b must be >= 0). + - Modulus A%B: Remainder of `A` divided by `B`. + - Absolute Value of A: Absolute value of `A` (ignores `B`). + - Minimum(A,B): The smaller of `A` and `B`. + - Maximum(A,B): The larger of `A` and `B`. +- `A`: First integer (primary operand). +- `B`: Second integer (secondary operand; some operations may ignore it). + +## Outputs + +- Result: Integer — the result of the selected operation. + +## Example Usage + +![Example](./images/IMAGE_PLACEHOLDER.png) +Use the Integer Math node to compute integer exponents or combine two counters with a chosen operation. + +## Notes: + +- Division and modulus by zero are invalid and will produce a validation error; ensure `B` is not zero for DIV or MOD. +- Exponentiation (EXP) requires a non-negative exponent (`B` >= 0); negative exponents are rejected because they don't produce integers. +- Division uses integer division (equivalent to int(`A` / `B`)), so fractional portions are discarded rather than rounded. 
+- Absolute Value ignores the `B` input. diff --git a/invokeai/resources/node_docs/en/mask_combine.md b/invokeai/resources/node_docs/en/mask_combine.md new file mode 100644 index 00000000000..574ce76c1cc --- /dev/null +++ b/invokeai/resources/node_docs/en/mask_combine.md @@ -0,0 +1,25 @@ +# Combine Masks + +Combines two masks by multiplying them together so that only areas present in both masks remain. Useful for intersecting mask regions. + +## Inputs + +- `mask1`: The first grayscale mask. +- `mask2`: The second grayscale mask. + +## Outputs + +- `image`: The combined mask (saved as mask category) resulting from pixel-wise multiplication. + +## Example Usage + +### Intersect selections + +Use `Combine Masks` to intersect two separate selections so downstream operations only affect the overlap region. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Multiply masks to compute their intersection. + +## Notes + +- Uses `PIL.ImageChops.multiply()` for combination. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/mask_edge.md b/invokeai/resources/node_docs/en/mask_edge.md new file mode 100644 index 00000000000..c1e3214db89 --- /dev/null +++ b/invokeai/resources/node_docs/en/mask_edge.md @@ -0,0 +1,28 @@ +# Mask Edge + +Creates an edge-based mask from an image by combining gradient and Canny edge detection, dilating the result, and optionally blurring. The final mask is inverted so black indicates areas to keep when inpainting. + +## Inputs + +- `image`: The image to compute edges from (converted to grayscale internally). +- `edge_size`: Pixel size used to dilate the detected edges. +- `edge_blur`: Amount of blur to apply to the resulting mask. +- `low_threshold`: Lower threshold for Canny edge detection. +- `high_threshold`: Upper threshold for Canny edge detection. + +## Outputs + +- `image`: A grayscale mask image (category: mask) where black indicates areas to retain and white indicates areas to discard. 
+ +## Example Usage + +### Create inpainting edge mask + +Use `Mask Edge` to generate masks that preserve the interior of shapes while isolating edges for inpainting or blending. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Generate an edge mask to guide inpainting along object boundaries. + +## Notes + +- The node leverages OpenCV for Canny detection and dilation; performance depends on image size. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/mask_from_id.md b/invokeai/resources/node_docs/en/mask_from_id.md new file mode 100644 index 00000000000..4e5a2786eb3 --- /dev/null +++ b/invokeai/resources/node_docs/en/mask_from_id.md @@ -0,0 +1,27 @@ +# Mask from Segmented Image + +Generate a binary mask isolating a particular ID color from a segmented/ID map image. Use this to extract regions corresponding to a specific object or class in an ID map. + +## Inputs + +- `image`: The ID map image (typically a segmented image with distinct colors representing classes). +- `color`: The target ID color to isolate. +- `threshold`: Distance threshold for color matching (default `100`). +- `invert`: If `true`, the resulting mask will be inverted. + +## Outputs + +- `image`: A binary mask (category: mask) highlighting the matched ID region. + +## Example Usage + +### Extract object selection + +Use `Mask from Segmented Image` to create a mask of a single object class from a segmentation output. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Generate a mask for a class by color. + +## Notes + +- The node computes Euclidean color distance in RGBA space and thresholds it to build the mask. 
\ No newline at end of file diff --git a/invokeai/resources/node_docs/en/paste_image_into_bounding_box.md b/invokeai/resources/node_docs/en/paste_image_into_bounding_box.md new file mode 100644 index 00000000000..6a67a69730c --- /dev/null +++ b/invokeai/resources/node_docs/en/paste_image_into_bounding_box.md @@ -0,0 +1,26 @@ +# Paste Image into Bounding Box + +Pastes a source image into a target image at the area defined by a bounding box. The source image must match the bounding box size. + +## Inputs + +- `source_image`: The image to paste (must match bounding box dimensions). +- `target_image`: The image to paste into. +- `bounding_box`: The bounding box (x, y, width, height) defining where to paste the source. + +## Outputs + +- `image`: The resulting composited image. + +## Example Usage + +### Tile into target + +Use `Paste Image into Bounding Box` to place a patch into a larger image at an explicit rectangle (useful for tiled generation or compositing). + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Paste a prepared patch into a target area on a canvas. + +## Notes + +- The bounding box must fit inside the target image and the source must match the box size. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/rand_float.md b/invokeai/resources/node_docs/en/rand_float.md new file mode 100644 index 00000000000..5466bc7b87d --- /dev/null +++ b/invokeai/resources/node_docs/en/rand_float.md @@ -0,0 +1,24 @@ +# Random Float + +The Random Float node outputs a single non-deterministic floating-point number sampled uniformly from a range. Use it to introduce small continuous variation or random seeds for float parameters. + +## Inputs + +- `Low`: Inclusive lower bound (float). +- `High`: Exclusive upper bound (float) — generated value will be >= `Low` and < `High`. +- `Decimals`: Number of decimal places to round the result to (integer). 
+ +## Outputs + +- Result: Float — a randomly chosen float rounded to the specified number of decimals. + +## Example Usage + +![Example](./images/IMAGE_PLACEHOLDER.png) +Generate a randomized float parameter (e.g., color hue offset) with controlled precision. + +## Notes: + +- This node is non-deterministic (use_cache=False) and will produce a new value each run. +- The node rounds the sampled float to the requested number of decimals before output. +- Ensure low < high to avoid errors. diff --git a/invokeai/resources/node_docs/en/rand_int.md b/invokeai/resources/node_docs/en/rand_int.md new file mode 100644 index 00000000000..e94ffa84fb7 --- /dev/null +++ b/invokeai/resources/node_docs/en/rand_int.md @@ -0,0 +1,23 @@ +# Random Integer + +The Random Integer node outputs a single non-deterministic integer drawn from a range. Use it when you need jittered counts, random indices, or other unpredictable integer values. + +## Inputs + +- `Low`: Inclusive lower bound (integer). +- `High`: Exclusive upper bound (integer) — the generated value will be >= `Low` and < `High`. + +## Outputs + +- Result: Integer — a randomly chosen integer in [`Low`, `High`). + +## Example Usage + +![Example](./images/IMAGE_PLACEHOLDER.png) +Generate a random index or offset to vary results across runs. + +## Notes: + +- This node is non-deterministic (use_cache=False) and will produce a new value each run. +- high is exclusive; set high = low + 1 to always get low. +- Ensure low < high to avoid errors. diff --git a/invokeai/resources/node_docs/en/round_float.md b/invokeai/resources/node_docs/en/round_float.md new file mode 100644 index 00000000000..b135b91c2b5 --- /dev/null +++ b/invokeai/resources/node_docs/en/round_float.md @@ -0,0 +1,23 @@ +# Round Float + +The Round Float node reduces a floating-point number to a specified number of decimal places. Use it when you want to control numeric precision for display, comparison, or downstream calculations. 
+ +## Inputs + +- `Value`: The float value to round. +- `Decimals`: Number of decimal places to retain (integer). Use 0 for whole-number results. + +## Outputs + +- Result: Float — the rounded value. + +## Example Usage + +![Example](./images/IMAGE_PLACEHOLDER.png) +Round a noisy parameter to two decimal places for stable downstream behavior. + +## Notes: + +- Rounding uses Python's round behavior (ties round to the nearest even value). +- Negative values are rounded according to the same rule (e.g., rounding -1.5 to 0 decimals yields -2.0 under nearest-even tie resolution). +- If you need integer results, use the Float to Integer node which supports rounding to multiples and different rounding methods. diff --git a/invokeai/resources/node_docs/en/save_image.md b/invokeai/resources/node_docs/en/save_image.md new file mode 100644 index 00000000000..c309f63d9b8 --- /dev/null +++ b/invokeai/resources/node_docs/en/save_image.md @@ -0,0 +1,24 @@ +# Save Image + +Saves a copy of an image to the image store. Unlike primitive image outputs, this invocation explicitly stores a persistent copy that can be reused later. + +## Inputs + +- `image`: The image to save. + +## Outputs + +- `image`: The saved image entry passed downstream. + +## Example Usage + +### Persist a result + +Use `Save Image` when you want to persist the current image state before further modifying it. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Save a copy of the current image for later use. + +## Notes + +- The node always writes a new image record to the image store. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/show_image.md b/invokeai/resources/node_docs/en/show_image.md new file mode 100644 index 00000000000..08b9fec98ee --- /dev/null +++ b/invokeai/resources/node_docs/en/show_image.md @@ -0,0 +1,18 @@ +# Show Image + +Displays a provided image using your operating system's image viewer and forwards the image through the pipeline unchanged. 
Use this node when you want to quickly inspect an image while building or debugging a workflow. + +## Inputs + +- `image`: The image to show. This is typically an image produced earlier in the pipeline or a loaded image resource. + +## Outputs + +- `image`: The same image, passed through so downstream nodes can continue processing it. + +--- + +## Notes + +- The node launches the system image viewer; behavior depends on the host OS. +- This uses python `PIL.Image.show()`, which opens a viewer on the host system, not within the InvokeAI UI. If you are running InvokeAI on a remote server, the image will open on the server's display instead of your local machine. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/tomask.md b/invokeai/resources/node_docs/en/tomask.md new file mode 100644 index 00000000000..d4cad7b2357 --- /dev/null +++ b/invokeai/resources/node_docs/en/tomask.md @@ -0,0 +1,25 @@ +# Mask from Alpha + +Extracts the alpha channel from an image and returns it as a grayscale mask. Optionally inverts the mask. + +## Inputs + +- `image`: The image containing an alpha channel to extract. +- `invert`: If `true`, the extracted alpha mask will be inverted. + +## Outputs + +- `image`: A grayscale mask image (white/black) representing the alpha channel. + +## Example Usage + +### Create an edit mask + +Use `Mask from Alpha` to turn an image's transparency into a mask you can use for inpainting or compositing. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Extract alpha to create a mask for selective edits. + +## Notes + +- The output is saved as a mask category image for use in inpainting and other mask-aware nodes. 
\ No newline at end of file diff --git a/invokeai/resources/node_docs/en/unsharp_mask.md b/invokeai/resources/node_docs/en/unsharp_mask.md new file mode 100644 index 00000000000..743dfac427d --- /dev/null +++ b/invokeai/resources/node_docs/en/unsharp_mask.md @@ -0,0 +1,26 @@ +# Unsharp Mask + +Applies an unsharp mask filter to enhance perceived sharpness by boosting high-frequency components. Preserves alpha channels when present. + +## Inputs + +- `image`: The image to sharpen. +- `radius`: Radius of the Gaussian blur used to create the unsharp mask (default `2`). +- `strength`: Strength of the effect as a percentage (default `50`). + +## Outputs + +- `image`: The sharpened image. + +## Example Usage + +### Increase clarity + +Use `Unsharp Mask` to bring out edge detail and improve perceived sharpness after upscaling or denoising. + +![IMAGE_PLACEHOLDER](./images/IMAGE_PLACEHOLDER.png) +Sharpen fine details while preserving transparency. + +## Notes + +- The node handles images with alpha by temporarily working in RGB and restoring the original alpha channel. \ No newline at end of file diff --git a/invokeai/resources/node_docs/en/z_image_denoise.md b/invokeai/resources/node_docs/en/z_image_denoise.md new file mode 100644 index 00000000000..12a6a6d4238 --- /dev/null +++ b/invokeai/resources/node_docs/en/z_image_denoise.md @@ -0,0 +1,17 @@ +# Z Image Denoise + +The Z Image Denoise node performs diffusion-based denoising on a latent image using the Z-Image model architecture. It can be used in either Text-to-Image or Image-to-Image workflows. + +## Inputs + +_TODO: Input documentation for this node has not been written yet._
 + +## Example Usage + +### Text-to-Image + +![Z Image Denoise Text-to-Image Example](./images/z_image_example_t2i.jpg) + +## Notes: + +- For Z-Image Turbo models, the intended Guidance Scale (CFG) is 1.0, which disables the negative prompt influence and doubles the generation speed. diff --git a/pyproject.toml b/pyproject.toml index adfe5982baf..710cdae2367 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -183,6 +183,7 @@ version = { attr = "invokeai.version.__version__" } "invokeai.configs*", "invokeai.app*", "invokeai.invocation_api*", + "invokeai.resources*", ] [tool.setuptools.package-data] @@ -198,6 +199,7 @@ version = { attr = "invokeai.version.__version__" } "invokeai.frontend.web.dist" = ["**"] "invokeai.frontend.web.static" = ["**"] "invokeai.app.invocations" = ["**"] +"invokeai.resources" = ["node_docs/**"] #=== Begin: PyTest and Coverage [tool.pytest.ini_options]