diff --git a/.gitignore b/.gitignore
index 09734267ff5..ecc1f38edc9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ __pycache__
/SwinIR/*
/repositories
/venv
+/venv310
/tmp
/model.ckpt
/models/**/*
@@ -37,3 +38,5 @@ notification.mp3
/node_modules
/package-lock.json
/.coverage*
+/output.log
+/webui-user.sh
\ No newline at end of file
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index ef23968c563..7945d9fab14 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -32,7 +32,7 @@ def before_ui():
script_callbacks.on_infotext_pasted(networks.infotext_pasted)
-shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+shared.options_templates.update(shared.options_section((None, "Extra Networks"), {
"sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
"lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
@@ -42,7 +42,7 @@ def before_ui():
}))
-shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
+shared.options_templates.update(shared.options_section((None, "Compatibility"), {
"lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}))
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 2d8d2d1c014..a90d374002e 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -1,7 +1,7 @@
import gradio as gr
from modules import shared
-shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
+shared.options_templates.update(shared.options_section((None, "Canvas Hotkeys"), {
"canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index 983f87ff033..27b38991317 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -64,7 +64,7 @@ def before_process(self, p, *args):
p.override_settings[name] = value
-shared.options_templates.update(shared.options_section(('ui', "User interface"), {
+shared.options_templates.update(shared.options_section((None, "User interface"), {
"extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
"extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
"extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
diff --git a/modules/api/api.py b/modules/api/api.py
index e6edffe7144..e51b6a77563 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -546,7 +546,9 @@ def unloadapi(self):
return {}
def reloadapi(self):
+ print("start reload api")
reload_model_weights()
+ print("end reload api")
return {}
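
Note: reloadapi() above only brackets reload_model_weights() with two trace prints. A minimal client-side sketch for exercising it, assuming the route keeps its upstream name /sdapi/v1/reload-checkpoint and the server listens on the default local port:

import requests

def reload_checkpoint(base_url="http://127.0.0.1:7860"):
    # Empty-body POST; the handler above returns {} once the weights are reloaded.
    resp = requests.post(f"{base_url}/sdapi/v1/reload-checkpoint", timeout=600)
    resp.raise_for_status()
    return resp.json()

print(reload_checkpoint())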
diff --git a/modules/hashes.py b/modules/hashes.py
index 01b0865ea74..4527dd1114c 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -30,6 +30,14 @@ def calculate_remote_sha256(filename):
return hash_sha256.hexdigest()
+# def calculate_remote_sha256(filename):
+# blksize = 1024 * 1024
+
+# buf = read_remote_model(filename, start = 0, size=blksize)
+# hash_object = hashlib.sha256(buf)
+
+# return hash_object.hexdigest()
+
def sha256_from_cache(filename, title, use_addnet_hash=False, remote_model = False):
hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
ondisk_mtime = os.path.getmtime(filename) if not remote_model else get_remote_model_mmtime(filename)
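
Note: the commented-out variant above hashes only the first 1 MiB block, so its digest would not match a full-file SHA-256. A hedged sketch of a full streaming remote hash, built on the read_remote_model(filename, start, size) helper from sd_remote_models, under the assumption that a range read past end-of-object yields an empty buffer:

import hashlib

from modules.sd_remote_models import read_remote_model

def calculate_remote_sha256_full(filename, blksize=1024 * 1024):
    # Stream the remote object range by range and fold it into one digest.
    hash_sha256 = hashlib.sha256()
    start = 0
    while True:
        buf = read_remote_model(filename, start=start, size=blksize).read()
        if not buf:  # assumption: an out-of-range read returns an empty buffer
            break
        hash_sha256.update(buf)
        start += len(buf)
    return hash_sha256.hexdigest()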
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
index 6b15feeb86b..2b80cef71b6 100644
--- a/modules/initialize_util.py
+++ b/modules/initialize_util.py
@@ -168,7 +168,7 @@ def configure_opts_onchange():
from modules.call_queue import wrap_queued_call
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
- shared.opts.onchange("load_remote_ckpt", wrap_queued_call(lambda: (sd_models.list_models(),sd_models.load_model())), call=False)
+ # shared.opts.onchange("load_remote_ckpt", wrap_queued_call(lambda: (sd_models.list_models(),sd_models.load_model())), call=False)
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 6e54d06367c..ab21a419ff3 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -351,7 +351,11 @@ def prepare_environment():
if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
startup_timer.record("install torch")
-
+
+ if not is_installed("cython"):
+ run_pip("install cython", "cython")
+ startup_timer.record("install cython")
+
if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
raise RuntimeError(
'Torch is not able to use GPU; '
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 4211131dfa1..5ce5a3257c9 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -76,7 +76,7 @@ def read_metadata():
self.metadata = cache.cached_data_for_file('safetensors-metadata', "checkpoint/" + name + ('[remote]' if self.remote_model else '' ), filename, read_metadata, remote_model)
except Exception as e:
errors.display(e, f"reading metadata for {filename}")
-
+ print("CheckpointInfo start")
self.name = name
self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
@@ -84,13 +84,18 @@ def read_metadata():
self.sha256 = hashes.sha256_from_cache(self.filename, f"checkpoint/{name + ('[remote]' if self.remote_model else '' )}", remote_model=remote_model)
self.shorthash = self.sha256[0:10] if self.sha256 else None
-
+ print("sha256: %s" % self.sha256)
+ print("shorthash: %s" % self.shorthash)
+
self.title = name + ('[remote]' if self.remote_model else '' )+ ('' if self.shorthash is None else f'[{self.shorthash}]')
self.short_title = self.name_for_extra + ('[remote]' if self.remote_model else '') + ('' if self.shorthash is None else f'[{self.shorthash}]')
+ print("title: %s" % self.title)
+ print("short_title: %s" % self.short_title)
self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
if self.shorthash:
self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
+ print("CheckpointInfo end")
def register(self):
checkpoints_list[self.title] = self
@@ -144,32 +149,34 @@ def list_models():
checkpoints_list.clear()
checkpoint_aliases.clear()
- if not shared.opts.load_remote_ckpt:
- cmd_ckpt = shared.cmd_opts.ckpt
- if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
- model_url = None
- else:
- model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
+ # if not shared.opts.load_remote_ckpt:
+ # cmd_ckpt = shared.cmd_opts.ckpt
+ # if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
+ # model_url = None
+ # else:
+ # model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
- model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
+ # model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
- if os.path.exists(cmd_ckpt):
- checkpoint_info = CheckpointInfo(cmd_ckpt)
- checkpoint_info.register()
+ # if os.path.exists(cmd_ckpt):
+ # checkpoint_info = CheckpointInfo(cmd_ckpt)
+ # checkpoint_info.register()
- shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
- elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
- print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
+ # shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
+ # elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
+ # print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
- for filename in model_list:
- checkpoint_info = CheckpointInfo(filename)
- checkpoint_info.register()
+ # for filename in model_list:
+ # checkpoint_info = CheckpointInfo(filename)
+ # checkpoint_info.register()
- else:
- remote_models = list_remote_models(ext_filter=[".ckpt", ".safetensors"])
- for filename in remote_models:
- checkpoint_info = CheckpointInfo(filename, remote_model=True)
- checkpoint_info.register()
+ # else:
+ remote_models = list_remote_models(ext_filter=[".ckpt", ".safetensors"])
+ for filename in remote_models:
+ checkpoint_info = CheckpointInfo(filename, remote_model=True)
+ checkpoint_info.register()
+ print ("list_model: %s " % filename)
+
re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
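
Note: with the local-scan branch commented out, list_models() now registers only remote checkpoints. A quick sketch for inspecting the result; checkpoints_list is the module-level registry that CheckpointInfo.register() fills:

from modules import sd_models

sd_models.list_models()
for title, info in sd_models.checkpoints_list.items():
    # Remote entries carry the "[remote]" marker in the title and, once
    # hashed, a ten-character shorthash suffix.
    print(title, info.filename, info.shorthash)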
diff --git a/modules/sd_remote_models.py b/modules/sd_remote_models.py
index 18147302e1c..673bacf8b10 100644
--- a/modules/sd_remote_models.py
+++ b/modules/sd_remote_models.py
@@ -4,12 +4,18 @@
import threading
from io import BytesIO
from modules import shared
+from osstorchconnector import OssCheckpoint
+import torch
-
+def __check_bucket_opts():
+ if os.environ.get('BUCKET_NAME') and os.environ.get('BUCKET_ENDPOINT'):
+ return True
+ print("Bucket opts not specified.")
+ return False
def __bucket__():
auth = oss2.Auth(os.environ.get('ACCESS_KEY_ID'), os.environ.get('ACCESS_KEY_SECRET'))
- return oss2.Bucket(auth, shared.opts.bucket_endpoint, shared.opts.bucket_name, enable_crc=False)
+ return oss2.Bucket(auth, os.environ.get('BUCKET_ENDPOINT'), os.environ.get('BUCKET_NAME'), enable_crc=False)
def __get_object_size(object_name):
simplifiedmeta = __bucket__().get_object_meta(object_name)
@@ -19,8 +25,10 @@ def get_remote_model_mmtime(model_name):
return __bucket__().head_object(model_name).last_modified
def list_remote_models(ext_filter):
- dir = shared.opts.bucket_model_ckpt_dir if shared.opts.bucket_model_ckpt_dir.endswith('/') else shared.opts.bucket_model_ckpt_dir + '/'
+ if not __check_bucket_opts():
+ return []
output = []
+ dir = os.environ.get('BUCKET_MODEL_DIR', '') if os.environ.get('BUCKET_MODEL_DIR', '').endswith('/') else os.environ.get('BUCKET_MODEL_DIR', '') + '/'
for obj in oss2.ObjectIteratorV2(__bucket__(), prefix = dir, delimiter = '/', start_after=dir, fetch_owner=False):
if obj.is_prefix():
print('directory: ', obj.key)
@@ -69,6 +77,21 @@ def read_remote_model(checkpoint_file, start=0, size=-1):
buffer.seek(0)
return buffer
+
+
+def load_remote_model_ckpt(checkpoint_file, map_location) -> dict:
+ if not __check_bucket_opts():
+ return {}
+
+ checkpoint = OssCheckpoint(endpoint=os.environ.get('BUCKET_ENDPOINT'))
+ CHECKPOINT_URI = "oss://%s/%s" % (os.environ.get('BUCKET_NAME'), checkpoint_file)
+ print("load %s state.." % CHECKPOINT_URI)
+ state_dict = None
+ with checkpoint.reader(CHECKPOINT_URI) as reader:
+ state_dict = torch.load(reader, map_location=map_location, weights_only=True)
+ print("type:", type(state_dict))
+ return state_dict
+
def __range_get(object_name, buffer, offset, start, end, read_chunk_size):
chunk_size = int(read_chunk_size)
with __bucket__().get_object(object_name, byte_range=(start, end))as object_stream:
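
Note: load_remote_model_ckpt() streams the checkpoint straight from OSS via osstorchconnector instead of buffering it locally. A minimal caller sketch, assuming the oss2 credentials (ACCESS_KEY_ID / ACCESS_KEY_SECRET) are already exported, the bucket values below are placeholders, and the checkpoint is a plain torch pickle so weights_only=True can deserialize it:

import os
from modules import sd_remote_models

os.environ.setdefault("BUCKET_NAME", "my-models-bucket")                  # placeholder
os.environ.setdefault("BUCKET_ENDPOINT", "oss-cn-hangzhou.aliyuncs.com")  # placeholder

state_dict = sd_remote_models.load_remote_model_ckpt(
    "models/Stable-diffusion/example-v1.ckpt",  # hypothetical object key
    map_location="cpu",
)
print(type(state_dict), len(state_dict))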
diff --git a/modules/shared_options.py b/modules/shared_options.py
index 83eee599aed..0779f22877a 100644
--- a/modules/shared_options.py
+++ b/modules/shared_options.py
@@ -21,7 +21,7 @@
"outdir_init_images"
}
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+options_templates.update(options_section((None, "Saving images/grids"), {
"samples_save": OptionInfo(True, "Always save all generated images"),
"samples_format": OptionInfo('png', 'File format for images'),
"samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
@@ -64,7 +64,7 @@
"save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."),
}))
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+options_templates.update(options_section((None, "Paths for saving"), {
"outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
"outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
"outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
@@ -76,7 +76,7 @@
"outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
}))
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+options_templates.update(options_section((None, "Saving to a directory"), {
"save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
@@ -84,21 +84,21 @@
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))
-options_templates.update(options_section(('upscaling', "Upscaling"), {
+options_templates.update(options_section((None, "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
}))
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
+options_templates.update(options_section((None, "Face restoration"), {
"face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
"face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))
-options_templates.update(options_section(('system', "System"), {
+options_templates.update(options_section((None, "System"), {
"auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
"show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
"show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
@@ -111,13 +111,13 @@
"hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
}))
-options_templates.update(options_section(('API', "API"), {
+options_templates.update(options_section((None, "API"), {
"api_enable_requests": OptionInfo(True, "Allow http:// and https:// URLs for input images in API", restrict_api=True),
"api_forbid_local_requests": OptionInfo(True, "Forbid URLs to local resources", restrict_api=True),
"api_useragent": OptionInfo("", "User agent for requests", restrict_api=True),
}))
-options_templates.update(options_section(('training', "Training"), {
+options_templates.update(options_section((None, "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
@@ -132,7 +132,7 @@
"training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
+options_templates.update(options_section((None, "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
"sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
"sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
@@ -149,14 +149,14 @@
"hires_fix_refiner_pass": OptionInfo("second pass", "Hires fix: which pass to enable refiner for", gr.Radio, {"choices": ["first pass", "second pass", "both passes"]}, infotext="Hires refiner"),
}))
-options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
+options_templates.update(options_section((None, "Stable Diffusion XL"), {
"sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
"sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
"sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
"sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
}))
-options_templates.update(options_section(('vae', "VAE"), {
+options_templates.update(options_section((None, "VAE"), {
"sd_vae_explanation": OptionHTML("""
VAE is a neural network that transforms a standard RGB
image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling
@@ -171,7 +171,7 @@
"sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
}))
-options_templates.update(options_section(('img2img', "img2img"), {
+options_templates.update(options_section((None, "img2img"), {
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.0, "maximum": 1.5, "step": 0.001}, infotext='Noise multiplier'),
"img2img_extra_noise": OptionInfo(0.0, "Extra noise multiplier for img2img and hires fix", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Extra noise').info("0 = disabled (default); should be lower than denoising strength"),
@@ -186,7 +186,7 @@
"return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
}))
-options_templates.update(options_section(('optimizations', "Optimizations"), {
+options_templates.update(options_section((None, "Optimizations"), {
"cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
"s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
@@ -197,7 +197,7 @@
"batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
}))
-options_templates.update(options_section(('compatibility', "Compatibility"), {
+options_templates.update(options_section((None, "Compatibility"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
"no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
@@ -207,7 +207,7 @@
"use_old_scheduling": OptionInfo(False, "Use old prompt editing timelines.", infotext="Old prompt editing timelines").info("For [red:green:N]; old: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), othewrwise it's an absolute number of steps"),
}))
-options_templates.update(options_section(('interrogate', "Interrogate"), {
+options_templates.update(options_section((None, "Interrogate"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
"interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
"interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
@@ -222,7 +222,7 @@
"deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
}))
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+options_templates.update(options_section((None, "Extra Networks"), {
"extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
"extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
"extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
@@ -237,7 +237,8 @@
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
}))
-options_templates.update(options_section(('ui', "User interface"), {
+import os
+options_templates.update(options_section((None, "User interface"), {
"localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
"gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the gallery.").needs_reload_ui(),
"gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
@@ -264,14 +265,14 @@
"hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
"hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
"disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
- "load_remote_ckpt": OptionInfo(False, "Load ckpt models from remote object storage").needs_reload_ui(),
- 'bucket_name': OptionInfo("", "Bucket name to download ckpt model"),
- 'bucket_endpoint': OptionInfo("", "Bucket endpoint to download ckpt model"),
- 'bucket_model_ckpt_dir': OptionInfo("", "Ckpt model directory in bucket"),
+ # "load_remote_ckpt": OptionInfo(True, "Load ckpt models from remote object storage").needs_reload_ui(),
+ # 'bucket_name': OptionInfo(os.environ.get('BUCKET_NAME'), "Bucket name to download ckpt model"),
+ # 'bucket_endpoint': OptionInfo(os.environ.get('BUCKET_ENDPOINT'), "Bucket endpoint to download ckpt model"),
+ # 'bucket_model_ckpt_dir': OptionInfo(os.environ.get('BUCKET_MODEL_DIR'), "Ckpt model directory in bucket"),
}))
-options_templates.update(options_section(('infotext', "Infotext"), {
+options_templates.update(options_section((None, "Infotext"), {
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
"add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
@@ -286,7 +287,7 @@
}))
-options_templates.update(options_section(('ui', "Live previews"), {
+options_templates.update(options_section((None, "Live previews"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
"live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
@@ -299,7 +300,7 @@
"live_preview_fast_interrupt": OptionInfo(False, "Return image with chosen live preview method on interrupt").info("makes interrupts faster"),
}))
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
+options_templates.update(options_section((None, "Sampler parameters"), {
"hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
"eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unpredictable results"),
"eta_ancestral": OptionInfo(1.0, "Eta for k-diffusion samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; currently only applies to ancestral samplers (i.e. Euler a) and SDE samplers"),
@@ -321,7 +322,7 @@
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
}))
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+options_templates.update(options_section((None, "Postprocessing"), {
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
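
Note: this change moves the bucket configuration from UI options to environment variables. A sketch of the variables the code now reads; the names come from the diff (modules/sd_remote_models.py), the values are placeholders:

import os

os.environ["BUCKET_NAME"] = "my-models-bucket"
os.environ["BUCKET_ENDPOINT"] = "oss-cn-hangzhou.aliyuncs.com"
os.environ["BUCKET_MODEL_DIR"] = "models/Stable-diffusion/"
os.environ["ACCESS_KEY_ID"] = "xxxx"      # oss2 credential
os.environ["ACCESS_KEY_SECRET"] = "xxxx"  # oss2 credential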
diff --git a/modules/ui.py b/modules/ui.py
index 579bab9800c..747d2d810c7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -540,705 +540,705 @@ def create_ui():
extra_tabs.__exit__()
- scripts.scripts_current = scripts.scripts_img2img
- scripts.scripts_img2img.initialize_scripts(is_img2img=True)
-
- with gr.Blocks(analytics_enabled=False) as img2img_interface:
- toprow = Toprow(is_img2img=True)
-
- extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
- extra_tabs.__enter__()
-
- with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, ResizeHandleRow(equal_height=False):
- with gr.Column(variant='compact', elem_id="img2img_settings"):
- copy_image_buttons = []
- copy_image_destinations = {}
-
- def add_copy_image_controls(tab_name, elem):
- with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"):
- gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}")
-
- for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']):
- if name == tab_name:
- gr.Button(title, interactive=False)
- copy_image_destinations[name] = elem
- continue
-
- button = gr.Button(title)
- copy_image_buttons.append((button, name, elem))
-
- with gr.Tabs(elem_id="mode_img2img"):
- img2img_selected_tab = gr.State(0)
-
- with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height)
- add_copy_image_controls('img2img', init_img)
-
- with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
- add_copy_image_controls('sketch', sketch)
-
- with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
- add_copy_image_controls('inpaint', init_img_with_mask)
-
- with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
- inpaint_color_sketch_orig = gr.State(None)
- add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
-
- def update_orig(image, state):
- if image is not None:
- same_size = state is not None and state.size == image.size
- has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
- edited = same_size and has_exact_match
- return image if not edited or state is None else state
-
- inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
-
- with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
- init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")
-
- with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
- hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(
- "<br>Process images in a directory on the same machine where the server is running." +
- "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
- f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
- f"{hidden}</p>"
- )
+ # gr.HTML(
+ # "<br>Process images in a directory on the same machine where the server is running." +
+ # "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+ # f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+ # f"{hidden}</p>"
+ # )
- gr.HTML(value="See wiki for detailed explanation.")
-
- with gr.Row(variant="compact", equal_height=False):
- with gr.Tabs(elem_id="train_tabs"):
-
- with gr.Tab(label="Create embedding", id="create_embedding"):
- new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
- initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
- nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
- overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
-
- with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"):
- new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
- new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
- new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func")
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
- new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
- new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
- new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
- overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
-
- with gr.Tab(label="Preprocess images", id="preprocess_images"):
- process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
- process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
-
- with gr.Row():
- process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
- process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
- process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
- process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
- process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
- process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
-
- with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
-
- with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
- process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
- with gr.Column(visible=False) as process_multicrop_col:
- gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
- with gr.Row():
- process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
- process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
- with gr.Row():
- process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
- process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
- with gr.Row():
- process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
- process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
- run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
-
- process_split.change(
- fn=lambda show: gr_show(show),
- inputs=[process_split],
- outputs=[process_split_extra_row],
- )
-
- process_focal_crop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_focal_crop],
- outputs=[process_focal_crop_row],
- )
-
- process_multicrop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_multicrop],
- outputs=[process_multicrop_col],
- )
-
- def get_textual_inversion_template_names():
- return sorted(textual_inversion.textual_inversion_templates)
-
- with gr.Tab(label="Train", id="train"):
- gr.HTML(value="Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]")
- with FormRow():
- train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
- create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
-
- train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
- create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
-
- with FormRow():
- embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
- hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
-
- with FormRow():
- clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
- clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
-
- with FormRow():
- batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
- gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
-
- dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
- log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
-
- with FormRow():
- template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
- create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file")
-
- training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
- training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
- varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
- steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
-
- with FormRow():
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
-
- use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
-
- save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
- preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
-
- shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
- tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
-
- latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
-
- with gr.Row():
- train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
- interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
- train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
-
- params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
-
- script_callbacks.ui_train_tabs_callback(params)
-
- with gr.Column(elem_id='ti_gallery_container'):
- ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
- gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4)
- gr.HTML(elem_id="ti_progress", value="")
- ti_outcome = gr.HTML(elem_id="ti_error", value="")
-
- create_embedding.click(
- fn=textual_inversion_ui.create_embedding,
- inputs=[
- new_embedding_name,
- initialization_text,
- nvpt,
- overwrite_old_embedding,
- ],
- outputs=[
- train_embedding_name,
- ti_output,
- ti_outcome,
- ]
- )
-
- create_hypernetwork.click(
- fn=hypernetworks_ui.create_hypernetwork,
- inputs=[
- new_hypernetwork_name,
- new_hypernetwork_sizes,
- overwrite_old_hypernetwork,
- new_hypernetwork_layer_structure,
- new_hypernetwork_activation_func,
- new_hypernetwork_initialization_option,
- new_hypernetwork_add_layer_norm,
- new_hypernetwork_use_dropout,
- new_hypernetwork_dropout_structure
- ],
- outputs=[
- train_hypernetwork_name,
- ti_output,
- ti_outcome,
- ]
- )
-
- run_preprocess.click(
- fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- process_src,
- process_dst,
- process_width,
- process_height,
- preprocess_txt_action,
- process_keep_original_size,
- process_flip,
- process_split,
- process_caption,
- process_caption_deepbooru,
- process_split_threshold,
- process_overlap_ratio,
- process_focal_crop,
- process_focal_crop_face_weight,
- process_focal_crop_entropy_weight,
- process_focal_crop_edges_weight,
- process_focal_crop_debug,
- process_multicrop,
- process_multicrop_mindim,
- process_multicrop_maxdim,
- process_multicrop_minarea,
- process_multicrop_maxarea,
- process_multicrop_objective,
- process_multicrop_threshold,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ],
- )
-
- train_embedding.click(
- fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- train_embedding_name,
- embedding_learn_rate,
- batch_size,
- gradient_step,
- dataset_directory,
- log_directory,
- training_width,
- training_height,
- varsize,
- steps,
- clip_grad_mode,
- clip_grad_value,
- shuffle_tags,
- tag_drop_out,
- latent_sampling_method,
- use_weight,
- create_image_every,
- save_embedding_every,
- template_file,
- save_image_with_stored_embedding,
- preview_from_txt2img,
- *txt2img_preview_params,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ]
- )
-
- train_hypernetwork.click(
- fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- train_hypernetwork_name,
- hypernetwork_learn_rate,
- batch_size,
- gradient_step,
- dataset_directory,
- log_directory,
- training_width,
- training_height,
- varsize,
- steps,
- clip_grad_mode,
- clip_grad_value,
- shuffle_tags,
- tag_drop_out,
- latent_sampling_method,
- use_weight,
- create_image_every,
- save_embedding_every,
- template_file,
- preview_from_txt2img,
- *txt2img_preview_params,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ]
- )
-
- interrupt_training.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- interrupt_preprocessing.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
+ # with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
+ # with gr.Row(equal_height=False):
+ # with gr.Column(variant='panel'):
+ # image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
+
+ # with gr.Column(variant='panel'):
+ # html = gr.HTML()
+ # generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
+ # html2 = gr.HTML()
+ # with gr.Row():
+ # buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
+
+ # for tabname, button in buttons.items():
+ # parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+ # paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
+ # ))
+
+ # image.change(
+ # fn=wrap_gradio_call(modules.extras.run_pnginfo),
+ # inputs=[image],
+ # outputs=[html, generation_info, html2],
+ # )
+
+ # modelmerger_ui = ui_checkpoint_merger.UiCheckpointMerger()
+
+ # with gr.Blocks(analytics_enabled=False) as train_interface:
+ # with gr.Row(equal_height=False):
+ # gr.HTML(value="See wiki for detailed explanation.")
+
+ # with gr.Row(variant="compact", equal_height=False):
+ # with gr.Tabs(elem_id="train_tabs"):
+
+ # with gr.Tab(label="Create embedding", id="create_embedding"):
+ # new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
+ # initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
+ # nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
+ # overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
+
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # gr.HTML(value="")
+
+ # with gr.Column():
+ # create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
+
+ # with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"):
+ # new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
+ # new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
+ # new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
+ # new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=hypernetworks_ui.keys, elem_id="train_new_hypernetwork_activation_func")
+ # new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
+ # new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
+ # new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
+ # new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
+ # overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
+
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # gr.HTML(value="")
+
+ # with gr.Column():
+ # create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
+
+ # with gr.Tab(label="Preprocess images", id="preprocess_images"):
+ # process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
+ # process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
+ # process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
+ # process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
+ # preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
+
+ # with gr.Row():
+ # process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size")
+ # process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
+ # process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
+ # process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
+ # process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
+ # process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
+ # process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
+
+ # with gr.Row(visible=False) as process_split_extra_row:
+ # process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
+ # process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
+
+ # with gr.Row(visible=False) as process_focal_crop_row:
+ # process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
+ # process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
+ # process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
+ # process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
+
+ # with gr.Column(visible=False) as process_multicrop_col:
+ # gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+ # with gr.Row():
+ # process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
+ # process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
+ # with gr.Row():
+ # process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
+ # process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
+ # with gr.Row():
+ # process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
+ # process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
+
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # gr.HTML(value="")
+
+ # with gr.Column():
+ # with gr.Row():
+ # interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
+ # run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
+
+ # process_split.change(
+ # fn=lambda show: gr_show(show),
+ # inputs=[process_split],
+ # outputs=[process_split_extra_row],
+ # )
+
+ # process_focal_crop.change(
+ # fn=lambda show: gr_show(show),
+ # inputs=[process_focal_crop],
+ # outputs=[process_focal_crop_row],
+ # )
+
+ # process_multicrop.change(
+ # fn=lambda show: gr_show(show),
+ # inputs=[process_multicrop],
+ # outputs=[process_multicrop_col],
+ # )
+
+ # def get_textual_inversion_template_names():
+ # return sorted(textual_inversion.textual_inversion_templates)
+
+ # with gr.Tab(label="Train", id="train"):
+ # gr.HTML(value="Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]")
+ # with FormRow():
+ # train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+ # create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
+
+ # train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
+ # create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")
+
+ # with FormRow():
+ # embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
+ # hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
+
+ # with FormRow():
+ # clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+ # clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
+
+ # with FormRow():
+ # batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+ # gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+
+ # dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
+ # log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
+
+ # with FormRow():
+ # template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
+ # create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refrsh_train_template_file")
+
+ # training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+ # training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+ # varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
+ # steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+
+ # with FormRow():
+ # create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+ # save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+ # use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
+
+ # save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+ # preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") + + # shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + # tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + # latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + # with gr.Row(): + # train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") + # interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + # train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + + # params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + # script_callbacks.ui_train_tabs_callback(params) + + # with gr.Column(elem_id='ti_gallery_container'): + # ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + # gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery', columns=4) + # gr.HTML(elem_id="ti_progress", value="") + # ti_outcome = gr.HTML(elem_id="ti_error", value="") + + # create_embedding.click( + # fn=textual_inversion_ui.create_embedding, + # inputs=[ + # new_embedding_name, + # initialization_text, + # nvpt, + # overwrite_old_embedding, + # ], + # outputs=[ + # train_embedding_name, + # ti_output, + # ti_outcome, + # ] + # ) + + # create_hypernetwork.click( + # fn=hypernetworks_ui.create_hypernetwork, + # inputs=[ + # new_hypernetwork_name, + # new_hypernetwork_sizes, + # overwrite_old_hypernetwork, + # new_hypernetwork_layer_structure, + # new_hypernetwork_activation_func, + # new_hypernetwork_initialization_option, + # new_hypernetwork_add_layer_norm, + # new_hypernetwork_use_dropout, + # new_hypernetwork_dropout_structure + # ], + # outputs=[ + # train_hypernetwork_name, + # ti_output, + # ti_outcome, + # ] + # ) + + # run_preprocess.click( + # fn=wrap_gradio_gpu_call(textual_inversion_ui.preprocess, extra_outputs=[gr.update()]), + # _js="start_training_textual_inversion", + # inputs=[ + # dummy_component, + # process_src, + # process_dst, + # process_width, + # process_height, + # preprocess_txt_action, + # process_keep_original_size, + # process_flip, + # process_split, + # process_caption, + # process_caption_deepbooru, + # process_split_threshold, + # process_overlap_ratio, + # process_focal_crop, + # process_focal_crop_face_weight, + # process_focal_crop_entropy_weight, + # process_focal_crop_edges_weight, + # process_focal_crop_debug, + # process_multicrop, + # process_multicrop_mindim, + # process_multicrop_maxdim, + # process_multicrop_minarea, + # process_multicrop_maxarea, + # process_multicrop_objective, + # process_multicrop_threshold, + # ], + # outputs=[ + # ti_output, + # ti_outcome, + # ], + # ) + + # train_embedding.click( + # fn=wrap_gradio_gpu_call(textual_inversion_ui.train_embedding, extra_outputs=[gr.update()]), + # _js="start_training_textual_inversion", + # inputs=[ + # dummy_component, + # train_embedding_name, + # embedding_learn_rate, + # batch_size, + # gradient_step, + # dataset_directory, + # log_directory, + # training_width, + # training_height, + # varsize, + # steps, + # clip_grad_mode, + # clip_grad_value, + # shuffle_tags, + # tag_drop_out, + # latent_sampling_method, + # use_weight, + # create_image_every, + # 
save_embedding_every, + # template_file, + # save_image_with_stored_embedding, + # preview_from_txt2img, + # *txt2img_preview_params, + # ], + # outputs=[ + # ti_output, + # ti_outcome, + # ] + # ) + + # train_hypernetwork.click( + # fn=wrap_gradio_gpu_call(hypernetworks_ui.train_hypernetwork, extra_outputs=[gr.update()]), + # _js="start_training_textual_inversion", + # inputs=[ + # dummy_component, + # train_hypernetwork_name, + # hypernetwork_learn_rate, + # batch_size, + # gradient_step, + # dataset_directory, + # log_directory, + # training_width, + # training_height, + # varsize, + # steps, + # clip_grad_mode, + # clip_grad_value, + # shuffle_tags, + # tag_drop_out, + # latent_sampling_method, + # use_weight, + # create_image_every, + # save_embedding_every, + # template_file, + # preview_from_txt2img, + # *txt2img_preview_params, + # ], + # outputs=[ + # ti_output, + # ti_outcome, + # ] + # ) + + # interrupt_training.click( + # fn=lambda: shared.state.interrupt(), + # inputs=[], + # outputs=[], + # ) + + # interrupt_preprocessing.click( + # fn=lambda: shared.state.interrupt(), + # inputs=[], + # outputs=[], + # ) loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) @@ -1247,18 +1247,18 @@ def get_textual_inversion_template_names(): interfaces = [ (txt2img_interface, "txt2img", "txt2img"), - (img2img_interface, "img2img", "img2img"), - (extras_interface, "Extras", "extras"), - (pnginfo_interface, "PNG Info", "pnginfo"), - (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "train"), + # (img2img_interface, "img2img", "img2img"), + # (extras_interface, "Extras", "extras"), + # (pnginfo_interface, "PNG Info", "pnginfo"), + # (modelmerger_ui.blocks, "Checkpoint Merger", "modelmerger"), + # (train_interface, "Train", "train"), ] interfaces += script_callbacks.ui_tabs_callback() interfaces += [(settings.interface, "Settings", "settings")] - extensions_interface = ui_extensions.create_ui() - interfaces += [(extensions_interface, "Extensions", "extensions")] + # extensions_interface = ui_extensions.create_ui() + # interfaces += [(extensions_interface, "Extensions", "extensions")] shared.tab_names = [] for _interface, label, _ifid in interfaces: @@ -1267,7 +1267,7 @@ def get_textual_inversion_template_names(): with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: settings.add_quicksettings() - parameters_copypaste.connect_paste_params_buttons() + # parameters_copypaste.connect_paste_params_buttons() with gr.Tabs(elem_id="tabs") as tabs: tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} @@ -1279,12 +1279,12 @@ def get_textual_inversion_template_names(): with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() - if ifid not in ["extensions", "settings"]: - loadsave.add_block(interface, ifid) + # if ifid not in ["extensions", "settings"]: + # loadsave.add_block(interface, ifid) - loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + # loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) - loadsave.setup_ui() + # loadsave.setup_ui() if os.path.exists(os.path.join(script_path, "notification.mp3")): gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) @@ -1295,11 +1295,11 @@ def get_textual_inversion_template_names(): settings.add_functionality(demo) - update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") - 
settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + # update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + # settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + # demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) - modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint']) + # modelmerger_ui.setup_ui(dummy_component=dummy_component, sd_model_checkpoint_component=settings.component_dict['sd_model_checkpoint']) loadsave.dump_defaults() demo.ui_loadsave = loadsave diff --git a/modules/ui_settings.py b/modules/ui_settings.py index 8ff9c074718..21c5ffaa3a0 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -123,7 +123,7 @@ def create_ui(self, loadsave, dummy_component): current_row = None with gr.Tabs(elem_id="settings"): for i, (k, item) in enumerate(opts.data_labels.items()): - section_must_be_skipped = item.section[0] is None + section_must_be_skipped = item.section[1] != "Licenses" if previous_section != item.section and not section_must_be_skipped: elem_id, text = item.section @@ -154,27 +154,27 @@ def create_ui(self, loadsave, dummy_component): current_row.__exit__() current_tab.__exit__() - with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"): - loadsave.create_ui() + # with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"): + # loadsave.create_ui() - with gr.TabItem("Sysinfo", id="sysinfo", elem_id="settings_tab_sysinfo"): - gr.HTML('Download system info