From 01f531e9b162be94601b9f3a451cef9a9dd7dd53 Mon Sep 17 00:00:00 2001 From: SunChaser Date: Fri, 8 Mar 2024 17:25:28 +0800 Subject: [PATCH 01/90] fix: fix syntax errors --- modules/upscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index 3aee69db8..cc662fc9c 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -20,7 +20,7 @@ class Upscaler: filter = None model = None user_path = None - scalers: [] + scalers = [] tile = True def __init__(self, create_dirs=False): From 0411eced89688f55aec356eea0c7d29377d37bc8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 10 Mar 2024 07:52:57 +0300 Subject: [PATCH 02/90] add names to callbacks --- modules/extensions.py | 17 +++++ modules/script_callbacks.py | 126 +++++++++++++++++++++--------------- 2 files changed, 91 insertions(+), 52 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 04bda297e..ab835d3f2 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -186,6 +186,7 @@ class Extension: def list_extensions(): extensions.clear() + extension_paths.clear() if shared.cmd_opts.disable_all_extensions: print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") @@ -220,6 +221,7 @@ def list_extensions(): is_builtin = dirname == extensions_builtin_dir extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata) extensions.append(extension) + extension_paths[extension.path] = extension loaded_extensions[canonical_name] = extension # check for requirements @@ -238,4 +240,19 @@ def list_extensions(): continue +def find_extension(filename): + parentdir = os.path.dirname(os.path.realpath(filename)) + + while parentdir != filename: + extension = extension_paths.get(parentdir) + if extension is not None: + return extension + + filename = parentdir + parentdir = os.path.dirname(filename) + + return None + + extensions: list[Extension] = [] +extension_paths: dict[str, Extension] = {} diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 08bc52564..98952ec78 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,13 +1,14 @@ +from __future__ import annotations + import dataclasses import inspect import os -from collections import namedtuple from typing import Optional, Any from fastapi import FastAPI from gradio import Blocks -from modules import errors, timer +from modules import errors, timer, extensions def report_exception(c, job): @@ -116,7 +117,35 @@ class BeforeTokenCounterParams: is_positive: bool = True -ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) +@dataclasses.dataclass +class ScriptCallback: + script: str + callback: '' + name: str = None + + +def add_callback(callbacks, fun, *, name=None, category='unknown'): + stack = [x for x in inspect.stack() if x.filename != __file__] + filename = stack[0].filename if stack else 'unknown file' + + extension = extensions.find_extension(filename) + extension_name = extension.canonical_name if extension else 'base' + + callback_name = f"{extension_name}/{os.path.basename(filename)}/{category}" + if name is not None: + callback_name += f'/{name}' + + unique_callback_name = callback_name + for index in range(1000): + existing = any(x.name == unique_callback_name for x in callbacks) + if not existing: + break + + unique_callback_name = f'{callback_name}-{index+1}' + + 
callbacks.append(ScriptCallback(filename, fun, unique_callback_name)) + + callback_map = dict( callbacks_app_started=[], callbacks_model_loaded=[], @@ -328,13 +357,6 @@ def before_token_counter_callback(params: BeforeTokenCounterParams): report_exception(c, 'before_token_counter') -def add_callback(callbacks, fun): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if stack else 'unknown file' - - callbacks.append(ScriptCallback(filename, fun)) - - def remove_current_script_callbacks(): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if stack else 'unknown file' @@ -351,24 +373,24 @@ def remove_callbacks_for_function(callback_func): callback_list.remove(callback_to_remove) -def on_app_started(callback): +def on_app_started(callback, *, name=None): """register a function to be called when the webui started, the gradio `Block` component and fastapi `FastAPI` object are passed as the arguments""" - add_callback(callback_map['callbacks_app_started'], callback) + add_callback(callback_map['callbacks_app_started'], callback, name=name, category='app_started') -def on_before_reload(callback): +def on_before_reload(callback, *, name=None): """register a function to be called just before the server reloads.""" - add_callback(callback_map['callbacks_on_reload'], callback) + add_callback(callback_map['callbacks_on_reload'], callback, name=name, category='on_reload') -def on_model_loaded(callback): +def on_model_loaded(callback, *, name=None): """register a function to be called when the stable diffusion model is created; the model is passed as an argument; this function is also called when the script is reloaded. """ - add_callback(callback_map['callbacks_model_loaded'], callback) + add_callback(callback_map['callbacks_model_loaded'], callback, name=name, category='model_loaded') -def on_ui_tabs(callback): +def on_ui_tabs(callback, *, name=None): """register a function to be called when the UI is creating new tabs. The function must either return a None, which means no new tabs to be added, or a list, where each element is a tuple: @@ -378,71 +400,71 @@ def on_ui_tabs(callback): title is tab text displayed to user in the UI elem_id is HTML id for the tab """ - add_callback(callback_map['callbacks_ui_tabs'], callback) + add_callback(callback_map['callbacks_ui_tabs'], callback, name=name, category='ui_tabs') -def on_ui_train_tabs(callback): +def on_ui_train_tabs(callback, *, name=None): """register a function to be called when the UI is creating new tabs for the train tab. Create your new tabs with gr.Tab. """ - add_callback(callback_map['callbacks_ui_train_tabs'], callback) + add_callback(callback_map['callbacks_ui_train_tabs'], callback, name=name, category='ui_train_tabs') -def on_ui_settings(callback): +def on_ui_settings(callback, *, name=None): """register a function to be called before UI settings are populated; add your settings by using shared.opts.add_option(shared.OptionInfo(...)) """ - add_callback(callback_map['callbacks_ui_settings'], callback) + add_callback(callback_map['callbacks_ui_settings'], callback, name=name, category='ui_settings') -def on_before_image_saved(callback): +def on_before_image_saved(callback, *, name=None): """register a function to be called before an image is saved to a file. The callback is called with one argument: - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object. 
""" - add_callback(callback_map['callbacks_before_image_saved'], callback) + add_callback(callback_map['callbacks_before_image_saved'], callback, name=name, category='before_image_saved') -def on_image_saved(callback): +def on_image_saved(callback, *, name=None): """register a function to be called after an image is saved to a file. The callback is called with one argument: - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing. """ - add_callback(callback_map['callbacks_image_saved'], callback) + add_callback(callback_map['callbacks_image_saved'], callback, name=name, category='image_saved') -def on_extra_noise(callback): +def on_extra_noise(callback, *, name=None): """register a function to be called before adding extra noise in img2img or hires fix; The callback is called with one argument: - params: ExtraNoiseParams - contains noise determined by seed and latent representation of image """ - add_callback(callback_map['callbacks_extra_noise'], callback) + add_callback(callback_map['callbacks_extra_noise'], callback, name=name, category='extra_noise') -def on_cfg_denoiser(callback): +def on_cfg_denoiser(callback, *, name=None): """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. The callback is called with one argument: - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details. """ - add_callback(callback_map['callbacks_cfg_denoiser'], callback) + add_callback(callback_map['callbacks_cfg_denoiser'], callback, name=name, category='cfg_denoiser') -def on_cfg_denoised(callback): +def on_cfg_denoised(callback, *, name=None): """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. The callback is called with one argument: - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. """ - add_callback(callback_map['callbacks_cfg_denoised'], callback) + add_callback(callback_map['callbacks_cfg_denoised'], callback, name=name, category='cfg_denoised') -def on_cfg_after_cfg(callback): +def on_cfg_after_cfg(callback, *, name=None): """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations are completed. The callback is called with one argument: - params: AfterCFGCallbackParams - parameters to be passed to the script for post-processing after cfg calculation. """ - add_callback(callback_map['callbacks_cfg_after_cfg'], callback) + add_callback(callback_map['callbacks_cfg_after_cfg'], callback, name=name, category='cfg_after_cfg') -def on_before_component(callback): +def on_before_component(callback, *, name=None): """register a function to be called before a component is created. The callback is called with arguments: - component - gradio component that is about to be created. @@ -451,61 +473,61 @@ def on_before_component(callback): Use elem_id/label fields of kwargs to figure out which component it is. This can be useful to inject your own components somewhere in the middle of vanilla UI. """ - add_callback(callback_map['callbacks_before_component'], callback) + add_callback(callback_map['callbacks_before_component'], callback, name=name, category='before_component') -def on_after_component(callback): +def on_after_component(callback, *, name=None): """register a function to be called after a component is created. 
See on_before_component for more.""" - add_callback(callback_map['callbacks_after_component'], callback) + add_callback(callback_map['callbacks_after_component'], callback, name=name, category='after_component') -def on_image_grid(callback): +def on_image_grid(callback, *, name=None): """register a function to be called before making an image grid. The callback is called with one argument: - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. """ - add_callback(callback_map['callbacks_image_grid'], callback) + add_callback(callback_map['callbacks_image_grid'], callback, name=name, category='image_grid') -def on_infotext_pasted(callback): +def on_infotext_pasted(callback, *, name=None): """register a function to be called before applying an infotext. The callback is called with two arguments: - infotext: str - raw infotext. - result: dict[str, any] - parsed infotext parameters. """ - add_callback(callback_map['callbacks_infotext_pasted'], callback) + add_callback(callback_map['callbacks_infotext_pasted'], callback, name=name, category='infotext_pasted') -def on_script_unloaded(callback): +def on_script_unloaded(callback, *, name=None): """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that the script did should be reverted here""" - add_callback(callback_map['callbacks_script_unloaded'], callback) + add_callback(callback_map['callbacks_script_unloaded'], callback, name=name, category='script_unloaded') -def on_before_ui(callback): +def on_before_ui(callback, *, name=None): """register a function to be called before the UI is created.""" - add_callback(callback_map['callbacks_before_ui'], callback) + add_callback(callback_map['callbacks_before_ui'], callback, name=name, category='before_ui') -def on_list_optimizers(callback): +def on_list_optimizers(callback, *, name=None): """register a function to be called when UI is making a list of cross attention optimization options. The function will be called with one argument, a list, and shall add objects of type modules.sd_hijack_optimizations.SdOptimization to it.""" - add_callback(callback_map['callbacks_list_optimizers'], callback) + add_callback(callback_map['callbacks_list_optimizers'], callback, name=name, category='list_optimizers') -def on_list_unets(callback): +def on_list_unets(callback, *, name=None): """register a function to be called when UI is making a list of alternative options for unet. The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it.""" - add_callback(callback_map['callbacks_list_unets'], callback) + add_callback(callback_map['callbacks_list_unets'], callback, name=name, category='list_unets') -def on_before_token_counter(callback): +def on_before_token_counter(callback, *, name=None): """register a function to be called when UI is counting tokens for a prompt. 
The function will be called with one argument of type BeforeTokenCounterParams, and should modify its fields if necessary.""" - add_callback(callback_map['callbacks_before_token_counter'], callback) + add_callback(callback_map['callbacks_before_token_counter'], callback, name=name, category='before_token_counter') From 9b842e9ec745000248d00c3ebef2d599e8f033fa Mon Sep 17 00:00:00 2001 From: SunChaser Date: Sun, 10 Mar 2024 16:19:59 +0800 Subject: [PATCH 03/90] fix: resolve type annotation warnings --- modules/upscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index cc662fc9c..9d13ee993 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -20,7 +20,7 @@ class Upscaler: filter = None model = None user_path = None - scalers = [] + scalers: list = [] tile = True def __init__(self, create_dirs=False): From 7e5e67330b092a65a8dd9e04a18969c458cb05d1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 10 Mar 2024 14:07:51 +0300 Subject: [PATCH 04/90] add UI for reordering callbacks --- modules/script_callbacks.py | 92 +++++++++++++++++++++++++++---------- modules/scripts.py | 90 ++++++++++++++++++++++++++++-------- modules/shared_items.py | 42 +++++++++++++++++ modules/ui_settings.py | 8 +++- style.css | 4 ++ 5 files changed, 192 insertions(+), 44 deletions(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 98952ec78..a0ecf5a53 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -8,7 +8,7 @@ from typing import Optional, Any from fastapi import FastAPI from gradio import Blocks -from modules import errors, timer, extensions +from modules import errors, timer, extensions, shared def report_exception(c, job): @@ -124,9 +124,10 @@ class ScriptCallback: name: str = None -def add_callback(callbacks, fun, *, name=None, category='unknown'): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if stack else 'unknown file' +def add_callback(callbacks, fun, *, name=None, category='unknown', filename=None): + if filename is None: + stack = [x for x in inspect.stack() if x.filename != __file__] + filename = stack[0].filename if stack else 'unknown file' extension = extensions.find_extension(filename) extension_name = extension.canonical_name if extension else 'base' @@ -146,6 +147,43 @@ def add_callback(callbacks, fun, *, name=None, category='unknown'): callbacks.append(ScriptCallback(filename, fun, unique_callback_name)) +def sort_callbacks(category, unordered_callbacks, *, enable_user_sort=True): + callbacks = unordered_callbacks.copy() + + if enable_user_sort: + for name in reversed(getattr(shared.opts, 'prioritized_callbacks_' + category, [])): + index = next((i for i, callback in enumerate(callbacks) if callback.name == name), None) + if index is not None: + callbacks.insert(0, callbacks.pop(index)) + + return callbacks + + +def ordered_callbacks(category, unordered_callbacks=None, *, enable_user_sort=True): + if unordered_callbacks is None: + unordered_callbacks = callback_map.get('callbacks_' + category, []) + + if not enable_user_sort: + return sort_callbacks(category, unordered_callbacks, enable_user_sort=False) + + callbacks = ordered_callbacks_map.get(category) + if callbacks is not None and len(callbacks) == len(unordered_callbacks): + return callbacks + + callbacks = sort_callbacks(category, unordered_callbacks) + + ordered_callbacks_map[category] = callbacks + return callbacks + + +def 
enumerate_callbacks(): + for category, callbacks in callback_map.items(): + if category.startswith('callbacks_'): + category = category[10:] + + yield category, callbacks + + callback_map = dict( callbacks_app_started=[], callbacks_model_loaded=[], @@ -170,14 +208,18 @@ callback_map = dict( callbacks_before_token_counter=[], ) +ordered_callbacks_map = {} + def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() + ordered_callbacks_map.clear() + def app_started_callback(demo: Optional[Blocks], app: FastAPI): - for c in callback_map['callbacks_app_started']: + for c in ordered_callbacks('app_started'): try: c.callback(demo, app) timer.startup_timer.record(os.path.basename(c.script)) @@ -186,7 +228,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): def app_reload_callback(): - for c in callback_map['callbacks_on_reload']: + for c in ordered_callbacks('on_reload'): try: c.callback() except Exception: @@ -194,7 +236,7 @@ def app_reload_callback(): def model_loaded_callback(sd_model): - for c in callback_map['callbacks_model_loaded']: + for c in ordered_callbacks('model_loaded'): try: c.callback(sd_model) except Exception: @@ -204,7 +246,7 @@ def model_loaded_callback(sd_model): def ui_tabs_callback(): res = [] - for c in callback_map['callbacks_ui_tabs']: + for c in ordered_callbacks('ui_tabs'): try: res += c.callback() or [] except Exception: @@ -214,7 +256,7 @@ def ui_tabs_callback(): def ui_train_tabs_callback(params: UiTrainTabParams): - for c in callback_map['callbacks_ui_train_tabs']: + for c in ordered_callbacks('ui_train_tabs'): try: c.callback(params) except Exception: @@ -222,7 +264,7 @@ def ui_train_tabs_callback(params: UiTrainTabParams): def ui_settings_callback(): - for c in callback_map['callbacks_ui_settings']: + for c in ordered_callbacks('ui_settings'): try: c.callback() except Exception: @@ -230,7 +272,7 @@ def ui_settings_callback(): def before_image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_before_image_saved']: + for c in ordered_callbacks('before_image_saved'): try: c.callback(params) except Exception: @@ -238,7 +280,7 @@ def before_image_saved_callback(params: ImageSaveParams): def image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_image_saved']: + for c in ordered_callbacks('image_saved'): try: c.callback(params) except Exception: @@ -246,7 +288,7 @@ def image_saved_callback(params: ImageSaveParams): def extra_noise_callback(params: ExtraNoiseParams): - for c in callback_map['callbacks_extra_noise']: + for c in ordered_callbacks('extra_noise'): try: c.callback(params) except Exception: @@ -254,7 +296,7 @@ def extra_noise_callback(params: ExtraNoiseParams): def cfg_denoiser_callback(params: CFGDenoiserParams): - for c in callback_map['callbacks_cfg_denoiser']: + for c in ordered_callbacks('cfg_denoiser'): try: c.callback(params) except Exception: @@ -262,7 +304,7 @@ def cfg_denoiser_callback(params: CFGDenoiserParams): def cfg_denoised_callback(params: CFGDenoisedParams): - for c in callback_map['callbacks_cfg_denoised']: + for c in ordered_callbacks('cfg_denoised'): try: c.callback(params) except Exception: @@ -270,7 +312,7 @@ def cfg_denoised_callback(params: CFGDenoisedParams): def cfg_after_cfg_callback(params: AfterCFGCallbackParams): - for c in callback_map['callbacks_cfg_after_cfg']: + for c in ordered_callbacks('cfg_after_cfg'): try: c.callback(params) except Exception: @@ -278,7 +320,7 @@ def cfg_after_cfg_callback(params: AfterCFGCallbackParams): 
def before_component_callback(component, **kwargs): - for c in callback_map['callbacks_before_component']: + for c in ordered_callbacks('before_component'): try: c.callback(component, **kwargs) except Exception: @@ -286,7 +328,7 @@ def before_component_callback(component, **kwargs): def after_component_callback(component, **kwargs): - for c in callback_map['callbacks_after_component']: + for c in ordered_callbacks('after_component'): try: c.callback(component, **kwargs) except Exception: @@ -294,7 +336,7 @@ def after_component_callback(component, **kwargs): def image_grid_callback(params: ImageGridLoopParams): - for c in callback_map['callbacks_image_grid']: + for c in ordered_callbacks('image_grid'): try: c.callback(params) except Exception: @@ -302,7 +344,7 @@ def image_grid_callback(params: ImageGridLoopParams): def infotext_pasted_callback(infotext: str, params: dict[str, Any]): - for c in callback_map['callbacks_infotext_pasted']: + for c in ordered_callbacks('infotext_pasted'): try: c.callback(infotext, params) except Exception: @@ -310,7 +352,7 @@ def infotext_pasted_callback(infotext: str, params: dict[str, Any]): def script_unloaded_callback(): - for c in reversed(callback_map['callbacks_script_unloaded']): + for c in reversed(ordered_callbacks('script_unloaded')): try: c.callback() except Exception: @@ -318,7 +360,7 @@ def script_unloaded_callback(): def before_ui_callback(): - for c in reversed(callback_map['callbacks_before_ui']): + for c in reversed(ordered_callbacks('before_ui')): try: c.callback() except Exception: @@ -328,7 +370,7 @@ def before_ui_callback(): def list_optimizers_callback(): res = [] - for c in callback_map['callbacks_list_optimizers']: + for c in ordered_callbacks('list_optimizers'): try: c.callback(res) except Exception: @@ -340,7 +382,7 @@ def list_optimizers_callback(): def list_unets_callback(): res = [] - for c in callback_map['callbacks_list_unets']: + for c in ordered_callbacks('list_unets'): try: c.callback(res) except Exception: @@ -350,7 +392,7 @@ def list_unets_callback(): def before_token_counter_callback(params: BeforeTokenCounterParams): - for c in callback_map['callbacks_before_token_counter']: + for c in ordered_callbacks('before_token_counter'): try: c.callback(params) except Exception: diff --git a/modules/scripts.py b/modules/scripts.py index 77f5e4f3e..e1a435827 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -138,7 +138,6 @@ class Script: """ pass - def before_process(self, p, *args): """ This function is called very early during processing begins for AlwaysVisible scripts. 
@@ -562,6 +561,25 @@ class ScriptRunner: self.paste_field_names = [] self.inputs = [None] + self.callback_map = {} + self.callback_names = [ + 'before_process', + 'process', + 'before_process_batch', + 'after_extra_networks_activate', + 'process_batch', + 'postprocess', + 'postprocess_batch', + 'postprocess_batch_list', + 'post_sample', + 'on_mask_blend', + 'postprocess_image', + 'postprocess_maskoverlay', + 'postprocess_image_after_composite', + 'before_component', + 'after_component', + ] + self.on_before_component_elem_id = {} """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks""" @@ -600,6 +618,8 @@ class ScriptRunner: self.scripts.append(script) self.selectable_scripts.append(script) + self.callback_map.clear() + self.apply_on_before_component_callbacks() def apply_on_before_component_callbacks(self): @@ -769,8 +789,42 @@ class ScriptRunner: return processed + def list_scripts_for_method(self, method_name): + if method_name in ('before_component', 'after_component'): + return self.scripts + else: + return self.alwayson_scripts + + def create_ordered_callbacks_list(self, method_name, *, enable_user_sort=True): + script_list = self.list_scripts_for_method(method_name) + category = f'script_{method_name}' + callbacks = [] + + for script in script_list: + if getattr(script.__class__, method_name, None) == getattr(Script, method_name, None): + continue + + script_callbacks.add_callback(callbacks, script, category=category, name=script.__class__.__name__, filename=script.filename) + + return script_callbacks.sort_callbacks(category, callbacks, enable_user_sort=enable_user_sort) + + def ordered_callbacks(self, method_name, *, enable_user_sort=True): + script_list = self.list_scripts_for_method(method_name) + category = f'script_{method_name}' + + scrpts_len, callbacks = self.callback_map.get(category, (-1, None)) + + if callbacks is None or scrpts_len != len(script_list): + callbacks = self.create_ordered_callbacks_list(method_name, enable_user_sort=enable_user_sort) + self.callback_map[category] = len(script_list), callbacks + + return callbacks + + def ordered_scripts(self, method_name): + return [x.callback for x in self.ordered_callbacks(method_name)] + def before_process(self, p): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('before_process'): try: script_args = p.script_args[script.args_from:script.args_to] script.before_process(p, *script_args) @@ -778,7 +832,7 @@ class ScriptRunner: errors.report(f"Error running before_process: {script.filename}", exc_info=True) def process(self, p): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('process'): try: script_args = p.script_args[script.args_from:script.args_to] script.process(p, *script_args) @@ -786,7 +840,7 @@ class ScriptRunner: errors.report(f"Error running process: {script.filename}", exc_info=True) def before_process_batch(self, p, **kwargs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('before_process_batch'): try: script_args = p.script_args[script.args_from:script.args_to] script.before_process_batch(p, *script_args, **kwargs) @@ -794,7 +848,7 @@ class ScriptRunner: errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True) def after_extra_networks_activate(self, p, **kwargs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('after_extra_networks_activate'): try: script_args = p.script_args[script.args_from:script.args_to] 
script.after_extra_networks_activate(p, *script_args, **kwargs) @@ -802,7 +856,7 @@ class ScriptRunner: errors.report(f"Error running after_extra_networks_activate: {script.filename}", exc_info=True) def process_batch(self, p, **kwargs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('process_batch'): try: script_args = p.script_args[script.args_from:script.args_to] script.process_batch(p, *script_args, **kwargs) @@ -810,7 +864,7 @@ class ScriptRunner: errors.report(f"Error running process_batch: {script.filename}", exc_info=True) def postprocess(self, p, processed): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess'): try: script_args = p.script_args[script.args_from:script.args_to] script.postprocess(p, processed, *script_args) @@ -818,7 +872,7 @@ class ScriptRunner: errors.report(f"Error running postprocess: {script.filename}", exc_info=True) def postprocess_batch(self, p, images, **kwargs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess_batch'): try: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_batch(p, *script_args, images=images, **kwargs) @@ -826,7 +880,7 @@ class ScriptRunner: errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True) def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess_batch_list'): try: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_batch_list(p, pp, *script_args, **kwargs) @@ -834,7 +888,7 @@ class ScriptRunner: errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True) def post_sample(self, p, ps: PostSampleArgs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('post_sample'): try: script_args = p.script_args[script.args_from:script.args_to] script.post_sample(p, ps, *script_args) @@ -842,7 +896,7 @@ class ScriptRunner: errors.report(f"Error running post_sample: {script.filename}", exc_info=True) def on_mask_blend(self, p, mba: MaskBlendArgs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('on_mask_blend'): try: script_args = p.script_args[script.args_from:script.args_to] script.on_mask_blend(p, mba, *script_args) @@ -850,7 +904,7 @@ class ScriptRunner: errors.report(f"Error running post_sample: {script.filename}", exc_info=True) def postprocess_image(self, p, pp: PostprocessImageArgs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess_image'): try: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_image(p, pp, *script_args) @@ -858,7 +912,7 @@ class ScriptRunner: errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess_maskoverlay'): try: script_args = p.script_args[script.args_from:script.args_to] script.postprocess_maskoverlay(p, ppmo, *script_args) @@ -866,7 +920,7 @@ class ScriptRunner: errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) def postprocess_image_after_composite(self, p, pp: PostprocessImageArgs): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('postprocess_image_after_composite'): try: script_args = 
p.script_args[script.args_from:script.args_to] script.postprocess_image_after_composite(p, pp, *script_args) @@ -880,7 +934,7 @@ class ScriptRunner: except Exception: errors.report(f"Error running on_before_component: {script.filename}", exc_info=True) - for script in self.scripts: + for script in self.ordered_scripts('before_component'): try: script.before_component(component, **kwargs) except Exception: @@ -893,7 +947,7 @@ class ScriptRunner: except Exception: errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) - for script in self.scripts: + for script in self.ordered_scripts('after_component'): try: script.after_component(component, **kwargs) except Exception: @@ -921,7 +975,7 @@ class ScriptRunner: self.scripts[si].args_to = args_to def before_hr(self, p): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('before_hr'): try: script_args = p.script_args[script.args_from:script.args_to] script.before_hr(p, *script_args) @@ -929,7 +983,7 @@ class ScriptRunner: errors.report(f"Error running before_hr: {script.filename}", exc_info=True) def setup_scrips(self, p, *, is_ui=True): - for script in self.alwayson_scripts: + for script in self.ordered_scripts('setup'): if not is_ui and script.setup_for_ui_only: continue diff --git a/modules/shared_items.py b/modules/shared_items.py index 88f636452..11f10b3f7 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -1,5 +1,8 @@ +import html import sys +from modules import script_callbacks, scripts, ui_components +from modules.options import OptionHTML, OptionInfo from modules.shared_cmd_options import cmd_opts @@ -118,6 +121,45 @@ def ui_reorder_categories(): yield "scripts" +def callbacks_order_settings(): + options = { + "sd_vae_explanation": OptionHTML(""" + For categories below, callbacks added to dropdowns happen before others, in order listed. + """), + + } + + callback_options = {} + + for category, _ in script_callbacks.enumerate_callbacks(): + callback_options[category] = script_callbacks.ordered_callbacks(category, enable_user_sort=False) + + for method_name in scripts.scripts_txt2img.callback_names: + callback_options["script_" + method_name] = scripts.scripts_txt2img.create_ordered_callbacks_list(method_name, enable_user_sort=False) + + for method_name in scripts.scripts_img2img.callback_names: + callbacks = callback_options.get("script_" + method_name, []) + + for addition in scripts.scripts_img2img.create_ordered_callbacks_list(method_name, enable_user_sort=False): + if any(x.name == addition.name for x in callbacks): + continue + + callbacks.append(addition) + + callback_options["script_" + method_name] = callbacks + + for category, callbacks in callback_options.items(): + if not callbacks: + continue + + option_info = OptionInfo([], f"{category} callback priority", ui_components.DropdownMulti, {"choices": [x.name for x in callbacks]}) + option_info.needs_restart() + option_info.html("
Default order:
    " + "".join(f"
  1. {html.escape(x.name)}
  2. \n" for x in callbacks) + "
") + options['prioritized_callbacks_' + category] = option_info + + return options + + class Shared(sys.modules[__name__].__class__): """ this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than diff --git a/modules/ui_settings.py b/modules/ui_settings.py index d17ef1d95..087b91f3b 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -1,7 +1,8 @@ import gradio as gr -from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo, timer +from modules import ui_common, shared, script_callbacks, scripts, sd_models, sysinfo, timer, shared_items from modules.call_queue import wrap_gradio_call +from modules.options import options_section from modules.shared import opts from modules.ui_components import FormRow from modules.ui_gradio_extensions import reload_javascript @@ -108,6 +109,11 @@ class UiSettings: shared.settings_components = self.component_dict + # we add this as late as possible so that scripts have already registered their callbacks + opts.data_labels.update(options_section(('callbacks', "Callbacks", "system"), { + **shared_items.callbacks_order_settings(), + })) + opts.reorder() with gr.Blocks(analytics_enabled=False) as settings_interface: diff --git a/style.css b/style.css index 29eae4127..f6a89b8f9 100644 --- a/style.css +++ b/style.css @@ -528,6 +528,10 @@ table.popup-table .link{ opacity: 0.75; } +.settings-comment .info ol{ + margin: 0.4em 0 0.8em 1em; +} + #sysinfo_download a.sysinfo_big_link{ font-size: 24pt; } From 2f55d669a26ca2c785b92656b8f32b56839f3750 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 10 Mar 2024 15:14:04 +0300 Subject: [PATCH 05/90] add support for specifying callback order in metadata --- modules/extensions.py | 24 ++++++++++++++++++++++++ modules/script_callbacks.py | 34 +++++++++++++++++++++++++++++++++- modules/scripts.py | 27 +++------------------------ modules/util.py | 24 ++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 25 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index ab835d3f2..1f620ff16 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,6 +1,7 @@ from __future__ import annotations import configparser +import dataclasses import os import threading import re @@ -22,6 +23,13 @@ def active(): return [x for x in extensions if x.enabled] +@dataclasses.dataclass +class CallbackOrderInfo: + name: str + before: list + after: list + + class ExtensionMetadata: filename = "metadata.ini" config: configparser.ConfigParser @@ -65,6 +73,22 @@ class ExtensionMetadata: # both "," and " " are accepted as separator return [x for x in re.split(r"[,\s]+", text.strip()) if x] + def list_callback_order_instructions(self): + for section in self.config.sections(): + if not section.startswith("callbacks/"): + continue + + callback_name = section[10:] + + if not callback_name.startswith(self.canonical_name): + errors.report(f"Callback order section for extension {self.canonical_name} is referencing the wrong extension: {section}") + continue + + before = self.parse_list(self.config.get(section, 'Before', fallback='')) + after = self.parse_list(self.config.get(section, 'After', fallback='')) + + yield CallbackOrderInfo(callback_name, before, after) + class Extension: lock = threading.Lock() diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index a0ecf5a53..4d80b433b 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -8,7 +8,7 @@ from typing 
import Optional, Any from fastapi import FastAPI from gradio import Blocks -from modules import errors, timer, extensions, shared +from modules import errors, timer, extensions, shared, util def report_exception(c, job): @@ -149,6 +149,38 @@ def add_callback(callbacks, fun, *, name=None, category='unknown', filename=None def sort_callbacks(category, unordered_callbacks, *, enable_user_sort=True): callbacks = unordered_callbacks.copy() + callback_lookup = {x.name: x for x in callbacks} + dependencies = {} + + order_instructions = {} + for extension in extensions.extensions: + for order_instruction in extension.metadata.list_callback_order_instructions(): + if order_instruction.name in callback_lookup: + if order_instruction.name not in order_instructions: + order_instructions[order_instruction.name] = [] + + order_instructions[order_instruction.name].append(order_instruction) + + if order_instructions: + for callback in callbacks: + dependencies[callback.name] = [] + + for callback in callbacks: + for order_instruction in order_instructions.get(callback.name, []): + for after in order_instruction.after: + if after not in callback_lookup: + continue + + dependencies[callback.name].append(after) + + for before in order_instruction.before: + if before not in callback_lookup: + continue + + dependencies[before].append(callback.name) + + sorted_names = util.topological_sort(dependencies) + callbacks = [callback_lookup[x] for x in sorted_names] if enable_user_sort: for name in reversed(getattr(shared.opts, 'prioritized_callbacks_' + category, [])): diff --git a/modules/scripts.py b/modules/scripts.py index e1a435827..20710b37d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -7,7 +7,9 @@ from dataclasses import dataclass import gradio as gr -from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors, timer +from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors, timer, util + +topological_sort = util.topological_sort AlwaysVisible = object() @@ -368,29 +370,6 @@ scripts_data = [] postprocessing_scripts_data = [] ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"]) -def topological_sort(dependencies): - """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies. - Ignores errors relating to missing dependeencies or circular dependencies - """ - - visited = {} - result = [] - - def inner(name): - visited[name] = True - - for dep in dependencies.get(name, []): - if dep in dependencies and dep not in visited: - inner(dep) - - result.append(name) - - for depname in dependencies: - if depname not in visited: - inner(depname) - - return result - @dataclass class ScriptWithDependencies: diff --git a/modules/util.py b/modules/util.py index 8d1aea44f..48268f04c 100644 --- a/modules/util.py +++ b/modules/util.py @@ -136,3 +136,27 @@ class MassFileLister: def reset(self): """Clear the cache of all directories.""" self.cached_dirs.clear() + + +def topological_sort(dependencies): + """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies. 
+ Ignores errors relating to missing dependeencies or circular dependencies + """ + + visited = {} + result = [] + + def inner(name): + visited[name] = True + + for dep in dependencies.get(name, []): + if dep in dependencies and dep not in visited: + inner(dep) + + result.append(name) + + for depname in dependencies: + if depname not in visited: + inner(depname) + + return result From 3670b4f49e070e8b8da20c88f4d6bda9ba18895c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 10 Mar 2024 15:16:12 +0300 Subject: [PATCH 06/90] lint --- modules/script_callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 4d80b433b..2cd65e832 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -120,7 +120,7 @@ class BeforeTokenCounterParams: @dataclasses.dataclass class ScriptCallback: script: str - callback: '' + callback: any name: str = None From eb10da8bb74a0f664c01052f04168837387598b2 Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 11 Mar 2024 05:15:09 +0400 Subject: [PATCH 07/90] type hinting in shared.py --- modules/shared.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index b4ba14ad7..8d1791532 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -7,6 +7,10 @@ from modules import shared_cmd_options, shared_gradio_themes, options, shared_it from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from modules import util +falseVar = False # avoid circular import for type hinting +if falseVar: + from modules import shared_state, styles, interrogate, shared_total_tqdm, memmon + cmd_opts = shared_cmd_options.cmd_opts parser = shared_cmd_options.parser @@ -16,11 +20,11 @@ styles_filename = cmd_opts.styles_file = cmd_opts.styles_file if len(cmd_opts.st config_filename = cmd_opts.ui_settings_file hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} -demo = None +demo: gr.Blocks = None -device = None +device: str = None -weight_load_location = None +weight_load_location: str = None xformers_available = False @@ -28,21 +32,21 @@ hypernetworks = {} loaded_hypernetworks = [] -state = None +state: 'shared_state.State' = None -prompt_styles = None +prompt_styles: 'styles.StyleDatabase' = None -interrogator = None +interrogator: 'interrogate.InterrogateModels' = None face_restorers = [] -options_templates = None -opts = None -restricted_opts = None +options_templates: dict = None +opts: options.Options = None +restricted_opts: set[str] = None sd_model: sd_models_types.WebuiSdModel = None -settings_components = None +settings_components: dict = None """assigned from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" tab_names = [] @@ -65,9 +69,9 @@ progress_print_out = sys.stdout gradio_theme = gr.themes.Base() -total_tqdm = None +total_tqdm: 'shared_total_tqdm.TotalTQDM' = None -mem_mon = None +mem_mon: 'memmon.MemUsageMonitor' = None options_section = options.options_section OptionInfo = options.OptionInfo From 2d57a2df660ec096969c89eb1ec72ebaa9a34636 Mon Sep 17 00:00:00 2001 From: Andray <33491867+light-and-ray@users.noreply.github.com> Date: Mon, 11 Mar 2024 07:40:15 +0400 Subject: [PATCH 08/90] Update modules/shared.py Co-authored-by: catboxanon <122327233+catboxanon@users.noreply.github.com> --- 
modules/shared.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 8d1791532..4cf7f6a81 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -6,9 +6,9 @@ import gradio as gr from modules import shared_cmd_options, shared_gradio_themes, options, shared_items, sd_models_types from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from modules import util +from typing import TYPE_CHECKING -falseVar = False # avoid circular import for type hinting -if falseVar: +if TYPE_CHECKING: from modules import shared_state, styles, interrogate, shared_total_tqdm, memmon cmd_opts = shared_cmd_options.cmd_opts From 1a1205f601d27e1ca5052e01cf4f877615f6a499 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 12 Mar 2024 03:26:50 +0900 Subject: [PATCH 09/90] fix Restore progress --- javascript/ui.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 1eef6d337..e0f5feebd 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -136,8 +136,7 @@ function showSubmitInterruptingPlaceholder(tabname) { function showRestoreProgressButton(tabname, show) { var button = gradioApp().getElementById(tabname + "_restore_progress"); if (!button) return; - - button.style.display = show ? "flex" : "none"; + button.style.setProperty('display', show ? 'flex' : 'none', 'important'); } function submit() { @@ -209,6 +208,7 @@ function restoreProgressTxt2img() { var id = localGet("txt2img_task_id"); if (id) { + showSubmitInterruptingPlaceholder('txt2img'); requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { showSubmitButtons('txt2img', true); }, null, 0); @@ -223,6 +223,7 @@ function restoreProgressImg2img() { var id = localGet("img2img_task_id"); if (id) { + showSubmitInterruptingPlaceholder('img2img'); requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { showSubmitButtons('img2img', true); }, null, 0); From 4079b17dd979564bd762b42b31162c7e3e2edb7b Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 12 Mar 2024 01:47:23 +0400 Subject: [PATCH 10/90] move postprocessing-for-training into builtin extensions --- .../scripts/postprocessing_autosized_crop.py | 0 .../scripts}/postprocessing_caption.py | 0 .../scripts}/postprocessing_create_flipped_copies.py | 0 .../scripts}/postprocessing_focal_crop.py | 0 .../scripts}/postprocessing_split_oversized.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename scripts/processing_autosized_crop.py => extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py (100%) rename {scripts => extensions-builtin/postprocessing-for-training/scripts}/postprocessing_caption.py (100%) rename {scripts => extensions-builtin/postprocessing-for-training/scripts}/postprocessing_create_flipped_copies.py (100%) rename {scripts => extensions-builtin/postprocessing-for-training/scripts}/postprocessing_focal_crop.py (100%) rename {scripts => extensions-builtin/postprocessing-for-training/scripts}/postprocessing_split_oversized.py (100%) diff --git a/scripts/processing_autosized_crop.py b/extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py similarity index 100% rename from 
scripts/processing_autosized_crop.py rename to extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py diff --git a/scripts/postprocessing_caption.py b/extensions-builtin/postprocessing-for-training/scripts/postprocessing_caption.py similarity index 100% rename from scripts/postprocessing_caption.py rename to extensions-builtin/postprocessing-for-training/scripts/postprocessing_caption.py diff --git a/scripts/postprocessing_create_flipped_copies.py b/extensions-builtin/postprocessing-for-training/scripts/postprocessing_create_flipped_copies.py similarity index 100% rename from scripts/postprocessing_create_flipped_copies.py rename to extensions-builtin/postprocessing-for-training/scripts/postprocessing_create_flipped_copies.py diff --git a/scripts/postprocessing_focal_crop.py b/extensions-builtin/postprocessing-for-training/scripts/postprocessing_focal_crop.py similarity index 100% rename from scripts/postprocessing_focal_crop.py rename to extensions-builtin/postprocessing-for-training/scripts/postprocessing_focal_crop.py diff --git a/scripts/postprocessing_split_oversized.py b/extensions-builtin/postprocessing-for-training/scripts/postprocessing_split_oversized.py similarity index 100% rename from scripts/postprocessing_split_oversized.py rename to extensions-builtin/postprocessing-for-training/scripts/postprocessing_split_oversized.py From 2e3a0f39f6bcbb53837e43341507ffda0bd36eec Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 12 Mar 2024 02:28:15 +0400 Subject: [PATCH 11/90] move upscale postprocessing under input accordion --- scripts/postprocessing_upscale.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index e269682d0..80692e5c2 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -4,7 +4,7 @@ import numpy as np from modules import scripts_postprocessing, shared import gradio as gr -from modules.ui_components import FormRow, ToolButton +from modules.ui_components import FormRow, ToolButton, InputAccordion from modules.ui import switch_values_symbol upscale_cache = {} @@ -17,7 +17,14 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): def ui(self): selected_tab = gr.Number(value=0, visible=False) - with gr.Column(): + with InputAccordion(True, label="Upscale", elem_id="extras_upscale") as upscale_enabled: + with FormRow(): + extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) + + with FormRow(): + extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) + extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") + with FormRow(): with gr.Tabs(elem_id="extras_resize_mode"): with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: @@ -32,18 +39,12 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn", tooltip="Switch width/height") upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") - with FormRow(): - extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', 
elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - - with FormRow(): - extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") - upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) return { + "upscale_enabled": upscale_enabled, "upscale_mode": selected_tab, "upscale_by": upscaling_resize, "upscale_to_width": upscaling_resize_w, @@ -81,7 +82,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): return image - def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + def process_firstpass(self, pp: scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): if upscale_mode == 1: pp.shared.target_width = upscale_to_width pp.shared.target_height = upscale_to_height @@ -89,7 +90,10 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): pp.shared.target_width = int(pp.image.width * upscale_by) pp.shared.target_height = int(pp.image.height * upscale_by) - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_enabled=True, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): + if not upscale_enabled: + return + if upscaler_1_name == "None": upscaler_1_name = None From 8262cd71c49e58a109ca8d5e57d6590831b5ead7 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Tue, 12 Mar 2024 00:09:07 +0100 Subject: [PATCH 12/90] Better workaround for Navi1, removing --pre for Navi3 --- webui.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/webui.sh b/webui.sh index 361255f69..89c52b519 100755 --- a/webui.sh +++ b/webui.sh @@ -129,11 +129,11 @@ case "$gpu_info" in export HSA_OVERRIDE_GFX_VERSION=10.3.0 if [[ -z "${TORCH_COMMAND}" ]] then - pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')" - if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]] + pyv="$(${python_cmd} -c 'import sys; print(float(".".join(map(str, sys.version_info[0:2]))) <= 3.10)')" + if [[ $pyv == "True" ]] then - # Navi users will still use torch 1.13 because 2.0 does not seem to work. 
- export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6" + # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711 + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl" else printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m" exit 1 @@ -143,7 +143,7 @@ case "$gpu_info" in *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0 ;; *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \ - export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7" + export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7" ;; *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0 printf "\n%s\n" "${delimiter}" From 994e08aac1f80932dbd87a59a75ca6d4411bfe3a Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Tue, 12 Mar 2024 18:41:44 +0800 Subject: [PATCH 13/90] ascend npu readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f4cfcf290..bc08e7ad1 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,7 @@ Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-di - [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) - [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. - [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page) +- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page) Alternatively, use online services (like Google Colab): From b980c8140ba336b3151cef7b4d1c1d2a3bca9130 Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 12 Mar 2024 22:21:59 +0400 Subject: [PATCH 14/90] featch only active branch updates for extensions --- modules/extensions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/extensions.py b/modules/extensions.py index 04bda297e..6542cb7ac 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -156,6 +156,8 @@ class Extension: def check_updates(self): repo = Repo(self.path) for fetch in repo.remote().fetch(dry_run=True): + if self.branch and fetch.name != f'{repo.remote().name}/{self.branch}': + continue if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True self.status = "new commits" From 74e2e5279c6a4147a8a893a137c028fda0479d07 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Wed, 13 Mar 2024 00:17:24 +0100 Subject: [PATCH 15/90] Workaround for Navi1: pytorch nightly whl for 3.8 and 3.9 --- webui.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/webui.sh b/webui.sh index 89c52b519..0b1d4d094 100755 --- a/webui.sh +++ b/webui.sh @@ -129,13 +129,19 @@ case "$gpu_info" in export HSA_OVERRIDE_GFX_VERSION=10.3.0 if [[ -z "${TORCH_COMMAND}" ]] then - pyv="$(${python_cmd} -c 'import sys; print(float(".".join(map(str, sys.version_info[0:2]))) <= 3.10)')" - if [[ $pyv == "True" ]] + pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, 
sys.version_info[0:2])))')" + # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711 + if [[ $pyv == "3.8" ]] + then + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl" + if [[ $pyv == "3.9" ]] + then + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl" + if [[ $pyv == "3.10" ]] then - # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711 export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl" else - printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m" + printf "\e[1m\e[31mERROR: RX 5000 series GPUs python version must be between 3.8 and 3.10, aborting...\e[0m" exit 1 fi fi From 9fbfb8ad324415597765e3e63b1ce4a123f91539 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Wed, 13 Mar 2024 00:43:01 +0100 Subject: [PATCH 16/90] Better workaround for Navi1 - fix if --- webui.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.sh b/webui.sh index 0b1d4d094..b348c387e 100755 --- a/webui.sh +++ b/webui.sh @@ -134,10 +134,10 @@ case "$gpu_info" in if [[ $pyv == "3.8" ]] then export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl" - if [[ $pyv == "3.9" ]] + elif [[ $pyv == "3.9" ]] then export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl" - if [[ $pyv == "3.10" ]] + elif [[ $pyv == "3.10" ]] then export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl" else From 2efc7c1b0535e65274844c2e554bcdfd2b29a1f0 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Wed, 13 Mar 2024 00:54:32 +0100 Subject: [PATCH 17/90] Better workaround for Navi1, removing --pre for Navi3 --- webui.sh | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/webui.sh b/webui.sh index 361255f69..b348c387e 100755 --- a/webui.sh +++ b/webui.sh @@ -130,12 +130,18 @@ case "$gpu_info" in if [[ -z "${TORCH_COMMAND}" ]] then pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')" - if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]] + # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711 + if [[ $pyv == "3.8" ]] then - # Navi users will still use torch 1.13 
because 2.0 does not seem to work. - export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6" + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl" + elif [[ $pyv == "3.9" ]] + then + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl" + elif [[ $pyv == "3.10" ]] + then + export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl" else - printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m" + printf "\e[1m\e[31mERROR: RX 5000 series GPUs python version must be between 3.8 and 3.10, aborting...\e[0m" exit 1 fi fi @@ -143,7 +149,7 @@ case "$gpu_info" in *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0 ;; *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \ - export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7" + export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7" ;; *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0 printf "\n%s\n" "${delimiter}" From 9f2ae1cb85015ee3cc79f1de92641032df154d5b Mon Sep 17 00:00:00 2001 From: KohakuBlueleaf Date: Wed, 13 Mar 2024 11:47:33 +0800 Subject: [PATCH 18/90] Add missing .mean --- extensions-builtin/Lora/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 183f8bd7c..473ba29e8 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -173,7 +173,7 @@ class NetworkModule: orig_weight = orig_weight.to(updown) merged_scale1 = updown + orig_weight dora_merged = ( - merged_scale1 / merged_scale1(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale + merged_scale1 / merged_scale1.mean(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale ) final_updown = dora_merged - orig_weight return final_updown From d18eb10ecdbf3690571d2d7ce0c0fe548d00d836 Mon Sep 17 00:00:00 2001 From: Haoming Date: Wed, 13 Mar 2024 21:15:52 +0800 Subject: [PATCH 19/90] add hook --- modules/ui_postprocessing.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index 7261c2df8..e9d82d46a 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -4,6 +4,30 @@ import modules.infotext_utils as parameters_copypaste from modules.ui_components import ResizeHandleRow +def hook_scale_update(inputs): + resize = upscaler = None + for script in inputs: + if script.label == "Resize": + resize = script + elif script.label == "Upscaler 1": + upscaler = script + elif resize and upscaler: + break + + def update_scale(upscaler: str, slider: float): + if upscaler[1] in ('x', 'X'): + try: + scale = int(upscaler[0]) + return gr.update(value=scale) + except ValueError: + return gr.update(value=slider) + + return 
gr.update(value=slider) + + if resize and upscaler: + upscaler.input(update_scale, inputs=[upscaler, resize], outputs=[resize]) + + def create_ui(): dummy_component = gr.Label(visible=False) tab_index = gr.Number(value=0, visible=False) @@ -23,6 +47,7 @@ def create_ui(): show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") script_inputs = scripts.scripts_postproc.setup_ui() + hook_scale_update(script_inputs) with gr.Column(): toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras") From fd71b761ff6b6636204ffcd71212cdbb7bb5d658 Mon Sep 17 00:00:00 2001 From: Haoming Date: Thu, 14 Mar 2024 09:55:14 +0800 Subject: [PATCH 20/90] use re instead of hardcoding Now supports all natively provided upscaler as well --- modules/ui_postprocessing.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index e9d82d46a..7fd889105 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -5,6 +5,9 @@ from modules.ui_components import ResizeHandleRow def hook_scale_update(inputs): + import re + pattern = r'(\d)[xX]|[xX](\d)' + resize = upscaler = None for script in inputs: if script.label == "Resize": @@ -15,14 +18,17 @@ def hook_scale_update(inputs): break def update_scale(upscaler: str, slider: float): - if upscaler[1] in ('x', 'X'): - try: - scale = int(upscaler[0]) - return gr.update(value=scale) - except ValueError: - return gr.update(value=slider) + match = re.search(pattern, upscaler) - return gr.update(value=slider) + if match: + if match.group(1): + return gr.update(value=int(match.group(1))) + + else: + return gr.update(value=int(match.group(2))) + + else: + return gr.update(value=slider) if resize and upscaler: upscaler.input(update_scale, inputs=[upscaler, resize], outputs=[resize]) From 4e17fc36d87a1d983c542dbfb606a93e70a68e2a Mon Sep 17 00:00:00 2001 From: Haoming Date: Thu, 14 Mar 2024 10:04:09 +0800 Subject: [PATCH 21/90] add user setting Now this is disabled by default --- modules/shared_options.py | 1 + modules/ui_postprocessing.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 21643afe0..99a051aaf 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -101,6 +101,7 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess "DAT_tile": OptionInfo(192, "Tile size for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), + "scaleBy_from_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler.").info("Will not change the value when no matching pattern is found."), })) options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index 7fd889105..af44f6195 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -53,7 +53,8 @@ def create_ui(): show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") 
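A brief aside on the `(\d)[xX]|[xX](\d)` pattern used above to pull a scale factor out of the selected upscaler's name: a self-contained sketch of how it behaves, with illustrative names only (the real choices come from `shared.sd_upscalers` at runtime).

```python
# Standalone sketch of the scale-factor pattern; upscaler names are examples, not webui data.
import re

pattern = r'(\d)[xX]|[xX](\d)'

for name in ("R-ESRGAN 4x+", "SwinIR 4x", "x2 Example", "Lanczos"):
    match = re.search(pattern, name)
    scale = int(match.group(1) or match.group(2)) if match else None
    print(f"{name!r} -> {scale}")  # 4, 4, 2, None (no match leaves the slider as-is)
```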
script_inputs = scripts.scripts_postproc.setup_ui() - hook_scale_update(script_inputs) + if getattr(shared.opts, 'scaleBy_from_upscaler', False): + hook_scale_update(script_inputs) with gr.Column(): toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras") From c40f33ca0475a39a44576bc32dba9f87b011e9b2 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 15 Mar 2024 08:22:36 +0900 Subject: [PATCH 22/90] PEP 604 annotations --- modules/styles.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/styles.py b/modules/styles.py index a9d8636a9..25f22d3dd 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,3 +1,4 @@ +from __future__ import annotations from pathlib import Path from modules import errors import csv From 07805cbeee144fb112039fd114dba1479cf4ba63 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 14 Mar 2024 17:05:14 -0700 Subject: [PATCH 23/90] fix: AttributeError when attempting to reshape rescale by org_module weight --- extensions-builtin/Lora/network_oft.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 7821a8a7d..1c515ebb7 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -36,13 +36,6 @@ class NetworkModuleOFT(network.NetworkModule): # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) - # LyCORIS BOFT - if self.oft_blocks.dim() == 4: - self.is_boft = True - self.rescale = weights.w.get('rescale', None) - if self.rescale is not None: - self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) - is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported @@ -54,6 +47,13 @@ class NetworkModuleOFT(network.NetworkModule): elif is_other_linear: self.out_dim = self.sd_module.embed_dim + # LyCORIS BOFT + if self.oft_blocks.dim() == 4: + self.is_boft = True + self.rescale = weights.w.get('rescale', None) + if self.rescale is not None and not is_other_linear: + self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) + self.num_blocks = self.dim self.block_size = self.out_dim // self.dim self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim From 76fd487818b1801906830a218c41ac434f2bd43d Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 14 Mar 2024 21:59:53 -0400 Subject: [PATCH 24/90] Make imageviewer event listeners browser consistent --- javascript/imageviewer.js | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 625c5d148..d4d4f016d 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -131,19 +131,15 @@ function setupImageForLightbox(e) { e.style.cursor = 'pointer'; e.style.userSelect = 'none'; - var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1; - - // For Firefox, listening on click first switched to next image then shows the lightbox. - // If you know how to fix this without switching to mousedown event, please. - // For other browsers the event is click to make it possiblr to drag picture. - var event = isFirefox ? 
'mousedown' : 'click'; - - e.addEventListener(event, function(evt) { + e.addEventListener('mousedown', function(evt) { if (evt.button == 1) { open(evt.target.src); evt.preventDefault(); return; } + }, true); + + e.addEventListener('click', function(evt) { if (!opts.js_modal_lightbox || evt.button != 0) return; modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed); From 8eaa7e9f0446a386129afae19fa27cbac151d190 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 15 Mar 2024 04:05:04 +0000 Subject: [PATCH 25/90] Support dragdrop for URLs --- javascript/dragdrop.js | 21 +++++++++++++++++---- modules/images.py | 33 +++++++++++++++++++++++---------- modules/ui_toprow.py | 8 ++++++++ 3 files changed, 48 insertions(+), 14 deletions(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index d680daf52..01ae6e4db 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -56,6 +56,10 @@ function eventHasFiles(e) { return false; } +function getEventUrl(e) { + return e?.dataTransfer?.getData('URL') || e?.dataTransfer?.getData('text/html')?.match(/(?:src|href)=["'](.*?)["']/)?.[1]; +} + function dragDropTargetIsPrompt(target) { if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; @@ -76,21 +80,30 @@ window.document.addEventListener('dragover', e => { window.document.addEventListener('drop', e => { const target = e.composedPath()[0]; - if (!eventHasFiles(e)) return; + const url = getEventUrl(e); + if (!eventHasFiles(e) && !url) return; if (dragDropTargetIsPrompt(target)) { e.stopPropagation(); e.preventDefault(); - let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image"; + const isImg2img = get_tab_index('tabs') == 1; + let prompt_image_target = isImg2img ? "img2img_prompt_image" : "txt2img_prompt_image"; + let prompt_url_target = isImg2img ? 
"img2img_prompt_url" : "txt2img_prompt_url"; - const imgParent = gradioApp().getElementById(prompt_target); + const imgParent = gradioApp().getElementById(prompt_image_target); + const urlParent = gradioApp().getElementById(prompt_url_target); const files = e.dataTransfer.files; const fileInput = imgParent.querySelector('input[type="file"]'); - if (fileInput) { + const urlInput = urlParent.querySelector('textarea'); + if (files && fileInput) { fileInput.files = files; fileInput.dispatchEvent(new Event('change')); } + if (url && urlInput) { + urlInput.value = url; + urlInput.dispatchEvent(new Event('input')); + } } var targetImage = target.closest('[data-testid="image"]'); diff --git a/modules/images.py b/modules/images.py index c50b2455d..665dbc372 100644 --- a/modules/images.py +++ b/modules/images.py @@ -772,18 +772,31 @@ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]} def image_data(data): import gradio as gr - try: - image = read(io.BytesIO(data)) - textinfo, _ = read_info_from_image(image) - return textinfo, None - except Exception: - pass + if not data: + return gr.update(), None - try: - text = data.decode('utf8') - assert len(text) < 10000 - return text, None + if isinstance(data, bytes): + try: + image = Image.open(io.BytesIO(data)) + textinfo, _ = read_info_from_image(image) + return textinfo, None + except Exception: + pass + try: + text = data.decode('utf8') + assert len(text) < 10000 + return text, None + except Exception: + pass + + import requests + try: + r = requests.get(data, timeout=5) + if r.status_code == 200: + image = Image.open(io.BytesIO(r.content)) + textinfo, _ = read_info_from_image(image) + return textinfo, None except Exception: pass diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index dc3c3aa38..bce93da95 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -82,6 +82,7 @@ class Toprow: with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]): self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, placeholder="Prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"]) self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False) + self.prompt_url = gr.Textbox(label="", elem_id=f"{self.id_part}_prompt_url", visible=False) with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]): self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"]) @@ -93,6 +94,13 @@ class Toprow: show_progress=False, ) + self.prompt_url.input( + fn=modules.images.image_data, + inputs=[self.prompt_url], + outputs=[self.prompt, self.prompt_url], + show_progress=False, + ) + def create_submit_box(self): with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box: self.submit_box = submit_box From 5f4203bf9b622a0eb3b6a1eb2c8ef8dbb56930a9 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 14 Mar 2024 22:21:08 -0600 Subject: [PATCH 26/90] Strip comments from hires fix prompt --- modules/processing_scripts/comments.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/processing_scripts/comments.py 
b/modules/processing_scripts/comments.py index 638e39f29..cf81dfd8b 100644 --- a/modules/processing_scripts/comments.py +++ b/modules/processing_scripts/comments.py @@ -26,6 +26,13 @@ class ScriptStripComments(scripts.Script): p.main_prompt = strip_comments(p.main_prompt) p.main_negative_prompt = strip_comments(p.main_negative_prompt) + if getattr(p, 'enable_hr', False): + p.all_hr_prompts = [strip_comments(x) for x in p.all_hr_prompts] + p.all_hr_negative_prompts = [strip_comments(x) for x in p.all_hr_negative_prompts] + + p.hr_prompt = strip_comments(p.hr_prompt) + p.hr_negative_prompt = strip_comments(p.hr_negative_prompt) + def before_token_counter(params: script_callbacks.BeforeTokenCounterParams): if not shared.opts.enable_prompt_comments: From 6f51e0555376cff9f185cb4f1dde8537bfec92af Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 15 Mar 2024 12:01:43 +0400 Subject: [PATCH 27/90] prevent alt menu for firefox --- .../canvas-zoom-and-pan/javascript/zoom.js | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 64e7a638a..aa27ac157 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -966,3 +966,26 @@ onUiLoaded(async() => { // Add integration with Inpaint Anything // applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]); }); + + +onUiLoaded(function() { + let isAltPressed = false; + + function handleAltKeyDown(e) { + if (e.code === "AltLeft" || e.code === "AltRight") { + isAltPressed = true; + } else { + isAltPressed = false; + } + } + + function handleAltKeyUp(e) { + if (isAltPressed) { + e.preventDefault(); + } + isAltPressed = false; + } + + document.addEventListener("keydown", handleAltKeyDown); + document.addEventListener("keyup", handleAltKeyUp); +}); From 887a5122083d27fd819bfeb54524dbdc791961cc Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 15 Mar 2024 21:06:54 +0900 Subject: [PATCH 28/90] fix issue with Styles when Hires prompt is used --- modules/infotext_utils.py | 31 ++++++++++++++++++++----------- modules/infotext_versions.py | 1 + modules/processing.py | 15 ++++++++------- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index a1cbfb17d..723cb1f82 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -265,17 +265,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model else: prompt += ("" if prompt == "" else "\n") + line - if shared.opts.infotext_styles != "Ignore": - found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt) - - if shared.opts.infotext_styles == "Apply": - res["Styles array"] = found_styles - elif shared.opts.infotext_styles == "Apply if any" and found_styles: - res["Styles array"] = found_styles - - res["Prompt"] = prompt - res["Negative prompt"] = negative_prompt - for k, v in re_param.findall(lastline): try: if v[0] == '"' and v[-1] == '"': @@ -290,6 +279,26 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model except Exception: print(f"Error parsing \"{k}: {v}\"") + # Extract styles from prompt + if shared.opts.infotext_styles != "Ignore": + found_styles, prompt_no_styles, negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(prompt, 
negative_prompt) + + same_hr_styles = True + if ("Hires prompt" in res or "Hires negative prompt" in res) and (infotext_ver > infotext_versions.v180_hr_styles if (infotext_ver := infotext_versions.parse_version(res.get("Version"))) else True): + hr_prompt, hr_negative_prompt = res.get("Hires prompt", prompt), res.get("Hires negative prompt", negative_prompt) + hr_found_styles, hr_prompt_no_styles, hr_negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(hr_prompt, hr_negative_prompt) + if same_hr_styles := found_styles == hr_found_styles: + res["Hires prompt"] = '' if hr_prompt_no_styles == prompt_no_styles else hr_prompt_no_styles + res['Hires negative prompt'] = '' if hr_negative_prompt_no_styles == negative_prompt_no_styles else hr_negative_prompt_no_styles + + if same_hr_styles: + prompt, negative_prompt = prompt_no_styles, negative_prompt_no_styles + if (shared.opts.infotext_styles == "Apply if any" and found_styles) or shared.opts.infotext_styles == "Apply": + res['Styles array'] = found_styles + + res["Prompt"] = prompt + res["Negative prompt"] = negative_prompt + # Missing CLIP skip means it was set to 1 (the default) if "Clip skip" not in res: res["Clip skip"] = "1" diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py index b5552a312..0d2d6282a 100644 --- a/modules/infotext_versions.py +++ b/modules/infotext_versions.py @@ -6,6 +6,7 @@ import re v160 = version.parse("1.6.0") v170_tsnr = version.parse("v1.7.0-225") v180 = version.parse("1.8.0") +v180_hr_styles = version.parse("1.8.0-136") # todo: change to the actual version number after merge def parse_version(text): diff --git a/modules/processing.py b/modules/processing.py index 86194b057..d6873a510 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -702,7 +702,7 @@ def program_version(): return res -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None): +def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None): if index is None: index = position_in_batch + iteration * p.batch_size @@ -745,11 +745,18 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, "Tiling": "True" if p.tiling else None, + "Hires prompt": None, # This is set later, insert here to keep order + "Hires negative prompt": None, # This is set later, insert here to keep order **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, "User": p.user if opts.add_user_name_to_info else None, } + if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None): + generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None + if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None): + generation_params['Hires negative prompt'] = all_hr_negative_prompts[index] if all_hr_negative_prompts[index] != all_negative_prompts[index] else None + generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None]) prompt_text = p.main_prompt if use_main_prompt else 
all_prompts[index] @@ -1194,12 +1201,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: self.extra_generation_params["Hires sampler"] = self.hr_sampler_name - if tuple(self.hr_prompt) != tuple(self.prompt): - self.extra_generation_params["Hires prompt"] = self.hr_prompt - - if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt): - self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt - self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") if self.enable_hr and self.latent_scale_mode is None: if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): From a3a648bf6bf49d3ec51de3cee74035ac71f1662d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 16 Mar 2024 05:57:23 +0900 Subject: [PATCH 29/90] bump action version --- .github/workflows/on_pull_request.yaml | 8 ++++---- .github/workflows/run_tests.yaml | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index 9e44c806a..c595b80aa 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -11,8 +11,8 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name steps: - name: Checkout Code - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: 3.11 # NB: there's no cache: pip here since we're not installing anything @@ -29,9 +29,9 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Node.js - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: 18 - run: npm i --ci diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index f42e4758e..0610f4f54 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -11,9 +11,9 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python 3.10 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.10.6 cache: pip @@ -22,7 +22,7 @@ jobs: launch.py - name: Cache models id: cache-models - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: models key: "2023-12-30" @@ -68,13 +68,13 @@ jobs: python -m coverage report -i python -m coverage html -i - name: Upload main app output - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: output path: output.txt - name: Upload coverage HTML - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: htmlcov From 63c3c4dbc357e1e400b40fe22521857eeaeb01b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 16 Mar 2024 09:04:08 +0300 Subject: [PATCH 30/90] simplify code for #15244 --- modules/shared_options.py | 2 +- 
modules/ui_postprocessing.py | 32 ------------------------------- scripts/postprocessing_upscale.py | 14 ++++++++++++++ 3 files changed, 15 insertions(+), 33 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 99a051aaf..fc9f13d6f 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -101,7 +101,7 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess "DAT_tile": OptionInfo(192, "Tile size for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}), - "scaleBy_from_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler.").info("Will not change the value when no matching pattern is found."), + "set_scale_by_when_changing_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler."), })) options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), { diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index af44f6195..7261c2df8 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -4,36 +4,6 @@ import modules.infotext_utils as parameters_copypaste from modules.ui_components import ResizeHandleRow -def hook_scale_update(inputs): - import re - pattern = r'(\d)[xX]|[xX](\d)' - - resize = upscaler = None - for script in inputs: - if script.label == "Resize": - resize = script - elif script.label == "Upscaler 1": - upscaler = script - elif resize and upscaler: - break - - def update_scale(upscaler: str, slider: float): - match = re.search(pattern, upscaler) - - if match: - if match.group(1): - return gr.update(value=int(match.group(1))) - - else: - return gr.update(value=int(match.group(2))) - - else: - return gr.update(value=slider) - - if resize and upscaler: - upscaler.input(update_scale, inputs=[upscaler, resize], outputs=[resize]) - - def create_ui(): dummy_component = gr.Label(visible=False) tab_index = gr.Number(value=0, visible=False) @@ -53,8 +23,6 @@ def create_ui(): show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") script_inputs = scripts.scripts_postproc.setup_ui() - if getattr(shared.opts, 'scaleBy_from_upscaler', False): - hook_scale_update(script_inputs) with gr.Column(): toprow = ui_toprow.Toprow(is_compact=True, is_img2img=False, id_part="extras") diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index e269682d0..d8ba70ed8 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -1,3 +1,5 @@ +import re + from PIL import Image import numpy as np @@ -39,10 +41,22 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") + def on_selected_upscale_method(upscale_method): + if not 
shared.opts.set_scale_by_when_changing_upscaler: + return gr.update() + + match = re.search(r'(\d)[xX]|[xX](\d)', upscale_method) + if not match: + return gr.update() + + return gr.update(value=int(match.group(1) or match.group(2))) + upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) + extras_upscaler_1.change(on_selected_upscale_method, inputs=[extras_upscaler_1], outputs=[upscaling_resize], show_progress="hidden") + return { "upscale_mode": selected_tab, "upscale_by": upscaling_resize, From 38a7dc54885f9df098a3275d687d3b82fa54de1d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 16 Mar 2024 17:19:38 +0900 Subject: [PATCH 31/90] v180_hr_styles actual version number --- modules/infotext_versions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py index 0d2d6282a..cea676cda 100644 --- a/modules/infotext_versions.py +++ b/modules/infotext_versions.py @@ -6,7 +6,7 @@ import re v160 = version.parse("1.6.0") v170_tsnr = version.parse("v1.7.0-225") v180 = version.parse("1.8.0") -v180_hr_styles = version.parse("1.8.0-136") # todo: change to the actual version number after merge +v180_hr_styles = version.parse("1.8.0-139") def parse_version(text): From cc8ea32501e09787d73815d2a982071fd3a4686a Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 12 Mar 2024 21:29:57 +0400 Subject: [PATCH 32/90] fix ui-config for InputAccordion --- modules/ui_loadsave.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py index 2555cdb6c..0cc1ab82a 100644 --- a/modules/ui_loadsave.py +++ b/modules/ui_loadsave.py @@ -104,6 +104,8 @@ class UiLoadsave: apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) if type(x) == InputAccordion: + if hasattr(x, 'custom_script_source'): + x.accordion.custom_script_source = x.custom_script_source if x.accordion.visible: apply_field(x.accordion, 'visible') apply_field(x, 'value') From 79514e5b8e112c47128f3da506267e8540219097 Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 16 Mar 2024 16:06:21 +0400 Subject: [PATCH 33/90] prevent defaults for alt only if mouse inside image --- .../canvas-zoom-and-pan/javascript/zoom.js | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index aa27ac157..63900025f 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -839,6 +839,31 @@ onUiLoaded(async() => { document.addEventListener("keydown", handleMoveKeyDown); document.addEventListener("keyup", handleMoveKeyUp); + // Prevent firefox to open toolbar on pressing alt + if (hotkeysConfig.canvas_hotkey_zoom === "Alt") { + let isAltPressed = false; + + function handleAltKeyDown(e) { + if (!activeElement) return; + if (e.code === "AltLeft" || e.code === "AltRight") { + isAltPressed = true; + } else { + isAltPressed = false; + } + } + + function handleAltKeyUp(e) { + if (isAltPressed) { + e.preventDefault(); + } + isAltPressed = false; + } + + document.addEventListener("keydown", handleAltKeyDown); + document.addEventListener("keyup", 
handleAltKeyUp); + } + + // Detect zoom level and update the pan speed. function updatePanPosition(movementX, movementY) { let panSpeed = 2; @@ -966,26 +991,3 @@ onUiLoaded(async() => { // Add integration with Inpaint Anything // applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]); }); - - -onUiLoaded(function() { - let isAltPressed = false; - - function handleAltKeyDown(e) { - if (e.code === "AltLeft" || e.code === "AltRight") { - isAltPressed = true; - } else { - isAltPressed = false; - } - } - - function handleAltKeyUp(e) { - if (isAltPressed) { - e.preventDefault(); - } - isAltPressed = false; - } - - document.addEventListener("keydown", handleAltKeyDown); - document.addEventListener("keyup", handleAltKeyUp); -}); From 9142ce8188cc0d7b8847df0c2f11ab47a0b13a50 Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 16 Mar 2024 16:14:57 +0400 Subject: [PATCH 34/90] fix linter and do not require reload page if option was changed --- .../canvas-zoom-and-pan/javascript/zoom.js | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 63900025f..a575862df 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -839,30 +839,31 @@ onUiLoaded(async() => { document.addEventListener("keydown", handleMoveKeyDown); document.addEventListener("keyup", handleMoveKeyUp); + // Prevent firefox to open toolbar on pressing alt - if (hotkeysConfig.canvas_hotkey_zoom === "Alt") { - let isAltPressed = false; + let isAltPressed = false; - function handleAltKeyDown(e) { - if (!activeElement) return; - if (e.code === "AltLeft" || e.code === "AltRight") { - isAltPressed = true; - } else { - isAltPressed = false; - } - } - - function handleAltKeyUp(e) { - if (isAltPressed) { - e.preventDefault(); - } + function handleAltKeyDown(e) { + if (!activeElement) return; + if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; + if (e.code === "AltLeft" || e.code === "AltRight") { + isAltPressed = true; + } else { isAltPressed = false; } - - document.addEventListener("keydown", handleAltKeyDown); - document.addEventListener("keyup", handleAltKeyUp); } + function handleAltKeyUp(e) { + if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; + if (isAltPressed) { + e.preventDefault(); + } + isAltPressed = false; + } + + document.addEventListener("keydown", handleAltKeyDown); + document.addEventListener("keyup", handleAltKeyUp); + // Detect zoom level and update the pan speed. 
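As a compact summary of the Alt-key handling these zoom.js patches converge on (including the refinement a few patches further down): remember whether Alt was actually used as a canvas modifier, then swallow the bare Alt keyup so Firefox does not focus its menu bar. A standalone sketch with placeholder names and selector, not the webui code itself:

```javascript
// Minimal sketch of the Alt-menu suppression pattern; element and names are placeholders.
let interactedWithAltKey = false;
const canvas = document.querySelector("#example-canvas"); // hypothetical target element

canvas?.addEventListener("wheel", (e) => {
    if (e.altKey) {
        e.preventDefault();            // Alt+wheel is treated as a canvas hotkey here
        interactedWithAltKey = true;
    }
});

document.addEventListener("keyup", (e) => {
    if (e.key !== "Alt" || !interactedWithAltKey) return;
    e.preventDefault();                // keep Firefox from opening its main menu
    interactedWithAltKey = false;
});
```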
function updatePanPosition(movementX, movementY) { From eb2ea8df1de2aaaab8a1ed069b34ca7385e30af2 Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 16 Mar 2024 17:42:25 +0400 Subject: [PATCH 35/90] check e.key in up event --- .../canvas-zoom-and-pan/javascript/zoom.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index a575862df..dfd7b17a2 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -841,24 +841,24 @@ onUiLoaded(async() => { // Prevent firefox to open toolbar on pressing alt - let isAltPressed = false; + let wasAltPressed = false; function handleAltKeyDown(e) { if (!activeElement) return; if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; if (e.code === "AltLeft" || e.code === "AltRight") { - isAltPressed = true; + wasAltPressed = true; } else { - isAltPressed = false; + wasAltPressed = false; } } function handleAltKeyUp(e) { if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; - if (isAltPressed) { + if (wasAltPressed || (activeElement && e.key === "Alt")) { e.preventDefault(); } - isAltPressed = false; + wasAltPressed = false; } document.addEventListener("keydown", handleAltKeyDown); From 7598a92436be66f5ba63a9234ba8cdbcbc3cc662 Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 16 Mar 2024 17:49:05 +0400 Subject: [PATCH 36/90] use e.key instead of e.code --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index dfd7b17a2..c6b463fd4 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -846,7 +846,7 @@ onUiLoaded(async() => { function handleAltKeyDown(e) { if (!activeElement) return; if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; - if (e.code === "AltLeft" || e.code === "AltRight") { + if (e.key === "Alt") { wasAltPressed = true; } else { wasAltPressed = false; From c364b607764047cab27887bb78122fc302e2041b Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 15 Mar 2024 12:07:11 +0400 Subject: [PATCH 37/90] handle 0 wheel deltaX --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 64e7a638a..b0963f4fe 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -793,7 +793,7 @@ onUiLoaded(async() => { targetElement.addEventListener("wheel", e => { // change zoom level - const operation = e.deltaY > 0 ? "-" : "+"; + const operation = (e.deltaY || -e.wheelDelta) > 0 ? 
"-" : "+"; changeZoomLevel(operation, e); // Handle brush size adjustment with ctrl key pressed From 0283826179cd3aec923053877f0742919c52e166 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 16 Mar 2024 18:44:36 +0300 Subject: [PATCH 38/90] prevent make alt key from opening main menu if it's used for brush size also --- .../canvas-zoom-and-pan/javascript/zoom.js | 33 +++++++++---------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 6e61def21..ed2ef99b0 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -252,6 +252,7 @@ onUiLoaded(async() => { let isMoving = false; let mouseX, mouseY; let activeElement; + let interactedWithAltKey = false; const elements = Object.fromEntries( Object.keys(elementIDs).map(id => [ @@ -508,6 +509,10 @@ onUiLoaded(async() => { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { e.preventDefault(); + if(hotkeysConfig.canvas_hotkey_zoom === "Alt"){ + interactedWithAltKey = true; + } + let zoomPosX, zoomPosY; let delta = 0.2; if (elemData[elemId].zoomLevel > 7) { @@ -800,6 +805,10 @@ onUiLoaded(async() => { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { e.preventDefault(); + if(hotkeysConfig.canvas_hotkey_adjust === "Alt"){ + interactedWithAltKey = true; + } + // Increase or decrease brush size based on scroll direction adjustBrushSize(elemId, e.deltaY); } @@ -840,28 +849,16 @@ onUiLoaded(async() => { document.addEventListener("keyup", handleMoveKeyUp); - // Prevent firefox to open toolbar on pressing alt - let wasAltPressed = false; - - function handleAltKeyDown(e) { - if (!activeElement) return; - if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; - if (e.key === "Alt") { - wasAltPressed = true; - } else { - wasAltPressed = false; - } - } - + // Prevent firefox from opening main menu when alt is used as a hotkey for zoom or brush size function handleAltKeyUp(e) { - if (hotkeysConfig.canvas_hotkey_zoom !== "Alt") return; - if (wasAltPressed || (activeElement && e.key === "Alt")) { - e.preventDefault(); + if (e.key !== "Alt" || !interactedWithAltKey) { + return; } - wasAltPressed = false; + + e.preventDefault(); + interactedWithAltKey = false; } - document.addEventListener("keydown", handleAltKeyDown); document.addEventListener("keyup", handleAltKeyUp); From bf35c661834be65d10a1e77f6cecbf7e5e6a687e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 16 Mar 2024 18:45:19 +0300 Subject: [PATCH 39/90] fix for #15179 --- modules/upscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index 9d13ee993..4ffd428c6 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -20,7 +20,7 @@ class Upscaler: filter = None model = None user_path = None - scalers: list = [] + scalers: list tile = True def __init__(self, create_dirs=False): From 1792e193b1ad22727d9628dda9c5c6457fd9f294 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sat, 16 Mar 2024 23:52:29 +0800 Subject: [PATCH 40/90] Use correct implementation, fix device error --- extensions-builtin/Lora/network.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 183f8bd7c..30b979f59 100644 --- 
a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -153,7 +153,7 @@ class NetworkModule: self.scale = weights.w["scale"].item() if "scale" in weights.w else None self.dora_scale = weights.w.get("dora_scale", None) - self.dora_mean_dim = tuple(i for i in range(len(self.shape)) if i != 1) + self.dora_norm_dims = len(self.shape) - 1 def multiplier(self): if 'transformer' in self.sd_key[:20]: @@ -170,10 +170,22 @@ class NetworkModule: return 1.0 def apply_weight_decompose(self, updown, orig_weight): - orig_weight = orig_weight.to(updown) + # Match the device/dtype + orig_weight = orig_weight.to(updown.dtype) + dora_scale = self.dora_scale.to(device=orig_weight.device, dtype=updown.dtype) + updown = updown.to(orig_weight.device) + merged_scale1 = updown + orig_weight + merged_scale1_norm = ( + merged_scale1.transpose(0, 1) + .reshape(merged_scale1.shape[1], -1) + .norm(dim=1, keepdim=True) + .reshape(merged_scale1.shape[1], *[1] * self.dora_norm_dims) + .transpose(0, 1) + ) + dora_merged = ( - merged_scale1 / merged_scale1(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale + merged_scale1 * (dora_scale / merged_scale1_norm) ) final_updown = dora_merged - orig_weight return final_updown From 199c51d688c5770037cdb1d3521461902e5857c3 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 17 Mar 2024 00:00:07 +0800 Subject: [PATCH 41/90] linter --- extensions-builtin/Lora/network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 30b979f59..412864292 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -185,7 +185,7 @@ class NetworkModule: ) dora_merged = ( - merged_scale1 * (dora_scale / merged_scale1_norm) + merged_scale1 * (dora_scale / merged_scale1_norm) ) final_updown = dora_merged - orig_weight return final_updown From 3da13f0cc912acb37052ff4416f238e714bd2a57 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 16 Mar 2024 15:46:29 -0600 Subject: [PATCH 42/90] Fix dragging to/from firefox --- javascript/dragdrop.js | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 01ae6e4db..bb7be89f9 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -57,7 +57,7 @@ function eventHasFiles(e) { } function getEventUrl(e) { - return e?.dataTransfer?.getData('URL') || e?.dataTransfer?.getData('text/html')?.match(/(?:src|href)=["'](.*?)["']/)?.[1]; + return e.dataTransfer?.getData('text/uri-list') || e.dataTransfer?.getData('text/plain'); } function dragDropTargetIsPrompt(target) { @@ -96,13 +96,12 @@ window.document.addEventListener('drop', e => { const files = e.dataTransfer.files; const fileInput = imgParent.querySelector('input[type="file"]'); const urlInput = urlParent.querySelector('textarea'); - if (files && fileInput) { - fileInput.files = files; - fileInput.dispatchEvent(new Event('change')); - } if (url && urlInput) { urlInput.value = url; urlInput.dispatchEvent(new Event('input')); + } else if (files && fileInput) { + fileInput.files = files; + fileInput.dispatchEvent(new Event('change')); } } From 83a9dd82db511f8f7545d953d6c6baf2c2c9a7a3 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 16 Mar 2024 16:08:42 -0600 Subject: [PATCH 43/90] Download image client-side --- javascript/dragdrop.js | 22 +++++++++------------- modules/images.py | 35 +++++++++++------------------------ 
modules/ui_toprow.py | 8 -------- 3 files changed, 20 insertions(+), 45 deletions(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index bb7be89f9..86591aa24 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -56,10 +56,6 @@ function eventHasFiles(e) { return false; } -function getEventUrl(e) { - return e.dataTransfer?.getData('text/uri-list') || e.dataTransfer?.getData('text/plain'); -} - function dragDropTargetIsPrompt(target) { if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; @@ -78,9 +74,9 @@ window.document.addEventListener('dragover', e => { e.dataTransfer.dropEffect = 'copy'; }); -window.document.addEventListener('drop', e => { +window.document.addEventListener('drop', async e => { const target = e.composedPath()[0]; - const url = getEventUrl(e); + const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain'); if (!eventHasFiles(e) && !url) return; if (dragDropTargetIsPrompt(target)) { @@ -89,19 +85,19 @@ window.document.addEventListener('drop', e => { const isImg2img = get_tab_index('tabs') == 1; let prompt_image_target = isImg2img ? "img2img_prompt_image" : "txt2img_prompt_image"; - let prompt_url_target = isImg2img ? "img2img_prompt_url" : "txt2img_prompt_url"; const imgParent = gradioApp().getElementById(prompt_image_target); - const urlParent = gradioApp().getElementById(prompt_url_target); const files = e.dataTransfer.files; const fileInput = imgParent.querySelector('input[type="file"]'); - const urlInput = urlParent.querySelector('textarea'); - if (url && urlInput) { - urlInput.value = url; - urlInput.dispatchEvent(new Event('input')); - } else if (files && fileInput) { + if (eventHasFiles(e) && fileInput) { fileInput.files = files; fileInput.dispatchEvent(new Event('change')); + } else if (url) { + const request = await fetch(url); + const data = new DataTransfer(); + data.items.add(new File([await request.blob()], 'image.png')); + fileInput.files = data.files; + fileInput.dispatchEvent(new Event('change')); } } diff --git a/modules/images.py b/modules/images.py index 665dbc372..c50b2455d 100644 --- a/modules/images.py +++ b/modules/images.py @@ -772,31 +772,18 @@ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]} def image_data(data): import gradio as gr - if not data: - return gr.update(), None - - if isinstance(data, bytes): - try: - image = Image.open(io.BytesIO(data)) - textinfo, _ = read_info_from_image(image) - return textinfo, None - except Exception: - pass - - try: - text = data.decode('utf8') - assert len(text) < 10000 - return text, None - except Exception: - pass - - import requests try: - r = requests.get(data, timeout=5) - if r.status_code == 200: - image = Image.open(io.BytesIO(r.content)) - textinfo, _ = read_info_from_image(image) - return textinfo, None + image = read(io.BytesIO(data)) + textinfo, _ = read_info_from_image(image) + return textinfo, None + except Exception: + pass + + try: + text = data.decode('utf8') + assert len(text) < 10000 + return text, None + except Exception: pass diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index bce93da95..dc3c3aa38 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -82,7 +82,6 @@ class Toprow: with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]): self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, 
placeholder="Prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"]) self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False) - self.prompt_url = gr.Textbox(label="", elem_id=f"{self.id_part}_prompt_url", visible=False) with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]): self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt\n(Press Ctrl+Enter to generate, Alt+Enter to skip, Esc to interrupt)", elem_classes=["prompt"]) @@ -94,13 +93,6 @@ class Toprow: show_progress=False, ) - self.prompt_url.input( - fn=modules.images.image_data, - inputs=[self.prompt_url], - outputs=[self.prompt, self.prompt_url], - show_progress=False, - ) - def create_submit_box(self): with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box: self.submit_box = submit_box From 446cd5a58b22b6771a35696f4dfe4063f4998ebe Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 16 Mar 2024 20:19:12 -0400 Subject: [PATCH 44/90] dragdrop: add error handling for URLs --- javascript/dragdrop.js | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 86591aa24..0c0183564 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -93,11 +93,20 @@ window.document.addEventListener('drop', async e => { fileInput.files = files; fileInput.dispatchEvent(new Event('change')); } else if (url) { - const request = await fetch(url); - const data = new DataTransfer(); - data.items.add(new File([await request.blob()], 'image.png')); - fileInput.files = data.files; - fileInput.dispatchEvent(new Event('change')); + try { + const request = await fetch(url); + if (!request.ok) { + console.error('Error fetching URL:', url, request.status); + return; + } + const data = new DataTransfer(); + data.items.add(new File([await request.blob()], 'image.png')); + fileInput.files = data.files; + fileInput.dispatchEvent(new Event('change')); + } catch (error) { + console.error('Error fetching URL:', url, error); + return; + } } } From 93c7b9d7fc4555b20b7c4d06d8acf02c697d22cb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 07:02:31 +0300 Subject: [PATCH 45/90] linter for #15262 --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index ed2ef99b0..97421178c 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -509,7 +509,7 @@ onUiLoaded(async() => { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { e.preventDefault(); - if(hotkeysConfig.canvas_hotkey_zoom === "Alt"){ + if (hotkeysConfig.canvas_hotkey_zoom === "Alt") { interactedWithAltKey = true; } @@ -805,7 +805,7 @@ onUiLoaded(async() => { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { e.preventDefault(); - if(hotkeysConfig.canvas_hotkey_adjust === "Alt"){ + if (hotkeysConfig.canvas_hotkey_adjust === "Alt") { interactedWithAltKey = true; } From 
e9b8a89b3c775b55ffdd4fc43acbeba02bb0c7cb Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 09:29:11 +0400 Subject: [PATCH 46/90] allow use zoom.js outside webui context --- .../canvas-zoom-and-pan/javascript/zoom.js | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 97421178c..24b7793d9 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -29,6 +29,7 @@ onUiLoaded(async() => { }); function getActiveTab(elements, all = false) { + if (!elements.img2imgTabs) return null; const tabs = elements.img2imgTabs.querySelectorAll("button"); if (all) return tabs; @@ -43,6 +44,7 @@ onUiLoaded(async() => { // Get tab ID function getTabId(elements) { const activeTab = getActiveTab(elements); + if (!activeTab) return null; return tabNameToElementId[activeTab.innerText]; } @@ -366,9 +368,9 @@ onUiLoaded(async() => { // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui. function fixCanvas() { - const activeTab = getActiveTab(elements).textContent.trim(); + const activeTab = getActiveTab(elements)?.textContent.trim(); - if (activeTab !== "img2img") { + if (activeTab && activeTab !== "img2img") { const img = targetElement.querySelector(`${elemId} img`); if (img && img.style.display !== "none") { @@ -788,13 +790,15 @@ onUiLoaded(async() => { targetElement.addEventListener("mouseleave", handleMouseLeave); // Reset zoom when click on another tab - elements.img2imgTabs.addEventListener("click", resetZoom); - elements.img2imgTabs.addEventListener("click", () => { - // targetElement.style.width = ""; - if (parseInt(targetElement.style.width) > 865) { - setTimeout(fitToElement, 0); - } - }); + if (elements.img2imgTabs) { + elements.img2imgTabs.addEventListener("click", resetZoom); + elements.img2imgTabs.addEventListener("click", () => { + // targetElement.style.width = ""; + if (parseInt(targetElement.style.width) > 865) { + setTimeout(fitToElement, 0); + } + }); + } targetElement.addEventListener("wheel", e => { // change zoom level @@ -935,9 +939,9 @@ onUiLoaded(async() => { } - applyZoomAndPan(elementIDs.sketch, false); - applyZoomAndPan(elementIDs.inpaint, false); - applyZoomAndPan(elementIDs.inpaintSketch, false); + elementIDs.sketch && applyZoomAndPan(elementIDs.sketch, false); + elementIDs.inpaint && applyZoomAndPan(elementIDs.inpaint, false); + elementIDs.inpaintSketch && applyZoomAndPan(elementIDs.inpaintSketch, false); // Make the function global so that other extensions can take advantage of this solution const applyZoomAndPanIntegration = async(id, elementIDs) => { From 66355b47756e1e6e7e1acc5d7f515fe59fbb7cc6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 09:18:32 +0300 Subject: [PATCH 47/90] use diskcache library for caching --- .gitignore | 1 + modules/cache.py | 82 ++++++++++++++++++--------------------- requirements.txt | 1 + requirements_versions.txt | 1 + 4 files changed, 40 insertions(+), 45 deletions(-) diff --git a/.gitignore b/.gitignore index 6790e9ee7..519b4a53d 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,4 @@ notification.mp3 /package-lock.json /.coverage* /test/test_outputs +/cache diff --git a/modules/cache.py b/modules/cache.py index 
a9822a0eb..9df248d7b 100644 --- a/modules/cache.py +++ b/modules/cache.py @@ -2,48 +2,47 @@ import json import os import os.path import threading -import time + +import diskcache +import tqdm from modules.paths import data_path, script_path cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json")) -cache_data = None +cache_dir = os.environ.get('SD_WEBUI_CACHE_DIR', os.path.join(data_path, "cache")) +caches = {} cache_lock = threading.Lock() -dump_cache_after = None -dump_cache_thread = None - def dump_cache(): - """ - Marks cache for writing to disk. 5 seconds after no one else flags the cache for writing, it is written. - """ + """old function for dumping cache to disk; does nothing since diskcache.""" - global dump_cache_after - global dump_cache_thread + pass - def thread_func(): - global dump_cache_after - global dump_cache_thread - while dump_cache_after is not None and time.time() < dump_cache_after: - time.sleep(1) +def convert_old_cached_data(): + try: + with open(cache_filename, "r", encoding="utf8") as file: + data = json.load(file) + except FileNotFoundError: + return + except Exception: + os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json")) + print('[ERROR] issue occurred while trying to read cache.json; old cache has been moved to tmp/cache.json') + return - with cache_lock: - cache_filename_tmp = cache_filename + "-" - with open(cache_filename_tmp, "w", encoding="utf8") as file: - json.dump(cache_data, file, indent=4, ensure_ascii=False) + total_count = sum(len(keyvalues) for keyvalues in data.values()) - os.replace(cache_filename_tmp, cache_filename) + with tqdm.tqdm(total=total_count, desc="converting cache") as progress: + for subsection, keyvalues in data.items(): + cache_obj = caches.get(subsection) + if cache_obj is None: + cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection)) + caches[subsection] = cache_obj - dump_cache_after = None - dump_cache_thread = None - - with cache_lock: - dump_cache_after = time.time() + 5 - if dump_cache_thread is None: - dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func) - dump_cache_thread.start() + for key, value in keyvalues.items(): + cache_obj[key] = value + progress.update(1) def cache(subsection): @@ -54,28 +53,21 @@ def cache(subsection): subsection (str): The subsection identifier for the cache. Returns: - dict: The cache data for the specified subsection. + diskcache.Cache: The cache data for the specified subsection. 
""" - global cache_data - - if cache_data is None: + cache_obj = caches.get(subsection) + if not cache_obj: with cache_lock: - if cache_data is None: - try: - with open(cache_filename, "r", encoding="utf8") as file: - cache_data = json.load(file) - except FileNotFoundError: - cache_data = {} - except Exception: - os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json")) - print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache') - cache_data = {} + if not os.path.exists(cache_dir) and os.path.isfile(cache_filename): + convert_old_cached_data() - s = cache_data.get(subsection, {}) - cache_data[subsection] = s + cache_obj = caches.get(subsection) + if not cache_obj: + cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection)) + caches[subsection] = cache_obj - return s + return cache_obj def cached_data_for_file(subsection, title, filename, func): diff --git a/requirements.txt b/requirements.txt index 731a1be7d..8699c02be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ accelerate blendmodes clean-fid +diskcache einops facexlib fastapi>=0.90.1 diff --git a/requirements_versions.txt b/requirements_versions.txt index 5e30b5ea1..87aae9136 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -3,6 +3,7 @@ Pillow==9.5.0 accelerate==0.21.0 blendmodes==2022 clean-fid==0.1.35 +diskcache==5.6.3 einops==0.4.1 facexlib==0.3.0 fastapi==0.94.0 From c3f75d1d854332c5ddb8524886e9c0ff22a28ea4 Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 10:30:11 +0400 Subject: [PATCH 48/90] little fixes zoom.js --- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index 24b7793d9..7807f7f61 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -280,7 +280,7 @@ onUiLoaded(async() => { const targetElement = gradioApp().querySelector(elemId); if (!targetElement) { - console.log("Element not found"); + console.log("Element not found", elemId); return; } @@ -939,9 +939,9 @@ onUiLoaded(async() => { } - elementIDs.sketch && applyZoomAndPan(elementIDs.sketch, false); - elementIDs.inpaint && applyZoomAndPan(elementIDs.inpaint, false); - elementIDs.inpaintSketch && applyZoomAndPan(elementIDs.inpaintSketch, false); + applyZoomAndPan(elementIDs.sketch, false); + applyZoomAndPan(elementIDs.inpaint, false); + applyZoomAndPan(elementIDs.inpaintSketch, false); // Make the function global so that other extensions can take advantage of this solution const applyZoomAndPanIntegration = async(id, elementIDs) => { From b1cd0189bc30ed7c8dca6fb7bc3d248a056f3e15 Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 17 Mar 2024 12:51:40 +0400 Subject: [PATCH 49/90] allow variants for extension name in metadata.ini --- modules/extensions.py | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 88a389388..6a3c6c7ee 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -10,6 +10,10 @@ from modules import shared, errors, cache, scripts from modules.gitpython_hack import Repo from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 +extensions: list[Extension] = [] 
+extension_paths: dict[str, Extension] = {} +loaded_extensions: dict[str, Exception] = {} + os.makedirs(extensions_dir, exist_ok=True) @@ -50,7 +54,7 @@ class ExtensionMetadata: self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name) self.canonical_name = canonical_name.lower().strip() - self.requires = self.get_script_requirements("Requires", "Extension") + self.requires = None def get_script_requirements(self, field, section, extra_section=None): """reads a list of requirements from the config; field is the name of the field in the ini file, @@ -62,7 +66,33 @@ class ExtensionMetadata: if extra_section: x = x + ', ' + self.config.get(extra_section, field, fallback='') - return self.parse_list(x.lower()) + tmp_list = self.parse_list(x.lower()) + + if len(tmp_list) >= 3: + names_variants = [] + i = 0 + while i < len(tmp_list) - 2: + if tmp_list[i] != "|": + names_variants.append([tmp_list[i]]) + i += 1 + else: + names_variants[-1].append(tmp_list[i + 1]) + i += 2 + while i < len(tmp_list): + names_variants.append([tmp_list[i]]) + i += 1 + + result_list = [] + + for name_variants in names_variants: + for variant in name_variants: + if variant in loaded_extensions.keys(): + break + result_list.append(variant) + else: + result_list = tmp_list + + return result_list def parse_list(self, text): """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])""" @@ -213,6 +243,7 @@ class Extension: def list_extensions(): extensions.clear() extension_paths.clear() + loaded_extensions.clear() if shared.cmd_opts.disable_all_extensions: print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") @@ -223,7 +254,6 @@ def list_extensions(): elif shared.opts.disable_all_extensions == "extra": print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") - loaded_extensions = {} # scan through extensions directory and load metadata for dirname in [extensions_builtin_dir, extensions_dir]: @@ -250,6 +280,9 @@ def list_extensions(): extension_paths[extension.path] = extension loaded_extensions[canonical_name] = extension + for extension in extensions: + extension.metadata.requires = extension.metadata.get_script_requirements("Requires", "Extension") + # check for requirements for extension in extensions: if not extension.enabled: @@ -279,6 +312,3 @@ def find_extension(filename): return None - -extensions: list[Extension] = [] -extension_paths: dict[str, Extension] = {} From 908d522057b9e72ea8b3350112142956d10a22cf Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 11:12:37 +0300 Subject: [PATCH 50/90] update ruff to 0.3.3 --- .github/workflows/on_pull_request.yaml | 2 +- pyproject.toml | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index c595b80aa..9326c6a45 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -20,7 +20,7 @@ jobs: # not to have GHA download an (at the time of writing) 4 GB cache # of PyTorch and other dependencies. - name: Install Ruff - run: pip install ruff==0.1.6 + run: pip install ruff==0.3.3 - name: Run Ruff run: ruff . 
lint-js: diff --git a/pyproject.toml b/pyproject.toml index d03036e7d..10ebc84b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,6 +2,8 @@ target-version = "py39" +[tool.ruff.lint] + extend-select = [ "B", "C", @@ -25,10 +27,10 @@ ignore = [ "W605", # invalid escape sequence, messes with some docstrings ] -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "webui.py" = ["E402"] # Module level import not at top of file -[tool.ruff.flake8-bugbear] +[tool.ruff.lint.flake8-bugbear] # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"] From 79cbc92abfee5e2c8b9fcf8eab3e45e9523af310 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 13:30:20 +0300 Subject: [PATCH 51/90] change code for variant requirements in metadata.ini --- modules/extensions.py | 32 +++++++------------------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 6a3c6c7ee..6549af7fc 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -66,33 +66,15 @@ class ExtensionMetadata: if extra_section: x = x + ', ' + self.config.get(extra_section, field, fallback='') - tmp_list = self.parse_list(x.lower()) + listed_requirements = self.parse_list(x.lower()) + res = [] - if len(tmp_list) >= 3: - names_variants = [] - i = 0 - while i < len(tmp_list) - 2: - if tmp_list[i] != "|": - names_variants.append([tmp_list[i]]) - i += 1 - else: - names_variants[-1].append(tmp_list[i + 1]) - i += 2 - while i < len(tmp_list): - names_variants.append([tmp_list[i]]) - i += 1 + for requirement in listed_requirements: + loaded_requirements = (x for x in requirement.split("|") if x in loaded_extensions) + relevant_requirement = next(loaded_requirements, listed_requirements[0]) + res.append(relevant_requirement) - result_list = [] - - for name_variants in names_variants: - for variant in name_variants: - if variant in loaded_extensions.keys(): - break - result_list.append(variant) - else: - result_list = tmp_list - - return result_list + return res def parse_list(self, text): """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])""" From daa1b33247e58d775cea46ea13079d4fd14732d5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 18:16:12 +0300 Subject: [PATCH 52/90] make reloading UI scripts optional when doing Reload UI, and off by default --- modules/initialize.py | 2 +- modules/shared_options.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/initialize.py b/modules/initialize.py index 08ad4c0b0..6c9c2388f 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -109,7 +109,7 @@ def initialize_rest(*, reload_script_modules=False): with startup_timer.subcategory("load scripts"): scripts.load_scripts() - if reload_script_modules: + if reload_script_modules and shared.opts.enable_reloading_ui_scripts: for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: importlib.reload(module) startup_timer.record("reload script modules") diff --git a/modules/shared_options.py b/modules/shared_options.py index fc9f13d6f..29f98de31 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -315,6 +315,8 @@ options_templates.update(options_section(('ui', "User interface", "ui"), { "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), 
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), + "enable_reloading_ui_scripts": OptionInfo(False, "Reload UI scripts when using Reload UI option").info("useful for developing: if you make changes to UI scripts code, it is applied when the UI is reloded."), + })) From 611faaddef098468e2c6daa06a9d1d81393f5641 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 17 Mar 2024 23:19:24 +0300 Subject: [PATCH 53/90] change the default name for callback from None to "unnamed" --- modules/script_callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 2cd65e832..d5a97ecff 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -121,7 +121,7 @@ class BeforeTokenCounterParams: class ScriptCallback: script: str callback: any - name: str = None + name: str = "unnamed" def add_callback(callbacks, fun, *, name=None, category='unknown', filename=None): From df4da02ab00b24de0f3d1cd03665af1fc18984ca Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sun, 17 Mar 2024 20:25:25 +0000 Subject: [PATCH 54/90] Tweak diskcache limits --- modules/cache.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/cache.py b/modules/cache.py index 9df248d7b..f4e5f702b 100644 --- a/modules/cache.py +++ b/modules/cache.py @@ -20,6 +20,14 @@ def dump_cache(): pass +def make_cache(subsection: str) -> diskcache.Cache: + return diskcache.Cache( + os.path.join(cache_dir, subsection), + size_limit=2**32, # 4 GB, culling oldest first + disk_min_file_size=2**18, # keep up to 256KB in Sqlite + ) + + def convert_old_cached_data(): try: with open(cache_filename, "r", encoding="utf8") as file: @@ -37,7 +45,7 @@ def convert_old_cached_data(): for subsection, keyvalues in data.items(): cache_obj = caches.get(subsection) if cache_obj is None: - cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection)) + cache_obj = make_cache(subsection) caches[subsection] = cache_obj for key, value in keyvalues.items(): @@ -64,7 +72,7 @@ def cache(subsection): cache_obj = caches.get(subsection) if not cache_obj: - cache_obj = diskcache.Cache(os.path.join(cache_dir, subsection)) + cache_obj = make_cache(subsection) caches[subsection] = cache_obj return cache_obj From 2a6054f83657d5b55a9f2151cd9b71ef6491fc09 Mon Sep 17 00:00:00 2001 From: Dalton Date: Sun, 17 Mar 2024 22:37:19 -0400 Subject: [PATCH 55/90] Update sd_hijack_ddpm_v1.py --- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 04adc5eb2..21e6c61c8 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -14,7 +14,7 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only +from pytorch_lightning.utilities.rank_zero import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma From 51cb20ec39e1ecb40ee8f35bbacf4ecf5c42d1f4 Mon Sep 17 00:00:00 2001 From: Dalton Date: Sun, 17 Mar 2024 22:45:31 -0400 Subject: [PATCH 56/90] Update 
ddpm_edit.py --- modules/models/diffusion/ddpm_edit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 6db340da4..526560059 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -19,7 +19,7 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only +from pytorch_lightning.utilities.rank_zero import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma From c4664b5a9c2f2302dae8be1c1daab94ad8a80001 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 18 Mar 2024 08:00:30 +0300 Subject: [PATCH 57/90] fix for listing wrong requirements for extensions --- modules/extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extensions.py b/modules/extensions.py index 6549af7fc..5ad934b4d 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -71,7 +71,7 @@ class ExtensionMetadata: for requirement in listed_requirements: loaded_requirements = (x for x in requirement.split("|") if x in loaded_extensions) - relevant_requirement = next(loaded_requirements, listed_requirements[0]) + relevant_requirement = next(loaded_requirements, requirement) res.append(relevant_requirement) return res From e9d4da7b567b37f8ad0b5f1d1cc00b7f83aff744 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 19 Mar 2024 00:54:56 +0900 Subject: [PATCH 58/90] restore outputs path output -> outputs --- modules/paths_internal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 6058b0cde..cf9da45ab 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -32,6 +32,6 @@ models_path = os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") config_states_dir = os.path.join(script_path, "config_states") -default_output_dir = os.path.join(data_path, "output") +default_output_dir = os.path.join(data_path, "outputs") roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf') From 3fa1ebed6295f95d56e0d4b7d4f83f4128e2d78a Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Mon, 18 Mar 2024 21:31:14 -0600 Subject: [PATCH 59/90] Escape btn_copy_path filename --- modules/ui_extra_networks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 34c46ed40..bfb66a709 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -10,6 +10,7 @@ from modules.images import read_info_from_image, save_image_with_geninfo import gradio as gr import json import html +import re from fastapi.exceptions import HTTPException from modules.infotext_utils import image_from_url_text @@ -236,7 +237,7 @@ class ExtraNetworksPage: ) onclick = html.escape(onclick) - btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": item["filename"]}) + btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": re.sub(r"[\\\"']", r"\\\g<0>", item["filename"])}) btn_metadata = "" metadata = item.get("metadata") if metadata: From c4a00affc501392677d53b76f282d8fae8e43fd1 Mon Sep 17 
00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Tue, 19 Mar 2024 08:10:27 +0300 Subject: [PATCH 60/90] use existing quote_js function for #15316 --- html/extra-networks-copy-path-button.html | 4 ++-- modules/ui_extra_networks.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/html/extra-networks-copy-path-button.html b/html/extra-networks-copy-path-button.html index 8083bb033..8388e3198 100644 --- a/html/extra-networks-copy-path-button.html +++ b/html/extra-networks-copy-path-button.html @@ -1,5 +1,5 @@
+     onclick='extraNetworksCopyCardPath(event, {filename})'
+     data-clipboard-text={filename}>
\ No newline at end of file diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index f30ce004b..5436d52c1 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -10,7 +10,6 @@ from modules.images import read_info_from_image, save_image_with_geninfo import gradio as gr import json import html -import re from fastapi.exceptions import HTTPException from modules.infotext_utils import image_from_url_text @@ -152,6 +151,7 @@ def quote_js(s): s = s.replace('"', '\\"') return f'"{s}"' + class ExtraNetworksPage: def __init__(self, title): self.title = title @@ -239,7 +239,7 @@ class ExtraNetworksPage: ) onclick = html.escape(onclick) - btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": re.sub(r"[\\\"']", r"\\\g<0>", item["filename"])}) + btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": quote_js(item["filename"])}) btn_metadata = "" metadata = item.get("metadata") if metadata: From a6b5a513f9b84384b630cc26f5e7ceea4b64897d Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 19 Mar 2024 20:05:54 +0800 Subject: [PATCH 61/90] Implementation for sgm_uniform branch --- modules/sd_samplers_custom_schedulers.py | 12 ++++++++++++ modules/sd_samplers_kdiffusion.py | 9 ++++++++- modules/shared_options.py | 2 +- 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 modules/sd_samplers_custom_schedulers.py diff --git a/modules/sd_samplers_custom_schedulers.py b/modules/sd_samplers_custom_schedulers.py new file mode 100644 index 000000000..78d6a2cd6 --- /dev/null +++ b/modules/sd_samplers_custom_schedulers.py @@ -0,0 +1,12 @@ +import torch + + +def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): + start = inner_model.sigma_to_t(torch.tensor(sigma_max)) + end = inner_model.sigma_to_t(torch.tensor(sigma_min)) + sigs = [ + inner_model.t_to_sigma(ts) + for ts in torch.linspace(start, end, n)[:-1] + ] + sigs += [0.0] + return torch.FloatTensor(sigs).to(device) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 337106c02..516552a1c 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -3,6 +3,7 @@ import inspect import k_diffusion.sampling from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser from modules.sd_samplers_cfg_denoiser import CFGDenoiser # noqa: F401 +from modules.sd_samplers_custom_schedulers import sgm_uniform from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback from modules.shared import opts @@ -62,7 +63,8 @@ k_diffusion_scheduler = { 'Automatic': None, 'karras': k_diffusion.sampling.get_sigmas_karras, 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential + 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential, + 'sgm_uniform' : sgm_uniform, } @@ -121,6 +123,11 @@ class KDiffusionSampler(sd_samplers_common.Sampler): if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: sigmas_kwargs['rho'] = opts.rho p.extra_generation_params["Schedule rho"] = opts.rho + if opts.k_sched_type == 'sgm_uniform': + # Ensure the "step" will be target step + 1 + steps += 1 if not discard_next_to_last_sigma else 0 + sigmas_kwargs['inner_model'] = self.model_wrap + sigmas_kwargs.pop('rho', None) sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) elif self.config is not None and self.config.options.get('scheduler', None) 
== 'karras': diff --git a/modules/shared_options.py b/modules/shared_options.py index 29f98de31..84c9b2247 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -368,7 +368,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), + 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential", "sgm_uniform"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule min sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule max sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), From 61f321756fd3f0bb6310161a2022c9a86a71c2d1 Mon Sep 17 00:00:00 2001 From: Dalton Date: Tue, 19 Mar 2024 14:44:31 -0400 Subject: [PATCH 62/90] Update ddpm_edit.py --- modules/models/diffusion/ddpm_edit.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 526560059..120674238 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -9,6 +9,7 @@ https://github.com/CompVis/taming-transformers # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). # See more details in LICENSE. 
+import sys import torch import torch.nn as nn import numpy as np @@ -19,6 +20,8 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid +import pytorch_lightning.utilities.rank_zero +sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] from pytorch_lightning.utilities.rank_zero import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config From 86276832e0204aebb6353b22dcce1c832a2ae865 Mon Sep 17 00:00:00 2001 From: Dalton Date: Tue, 19 Mar 2024 14:45:07 -0400 Subject: [PATCH 63/90] Update sd_hijack_ddpm_v1.py --- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 21e6c61c8..594dbc776 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -4,6 +4,7 @@ # Some models such as LDSR require VQ to work correctly # The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module +import sys import torch import torch.nn as nn import numpy as np @@ -14,6 +15,8 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid +import pytorch_lightning.utilities.rank_zero +sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] from pytorch_lightning.utilities.rank_zero import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config From 8f450321fef86d904626a03a4e38f1719e225e11 Mon Sep 17 00:00:00 2001 From: Dalton Date: Tue, 19 Mar 2024 14:53:30 -0400 Subject: [PATCH 64/90] Formatting ddpm_edit --- modules/models/diffusion/ddpm_edit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 120674238..935d3564f 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -14,13 +14,13 @@ import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl +import pytorch_lightning.utilities.rank_zero from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -import pytorch_lightning.utilities.rank_zero sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] from pytorch_lightning.utilities.rank_zero import rank_zero_only From 49779413aafecea511cd01b3b253643ff42998ca Mon Sep 17 00:00:00 2001 From: Dalton Date: Tue, 19 Mar 2024 14:54:06 -0400 Subject: [PATCH 65/90] Formatting sd_hijack_ddpm_v1.py --- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 594dbc776..cd54ab4b1 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -9,13 +9,13 @@ import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl +import pytorch_lightning.utilities.rank_zero from torch.optim.lr_scheduler import LambdaLR 
from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -import pytorch_lightning.utilities.rank_zero sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] from pytorch_lightning.utilities.rank_zero import rank_zero_only From d7f48472ccb279c13c5f5d67144b888083feb978 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 19 Mar 2024 18:50:25 -0600 Subject: [PATCH 66/90] Fix extra networks buttons when filename contains an apostrophe --- html/extra-networks-copy-path-button.html | 2 +- html/extra-networks-edit-item-button.html | 2 +- html/extra-networks-metadata-button.html | 2 +- javascript/extraNetworks.js | 11 +++++++---- modules/ui_extra_networks.py | 2 -- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/html/extra-networks-copy-path-button.html b/html/extra-networks-copy-path-button.html index 8083bb033..50304b42d 100644 --- a/html/extra-networks-copy-path-button.html +++ b/html/extra-networks-copy-path-button.html @@ -1,5 +1,5 @@
\ No newline at end of file
diff --git a/html/extra-networks-edit-item-button.html b/html/extra-networks-edit-item-button.html
index 0fe43082a..fd728600f 100644
--- a/html/extra-networks-edit-item-button.html
+++ b/html/extra-networks-edit-item-button.html
@@ -1,4 +1,4 @@
+ onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}')">
\ No newline at end of file diff --git a/html/extra-networks-metadata-button.html b/html/extra-networks-metadata-button.html index 285b5b3b6..4ef013bc0 100644 --- a/html/extra-networks-metadata-button.html +++ b/html/extra-networks-metadata-button.html @@ -1,4 +1,4 @@ \ No newline at end of file diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index d5855fe96..655193eb7 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -543,16 +543,18 @@ function requestGet(url, data, handler, errorHandler) { xhr.send(js); } -function extraNetworksCopyCardPath(event, path) { - navigator.clipboard.writeText(path); +function extraNetworksCopyCardPath(event) { + navigator.clipboard.writeText(event.target.getAttribute("data-clipboard-text")); event.stopPropagation(); } -function extraNetworksRequestMetadata(event, extraPage, cardName) { +function extraNetworksRequestMetadata(event, extraPage) { var showError = function() { extraNetworksShowMetadata("there was an error getting metadata"); }; + var cardName = event.target.parentElement.parentElement.getAttribute("data-name"); + requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) { if (data && data.metadata) { extraNetworksShowMetadata(data.metadata); @@ -566,7 +568,7 @@ function extraNetworksRequestMetadata(event, extraPage, cardName) { var extraPageUserMetadataEditors = {}; -function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) { +function extraNetworksEditUserMetadata(event, tabname, extraPage) { var id = tabname + '_' + extraPage + '_edit_user_metadata'; var editor = extraPageUserMetadataEditors[id]; @@ -578,6 +580,7 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) { extraPageUserMetadataEditors[id] = editor; } + var cardName = event.target.parentElement.parentElement.getAttribute("data-name"); editor.nameTextarea.value = cardName; updateInput(editor.nameTextarea); diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 34c46ed40..afb95b516 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -243,14 +243,12 @@ class ExtraNetworksPage: btn_metadata = self.btn_metadata_tpl.format( **{ "extra_networks_tabname": self.extra_networks_tabname, - "name": html.escape(item["name"]), } ) btn_edit_item = self.btn_edit_item_tpl.format( **{ "tabname": tabname, "extra_networks_tabname": self.extra_networks_tabname, - "name": html.escape(item["name"]), } ) From b5c33341a18f9dc3e20d06b56540af330fafa285 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 19 Mar 2024 19:06:56 -0600 Subject: [PATCH 67/90] Don't use quote_js on filename --- modules/ui_extra_networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 741a4e143..6a206af07 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -239,7 +239,7 @@ class ExtraNetworksPage: ) onclick = html.escape(onclick) - btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": quote_js(item["filename"])}) + btn_copy_path = self.btn_copy_path_tpl.format(**{"filename": item["filename"]}) btn_metadata = "" metadata = item.get("metadata") if metadata: From 25cd53d77561fda3dbfd6c280f0d2852cb162bda Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 20 Mar 2024 09:17:11 +0300 Subject: [PATCH 68/90] scheduler selection in main UI --- modules/img2img.py | 4 +- modules/processing.py | 2 + 
modules/processing_scripts/sampler.py | 79 +++++++++++++++++++++ modules/scripts.py | 3 + modules/sd_samplers.py | 15 ++-- modules/sd_samplers_custom_schedulers.py | 12 ---- modules/sd_samplers_kdiffusion.py | 90 +++++++++--------------- modules/sd_schedulers.py | 38 ++++++++++ modules/shared_options.py | 1 - modules/txt2img.py | 4 +- modules/ui.py | 35 ++------- 11 files changed, 175 insertions(+), 108 deletions(-) create mode 100644 modules/processing_scripts/sampler.py delete mode 100644 modules/sd_samplers_custom_schedulers.py create mode 100644 modules/sd_schedulers.py diff --git a/modules/img2img.py b/modules/img2img.py index e7fb3ea3c..9e316d451 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -146,7 +146,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal return batch_results -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -193,10 +193,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s prompt=prompt, negative_prompt=negative_prompt, styles=prompt_styles, - sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, - steps=steps, cfg_scale=cfg_scale, width=width, height=height, diff --git a/modules/processing.py b/modules/processing.py index d6873a510..ac5541418 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -152,6 +152,7 @@ class StableDiffusionProcessing: seed_resize_from_w: int = -1 seed_enable_extras: bool = True sampler_name: str = None + scheduler: str = None batch_size: int = 1 n_iter: int = 1 steps: int = 50 @@ -721,6 +722,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, + "Schedule type": p.scheduler, "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else 
all_seeds[index], diff --git a/modules/processing_scripts/sampler.py b/modules/processing_scripts/sampler.py new file mode 100644 index 000000000..83b952550 --- /dev/null +++ b/modules/processing_scripts/sampler.py @@ -0,0 +1,79 @@ +import gradio as gr +import functools + +from modules import scripts, sd_samplers, sd_schedulers, shared +from modules.infotext_utils import PasteField +from modules.ui_components import FormRow, FormGroup + + +def get_sampler_from_infotext(d: dict): + return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[0] + + +def get_scheduler_from_infotext(d: dict): + return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[1] + + +@functools.cache +def get_sampler_and_scheduler(sampler_name, scheduler_name): + default_sampler = sd_samplers.samplers[0] + found_scheduler = sd_schedulers.schedulers_map.get(scheduler_name, sd_schedulers.schedulers[0]) + + name = sampler_name or default_sampler.name + + for scheduler in sd_schedulers.schedulers: + name_options = [scheduler.label, scheduler.name, *(scheduler.aliases or [])] + + for name_option in name_options: + if name.endswith(" " + name_option): + found_scheduler = scheduler + name = name[0:-(len(name_option) + 1)] + break + + sampler = sd_samplers.all_samplers_map.get(name, default_sampler) + + # revert back to Automatic if it's the default scheduler for the selected sampler + if sampler.options.get('scheduler', None) == found_scheduler.name: + found_scheduler = sd_schedulers.schedulers[0] + + return sampler.name, found_scheduler.label + + +class ScriptSampler(scripts.ScriptBuiltinUI): + section = "sampler" + + def __init__(self): + self.steps = None + self.sampler_name = None + self.scheduler = None + + def title(self): + return "Sampler" + + def ui(self, is_img2img): + sampler_names = [x.name for x in sd_samplers.visible_samplers()] + scheduler_names = [x.label for x in sd_schedulers.schedulers] + + if shared.opts.samplers_in_dropdown: + with FormRow(elem_id=f"sampler_selection_{self.tabname}"): + self.sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{self.tabname}_sampling", choices=sampler_names, value=sampler_names[0]) + self.scheduler = gr.Dropdown(label='Schedule type', elem_id=f"{self.tabname}_scheduler", choices=scheduler_names, value=scheduler_names[0]) + self.steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{self.tabname}_steps", label="Sampling steps", value=20) + else: + with FormGroup(elem_id=f"sampler_selection_{self.tabname}"): + self.steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{self.tabname}_steps", label="Sampling steps", value=20) + self.sampler_name = gr.Radio(label='Sampling method', elem_id=f"{self.tabname}_sampling", choices=sampler_names, value=sampler_names[0]) + self.scheduler = gr.Dropdown(label='Schedule type', elem_id=f"{self.tabname}_scheduler", choices=scheduler_names, value=scheduler_names[0]) + + self.infotext_fields = [ + PasteField(self.steps, "Steps", api="steps"), + PasteField(self.sampler_name, get_sampler_from_infotext, api="sampler_name"), + PasteField(self.scheduler, get_scheduler_from_infotext, api="scheduler"), + ] + + return self.steps, self.sampler_name, self.scheduler + + def setup(self, p, steps, sampler_name, scheduler): + p.steps = steps + p.sampler_name = sampler_name + p.scheduler = scheduler diff --git a/modules/scripts.py b/modules/scripts.py index 20710b37d..264503ca3 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -352,6 +352,9 @@ class ScriptBuiltinUI(Script): return 
f'{tabname}{item_id}' + def show(self, is_img2img): + return AlwaysVisible + current_basedir = paths.script_path diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index a58528a0b..1f50297e6 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,7 +1,10 @@ -from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared +from __future__ import annotations + +from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared, sd_samplers_common # imports for functions that previously were here and are used by other modules -from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 +samples_to_image_grid = sd_samplers_common.samples_to_image_grid +sample_to_image = sd_samplers_common.sample_to_image all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, @@ -10,8 +13,8 @@ all_samplers = [ ] all_samplers_map = {x.name: x for x in all_samplers} -samplers = [] -samplers_for_img2img = [] +samplers: list[sd_samplers_common.SamplerData] = [] +samplers_for_img2img: list[sd_samplers_common.SamplerData] = [] samplers_map = {} samplers_hidden = {} @@ -57,4 +60,8 @@ def visible_sampler_names(): return [x.name for x in samplers if x.name not in samplers_hidden] +def visible_samplers(): + return [x for x in samplers if x.name not in samplers_hidden] + + set_samplers() diff --git a/modules/sd_samplers_custom_schedulers.py b/modules/sd_samplers_custom_schedulers.py deleted file mode 100644 index 78d6a2cd6..000000000 --- a/modules/sd_samplers_custom_schedulers.py +++ /dev/null @@ -1,12 +0,0 @@ -import torch - - -def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): - start = inner_model.sigma_to_t(torch.tensor(sigma_max)) - end = inner_model.sigma_to_t(torch.tensor(sigma_min)) - sigs = [ - inner_model.t_to_sigma(ts) - for ts in torch.linspace(start, end, n)[:-1] - ] - sigs += [0.0] - return torch.FloatTensor(sigs).to(device) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 516552a1c..0db0e34a7 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,41 +1,28 @@ import torch import inspect import k_diffusion.sampling -from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser +from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser, sd_schedulers from modules.sd_samplers_cfg_denoiser import CFGDenoiser # noqa: F401 -from modules.sd_samplers_custom_schedulers import sgm_uniform from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback from modules.shared import opts import modules.shared as shared samplers_k_diffusion = [ - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}), - ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}), + ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {'scheduler': 'karras'}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}), + ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde'], {'scheduler': 'exponential', "brownian_noise": True}), + ('DPM++ 2M SDE Heun', 
'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun'], {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), + ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}), ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True, "second_order": True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}), - ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}), - ('DPM++ 2M SDE Heun', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun'], {"brownian_noise": True, "solver_type": "heun"}), - ('DPM++ 2M SDE Heun Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_ka'], {'scheduler': 'karras', "brownian_noise": True, "solver_type": "heun"}), - ('DPM++ 2M SDE Heun Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_heun_exp'], {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}), - ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde'], {'discard_next_to_last_sigma': True, "brownian_noise": True}), - ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "brownian_noise": True}), - ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', ['k_dpmpp_3m_sde_exp'], {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "second_order": True}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), ('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras', "second_order": True}), ] @@ -59,13 +46,7 @@ sampler_extra_params = { } k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion} -k_diffusion_scheduler = { - 'Automatic': None, - 'karras': k_diffusion.sampling.get_sigmas_karras, - 'exponential': k_diffusion.sampling.get_sigmas_exponential, - 'polyexponential': 
k_diffusion.sampling.get_sigmas_polyexponential, - 'sgm_uniform' : sgm_uniform, -} +k_diffusion_scheduler = {x.name: x.function for x in sd_schedulers.schedulers} class CFGDenoiserKDiffusion(sd_samplers_cfg_denoiser.CFGDenoiser): @@ -98,47 +79,44 @@ class KDiffusionSampler(sd_samplers_common.Sampler): steps += 1 if discard_next_to_last_sigma else 0 + scheduler_name = p.scheduler or 'Automatic' + if scheduler_name == 'Automatic': + scheduler_name = self.config.options.get('scheduler', None) + + scheduler = sd_schedulers.schedulers_map.get(scheduler_name) + + m_sigma_min, m_sigma_max = self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item() + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) + if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) - elif opts.k_sched_type != "Automatic": - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max) - sigmas_kwargs = { - 'sigma_min': sigma_min, - 'sigma_max': sigma_max, - } + elif scheduler is None or scheduler.function is None: + sigmas = self.model_wrap.get_sigmas(steps) + else: + sigmas_kwargs = {'sigma_min': sigma_min, 'sigma_max': sigma_max} - sigmas_func = k_diffusion_scheduler[opts.k_sched_type] - p.extra_generation_params["Schedule type"] = opts.k_sched_type + p.extra_generation_params["Schedule type"] = scheduler.label - if opts.sigma_min != m_sigma_min and opts.sigma_min != 0: + if opts.sigma_min != 0 and opts.sigma_min != m_sigma_min: sigmas_kwargs['sigma_min'] = opts.sigma_min p.extra_generation_params["Schedule min sigma"] = opts.sigma_min - if opts.sigma_max != m_sigma_max and opts.sigma_max != 0: + + if opts.sigma_max != 0 and opts.sigma_max != m_sigma_max: sigmas_kwargs['sigma_max'] = opts.sigma_max p.extra_generation_params["Schedule max sigma"] = opts.sigma_max - default_rho = 1. if opts.k_sched_type == "polyexponential" else 7. 
- - if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho: + if scheduler.default_rho != -1 and opts.rho != 0 and opts.rho != scheduler.default_rho: sigmas_kwargs['rho'] = opts.rho p.extra_generation_params["Schedule rho"] = opts.rho - if opts.k_sched_type == 'sgm_uniform': + + if scheduler.need_inner_model: + sigmas_kwargs['inner_model'] = self.model_wrap + + if scheduler.name == "sgm_uniform": # XXX check this # Ensure the "step" will be target step + 1 steps += 1 if not discard_next_to_last_sigma else 0 - sigmas_kwargs['inner_model'] = self.model_wrap - sigmas_kwargs.pop('rho', None) - sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - elif self.config is not None and self.config.options.get('scheduler', None) == 'exponential': - m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - sigmas = k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=m_sigma_min, sigma_max=m_sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) + sigmas = scheduler.function(n=steps, **sigmas_kwargs, device=shared.device) if discard_next_to_last_sigma: sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py new file mode 100644 index 000000000..c327c90bd --- /dev/null +++ b/modules/sd_schedulers.py @@ -0,0 +1,38 @@ +import dataclasses + +import torch + +import k_diffusion + + +@dataclasses.dataclass +class Scheduler: + name: str + label: str + function: any + + default_rho: float = -1 + need_inner_model: bool = False + aliases: list = None + + +def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): + start = inner_model.sigma_to_t(torch.tensor(sigma_max)) + end = inner_model.sigma_to_t(torch.tensor(sigma_min)) + sigs = [ + inner_model.t_to_sigma(ts) + for ts in torch.linspace(start, end, n)[:-1] + ] + sigs += [0.0] + return torch.FloatTensor(sigs).to(device) + + +schedulers = [ + Scheduler('automatic', 'Automatic', None), + Scheduler('karras', 'Karras', k_diffusion.sampling.get_sigmas_karras, default_rho=7.0), + Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential), + Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0), + Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]), +] + +schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}} diff --git a/modules/shared_options.py b/modules/shared_options.py index 84c9b2247..590ae6a69 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -368,7 +368,6 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'), 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; 
only applies to Euler, Heun, and DPM2"), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling'), - 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential", "sgm_uniform"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"), 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule min sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"), 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule max sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"), 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), diff --git a/modules/txt2img.py b/modules/txt2img.py index fc56b8a86..53fa0099d 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -11,7 +11,7 @@ from PIL import Image import gradio as gr -def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False): +def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False): override_settings = create_override_settings_dict(override_settings_texts) if force_enable_hr: @@ -24,10 +24,8 @@ def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, ne prompt=prompt, styles=prompt_styles, negative_prompt=negative_prompt, - sampler_name=sampler_name, batch_size=batch_size, n_iter=n_iter, - steps=steps, cfg_scale=cfg_scale, width=width, height=height, diff --git a/modules/ui.py b/modules/ui.py index 7b4341627..c964d5e22 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import gradio_extensons # noqa: F401 +from modules import gradio_extensons, sd_schedulers # noqa: F401 from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, scripts, sd_samplers, processing, ui_extra_networks, ui_toprow, launch_utils from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow from modules.paths import script_path @@ -229,19 +229,6 @@ def 
create_output_panel(tabname, outdir, toprow=None): return ui_common.create_output_panel(tabname, outdir, toprow) -def create_sampler_and_steps_selection(choices, tabname): - if opts.samplers_in_dropdown: - with FormRow(elem_id=f"sampler_selection_{tabname}"): - sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - else: - with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) - sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0]) - - return steps, sampler_name - - def ordered_ui_categories(): user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder_list)} @@ -295,9 +282,6 @@ def create_ui(): if category == "prompt": toprow.create_inline_toprow_prompts() - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") - elif category == "dimensions": with FormRow(): with gr.Column(elem_id="txt2img_column_size", scale=4): @@ -396,8 +380,6 @@ def create_ui(): toprow.prompt, toprow.negative_prompt, toprow.ui_styles.dropdown, - steps, - sampler_name, batch_count, batch_size, cfg_scale, @@ -461,8 +443,6 @@ def create_ui(): txt2img_paste_fields = [ PasteField(toprow.prompt, "Prompt", api="prompt"), PasteField(toprow.negative_prompt, "Negative prompt", api="negative_prompt"), - PasteField(steps, "Steps", api="steps"), - PasteField(sampler_name, "Sampler", api="sampler_name"), PasteField(cfg_scale, "CFG scale", api="cfg_scale"), PasteField(width, "Size-1", api="width"), PasteField(height, "Size-2", api="height"), @@ -488,11 +468,13 @@ def create_ui(): paste_button=toprow.paste, tabname="txt2img", source_text_component=toprow.prompt, source_image_component=None, )) + steps = scripts.scripts_txt2img.script('Sampler').steps + txt2img_preview_params = [ toprow.prompt, toprow.negative_prompt, steps, - sampler_name, + scripts.scripts_txt2img.script('Sampler').sampler_name, cfg_scale, scripts.scripts_txt2img.script('Seed').seed, width, @@ -623,9 +605,6 @@ def create_ui(): with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - if category == "sampler": - steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") - elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): @@ -754,8 +733,6 @@ def create_ui(): inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, - steps, - sampler_name, mask_blur, mask_alpha, inpainting_fill, @@ -840,6 +817,8 @@ def create_ui(): **interrogate_args, ) + steps = scripts.scripts_img2img.script('Sampler').steps + toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], 
outputs=[toprow.token_counter]) @@ -848,8 +827,6 @@ def create_ui(): img2img_paste_fields = [ (toprow.prompt, "Prompt"), (toprow.negative_prompt, "Negative prompt"), - (steps, "Steps"), - (sampler_name, "Sampler"), (cfg_scale, "CFG scale"), (image_cfg_scale, "Image CFG scale"), (width, "Size-1"), From 61f488302f37ad959eb5bb2eebed7e106c40c8b7 Mon Sep 17 00:00:00 2001 From: Art Gourieff <85128026+Gourieff@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:28:32 +0700 Subject: [PATCH 69/90] FIX: Allow PNG-RGBA for Extras Tab --- modules/postprocessing.py | 2 +- modules/ui_postprocessing.py | 2 +- scripts/postprocessing_codeformer.py | 2 +- scripts/postprocessing_gfpgan.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index f14882321..9afcfef86 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -66,7 +66,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if parameters: existing_pnginfo["parameters"] = parameters - initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB")) + initial_pp = scripts_postprocessing.PostprocessedImage(image_data) scripts.scripts_postproc.run(initial_pp, args) diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index 7261c2df8..dc08350d1 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -12,7 +12,7 @@ def create_ui(): with gr.Column(variant='compact'): with gr.Tabs(elem_id="mode_extras"): with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: - extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") + extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image", image_mode="RGBA") with gr.TabItem('Batch Process', id="batch_process", elem_id="extras_batch_process_tab") as tab_batch: image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") diff --git a/scripts/postprocessing_codeformer.py b/scripts/postprocessing_codeformer.py index e1e156ddc..53a0cc44c 100644 --- a/scripts/postprocessing_codeformer.py +++ b/scripts/postprocessing_codeformer.py @@ -25,7 +25,7 @@ class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing if codeformer_visibility == 0 or not enable: return - restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight) + restored_img = codeformer_model.codeformer.restore(np.array(pp.image.convert("RGB"), dtype=np.uint8), w=codeformer_weight) res = Image.fromarray(restored_img) if codeformer_visibility < 1.0: diff --git a/scripts/postprocessing_gfpgan.py b/scripts/postprocessing_gfpgan.py index 6e7566055..57e362399 100644 --- a/scripts/postprocessing_gfpgan.py +++ b/scripts/postprocessing_gfpgan.py @@ -22,7 +22,7 @@ class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing): if gfpgan_visibility == 0 or not enable: return - restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8)) + restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image.convert("RGB"), dtype=np.uint8)) res = Image.fromarray(restored_img) if gfpgan_visibility < 1.0: From 76f8436bfab829c40f6776540ff14f428b9a3557 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 20 Mar 2024 10:27:32 +0300 Subject: [PATCH 70/90] add Uniform scheduler --- modules/sd_schedulers.py | 5 
+++++ 1 file changed, 5 insertions(+) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index c327c90bd..2a7ab0924 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -16,6 +16,10 @@ class Scheduler: aliases: list = None +def uniform(n, sigma_min, sigma_max, inner_model, device): + return inner_model.get_sigmas(n) + + def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): start = inner_model.sigma_to_t(torch.tensor(sigma_max)) end = inner_model.sigma_to_t(torch.tensor(sigma_min)) @@ -29,6 +33,7 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): schedulers = [ Scheduler('automatic', 'Automatic', None), + Scheduler('uniform', 'Uniform', uniform, need_inner_model=True), Scheduler('karras', 'Karras', k_diffusion.sampling.get_sigmas_karras, default_rho=7.0), Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential), Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0), From ac9aa44cb8f91b18934f03c8ce8bb2061c61da6f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 20 Mar 2024 10:27:53 +0300 Subject: [PATCH 71/90] do not add 'Automatic' to infotext --- modules/processing.py | 2 +- modules/sd_samplers_kdiffusion.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index ac5541418..716329fae 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -722,7 +722,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, - "Schedule type": p.scheduler, + "Schedule type": None, "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0db0e34a7..04b2f7f0c 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -95,7 +95,8 @@ class KDiffusionSampler(sd_samplers_common.Sampler): else: sigmas_kwargs = {'sigma_min': sigma_min, 'sigma_max': sigma_max} - p.extra_generation_params["Schedule type"] = scheduler.label + if scheduler.label != 'Automatic': + p.extra_generation_params["Schedule type"] = scheduler.label if opts.sigma_min != 0 and opts.sigma_min != m_sigma_min: sigmas_kwargs['sigma_min'] = opts.sigma_min From 31306ce6721e6f1ae323da64767da91897b3a1c2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 20 Mar 2024 10:29:52 +0300 Subject: [PATCH 72/90] change the behavior of discard_next_to_last_sigma for sgm_uniform to match other schedulers --- modules/sd_samplers_kdiffusion.py | 4 ---- modules/sd_schedulers.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 04b2f7f0c..d053e48b1 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -113,10 +113,6 @@ class KDiffusionSampler(sd_samplers_common.Sampler): if scheduler.need_inner_model: sigmas_kwargs['inner_model'] = self.model_wrap - if scheduler.name == "sgm_uniform": # XXX check this - # Ensure the "step" will be target step + 1 - steps += 1 if not discard_next_to_last_sigma else 0 - sigmas = scheduler.function(n=steps, **sigmas_kwargs, device=shared.device) if discard_next_to_last_sigma: diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py 
index 2a7ab0924..75eb3ac03 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -25,7 +25,7 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): end = inner_model.sigma_to_t(torch.tensor(sigma_min)) sigs = [ inner_model.t_to_sigma(ts) - for ts in torch.linspace(start, end, n)[:-1] + for ts in torch.linspace(start, end, n + 1)[:-1] ] sigs += [0.0] return torch.FloatTensor(sigs).to(device) From 702edb288e53b9cf9d1727e0ca89f95102907c04 Mon Sep 17 00:00:00 2001 From: Art Gourieff <85128026+Gourieff@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:14:28 +0700 Subject: [PATCH 73/90] FIX: initial_pp RGBA right way --- modules/api/api.py | 2 +- modules/postprocessing.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 4e6560826..3e42dd7b1 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -99,7 +99,7 @@ def decode_base64_to_image(encoding): raise HTTPException(status_code=500, detail="Invalid encoded image") from e -def encode_pil_to_base64(image): +def encode_pil_to_base64(image: Image.Image): with io.BytesIO() as output_bytes: if isinstance(image, str): return image diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 9afcfef86..40cf866a4 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -66,7 +66,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if parameters: existing_pnginfo["parameters"] = parameters - initial_pp = scripts_postprocessing.PostprocessedImage(image_data) + initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGBA")) if image_data.mode == "RGBA" else scripts_postprocessing.PostprocessedImage(image_data.convert("RGB")) scripts.scripts_postproc.run(initial_pp, args) From 8ec890192141fec26265dadc0e50f1525eb87007 Mon Sep 17 00:00:00 2001 From: Art Gourieff <85128026+Gourieff@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:20:29 +0700 Subject: [PATCH 74/90] FIX: No specific type for 'image' arg Roll back --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 3e42dd7b1..4e6560826 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -99,7 +99,7 @@ def decode_base64_to_image(encoding): raise HTTPException(status_code=500, detail="Invalid encoded image") from e -def encode_pil_to_base64(image: Image.Image): +def encode_pil_to_base64(image): with io.BytesIO() as output_bytes: if isinstance(image, str): return image From 5fd9a40b92c572c357773018cd47b0a7d3b8f9c3 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:01:50 -0400 Subject: [PATCH 75/90] Revert sd_hijack_ddpm_v1.py --- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index cd54ab4b1..7944fb13c 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -16,8 +16,7 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] -from pytorch_lightning.utilities.rank_zero import rank_zero_only +from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, 
count_params, instantiate_from_config from ldm.modules.ema import LitEma From f010dfffb9b6021f8b4e4f03f22532f7edef7137 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:02:30 -0400 Subject: [PATCH 76/90] Revert ddpm_edit.py --- modules/models/diffusion/ddpm_edit.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 935d3564f..347117cb9 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -21,8 +21,7 @@ from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -sys.modules['pytorch_lightning.utilities.distributed'] = sys.modules['pytorch_lightning.utilities.rank_zero'] -from pytorch_lightning.utilities.rank_zero import rank_zero_only +from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma From b5b04912b523cfa7fd7e93026b1ca6c0d6456c3f Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:06:00 -0400 Subject: [PATCH 77/90] Include running pytorch lightning check --- modules/initialize.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/initialize.py b/modules/initialize.py index f7313ff4d..de92863f3 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -51,6 +51,7 @@ def check_versions(): def initialize(): from modules import initialize_util initialize_util.fix_torch_version() + initialize_util.fix_pytorch_lightning() initialize_util.fix_asyncio_event_loop_policy() initialize_util.validate_tls_options() initialize_util.configure_sigint_handler() From 4eb5e09873720c55003bea89d527f99e62f36cb0 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:28:40 -0400 Subject: [PATCH 78/90] Update initialize_util.py --- modules/initialize_util.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/initialize_util.py b/modules/initialize_util.py index b6767138d..63cea137f 100644 --- a/modules/initialize_util.py +++ b/modules/initialize_util.py @@ -24,6 +24,13 @@ def fix_torch_version(): torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) +def fix_pytorch_lightning(): + import pytorch_lightning + # Checks if pytorch_lightning.utilities.distributed already exists in the sys.modules cache + if 'pytorch_lightning.utilities.distributed' not in sys.modules: + # Lets the user know that the library was not found and then will set it to pytorch_lightning.utilities.rank_zero + print(f"Pytorch_lightning.distributed not found, attempting pytorch_lightning.rank_zero") + sys.modules["pytorch_lightning.utilities.distributed"] = pytorch_lightning.utilities.rank_zero def fix_asyncio_event_loop_policy(): """ From 4bc296332021cb07ba1fe449d72049604efd7a02 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:33:15 -0400 Subject: [PATCH 79/90] Remove unnecessary import --- modules/initialize_util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/initialize_util.py b/modules/initialize_util.py index 63cea137f..7a661476d 100644 --- a/modules/initialize_util.py +++ b/modules/initialize_util.py @@ -25,7 +25,6 @@ def fix_torch_version(): torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) def fix_pytorch_lightning(): - import pytorch_lightning # Checks if 
pytorch_lightning.utilities.distributed already exists in the sys.modules cache if 'pytorch_lightning.utilities.distributed' not in sys.modules: # Lets the user know that the library was not found and then will set it to pytorch_lightning.utilities.rank_zero From 41907b25f0baa62f773f37a73dc4e656a710bc8a Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:35:32 -0400 Subject: [PATCH 80/90] Cleanup sd_hijack_ddpm_v1.py Forgot some things to revert --- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 7944fb13c..04adc5eb2 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -4,12 +4,10 @@ # Some models such as LDSR require VQ to work correctly # The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module -import sys import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl -import pytorch_lightning.utilities.rank_zero from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager From 4e6e2574aba19a2a1b248601e33fad82fb881523 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:36:35 -0400 Subject: [PATCH 81/90] Cleanup ddpm_edit.py Fully reverts this time --- modules/models/diffusion/ddpm_edit.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 347117cb9..6db340da4 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -9,12 +9,10 @@ https://github.com/CompVis/taming-transformers # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). # See more details in LICENSE. 
-import sys import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl -import pytorch_lightning.utilities.rank_zero from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager From 32ba7575010a3fdd024cc4058650d81988d126f1 Mon Sep 17 00:00:00 2001 From: Dalton Date: Wed, 20 Mar 2024 23:55:04 -0400 Subject: [PATCH 82/90] Re-add import but after if check --- modules/initialize_util.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/initialize_util.py b/modules/initialize_util.py index 7a661476d..8abe78814 100644 --- a/modules/initialize_util.py +++ b/modules/initialize_util.py @@ -27,6 +27,7 @@ def fix_torch_version(): def fix_pytorch_lightning(): # Checks if pytorch_lightning.utilities.distributed already exists in the sys.modules cache if 'pytorch_lightning.utilities.distributed' not in sys.modules: + import pytorch_lightning # Lets the user know that the library was not found and then will set it to pytorch_lightning.utilities.rank_zero print(f"Pytorch_lightning.distributed not found, attempting pytorch_lightning.rank_zero") sys.modules["pytorch_lightning.utilities.distributed"] = pytorch_lightning.utilities.rank_zero From 5c5594ff1632ccab6c1f2ae7de3f6c69b1ab6f78 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 21 Mar 2024 07:09:40 +0300 Subject: [PATCH 83/90] linter --- modules/initialize_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/initialize_util.py b/modules/initialize_util.py index 8abe78814..79a72cb3a 100644 --- a/modules/initialize_util.py +++ b/modules/initialize_util.py @@ -29,7 +29,7 @@ def fix_pytorch_lightning(): if 'pytorch_lightning.utilities.distributed' not in sys.modules: import pytorch_lightning # Lets the user know that the library was not found and then will set it to pytorch_lightning.utilities.rank_zero - print(f"Pytorch_lightning.distributed not found, attempting pytorch_lightning.rank_zero") + print("Pytorch_lightning.distributed not found, attempting pytorch_lightning.rank_zero") sys.modules["pytorch_lightning.utilities.distributed"] = pytorch_lightning.utilities.rank_zero def fix_asyncio_event_loop_policy(): From 57727e554d8f87b4cf438390d6cb05a27d1734f5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 21 Mar 2024 07:22:27 +0300 Subject: [PATCH 84/90] make #15334 work without making copies of images --- modules/postprocessing.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 7685e9258..5a4e693a8 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -66,7 +66,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if parameters: existing_pnginfo["parameters"] = parameters - initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGBA")) if image_data.mode == "RGBA" else scripts_postprocessing.PostprocessedImage(image_data.convert("RGB")) + initial_pp = scripts_postprocessing.PostprocessedImage(image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB")) scripts.scripts_postproc.run(initial_pp, args) @@ -122,8 +122,6 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode != 2 or show_extras_results: outputs.append(pp.image) - image_data.close() - devices.torch_gc() shared.state.end() return outputs, ui_common.plaintext_to_html(infotext), '' From 
721c4309c26374983589bcaba2f401a4bf01e2c2 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 21 Mar 2024 16:29:51 +0400 Subject: [PATCH 85/90] escape brackets in lora random prompt generator --- extensions-builtin/Lora/ui_edit_user_metadata.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index 3160aecfa..7a07a544e 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -149,6 +149,8 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor) v = random.random() * max_count if count > v: + for x in "({[]})": + tag = tag.replace(x, '\\' + x) res.append(tag) return ", ".join(sorted(res)) From 2941e1f1f3cffc8e2024b359a53c3226c7114d0b Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Fri, 22 Mar 2024 11:12:59 +0200 Subject: [PATCH 86/90] Add Size as an XYZ Grid option --- scripts/xyz_grid.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 57ee47088..43965fc90 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -106,6 +106,17 @@ def apply_upscale_latent_space(p, x, xs): opts.data["use_scale_latent_for_hires_fix"] = False +def apply_size(p, x: str, xs) -> None: + try: + width, _, height = x.partition('x') + width = int(width.strip()) + height = int(height.strip()) + p.width = width + p.height = height + except ValueError: + print(f"Invalid size in XYZ plot: {x}") + + def find_vae(name: str): if name.lower() in ['auto', 'automatic']: return modules.sd_vae.unspecified @@ -271,6 +282,7 @@ axis_options = [ AxisOption("Refiner switch at", float, apply_field('refiner_switch_at')), AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]), AxisOption("FP8 mode", str, apply_override("fp8_storage"), cost=0.9, choices=lambda: ["Disable", "Enable for SDXL", "Enable"]), + AxisOption("Size", str, apply_size), ] From f3ca6a92adf4ffe37d1b6fbe4d9d365c8aee5d80 Mon Sep 17 00:00:00 2001 From: kaalibro Date: Sat, 23 Mar 2024 00:50:37 +0500 Subject: [PATCH 87/90] Fix for #15333 - Fix "X/Y/Z plot" not working with "Schedule type" - Fix "Schedule type" not being saved to "params.txt" --- modules/processing.py | 2 +- scripts/xyz_grid.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 716329fae..ac5541418 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -722,7 +722,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, - "Schedule type": None, + "Schedule type": p.scheduler, "CFG scale": p.cfg_scale, "Image CFG scale": getattr(p, 'image_cfg_scale', None), "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 57ee47088..6cc82b7f6 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -11,7 +11,7 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion, errors +from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_schedulers, errors from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img from modules.shared import opts, state import modules.shared as shared @@ -248,7 +248,7 @@ 
axis_options = [ AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), AxisOption("Sigma noise", float, apply_field("s_noise")), - AxisOption("Schedule type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)), + AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]), AxisOption("Schedule min sigma", float, apply_override("sigma_min")), AxisOption("Schedule max sigma", float, apply_override("sigma_max")), AxisOption("Schedule rho", float, apply_override("rho")), From c4402500c76ebad4a614c8167031b8d20a442b2f Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 24 Mar 2024 02:33:10 -0400 Subject: [PATCH 88/90] Support `ssmd_cover_images` --- extensions-builtin/Lora/network.py | 1 - .../Lora/ui_extra_networks_lora.py | 2 +- modules/ui_extra_networks.py | 42 +++++++++++++++++++ 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py index 412864292..20f8df3d4 100644 --- a/extensions-builtin/Lora/network.py +++ b/extensions-builtin/Lora/network.py @@ -29,7 +29,6 @@ class NetworkOnDisk: def read_metadata(): metadata = sd_models.read_metadata_from_safetensors(filename) - metadata.pop('ssmd_cover_images', None) # those are cover images, and they are too big to display in UI as text return metadata diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index 66d15dd05..b627f7dc2 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -31,7 +31,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): "name": name, "filename": lora_on_disk.filename, "shorthash": lora_on_disk.shorthash, - "preview": self.find_preview(path), + "preview": self.find_preview(path) or self.find_embedded_preview(path, name, lora_on_disk.metadata), "description": self.find_description(path), "search_terms": search_terms, "local_preview": f"{path}.{shared.opts.samples_format}", diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index f4627ce8d..087cb8a05 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,6 +1,8 @@ import functools import os.path import urllib.parse +from base64 import b64decode +from io import BytesIO from pathlib import Path from typing import Optional, Union from dataclasses import dataclass @@ -11,6 +13,7 @@ import gradio as gr import json import html from fastapi.exceptions import HTTPException +from PIL import Image from modules.infotext_utils import image_from_url_text @@ -108,6 +111,31 @@ def fetch_file(filename: str = ""): return FileResponse(filename, headers={"Accept-Ranges": "bytes"}) +def fetch_cover_images(page: str = "", item: str = "", index: int = 0): + from starlette.responses import Response + + page = next(iter([x for x in extra_pages if x.name == page]), None) + if page is None: + raise HTTPException(status_code=404, detail="File not found") + + metadata = page.metadata.get(item) + if metadata is None: + raise HTTPException(status_code=404, detail="File not found") + + cover_images = json.loads(metadata.get('ssmd_cover_images', {})) + image = cover_images[index] if index < len(cover_images) else None + if not image: + raise HTTPException(status_code=404, detail="File not found") + + try: + image = 
Image.open(BytesIO(b64decode(image))) + buffer = BytesIO() + image.save(buffer, format=image.format) + return Response(content=buffer.getvalue(), media_type=image.get_format_mimetype()) + except Exception as err: + raise ValueError(f"File cannot be fetched: {item}. Failed to load cover image.") from err + + def get_metadata(page: str = "", item: str = ""): from starlette.responses import JSONResponse @@ -119,6 +147,8 @@ def get_metadata(page: str = "", item: str = ""): if metadata is None: return JSONResponse({}) + metadata = {i:metadata[i] for i in metadata if i != 'ssmd_cover_images'} # those are cover images, and they are too big to display in UI as text + return JSONResponse({"metadata": json.dumps(metadata, indent=4, ensure_ascii=False)}) @@ -142,6 +172,7 @@ def get_single_card(page: str = "", tabname: str = "", name: str = ""): def add_pages_to_demo(app): app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"]) + app.add_api_route("/sd_extra_networks/cover-images", fetch_cover_images, methods=["GET"]) app.add_api_route("/sd_extra_networks/metadata", get_metadata, methods=["GET"]) app.add_api_route("/sd_extra_networks/get-single-card", get_single_card, methods=["GET"]) @@ -627,6 +658,17 @@ class ExtraNetworksPage: return None + def find_embedded_preview(self, path, name, metadata): + """ + Find if embedded preview exists in safetensors metadata and return endpoint for it. + """ + + file = f"{path}.safetensors" + if self.lister.exists(file) and 'ssmd_cover_images' in metadata and len(list(filter(None, json.loads(metadata['ssmd_cover_images'])))) > 0: + return f"./sd_extra_networks/cover-images?page={self.extra_networks_tabname}&item={name}" + + return None + def find_description(self, path): """ Find and read a description file for a given path (without extension). 
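Note on patch 88 above: the cover-images endpoint assumes that `ssmd_cover_images` in the safetensors metadata is a JSON-encoded list of base64-encoded images, possibly with empty entries. A minimal sketch of that decoding step on its own, using a hypothetical helper name (load_cover_image is not part of the patch):

    import json
    from base64 import b64decode
    from io import BytesIO

    from PIL import Image

    def load_cover_image(metadata: dict, index: int = 0):
        # the metadata value is a JSON string holding a list of base64-encoded images
        cover_images = json.loads(metadata.get('ssmd_cover_images', '[]'))
        encoded = cover_images[index] if index < len(cover_images) else None
        if not encoded:
            return None
        return Image.open(BytesIO(b64decode(encoded)))

Cards only receive the ./sd_extra_networks/cover-images?page=...&item=... URL returned by find_embedded_preview, while get_metadata keeps stripping the key, so the raw image data is never dumped into the metadata text view.
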
From 9aa9e980a9a2846755b72482e8b83b12cbc3ca0f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 24 Mar 2024 11:00:16 +0300 Subject: [PATCH 89/90] support scheduler selection in hires fix --- modules/infotext_utils.py | 3 ++ modules/processing.py | 6 +++ modules/processing_scripts/sampler.py | 38 +---------------- modules/sd_samplers.py | 60 ++++++++++++++++++++++++++- modules/sd_samplers_kdiffusion.py | 6 ++- modules/txt2img.py | 3 +- modules/ui.py | 11 +++-- 7 files changed, 83 insertions(+), 44 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 723cb1f82..1c91d076d 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -314,6 +314,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Hires sampler" not in res: res["Hires sampler"] = "Use same sampler" + if "Hires schedule type" not in res: + res["Hires schedule type"] = "Use same scheduler" + if "Hires checkpoint" not in res: res["Hires checkpoint"] = "Use same checkpoint" diff --git a/modules/processing.py b/modules/processing.py index ac5541418..2baca4f5f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1115,6 +1115,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): hr_resize_y: int = 0 hr_checkpoint_name: str = None hr_sampler_name: str = None + hr_scheduler: str = None hr_prompt: str = '' hr_negative_prompt: str = '' force_task_id: str = None @@ -1203,6 +1204,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: self.extra_generation_params["Hires sampler"] = self.hr_sampler_name + self.extra_generation_params["Hires schedule type"] = None # to be set in sd_samplers_kdiffusion.py + + if self.hr_scheduler is None: + self.hr_scheduler = self.scheduler + self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") if self.enable_hr and self.latent_scale_mode is None: if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers): diff --git a/modules/processing_scripts/sampler.py b/modules/processing_scripts/sampler.py index 83b952550..5d50a162c 100644 --- a/modules/processing_scripts/sampler.py +++ b/modules/processing_scripts/sampler.py @@ -1,44 +1,10 @@ import gradio as gr -import functools from modules import scripts, sd_samplers, sd_schedulers, shared from modules.infotext_utils import PasteField from modules.ui_components import FormRow, FormGroup -def get_sampler_from_infotext(d: dict): - return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[0] - - -def get_scheduler_from_infotext(d: dict): - return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[1] - - -@functools.cache -def get_sampler_and_scheduler(sampler_name, scheduler_name): - default_sampler = sd_samplers.samplers[0] - found_scheduler = sd_schedulers.schedulers_map.get(scheduler_name, sd_schedulers.schedulers[0]) - - name = sampler_name or default_sampler.name - - for scheduler in sd_schedulers.schedulers: - name_options = [scheduler.label, scheduler.name, *(scheduler.aliases or [])] - - for name_option in name_options: - if name.endswith(" " + name_option): - found_scheduler = scheduler - name = name[0:-(len(name_option) + 1)] - break - - sampler = sd_samplers.all_samplers_map.get(name, default_sampler) - - # revert back to Automatic if 
it's the default scheduler for the selected sampler - if sampler.options.get('scheduler', None) == found_scheduler.name: - found_scheduler = sd_schedulers.schedulers[0] - - return sampler.name, found_scheduler.label - - class ScriptSampler(scripts.ScriptBuiltinUI): section = "sampler" @@ -67,8 +33,8 @@ class ScriptSampler(scripts.ScriptBuiltinUI): self.infotext_fields = [ PasteField(self.steps, "Steps", api="steps"), - PasteField(self.sampler_name, get_sampler_from_infotext, api="sampler_name"), - PasteField(self.scheduler, get_scheduler_from_infotext, api="scheduler"), + PasteField(self.sampler_name, sd_samplers.get_sampler_from_infotext, api="sampler_name"), + PasteField(self.scheduler, sd_samplers.get_scheduler_from_infotext, api="scheduler"), ] return self.steps, self.sampler_name, self.scheduler diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 1f50297e6..6b7b84b6d 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,6 +1,8 @@ from __future__ import annotations -from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared, sd_samplers_common +import functools + +from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared, sd_samplers_common, sd_schedulers # imports for functions that previously were here and are used by other modules samples_to_image_grid = sd_samplers_common.samples_to_image_grid @@ -64,4 +66,60 @@ def visible_samplers(): return [x for x in samplers if x.name not in samplers_hidden] +def get_sampler_from_infotext(d: dict): + return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[0] + + +def get_scheduler_from_infotext(d: dict): + return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[1] + + +def get_hr_sampler_and_scheduler(d: dict): + hr_sampler = d.get("Hires sampler", "Use same sampler") + sampler = d.get("Sampler") if hr_sampler == "Use same sampler" else hr_sampler + + hr_scheduler = d.get("Hires schedule type", "Use same scheduler") + scheduler = d.get("Schedule type") if hr_scheduler == "Use same scheduler" else hr_scheduler + + sampler, scheduler = get_sampler_and_scheduler(sampler, scheduler) + + sampler = sampler if sampler != d.get("Sampler") else "Use same sampler" + scheduler = scheduler if scheduler != d.get("Schedule type") else "Use same scheduler" + + return sampler, scheduler + + +def get_hr_sampler_from_infotext(d: dict): + return get_hr_sampler_and_scheduler(d)[0] + + +def get_hr_scheduler_from_infotext(d: dict): + return get_hr_sampler_and_scheduler(d)[1] + + +@functools.cache +def get_sampler_and_scheduler(sampler_name, scheduler_name): + default_sampler = samplers[0] + found_scheduler = sd_schedulers.schedulers_map.get(scheduler_name, sd_schedulers.schedulers[0]) + + name = sampler_name or default_sampler.name + + for scheduler in sd_schedulers.schedulers: + name_options = [scheduler.label, scheduler.name, *(scheduler.aliases or [])] + + for name_option in name_options: + if name.endswith(" " + name_option): + found_scheduler = scheduler + name = name[0:-(len(name_option) + 1)] + break + + sampler = all_samplers_map.get(name, default_sampler) + + # revert back to Automatic if it's the default scheduler for the selected sampler + if sampler.options.get('scheduler', None) == found_scheduler.name: + found_scheduler = sd_schedulers.schedulers[0] + + return sampler.name, found_scheduler.label + + set_samplers() diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 
d053e48b1..b45f85b07 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -79,7 +79,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): steps += 1 if discard_next_to_last_sigma else 0 - scheduler_name = p.scheduler or 'Automatic' + scheduler_name = (p.hr_scheduler if p.is_hr_pass else p.scheduler) or 'Automatic' if scheduler_name == 'Automatic': scheduler_name = self.config.options.get('scheduler', None) @@ -95,8 +95,10 @@ class KDiffusionSampler(sd_samplers_common.Sampler): else: sigmas_kwargs = {'sigma_min': sigma_min, 'sigma_max': sigma_max} - if scheduler.label != 'Automatic': + if scheduler.label != 'Automatic' and not p.is_hr_pass: p.extra_generation_params["Schedule type"] = scheduler.label + elif scheduler.label != p.extra_generation_params.get("Schedule type"): + p.extra_generation_params["Hires schedule type"] = scheduler.label if opts.sigma_min != 0 and opts.sigma_min != m_sigma_min: sigmas_kwargs['sigma_min'] = opts.sigma_min diff --git a/modules/txt2img.py b/modules/txt2img.py index 53fa0099d..6f20253ae 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -11,7 +11,7 @@ from PIL import Image import gradio as gr -def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False): +def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_scheduler: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False): override_settings = create_override_settings_dict(override_settings_texts) if force_enable_hr: @@ -38,6 +38,7 @@ def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, ne hr_resize_y=hr_resize_y, hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name, hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name, + hr_scheduler=None if hr_scheduler == 'Use same scheduler' else hr_scheduler, hr_prompt=hr_prompt, hr_negative_prompt=hr_negative_prompt, override_settings=override_settings, diff --git a/modules/ui.py b/modules/ui.py index c964d5e22..9b138e0aa 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -322,10 +322,11 @@ def create_ui(): with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container: - hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") + hr_checkpoint_name = gr.Dropdown(label='Checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint") create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + 
modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh") - hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_sampler_name = gr.Dropdown(label='Sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler") + hr_scheduler = gr.Dropdown(label='Schedule type', elem_id="hr_scheduler", choices=["Use same scheduler"] + [x.label for x in sd_schedulers.schedulers], value="Use same scheduler") with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container: with gr.Column(scale=80): @@ -394,6 +395,7 @@ def create_ui(): hr_resize_y, hr_checkpoint_name, hr_sampler_name, + hr_scheduler, hr_prompt, hr_negative_prompt, override_settings, @@ -456,8 +458,9 @@ def create_ui(): PasteField(hr_resize_x, "Hires resize-1", api="hr_resize_x"), PasteField(hr_resize_y, "Hires resize-2", api="hr_resize_y"), PasteField(hr_checkpoint_name, "Hires checkpoint", api="hr_checkpoint_name"), - PasteField(hr_sampler_name, "Hires sampler", api="hr_sampler_name"), - PasteField(hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()), + PasteField(hr_sampler_name, sd_samplers.get_hr_sampler_from_infotext, api="hr_sampler_name"), + PasteField(hr_scheduler, sd_samplers.get_hr_scheduler_from_infotext, api="hr_scheduler"), + PasteField(hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" or d.get("Hires schedule type", "Use same scheduler") != "Use same scheduler" else gr.update()), PasteField(hr_prompt, "Hires prompt", api="hr_prompt"), PasteField(hr_negative_prompt, "Hires negative prompt", api="hr_negative_prompt"), PasteField(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()), From dfbdb5a135b2170c0a2330e5cf052a00784dbf74 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 25 Mar 2024 18:00:58 +0300 Subject: [PATCH 90/90] put request: gr.Request at start of img2img function similar to txt2img --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/img2img.py b/modules/img2img.py index 9e316d451..a1d042c21 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -146,7 +146,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal return batch_results -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, 
img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args): +def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5
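

A closing note on the helpers that patch 89 moves into modules/sd_samplers.py: get_sampler_and_scheduler also gives older infotexts a migration path, since a legacy combined sampler name is split by stripping a known scheduler label or alias from its end. A rough usage sketch, assuming a running webui process where the modules package is importable (exact sampler availability depends on the installed version):

    from modules import sd_samplers

    # legacy infotexts wrote the scheduler into the sampler name
    sampler, scheduler = sd_samplers.get_sampler_and_scheduler("DPM++ 2M Karras", None)
    # expected: sampler == "DPM++ 2M", scheduler == "Karras"

    # "Hires schedule type" falls back to the first-pass value and collapses to
    # "Use same scheduler" when both passes end up identical
    d = {"Sampler": "Euler a", "Schedule type": "Karras", "Hires schedule type": "Use same scheduler"}
    print(sd_samplers.get_hr_scheduler_from_infotext(d))  # expected: "Use same scheduler"

Because of the trailing revert check, a scheduler that is merely the selected sampler's own default resolves back to 'Automatic', which keeps infotexts free of redundant 'Schedule type' entries, matching the intent of patch 71 above.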