Merge branch 'dev' into pr/15530

w-e-w 2024-11-01 03:57:20 +09:00
commit ab4a5378c6
32 changed files with 330 additions and 138 deletions


@@ -10,7 +10,7 @@ model:
num_idx: 1000
weighting_config:
target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting
scaling_config:
target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
discretization_config:
@@ -21,7 +21,7 @@ model:
params:
adm_in_channels: 2816
num_classes: sequential
use_checkpoint: True
use_checkpoint: False
in_channels: 4
out_channels: 4
model_channels: 320


@@ -816,7 +816,7 @@ onUiLoaded(async() => {
// Increase or decrease brush size based on scroll direction
adjustBrushSize(elemId, e.deltaY);
}
});
}, {passive: false});
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
function handleMoveKeyDown(e) {


@@ -1,7 +1,7 @@
"""
Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
Warn: The patch works well only if the input image has a width and height that are multiples of 128
Original author: @tfernd Github: https://github.com/tfernd/HyperTile
Original author: @tfernd GitHub: https://github.com/tfernd/HyperTile
"""
from __future__ import annotations


@@ -34,14 +34,14 @@ class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocess
with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
with gr.Row():
mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id=self.elem_id_suffix("postprocess_multicrop_mindim"))
maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id=self.elem_id_suffix("postprocess_multicrop_maxdim"))
with gr.Row():
minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id=self.elem_id_suffix("postprocess_multicrop_minarea"))
maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id=self.elem_id_suffix("postprocess_multicrop_maxarea"))
with gr.Row():
objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id=self.elem_id_suffix("postprocess_multicrop_objective"))
threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id=self.elem_id_suffix("postprocess_multicrop_threshold"))
return {
"enable": enable,


@@ -11,10 +11,10 @@ class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing)
def ui(self):
with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_face_weight"))
entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_entropy_weight"))
edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_edges_weight"))
debug = gr.Checkbox(label='Create debug image', elem_id=self.elem_id_suffix("train_process_focal_crop_debug"))
return {
"enable": enable,


@@ -35,8 +35,8 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces
def ui(self):
with ui_components.InputAccordion(False, label="Split oversized images") as enable:
with gr.Row():
split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_split_threshold"))
overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id=self.elem_id_suffix("postprocess_overlap_ratio"))
return {
"enable": enable,


@@ -1,7 +1,7 @@
<div>
<a href="{api_docs}">API</a>
 • 
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">GitHub</a>
 • 
<a href="https://gradio.app">Gradio</a>
 • 


@@ -104,7 +104,7 @@ var contextMenuInit = function() {
e.preventDefault();
}
});
});
}, {passive: false});
});
eventListenerApplied = true;


@@ -29,10 +29,11 @@ const EXTRA_NETWORKS_INIT_DATA_TIMEOUT_MS = 60000;
const EXTRA_NETWORKS_FETCH_DATA_TIMEOUT_MS = 60000;
const EXTRA_NETWORKS_SETUP_DEBOUNCE_TIME_MS = 1000;
const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/;
const re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/s;
const re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g;
const re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/;
const re_extranet_g_neg = /\(([^:^>]+:[\d.]+)\)/g;
var globalPopup = null;
var globalPopupInner = null;
const storedPopupIds = {};
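The functional change in this hunk is the added `s` (dotall) flag on `re_extranet`, so the trailing `(.*)` group also captures text after a line break in multi-line prompts. A minimal Python analogue of the behaviour (Python's `re.DOTALL` stands in for JavaScript's `s` flag; the prompt string is made up):

```python
import re

# Without DOTALL, '.' stops at the first newline, so anything after the
# extra-network tag on later lines is dropped from the captured remainder.
pattern_old = re.compile(r"<([^:^>]+:[^:]+):[\d.]+>(.*)")
pattern_new = re.compile(r"<([^:^>]+:[^:]+):[\d.]+>(.*)", re.DOTALL)

prompt = "<lora:myLora:0.8> first line\nsecond line"

print(repr(pattern_old.search(prompt).group(2)))  # ' first line'
print(repr(pattern_new.search(prompt).group(2)))  # ' first line\nsecond line'
```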


@@ -79,11 +79,12 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var wakeLock = null;
var requestWakeLock = async function() {
if (!opts.prevent_screen_sleep_during_generation || wakeLock) return;
if (!opts.prevent_screen_sleep_during_generation || wakeLock !== null) return;
try {
wakeLock = await navigator.wakeLock.request('screen');
} catch (err) {
console.error('Wake Lock is not supported.');
wakeLock = false;
}
};


@@ -127,7 +127,7 @@
} else {
R.screenX = evt.changedTouches[0].screenX;
}
});
}, {passive: false});
});
resizeHandle.addEventListener('dblclick', onDoubleClick);


@@ -1,7 +1,7 @@
import os
from modules import modelloader, errors
from modules.shared import cmd_opts, opts
from modules.shared import cmd_opts, opts, hf_endpoint
from modules.upscaler import Upscaler, UpscalerData
from modules.upscaler_utils import upscale_with_model
@@ -49,7 +49,18 @@ class UpscalerDAT(Upscaler):
scaler.local_data_path = modelloader.load_file_from_url(
scaler.data_path,
model_dir=self.model_download_path,
hash_prefix=scaler.sha256,
)
if os.path.getsize(scaler.local_data_path) < 200:
# Re-download if the file is too small, probably an LFS pointer
scaler.local_data_path = modelloader.load_file_from_url(
scaler.data_path,
model_dir=self.model_download_path,
hash_prefix=scaler.sha256,
re_download=True,
)
if not os.path.exists(scaler.local_data_path):
raise FileNotFoundError(f"DAT data missing: {scaler.local_data_path}")
return scaler
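The size check above exists because a bad download of an LFS-tracked file can leave a tiny text pointer instead of the multi-megabyte `.pth` weights. A rough, self-contained sketch of the same heuristic (function name and threshold are illustrative, not part of the change):

```python
import os

def looks_like_lfs_pointer(path: str, size_threshold: int = 200) -> bool:
    """Heuristic: real model weights are many MB, a Git LFS pointer is ~130 bytes of text."""
    if os.path.getsize(path) >= size_threshold:
        return False
    with open(path, "rb") as f:
        head = f.read(64)
    # Git LFS pointer files start with this line
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")
```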
@@ -60,20 +71,23 @@ def get_dat_models(scaler):
return [
UpscalerData(
name="DAT x2",
path="https://github.com/n0kovo/dat_upscaler_models/raw/main/DAT/DAT_x2.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth",
scale=2,
upscaler=scaler,
sha256='7760aa96e4ee77e29d4f89c3a4486200042e019461fdb8aa286f49aa00b89b51',
),
UpscalerData(
name="DAT x3",
path="https://github.com/n0kovo/dat_upscaler_models/raw/main/DAT/DAT_x3.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x3.pth",
scale=3,
upscaler=scaler,
sha256='581973e02c06f90d4eb90acf743ec9604f56f3c2c6f9e1e2c2b38ded1f80d197',
),
UpscalerData(
name="DAT x4",
path="https://github.com/n0kovo/dat_upscaler_models/raw/main/DAT/DAT_x4.pth",
path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x4.pth",
scale=4,
upscaler=scaler,
sha256='391a6ce69899dff5ea3214557e9d585608254579217169faf3d4c353caff049e',
),
]
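These URLs are now built from `hf_endpoint`, so pointing the endpoint at a Hugging Face mirror redirects the downloads. The definition of `hf_endpoint` is not part of this diff; a plausible sketch of how such a setting is usually derived (assumption: an `HF_ENDPOINT` environment variable with `https://huggingface.co` as the default):

```python
import os

# Assumed derivation, not shown in this diff
hf_endpoint = os.environ.get("HF_ENDPOINT", "https://huggingface.co").rstrip("/")

dat_x2_url = f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth"
print(dat_x2_url)
```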


@@ -10,6 +10,7 @@ import torch
from modules import shared
from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
from modules.util import load_file_from_url # noqa, backwards compatibility
if TYPE_CHECKING:
import spandrel
@@ -17,30 +18,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
def load_file_from_url(
url: str,
*,
model_dir: str,
progress: bool = True,
file_name: str | None = None,
hash_prefix: str | None = None,
) -> str:
"""Download a file from `url` into `model_dir`, using the file present if possible.
Returns the path to the downloaded file.
"""
os.makedirs(model_dir, exist_ok=True)
if not file_name:
parts = urlparse(url)
file_name = os.path.basename(parts.path)
cached_file = os.path.abspath(os.path.join(model_dir, file_name))
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
from torch.hub import download_url_to_file
download_url_to_file(url, cached_file, progress=progress, hash_prefix=hash_prefix)
return cached_file
def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None, hash_prefix=None) -> list:
"""
A one-and done loader to try finding the desired models in specified directories.


@@ -24,7 +24,7 @@ class SafetensorsMapping(typing.Mapping):
return self.file.get_tensor(key)
CLIPL_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
CLIPL_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
CLIPL_CONFIG = {
"hidden_act": "quick_gelu",
"hidden_size": 768,
@@ -33,7 +33,7 @@ CLIPL_CONFIG = {
"num_hidden_layers": 12,
}
CLIPG_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
CLIPG_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
CLIPG_CONFIG = {
"hidden_act": "gelu",
"hidden_size": 1280,
@@ -43,7 +43,7 @@ CLIPG_CONFIG = {
"textual_inversion_key": "clip_g",
}
T5_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
T5_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
T5_CONFIG = {
"d_ff": 10240,
"d_model": 4096,


@@ -1259,7 +1259,10 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.hr_checkpoint_info is None:
raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
if shared.sd_model.sd_checkpoint_info == self.hr_checkpoint_info:
self.hr_checkpoint_info = None
else:
self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
self.extra_generation_params["Hires sampler"] = self.hr_sampler_name


@@ -13,6 +13,7 @@ class ScriptPostprocessingForMainUI(scripts.Script):
return scripts.AlwaysVisible
def ui(self, is_img2img):
self.script.tab_name = '_img2img' if is_img2img else '_txt2img'
self.postprocessing_controls = self.script.ui()
return self.postprocessing_controls.values()
@@ -33,7 +34,7 @@ def create_auto_preprocessing_script_data():
for name in shared.opts.postprocessing_enable_in_main_ui:
script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
if script is None:
if script is None or script.script_class.extra_only:
continue
constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())


@@ -1,3 +1,4 @@
import re
import dataclasses
import os
import gradio as gr
@@ -59,6 +60,10 @@ class ScriptPostprocessing:
args_from = None
args_to = None
# defines whether the script should be used only in the extras tab or only in the main UI
extra_only = None
main_ui_only = None
order = 1000
"""scripts will be ordred by this value in postprocessing UI"""
@@ -97,6 +102,31 @@ class ScriptPostprocessing:
def image_changed(self):
pass
tab_name = '' # used by ScriptPostprocessingForMainUI
replace_pattern = re.compile(r'\s')
rm_pattern = re.compile(r'[^a-z_0-9]')
def elem_id(self, item_id):
"""
Helper function to generate an id for an HTML element
constructs the final id from the script name and the user-supplied item_id
'script_extras_{self.name.lower()}_{item_id}'
{tab_name} will be appended to the end of the id if set
tab_name will be set to '_img2img' or '_txt2img' if used by ScriptPostprocessingForMainUI
Extensions should use this function to generate element IDs
"""
return self.elem_id_suffix(f'extras_{self.name.lower()}_{item_id}')
def elem_id_suffix(self, base_id):
"""
Append tab_name to the base_id
Extensions that already have their own specific element IDs and wish to keep them unchanged where possible should use this function
"""
base_id = self.rm_pattern.sub('', self.replace_pattern.sub('_', base_id))
return f'{base_id}{self.tab_name}'
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
@@ -119,10 +149,6 @@ class ScriptPostprocessingRunner:
for script_data in scripts_data:
script: ScriptPostprocessing = script_data.script_class()
script.filename = script_data.path
if script.name == "Simple Upscale":
continue
self.scripts.append(script)
def create_script_ui(self, script, inputs):
@@ -152,7 +178,7 @@ class ScriptPostprocessingRunner:
return len(self.scripts)
filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out]
filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out and not script.main_ui_only]
script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(filtered_scripts)}
return sorted(filtered_scripts, key=lambda x: script_scores[x.name])
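For orientation, this is roughly what the two helpers above produce for a hypothetical extension script; the class, script name, and resulting IDs below are illustrative only:

```python
from modules import scripts_postprocessing

class MyCrop(scripts_postprocessing.ScriptPostprocessing):
    """Illustrative extension script, not part of this commit."""
    name = "My Crop"

script = MyCrop()

# In the extras tab, tab_name is '' (the default):
script.elem_id("strength")          # -> 'extras_my_crop_strength'
script.elem_id_suffix("my_old_id")  # -> 'my_old_id'

# When wrapped by ScriptPostprocessingForMainUI for img2img, tab_name is '_img2img':
script.tab_name = "_img2img"
script.elem_id("strength")          # -> 'extras_my_crop_strength_img2img'
script.elem_id_suffix("my_old_id")  # -> 'my_old_id_img2img'
```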


@@ -76,7 +76,7 @@ class DisableInitialization(ReplaceHelper):
def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs):
# this file is always 404, prevent making request
if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
if url == f'{shared.hf_endpoint}/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
return None
try:


@@ -16,10 +16,12 @@ def dat_models_names():
return [x.name for x in modules.dat_model.get_dat_models(None)]
def postprocessing_scripts():
def postprocessing_scripts(filter_out_extra_only=False, filter_out_main_ui_only=False):
import modules.scripts
return modules.scripts.scripts_postproc.scripts
return list(filter(
lambda s: (not filter_out_extra_only or not s.extra_only) and (not filter_out_main_ui_only or not s.main_ui_only),
modules.scripts.scripts_postproc.scripts,
))
def sd_vae_items():


@@ -301,6 +301,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
"extra_networks_long_press_time_ms": OptionInfo(800, "Hold time required to register a long click").info("in milliseconds").info("default 800"),
"extra_networks_dbl_press_time_ms": OptionInfo(500, "Time between clicks to register a double click").info("in milliseconds").info("default 500"),
"textual_inversion_image_embedding_data_cache": OptionInfo(False, 'Cache the data of image embeddings').info('potentially increase TI load time at the cost some disk space'),
}))
options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "ui"), {
@@ -420,9 +421,9 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
}))
options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_disable_in_extras': OptionInfo([], "Disable postprocessing operations in extras tab", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_extra_only=True)]}),
'postprocessing_disable_in_extras': OptionInfo([], "Disable postprocessing operations in extras tab", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_main_ui_only=True)]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts(filter_out_main_ui_only=True)]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
'postprocessing_existing_caption_action': OptionInfo("Ignore", "Action for existing captions", gr.Radio, {"choices": ["Ignore", "Keep", "Prepend", "Append"]}).info("when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both"),
}))


@@ -12,7 +12,7 @@ import safetensors.torch
import numpy as np
from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
from modules import shared, devices, sd_hijack, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes, cache
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
@@ -116,6 +116,7 @@ class EmbeddingDatabase:
self.expected_shape = -1
self.embedding_dirs = {}
self.previously_displayed_embeddings = ()
self.image_embedding_cache = cache.cache('image-embedding')
def add_embedding_dir(self, path):
self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
@@ -154,6 +155,31 @@ class EmbeddingDatabase:
vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
return vec.shape[1]
def read_embedding_from_image(self, path, name):
try:
ondisk_mtime = os.path.getmtime(path)
if (cache_embedding := self.image_embedding_cache.get(path)) and ondisk_mtime == cache_embedding.get('mtime', 0):
# the cache is only used if the file's modification time still matches the cached value
return cache_embedding.get('data', None), cache_embedding.get('name', None)
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
elif data := extract_image_data_embed(embed_image):
name = data.get('name', name)
if data is None or shared.opts.textual_inversion_image_embedding_data_cache:
# data of image embeddings is only cached if the textual_inversion_image_embedding_data_cache option is enabled
# results for images that are not embeddings are always cached to reduce unnecessary future disk reads
self.image_embedding_cache[path] = {'data': data, 'name': None if data is None else name, 'mtime': ondisk_mtime}
return data, name
except Exception:
errors.report(f"Error loading embedding {path}", exc_info=True)
return None, None
def load_from_file(self, path, filename):
name, ext = os.path.splitext(filename)
ext = ext.upper()
@@ -163,17 +189,10 @@
if second_ext.upper() == '.PREVIEW':
return
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
name = data.get('name', name)
else:
data = extract_image_data_embed(embed_image)
if data:
name = data.get('name', name)
else:
# if data is None, means this is not an embedding, just a preview image
return
data, name = self.read_embedding_from_image(path, name)
if data is None:
return
elif ext in ['.BIN', '.PT']:
data = torch.load(path, map_location="cpu")
elif ext in ['.SAFETENSORS']:
@@ -191,7 +210,6 @@
else:
print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.")
def load_from_dir(self, embdir):
if not os.path.isdir(embdir.path):
return
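The new cache above follows a common pattern: store the file's mtime next to the payload and trust the entry only while the mtime still matches. A stripped-down sketch of that pattern with generic names (not the module's API):

```python
import os

_cache: dict[str, dict] = {}

def read_with_mtime_cache(path: str, compute):
    """Return compute(path), reusing the cached result while the file is unmodified."""
    mtime = os.path.getmtime(path)
    entry = _cache.get(path)
    if entry is not None and entry["mtime"] == mtime:
        return entry["data"]
    data = compute(path)
    _cache[path] = {"data": data, "mtime": mtime}
    return data
```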


@@ -91,6 +91,7 @@ class InputAccordion(gr.Checkbox):
Actually just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox.
"""
accordion_id_set = set()
global_index = 0
def __init__(self, value, **kwargs):
@@ -99,6 +100,18 @@
self.accordion_id = f"input-accordion-{InputAccordion.global_index}"
InputAccordion.global_index += 1
if not InputAccordion.accordion_id_set:
from modules import script_callbacks
script_callbacks.on_script_unloaded(InputAccordion.reset)
if self.accordion_id in InputAccordion.accordion_id_set:
count = 1
while (unique_id := f'{self.accordion_id}-{count}') in InputAccordion.accordion_id_set:
count += 1
self.accordion_id = unique_id
InputAccordion.accordion_id_set.add(self.accordion_id)
kwargs_checkbox = {
**kwargs,
"elem_id": f"{self.accordion_id}-checkbox",
@@ -143,3 +156,7 @@
def get_block_name(self):
return "checkbox"
@classmethod
def reset(cls):
cls.global_index = 0
cls.accordion_id_set.clear()


@@ -1321,10 +1321,8 @@ def add_pages_to_demo(app):
app.add_api_route("/sd_extra_networks/get-model-details", get_model_details, methods=["GET"])
def quote_js(s):
s = s.replace("\\", "\\\\")
s = s.replace('"', '\\"')
return f'"{s}"'
def quote_js(s: str):
return json.dumps(s, ensure_ascii=False)
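`json.dumps` produces the same escaping of backslashes and double quotes as the removed hand-rolled version, and additionally escapes control characters such as newlines, which the old code passed through unescaped. A quick comparison (input strings are made up):

```python
import json

def quote_js_old(s):
    s = s.replace("\\", "\\\\")
    s = s.replace('"', '\\"')
    return f'"{s}"'

def quote_js_new(s: str):
    return json.dumps(s, ensure_ascii=False)

print(quote_js_old('say "hi"'))      # "say \"hi\""
print(quote_js_new('say "hi"'))      # "say \"hi\""

# The difference shows up with control characters:
print(quote_js_old('line1\nline2'))  # contains a raw newline - not a valid JS string literal
print(quote_js_new('line1\nline2'))  # "line1\nline2" with the newline escaped
```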
def initialize():


@@ -176,7 +176,7 @@ class UiLoadsave:
if new_value == old_value:
continue
if old_value is None and new_value == '' or new_value == []:
if old_value is None and (new_value == '' or new_value == []):
continue
yield path, old_value, new_value
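The added parentheses matter because `and` binds tighter than `or` in Python: the old condition skipped any component whose new value was an empty list even when `old_value` was set. A small demonstration (values are made up):

```python
old_value, new_value = "something", []

# Old condition, parsed as (old_value is None and new_value == '') or new_value == []
print(old_value is None and new_value == '' or new_value == [])    # True  - change wrongly skipped

# New condition only skips when old_value is None
print(old_value is None and (new_value == '' or new_value == []))  # False - change is reported
```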


@@ -93,13 +93,14 @@ class UpscalerData:
scaler: Upscaler = None
model: None
def __init__(self, name: str, path: str, upscaler: Upscaler = None, scale: int = 4, model=None):
def __init__(self, name: str, path: str, upscaler: Upscaler = None, scale: int = 4, model=None, sha256: str = None):
self.name = name
self.data_path = path
self.local_data_path = path
self.scaler = upscaler
self.scale = scale
self.model = model
self.sha256 = sha256
def __repr__(self):
return f"<UpscalerData name={self.name} path={self.data_path} scale={self.scale}>"


@@ -211,3 +211,80 @@ Requested path was: {path}
subprocess.Popen(["explorer.exe", subprocess.check_output(["wslpath", "-w", path])])
else:
subprocess.Popen(["xdg-open", path])
def load_file_from_url(
url: str,
*,
model_dir: str,
progress: bool = True,
file_name: str | None = None,
hash_prefix: str | None = None,
re_download: bool = False,
) -> str:
"""Download a file from `url` into `model_dir`, using the file present if possible.
Returns the path to the downloaded file.
file_name: if specified, it will be used as the filename, otherwise the filename will be extracted from the url.
the file is downloaded to {file_name}.tmp and then moved to the final location once the download is complete.
hash_prefix: sha256 hex string, if provided, the hash of the downloaded file will be checked against this prefix.
if the hash does not match, the temporary file is deleted and a ValueError is raised.
re_download: forcibly re-download the file even if it already exists.
"""
from urllib.parse import urlparse
import requests
try:
from tqdm import tqdm
except ImportError:
class tqdm:
def __init__(self, *args, **kwargs):
pass
def update(self, n=1, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
if not file_name:
parts = urlparse(url)
file_name = os.path.basename(parts.path)
cached_file = os.path.abspath(os.path.join(model_dir, file_name))
if re_download or not os.path.exists(cached_file):
os.makedirs(model_dir, exist_ok=True)
temp_file = os.path.join(model_dir, f"{file_name}.tmp")
print(f'\nDownloading: "{url}" to {cached_file}')
response = requests.get(url, stream=True)
response.raise_for_status()
total_size = int(response.headers.get('content-length', 0))
with tqdm(total=total_size, unit='B', unit_scale=True, desc=file_name, disable=not progress) as progress_bar:
with open(temp_file, 'wb') as file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
file.write(chunk)
progress_bar.update(len(chunk))
if hash_prefix and not compare_sha256(temp_file, hash_prefix):
print(f"Hash mismatch for {temp_file}. Deleting the temporary file.")
os.remove(temp_file)
raise ValueError(f"File hash does not match the expected hash prefix {hash_prefix}!")
os.rename(temp_file, cached_file)
return cached_file
def compare_sha256(file_path: str, hash_prefix: str) -> bool:
"""Check if the SHA256 hash of the file matches the given prefix."""
import hashlib
hash_sha256 = hashlib.sha256()
blksize = 1024 * 1024
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest().startswith(hash_prefix.strip().lower())
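A hedged usage sketch for the helper above, which lives in `modules/util.py` and is re-exported by `modules.modelloader` for backwards compatibility (see the earlier hunk); the URL, directory, and hash prefix are placeholders, not values from this commit:

```python
# Hypothetical call site with placeholder arguments.
from modules.util import load_file_from_url

path = load_file_from_url(
    "https://example.com/models/some_model.pth",
    model_dir="models/placeholder",
    hash_prefix="deadbeef",   # placeholder sha256 prefix; a mismatch deletes the .tmp file and raises ValueError
    re_download=False,        # True forces a fresh download even if the file already exists
)
print(path)  # absolute path to the cached file
```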


@@ -12,8 +12,8 @@ class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing
def ui(self):
with ui_components.InputAccordion(False, label="CodeFormer") as enable:
with gr.Row():
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_codeformer_visibility")
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id=self.elem_id_suffix("extras_codeformer_visibility"))
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id=self.elem_id_suffix("extras_codeformer_weight"))
return {
"enable": enable,


@@ -11,7 +11,7 @@ class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
def ui(self):
with ui_components.InputAccordion(False, label="GFPGAN") as enable:
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id="extras_gfpgan_visibility")
gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Visibility", value=1.0, elem_id=self.elem_id_suffix("extras_gfpgan_visibility"))
return {
"enable": enable,


@@ -30,31 +30,31 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
def ui(self):
selected_tab = gr.Number(value=0, visible=False)
with InputAccordion(True, label="Upscale", elem_id="extras_upscale") as upscale_enabled:
with InputAccordion(True, label="Upscale", elem_id=self.elem_id_suffix("extras_upscale")) as upscale_enabled:
with FormRow():
extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id=self.elem_id_suffix("extras_upscaler_1"), choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
with FormRow():
extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id=self.elem_id_suffix("extras_upscaler_2"), choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id=self.elem_id_suffix("extras_upscaler_2_visibility"))
with FormRow():
with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
with gr.Tabs(elem_id=self.elem_id_suffix("extras_resize_mode")):
with gr.TabItem('Scale by', elem_id=self.elem_id_suffix("extras_scale_by_tab")) as tab_scale_by:
with gr.Row():
with gr.Column(scale=4):
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id=self.elem_id_suffix("extras_upscaling_resize"))
with gr.Column(scale=1, min_width=160):
max_side_length = gr.Number(label="Max side length", value=0, elem_id="extras_upscale_max_side_length", tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 0 = no limit.", min_width=160, step=8, minimum=0)
max_side_length = gr.Number(label="Max side length", value=0, elem_id=self.elem_id_suffix("extras_upscale_max_side_length"), tooltip="If any of two sides of the image ends up larger than specified, will downscale it to fit. 0 = no limit.", min_width=160, step=8, minimum=0)
with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
with gr.TabItem('Scale to', elem_id=self.elem_id_suffix("extras_scale_to_tab")) as tab_scale_to:
with FormRow():
with gr.Column(elem_id="upscaling_column_size", scale=4):
upscaling_resize_w = gr.Slider(minimum=64, maximum=8192, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w")
upscaling_resize_h = gr.Slider(minimum=64, maximum=8192, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_h")
with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"):
upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn", tooltip="Switch width/height")
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
with gr.Column(elem_id=self.elem_id_suffix("upscaling_column_size"), scale=4):
upscaling_resize_w = gr.Slider(minimum=64, maximum=8192, step=8, label="Width", value=512, elem_id=self.elem_id_suffix("extras_upscaling_resize_w"))
upscaling_resize_h = gr.Slider(minimum=64, maximum=8192, step=8, label="Height", value=512, elem_id=self.elem_id_suffix("extras_upscaling_resize_h"))
with gr.Column(elem_id=self.elem_id_suffix("upscaling_dimensions_row"), scale=1, elem_classes="dimensions-tools"):
upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id=self.elem_id_suffix("upscaling_res_switch_btn"), tooltip="Switch width/height")
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id=self.elem_id_suffix("extras_upscaling_crop"))
def on_selected_upscale_method(upscale_method):
if not shared.opts.set_scale_by_when_changing_upscaler:
@@ -169,6 +169,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
name = "Simple Upscale"
order = 900
main_ui_only = True
def ui(self):
with FormRow():


@@ -20,7 +20,7 @@ import modules.sd_models
import modules.sd_vae
import re
from modules.ui_components import ToolButton
from modules.ui_components import ToolButton, InputAccordion
fill_values_symbol = "\U0001f4d2" # 📒
@@ -285,7 +285,7 @@
]
def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size):
def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size, draw_grid):
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
title_texts = [[images.GridAnnotation(z)] for z in z_labels]
@@ -370,29 +370,30 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
print("Unexpected error: draw_xyz_grid failed to return even a single processed image")
return Processed(p, [])
z_count = len(zs)
if draw_grid:
z_count = len(zs)
for i in range(z_count):
start_index = (i * len(xs) * len(ys)) + i
end_index = start_index + len(xs) * len(ys)
grid = images.image_grid(processed_result.images[start_index:end_index], rows=len(ys))
for i in range(z_count):
start_index = (i * len(xs) * len(ys)) + i
end_index = start_index + len(xs) * len(ys)
grid = images.image_grid(processed_result.images[start_index:end_index], rows=len(ys))
if draw_legend:
grid_max_w, grid_max_h = map(max, zip(*(img.size for img in processed_result.images[start_index:end_index])))
grid = images.draw_grid_annotations(grid, grid_max_w, grid_max_h, hor_texts, ver_texts, margin_size)
processed_result.images.insert(i, grid)
processed_result.all_prompts.insert(i, processed_result.all_prompts[start_index])
processed_result.all_seeds.insert(i, processed_result.all_seeds[start_index])
processed_result.infotexts.insert(i, processed_result.infotexts[start_index])
z_grid = images.image_grid(processed_result.images[:z_count], rows=1)
z_sub_grid_max_w, z_sub_grid_max_h = map(max, zip(*(img.size for img in processed_result.images[:z_count])))
if draw_legend:
grid_max_w, grid_max_h = map(max, zip(*(img.size for img in processed_result.images[start_index:end_index])))
grid = images.draw_grid_annotations(grid, grid_max_w, grid_max_h, hor_texts, ver_texts, margin_size)
processed_result.images.insert(i, grid)
processed_result.all_prompts.insert(i, processed_result.all_prompts[start_index])
processed_result.all_seeds.insert(i, processed_result.all_seeds[start_index])
processed_result.infotexts.insert(i, processed_result.infotexts[start_index])
z_grid = images.image_grid(processed_result.images[:z_count], rows=1)
z_sub_grid_max_w, z_sub_grid_max_h = map(max, zip(*(img.size for img in processed_result.images[:z_count])))
if draw_legend:
z_grid = images.draw_grid_annotations(z_grid, z_sub_grid_max_w, z_sub_grid_max_h, title_texts, [[images.GridAnnotation()]])
processed_result.images.insert(0, z_grid)
# TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
# processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
# processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
processed_result.infotexts.insert(0, processed_result.infotexts[0])
z_grid = images.draw_grid_annotations(z_grid, z_sub_grid_max_w, z_sub_grid_max_h, title_texts, [[images.GridAnnotation()]])
processed_result.images.insert(0, z_grid)
# TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
# processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
# processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
processed_result.infotexts.insert(0, processed_result.infotexts[0])
return processed_result
@@ -442,7 +443,6 @@ class Script(scripts.Script):
with gr.Row(variant="compact", elem_id="axis_options"):
with gr.Column():
draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds"))
with gr.Row():
vary_seeds_x = gr.Checkbox(label='Vary seeds for X', value=False, min_width=80, elem_id=self.elem_id("vary_seeds_x"), tooltip="Use different seeds for images along X axis.")
@@ -450,9 +450,12 @@ class Script(scripts.Script):
vary_seeds_z = gr.Checkbox(label='Vary seeds for Z', value=False, min_width=80, elem_id=self.elem_id("vary_seeds_z"), tooltip="Use different seeds for images along Z axis.")
with gr.Column():
include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images"))
include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
csv_mode = gr.Checkbox(label='Use text inputs instead of dropdowns', value=False, elem_id=self.elem_id("csv_mode"))
with gr.Column():
with InputAccordion(True, label='Draw grid', elem_id=self.elem_id('draw_grid')) as draw_grid:
with gr.Row():
include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
with gr.Row(variant="compact", elem_id="swap_axes"):
@@ -534,9 +537,9 @@ class Script(scripts.Script):
(z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)),
)
return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode]
return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode, draw_grid]
def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode):
def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, vary_seeds_x, vary_seeds_y, vary_seeds_z, margin_size, csv_mode, draw_grid):
x_type, y_type, z_type = x_type or 0, y_type or 0, z_type or 0 # if axle type is None set to 0
if not no_fixed_seeds:
@@ -781,7 +784,8 @@ class Script(scripts.Script):
include_sub_grids=include_sub_grids,
first_axes_processed=first_axes_processed,
second_axes_processed=second_axes_processed,
margin_size=margin_size
margin_size=margin_size,
draw_grid=draw_grid,
)
if not processed.images:
@@ -790,14 +794,15 @@ class Script(scripts.Script):
z_count = len(zs)
# Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
processed.infotexts[:1 + z_count] = grid_infotext[:1 + z_count]
if draw_grid:
# Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
processed.infotexts[:1 + z_count] = grid_infotext[:1 + z_count]
if not include_lone_images:
# Don't need sub-images anymore, drop from list:
processed.images = processed.images[:z_count + 1]
processed.images = processed.images[:z_count + 1] if draw_grid else []
if opts.grid_save:
if draw_grid and opts.grid_save:
# Auto-save main and sub-grids:
grid_count = z_count + 1 if z_count > 1 else 1
for g in range(grid_count):
@@ -807,7 +812,7 @@ class Script(scripts.Script):
if not include_sub_grids: # if not include_sub_grids then skip saving after the first grid
break
if not include_sub_grids:
if draw_grid and not include_sub_grids:
# Done with sub-grids, drop all related information:
for _ in range(z_count):
del processed.images[1]


@@ -4,7 +4,16 @@ if exist webui.settings.bat (
call webui.settings.bat
)
if not defined PYTHON (set PYTHON=python)
if not defined PYTHON (
for /f "delims=" %%A in ('where python ^| findstr /n . ^| findstr ^^1:') do (
if /i "%%~xA" == ".exe" (
set PYTHON=python
) else (
set PYTHON=call python
)
)
)
if defined GIT (set "GIT_PYTHON_GIT_EXECUTABLE=%GIT%")
if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")


@@ -45,6 +45,44 @@ def api_only():
)
def warning_if_invalid_install_dir():
"""
Shows a warning if the webui is installed under a path that contains a leading dot in any of its parent directories.
Gradio's '/file=' route blocks access to files that have a leading dot in any of their path segments.
We use this route to serve files such as JavaScript and CSS to the webpage;
if those files are blocked, the webpage will not function properly.
See https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13292
This security feature was added in Gradio 3.32.0 and removed in later versions;
this function replicates Gradio's file access blocking logic.
This check should be removed when it's no longer applicable.
"""
from packaging.version import parse
from pathlib import Path
import gradio
if parse('3.32.0') <= parse(gradio.__version__) < parse('4'):
def abspath(path):
"""modified from Gradio 3.41.2 gradio.utils.abspath()"""
if path.is_absolute():
return path
is_symlink = path.is_symlink() or any(parent.is_symlink() for parent in path.parents)
return Path.cwd() / path if (is_symlink or path == path.resolve()) else path.resolve()
webui_root = Path(__file__).parent
if any(part.startswith(".") for part in abspath(webui_root).parts):
print(f'''{"!"*25} Warning {"!"*25}
WebUI is installed in a directory that has a leading dot (.) in one of its parent directories.
This will prevent WebUI from functioning properly.
Please move the installation to a different directory.
Current path: "{webui_root}"
For more information see: https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13292
{"!"*25} Warning {"!"*25}''')
def webui():
from modules.shared_cmd_options import cmd_opts
@@ -53,6 +91,8 @@
from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks
warning_if_invalid_install_dir()
while 1:
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()