Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2026-01-30 12:22:26 -08:00
Fix various typos with crate-ci/typos
This commit is contained in:
parent e2a8745abc
commit e3fa46f26f
36 changed files with 76 additions and 71 deletions
@@ -360,7 +360,7 @@ class Api:
         return script_args

     def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None):
-        """Processes `infotext` field from the `request`, and sets other fields of the `request` accoring to what's in infotext.
+        """Processes `infotext` field from the `request`, and sets other fields of the `request` according to what's in infotext.

         If request already has a field set, and that field is encountered in infotext too, the value from infotext is ignored.

@@ -409,8 +409,8 @@ class Api:
         if request.override_settings is None:
             request.override_settings = {}

-        overriden_settings = infotext_utils.get_override_settings(params)
-        for _, setting_name, value in overriden_settings:
+        overridden_settings = infotext_utils.get_override_settings(params)
+        for _, setting_name, value in overridden_settings:
             if setting_name not in request.override_settings:
                 request.override_settings[setting_name] = value
@@ -100,8 +100,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
             sys_pct = sys_peak/max(sys_total, 1) * 100

             toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
-            toltip_r = "Reserved: total amout of video memory allocated by the Torch library "
-            toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity"
+            toltip_r = "Reserved: total amount of video memory allocated by the Torch library "
+            toltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity"

             text_a = f"<abbr title='{toltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
             text_r = f"<abbr title='{toltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
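Note: the Active/Reserved/System figures behind these tooltips come from torch's CUDA memory statistics. A minimal sketch of how such peaks can be read (assumes a CUDA device; the function and variable names are illustrative, not the webui's own helpers):

import torch

def vram_stats_mb():
    # peak memory actively used by tensors vs. peak reserved by the caching allocator
    stats = torch.cuda.memory_stats()
    active_peak = stats["active_bytes.all.peak"] / 1024 / 1024
    reserved_peak = stats["reserved_bytes.all.peak"] / 1024 / 1024

    # total card capacity and how much all processes together are using right now
    # (the webui tracks the peak of this over a generation, this sketch reads the current value)
    free, total = torch.cuda.mem_get_info()
    sys_peak = (total - free) / 1024 / 1024
    sys_total = total / 1024 / 1024
    sys_pct = sys_peak / max(sys_total, 1) * 100

    return active_peak, reserved_peak, sys_peak, sys_pct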
@@ -259,7 +259,7 @@ def test_for_nans(x, where):
 def first_time_calculation():
     """
     just do any calculation with pytorch layers - the first time this is done it allocaltes about 700MB of memory and
-    spends about 2.7 seconds doing that, at least wih NVidia.
+    spends about 2.7 seconds doing that, at least with NVidia.
     """

     x = torch.zeros((1, 1)).to(device, dtype)
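Note: the warm-up this docstring describes can be reproduced in isolation; a rough sketch, with device and dtype standing in for whatever the webui has configured:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

# run one tiny linear and one tiny conv so kernels and allocator state get initialized up front,
# paying the one-time cost here instead of during the first real generation
x = torch.zeros((1, 1)).to(device, dtype)
linear = torch.nn.Linear(1, 1).to(device, dtype)
linear(x)

x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
conv2d(x)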
@@ -60,7 +60,7 @@ class ExtraNetwork:
         Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments
         separated by colon.

-        Even if the user does not mention this ExtraNetwork in his prompt, the call will stil be made, with empty params_list -
+        Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list -
         in this case, all effects of this extra networks should be disabled.

         Can be called multiple times before deactivate() - each new call should override the previous call completely.
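Note: a schematic stand-in for the contract this docstring describes - activate() runs on every generation, even with an empty params_list. The class below is illustrative only, not importable webui code:

class ExampleExtraNetwork:
    """Stand-in sketch for an ExtraNetwork-style object (illustrative, not the webui base class)."""

    def __init__(self, name):
        self.name = name

    def activate(self, p, params_list):
        # called for every generation, even when the prompt never mentions this network;
        # an empty params_list means every effect of this network must be switched off
        if not params_list:
            print(f"{self.name}: nothing requested, disabling effects")
            return
        for params in params_list:
            print(f"{self.name}: applying with args {params}")

    def deactivate(self, p):
        print(f"{self.name}: cleaning up after generation")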
@@ -139,7 +139,7 @@ def initialize_rest(*, reload_script_modules=False):
        """
        Accesses shared.sd_model property to load model.
        After it's available, if it has been loaded before this access by some extension,
-        its optimization may be None because the list of optimizaers has neet been filled
+        its optimization may be None because the list of optimizers has not been filled
        by that time, so we apply optimization again.
        """
        from modules import devices
@@ -12,7 +12,7 @@ log = logging.getLogger(__name__)

 # before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
 # use check `getattr` and try it for compatibility.
-# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availabilty,
+# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availability,
 # since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279
 def check_for_mps() -> bool:
     if version.parse(torch.__version__) <= version.parse("2.0.1"):
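Note: pieced together from these comments, a version-gated MPS check looks roughly like this (a sketch of the pattern, not a verbatim copy of the function body):

import torch
from packaging import version

def check_for_mps() -> bool:
    if version.parse(torch.__version__) <= version.parse("2.0.1"):
        # older torch: has_mps may not exist at all, and even when it does,
        # actually moving a tensor to the device is the reliable test
        if not getattr(torch, 'has_mps', False):
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False
    else:
        return torch.backends.mps.is_available() and torch.backends.mps.is_built()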
@@ -110,7 +110,7 @@ def load_upscalers():
        except Exception:
            pass

-    datas = []
+    data = []
    commandline_options = vars(shared.cmd_opts)

    # some of upscaler classes will not go away after reloading their modules, and we'll end

@@ -129,10 +129,10 @@ def load_upscalers():
        scaler = cls(commandline_model_path)
        scaler.user_path = commandline_model_path
        scaler.model_download_path = commandline_model_path or scaler.model_path
-        datas += scaler.scalers
+        data += scaler.scalers

    shared.sd_upscalers = sorted(
-        datas,
+        data,
        # Special case for UpscalerNone keeps it at the beginning of the list.
        key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
    )
@@ -341,7 +341,7 @@ class DDPM(pl.LightningModule):
        elif self.parameterization == "x0":
            target = x_start
        else:
-            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

@@ -901,7 +901,7 @@ class LatentDiffusion(DDPM):
    def apply_model(self, x_noisy, t, cond, return_ids=False):

        if isinstance(cond, dict):
-            # hybrid case, cond is exptected to be a dict
+            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):

@@ -937,7 +937,7 @@ class LatentDiffusion(DDPM):
            cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

        elif self.cond_stage_key == 'coordinates_bbox':
-            assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
+            assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

            # assuming padding of unfold is always 0 and its dilation is always 1
            n_patches_per_row = int((w - ks[0]) / stride[0] + 1)

@@ -947,7 +947,7 @@ class LatentDiffusion(DDPM):
            num_downs = self.first_stage_model.encoder.num_resolutions - 1
            rescale_latent = 2 ** (num_downs)

-            # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
+            # get top left positions of patches as conforming for the bbbox tokenizer, therefore we
            # need to rescale the tl patch coordinates to be in between (0,1)
            tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                     rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
@@ -34,7 +34,7 @@ def randn_local(seed, shape):


 def randn_like(x):
-    """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""

@@ -48,7 +48,7 @@ def randn_like(x):


 def randn_without_seed(shape, generator=None):
-    """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""
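Note: the generator these docstrings refer to is a torch.Generator that is seeded once and then reused for every draw; a bare-bones sketch of that pattern (the module-level names here are illustrative, not the rng module's):

import torch

generator = torch.Generator(device="cpu")

def manual_seed(seed):
    # seed once; every later draw continues the same deterministic sequence
    generator.manual_seed(seed)

def randn_without_seed(shape):
    return torch.randn(shape, generator=generator)

manual_seed(31337)
print(randn_without_seed((2, 3)))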
@@ -92,7 +92,7 @@ class Script:
    """If true, the script setup will only be run in Gradio UI, not in API"""

    controls = None
-    """A list of controls retured by the ui()."""
+    """A list of controls returned by the ui()."""

    def title(self):
        """this function should return the title of the script. This is what will be displayed in the dropdown menu."""

@@ -109,7 +109,7 @@ class Script:
    def show(self, is_img2img):
        """
-        is_img2img is True if this function is called for the img2img interface, and Fasle otherwise
+        is_img2img is True if this function is called for the img2img interface, and False otherwise

        This function should return:
         - False if the script should not be shown in UI at all
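Note: a minimal, hypothetical script skeleton showing how title(), show() and ui() fit together per the docstrings above (the txt2img-only choice and the slider are just examples):

import gradio as gr
from modules import scripts

class ExampleScript(scripts.Script):
    def title(self):
        return "Example script"

    def show(self, is_img2img):
        # only offer this script in the txt2img dropdown
        return not is_img2img

    def ui(self, is_img2img):
        strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Strength")
        return [strength]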
@@ -35,7 +35,7 @@ class EmphasisIgnore(Emphasis):

 class EmphasisOriginal(Emphasis):
     name = "Original"
-    description = "the orginal emphasis implementation"
+    description = "the original emphasis implementation"

     def after_transformers(self):
         original_mean = self.z.mean()
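Note: the original_mean captured in the context line above is used to renormalize after the emphasis multipliers are applied; a self-contained sketch of that idea (not a verbatim copy of the class):

import torch

def apply_emphasis(z, multipliers):
    # z: token embeddings (B, T, C); multipliers: per-token emphasis weights (B, T)
    original_mean = z.mean()
    z = z * multipliers.reshape(multipliers.shape + (1,)).expand(z.shape)
    # restore the original mean so emphasis changes relative weighting, not overall magnitude
    new_mean = z.mean()
    return z * (original_mean / new_mean)

z = torch.randn(1, 77, 768)
multipliers = torch.ones(1, 77)
multipliers[0, 5] = 1.3  # emphasize one token
print(apply_emphasis(z, multipliers).shape)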
@@ -48,7 +48,7 @@ class EmphasisOriginal(Emphasis):

 class EmphasisOriginalNoNorm(EmphasisOriginal):
     name = "No norm"
-    description = "same as orginal, but without normalization (seems to work better for SDXL)"
+    description = "same as original, but without normalization (seems to work better for SDXL)"

     def after_transformers(self):
         self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape)
@@ -23,7 +23,7 @@ class PromptChunk:

 PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
 """An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
-chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
+chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
 are applied by sd_hijack.EmbeddingsWithFixes's forward function."""

@@ -66,7 +66,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):

    def encode_with_transformers(self, tokens):
        """
-        converts a batch of token ids (in python lists) into a single tensor with numeric respresentation of those tokens;
+        converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens;
        All python lists with tokens are assumed to have same length, usually 77.
        if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on
        model - can be 768 and 1024.
@@ -136,7 +136,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
            if token == self.comma_token:
                last_comma = len(chunk.tokens)

-            # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
+            # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
            # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
            elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
                break_location = last_comma + 1
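Note: the backtracking these comments describe can be shown on plain token lists; a simplified, self-contained sketch (the comma token id and the defaults are illustrative, not the tokenizer's, and this is not the class's actual method):

def chunk_tokens(tokens, chunk_length=75, comma_token=267, comma_padding_backtrack=20):
    """Split a flat token list into chunks of at most chunk_length, preferring to break after a recent comma."""
    chunks = []
    chunk = []
    last_comma = -1

    for token in tokens:
        if token == comma_token:
            last_comma = len(chunk)

        if len(chunk) == chunk_length:
            if comma_padding_backtrack != 0 and last_comma != -1 and len(chunk) - last_comma <= comma_padding_backtrack:
                # a comma sits close enough behind the break point: end the chunk right after it
                # and carry the text that followed the comma into the next chunk
                break_location = last_comma + 1
                carry = chunk[break_location:]
                chunks.append(chunk[:break_location])
                chunk = carry
            else:
                chunks.append(chunk)
                chunk = []
            last_comma = -1

        chunk.append(token)

    if chunk:
        chunks.append(chunk)
    return chunks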
@@ -206,7 +206,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
        be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280.
        An example shape returned by this function can be: (2, 77, 768).
        For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values.
-        Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet
+        Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
        is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
        """

@@ -784,7 +784,7 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
    If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loadded model to CPU if necessary).
    If not, returns the model that can be used to load weights from checkpoint_info's file.
    If no such model exists, returns None.
-    Additionaly deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
+    Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
    """

    already_loaded = None
@@ -43,7 +43,7 @@ restricted_opts = None
 sd_model: sd_models_types.WebuiSdModel = None

 settings_components = None
-"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings"""
+"""assigned from ui.py, a mapping on setting names to gradio components repsponsible for those settings"""

 tab_names = []
@@ -213,7 +213,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd"
    "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
    "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; overrides the above if set; WARNING: truncates negative prompt if it's too long; changes seeds"),
    "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"),
-    "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"),
+    "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond commandline argument"),
    "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
    "cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
}))

@@ -370,7 +370,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
    'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
    'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
    'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
-    'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"),
+    'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multiplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"),
    'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'),
    'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'),
    'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"),
@@ -157,7 +157,7 @@ class State:
                self.current_image_sampling_step = self.sampling_step

        except Exception:
-            # when switching models during genration, VAE would be on CPU, so creating an image will fail.
+            # when switching models during generation, VAE would be on CPU, so creating an image will fail.
            # we silently ignore this error
            errors.record_exception()
@@ -65,7 +65,7 @@ def crop_image(im, settings):
        rect[3] -= 1
        d.rectangle(rect, outline=GREEN)
        results.append(im_debug)
-        if settings.destop_view_image:
+        if settings.desktop_view_image:
            im_debug.show()

    return results

@@ -341,5 +341,5 @@ class Settings:
        self.entropy_points_weight = entropy_points_weight
        self.face_points_weight = face_points_weight
        self.annotate_image = annotate_image
-        self.destop_view_image = False
+        self.desktop_view_image = False
        self.dnn_model_path = dnn_model_path
@@ -193,11 +193,11 @@ if __name__ == '__main__':

    embedded_image = insert_image_data_embed(cap_image, test_embed)

-    retrived_embed = extract_image_data_embed(embedded_image)
+    retrieved_embed = extract_image_data_embed(embedded_image)

-    assert str(retrived_embed) == str(test_embed)
+    assert str(retrieved_embed) == str(test_embed)

-    embedded_image2 = insert_image_data_embed(cap_image, retrived_embed)
+    embedded_image2 = insert_image_data_embed(cap_image, retrieved_embed)

    assert embedded_image == embedded_image2
@@ -172,7 +172,7 @@ class EmbeddingDatabase:
            if data:
                name = data.get('name', name)
            else:
-                # if data is None, means this is not an embeding, just a preview image
+                # if data is None, means this is not an embedding, just a preview image
                return
        elif ext in ['.BIN', '.PT']:
            data = torch.load(path, map_location="cpu")
@@ -105,7 +105,7 @@ def save_files(js_data, images, do_make_zip, index):
    logfile_path = os.path.join(shared.opts.outdir_save, "log.csv")

    # NOTE: ensure csv integrity when fields are added by
-    # updating headers and padding with delimeters where needed
+    # updating headers and padding with delimiters where needed
    if os.path.exists(logfile_path):
        update_logfile(logfile_path, fields)
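Note: the header-update-and-pad idea in that comment can be shown in a small standalone form; a rough sketch of what such a helper could do (illustrative, not the repo's update_logfile):

import csv

def pad_logfile(logfile_path, fields):
    # rewrite the CSV so the header matches the current field list and every
    # existing row gets empty cells for columns that were added after it was written
    with open(logfile_path, "r", encoding="utf8", newline="") as file:
        rows = list(csv.reader(file))

    records = rows[1:] if rows else []
    padded = [row + [""] * (len(fields) - len(row)) for row in records]

    with open(logfile_path, "w", encoding="utf8", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(fields)
        writer.writerows(padded)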
@@ -88,7 +88,7 @@ class DropdownEditable(FormComponent, gr.Dropdown):
 class InputAccordion(gr.Checkbox):
     """A gr.Accordion that can be used as an input - returns True if open, False if closed.

-    Actaully just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox.
+    Actually just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox.
     """

     global_index = 0
@@ -380,7 +380,7 @@ def install_extension_from_url(dirname, url, branch_name=None):
    except OSError as err:
        if err.errno == errno.EXDEV:
            # Cross device link, typical in docker or when tmp/ and extensions/ are on different file systems
-            # Since we can't use a rename, do the slower but more versitile shutil.move()
+            # Since we can't use a rename, do the slower but more versatile shutil.move()
            shutil.move(tmpdir, target_dir)
        else:
            # Something else, not enough free space, permissions, etc. rethrow it so that it gets handled.
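Note: the hunk above patches a comment inside the standard rename-then-fallback move pattern; a self-contained sketch of that pattern (the function name and arguments are illustrative):

import errno
import os
import shutil

def move_into_place(src, dst):
    try:
        # os.rename is fast and atomic, but only works within a single file system
        os.rename(src, dst)
    except OSError as err:
        if err.errno == errno.EXDEV:
            # cross-device link (e.g. tmp/ and extensions/ on different mounts in docker):
            # fall back to the slower copy-and-delete move
            shutil.move(src, dst)
        else:
            # anything else (permissions, disk full, ...) should propagate to the caller
            raise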
@@ -67,7 +67,7 @@ class UiPromptStyles:
            with gr.Row():
                self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.")
                ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
-                self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.")
+                self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selection dropdown in main UI to the prompt.")
                self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")

            with gr.Row():