Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2026-01-06 01:02:02 -08:00
Option for using fp16 weights when applying LoRA
parent b2e039d07b
commit 370a77f8e7
4 changed files with 25 additions and 7 deletions
@@ -178,6 +178,7 @@ def configure_opts_onchange():
     shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
     shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
     shared.opts.onchange("fp8_storage", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
+    shared.opts.onchange("cache_fp16_weight", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
     startup_timer.record("opts onchange")
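For context: the new cache_fp16_weight option pairs with fp8_storage, the idea being that a cached fp16 copy of the weights lets LoRA be applied at higher precision than the fp8-stored weights; changing either option triggers a full weight reload. Below is a minimal, self-contained sketch of the onchange-registry pattern the diff relies on. The Options class here is an illustrative assumption, not the webui's actual implementation (the real handler calls sd_models.reload_model_weights()).

    # Minimal sketch of an options registry with onchange hooks, loosely
    # modeled on the pattern in the diff above. Names are illustrative
    # assumptions, not the webui's real code.

    class Options:
        def __init__(self):
            self.data = {}
            self.handlers = {}

        def onchange(self, key, func, call=True):
            # Register func to run whenever `key` is assigned a new value.
            self.handlers[key] = func
            if call:
                func()  # optionally fire once at registration time

        def set(self, key, value):
            # Invoke the registered handler only when the value actually changes.
            old = self.data.get(key)
            self.data[key] = value
            if old != value and key in self.handlers:
                self.handlers[key]()


    opts = Options()

    def reload_model_weights():
        print("reloading model weights to pick up new precision settings")

    # call=False mirrors the diff: don't reload at startup, only on later changes.
    opts.onchange("cache_fp16_weight", reload_model_weights, call=False)
    opts.set("cache_fp16_weight", True)  # triggers reload_model_weights()

Registering with call=False avoids an unnecessary full weight reload during startup, since the option's current value is already reflected in the model that was just loaded.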