
Commit 5bbbda4

Merge branch 'dev' into release_candidate

2 parents 48dd4d9 + 9f5a98d

27 files changed, +324 -107 lines

.gitignore (+2)

@@ -2,6 +2,7 @@ __pycache__
 *.ckpt
 *.safetensors
 *.pth
+.DS_Store
 /ESRGAN/*
 /SwinIR/*
 /repositories
@@ -40,3 +41,4 @@ notification.mp3
 /test/test_outputs
 /cache
 trace.json
+/sysinfo-????-??-??-??-??.json

README.md (+24 -2)

@@ -78,7 +78,7 @@ A web interface for Stable Diffusion, implemented using Gradio library.
 - Clip skip
 - Hypernetworks
 - Loras (same as Hypernetworks but more pretty)
-- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
+- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
 - Can select to load a different VAE from settings screen
 - Estimated completion time in progress bar
 - API
@@ -122,16 +122,38 @@ Alternatively, use online services (like Google Colab):
 # Debian-based:
 sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
 # Red Hat-based:
-sudo dnf install wget git python3 gperftools-libs libglvnd-glx
+sudo dnf install wget git python3 gperftools-libs libglvnd-glx
 # openSUSE-based:
 sudo zypper install wget git python3 libtcmalloc4 libglvnd
 # Arch-based:
 sudo pacman -S wget git python3
 ```
+If your system is very new, you need to install python3.11 or python3.10:
+```bash
+# Ubuntu 24.04
+sudo add-apt-repository ppa:deadsnakes/ppa
+sudo apt update
+sudo apt install python3.11
+
+# Manjaro/Arch
+sudo pacman -S yay
+yay -S python311 # do not confuse with python3.11 package
+
+# Only for 3.11
+# Then set up env variable in launch script
+export python_cmd="python3.11"
+# or in webui-user.sh
+python_cmd="python3.11"
+```
 2. Navigate to the directory you would like the webui to be installed and execute the following command:
 ```bash
 wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
 ```
+Or just clone the repo wherever you want:
+```bash
+git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
+```
+
 3. Run `webui.sh`.
 4. Check `webui-user.sh` for options.
 ### Installation on Apple Silicon

extensions-builtin/Lora/network.py (+5 -1)

@@ -7,6 +7,7 @@
 import torch.nn.functional as F

 from modules import sd_models, cache, errors, hashes, shared
+import modules.models.sd3.mmdit

 NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])

@@ -114,7 +115,10 @@ def __init__(self, net: Network, weights: NetworkWeights):
         self.sd_key = weights.sd_key
         self.sd_module = weights.sd_module

-        if hasattr(self.sd_module, 'weight'):
+        if isinstance(self.sd_module, modules.models.sd3.mmdit.QkvLinear):
+            s = self.sd_module.weight.shape
+            self.shape = (s[0] // 3, s[1])
+        elif hasattr(self.sd_module, 'weight'):
             self.shape = self.sd_module.weight.shape
         elif isinstance(self.sd_module, nn.MultiheadAttention):
             # For now, only self-attn use Pytorch's MHA
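The new branch reports the per-projection shape for a fused qkv layer: the fused weight stacks the q, k and v projections along dim 0, so dividing the first dimension by three recovers the shape of a single projection. A minimal sketch of that arithmetic (standalone, not the webui code; the local QkvLinear subclass stands in for modules.models.sd3.mmdit.QkvLinear):

```python
import torch


class QkvLinear(torch.nn.Linear):  # stand-in for modules.models.sd3.mmdit.QkvLinear
    pass


# A fused qkv projection for dim=8 has weight shape (3*dim, dim).
qkv = QkvLinear(8, 8 * 3, bias=False)
s = qkv.weight.shape                    # torch.Size([24, 8])
per_projection_shape = (s[0] // 3, s[1])
print(s, per_projection_shape)          # torch.Size([24, 8]) (8, 8)
```
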

extensions-builtin/Lora/network_lora.py (+9 -1)

@@ -1,6 +1,7 @@
 import torch

 import lyco_helpers
+import modules.models.sd3.mmdit
 import network
 from modules import devices

@@ -10,6 +11,13 @@ def create_module(self, net: network.Network, weights: network.NetworkWeights):
         if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
             return NetworkModuleLora(net, weights)

+        if all(x in weights.w for x in ["lora_A.weight", "lora_B.weight"]):
+            w = weights.w.copy()
+            weights.w.clear()
+            weights.w.update({"lora_up.weight": w["lora_B.weight"], "lora_down.weight": w["lora_A.weight"]})
+
+            return NetworkModuleLora(net, weights)
+
         return None


@@ -29,7 +37,7 @@ def create_module(self, weights, key, none_ok=False):
         if weight is None and none_ok:
             return None

-        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
+        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, modules.models.sd3.mmdit.QkvLinear]
         is_conv = type(self.sd_module) in [torch.nn.Conv2d]

         if is_linear:
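The added branch accepts checkpoints that use the lora_A/lora_B naming and rewrites the dict in place into the lora_down/lora_up names the rest of the module already handles (lora_B maps to "up", lora_A to "down"). A toy illustration of the remapping; the tensor shapes here are only illustrative:

```python
import torch

# Illustrative weights dict using the lora_A/lora_B key style.
w_in = {"lora_A.weight": torch.zeros(4, 16), "lora_B.weight": torch.zeros(16, 4)}

if all(x in w_in for x in ["lora_A.weight", "lora_B.weight"]):
    w = w_in.copy()
    w_in.clear()
    # lora_B is the "up" matrix, lora_A the "down" matrix.
    w_in.update({"lora_up.weight": w["lora_B.weight"], "lora_down.weight": w["lora_A.weight"]})

print(sorted(w_in))  # ['lora_down.weight', 'lora_up.weight']
```
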

extensions-builtin/Lora/networks.py (+75 -21)

@@ -20,6 +20,7 @@
 from modules import shared, devices, sd_models, errors, scripts, sd_hijack
 import modules.textual_inversion.textual_inversion as textual_inversion
+import modules.models.sd3.mmdit

 from lora_logger import logger

@@ -166,12 +167,26 @@ def load_network(name, network_on_disk):

     keys_failed_to_match = {}
     is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping
+    if hasattr(shared.sd_model, 'diffusers_weight_map'):
+        diffusers_weight_map = shared.sd_model.diffusers_weight_map
+    elif hasattr(shared.sd_model, 'diffusers_weight_mapping'):
+        diffusers_weight_map = {}
+        for k, v in shared.sd_model.diffusers_weight_mapping():
+            diffusers_weight_map[k] = v
+        shared.sd_model.diffusers_weight_map = diffusers_weight_map
+    else:
+        diffusers_weight_map = None

     matched_networks = {}
     bundle_embeddings = {}

     for key_network, weight in sd.items():
-        key_network_without_network_parts, _, network_part = key_network.partition(".")
+
+        if diffusers_weight_map:
+            key_network_without_network_parts, network_name, network_weight = key_network.rsplit(".", 2)
+            network_part = network_name + '.' + network_weight
+        else:
+            key_network_without_network_parts, _, network_part = key_network.partition(".")

         if key_network_without_network_parts == "bundle_emb":
             emb_name, vec_name = network_part.split(".", 1)
@@ -183,7 +198,11 @@ def load_network(name, network_on_disk):
                 emb_dict[vec_name] = weight
             bundle_embeddings[emb_name] = emb_dict

-        key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
+        if diffusers_weight_map:
+            key = diffusers_weight_map.get(key_network_without_network_parts, key_network_without_network_parts)
+        else:
+            key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
+
         sd_module = shared.sd_model.network_layer_mapping.get(key, None)

         if sd_module is None:
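When a diffusers_weight_map is available, the checkpoint key is split from the right into module path, network name and weight name; otherwise the old partition(".") behaviour keeps everything after the first dot as the network part. A small sketch of the two splits; the key strings below are only illustrative:

```python
# Old behaviour: first dot separates module from network part.
key_compvis = "lora_unet_down_blocks_0_attentions_0.lora_up.weight"
module, _, network_part = key_compvis.partition(".")
print(module, network_part)
# lora_unet_down_blocks_0_attentions_0 lora_up.weight

# New behaviour for mapped (dotted) module paths: split off the last two segments.
key_diffusers = "transformer.transformer_blocks.0.attn.to_q.lora_A.weight"
module, network_name, network_weight = key_diffusers.rsplit(".", 2)
print(module, network_name + "." + network_weight)
# transformer.transformer_blocks.0.attn.to_q lora_A.weight
```
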
@@ -347,6 +366,28 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
     purge_networks_from_memory()


+def allowed_layer_without_weight(layer):
+    if isinstance(layer, torch.nn.LayerNorm) and not layer.elementwise_affine:
+        return True
+
+    return False
+
+
+def store_weights_backup(weight):
+    if weight is None:
+        return None
+
+    return weight.to(devices.cpu, copy=True)
+
+
+def restore_weights_backup(obj, field, weight):
+    if weight is None:
+        setattr(obj, field, None)
+        return
+
+    getattr(obj, field).copy_(weight)
+
+
 def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
     weights_backup = getattr(self, "network_weights_backup", None)
     bias_backup = getattr(self, "network_bias_backup", None)
@@ -356,21 +397,15 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li

     if weights_backup is not None:
         if isinstance(self, torch.nn.MultiheadAttention):
-            self.in_proj_weight.copy_(weights_backup[0])
-            self.out_proj.weight.copy_(weights_backup[1])
+            restore_weights_backup(self, 'in_proj_weight', weights_backup[0])
+            restore_weights_backup(self.out_proj, 'weight', weights_backup[1])
         else:
-            self.weight.copy_(weights_backup)
+            restore_weights_backup(self, 'weight', weights_backup)

-    if bias_backup is not None:
-        if isinstance(self, torch.nn.MultiheadAttention):
-            self.out_proj.bias.copy_(bias_backup)
-        else:
-            self.bias.copy_(bias_backup)
+    if isinstance(self, torch.nn.MultiheadAttention):
+        restore_weights_backup(self.out_proj, 'bias', bias_backup)
     else:
-        if isinstance(self, torch.nn.MultiheadAttention):
-            self.out_proj.bias = None
-        else:
-            self.bias = None
+        restore_weights_backup(self, 'bias', bias_backup)


 def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
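The new helpers centralize the None handling around backups: weights are copied to CPU when backed up, copied back in place on restore, and a None backup clears the attribute. A minimal round-trip sketch using a plain nn.Linear as a stand-in for a model layer (it assumes everything lives on CPU; in the webui the backup target is devices.cpu):

```python
import torch


def store_weights_backup(weight):
    if weight is None:
        return None
    return weight.to(torch.device("cpu"), copy=True)


def restore_weights_backup(obj, field, weight):
    if weight is None:
        setattr(obj, field, None)
        return
    getattr(obj, field).copy_(weight)


layer = torch.nn.Linear(4, 4)               # stand-in for a network layer
backup = store_weights_backup(layer.weight)  # CPU copy of the original weight

with torch.no_grad():
    layer.weight += 1.0                      # simulate a LoRA delta being applied
    restore_weights_backup(layer, "weight", backup)

print(torch.equal(layer.weight, backup))     # True: original weights are back
```
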
@@ -389,37 +424,38 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn

     weights_backup = getattr(self, "network_weights_backup", None)
     if weights_backup is None and wanted_names != ():
-        if current_names != ():
-            raise RuntimeError("no backup weights found and current weights are not unchanged")
+        if current_names != () and not allowed_layer_without_weight(self):
+            raise RuntimeError(f"{network_layer_name} - no backup weights found and current weights are not unchanged")

         if isinstance(self, torch.nn.MultiheadAttention):
-            weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
+            weights_backup = (store_weights_backup(self.in_proj_weight), store_weights_backup(self.out_proj.weight))
         else:
-            weights_backup = self.weight.to(devices.cpu, copy=True)
+            weights_backup = store_weights_backup(self.weight)

         self.network_weights_backup = weights_backup

     bias_backup = getattr(self, "network_bias_backup", None)
     if bias_backup is None and wanted_names != ():
         if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
-            bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
+            bias_backup = store_weights_backup(self.out_proj.bias)
         elif getattr(self, 'bias', None) is not None:
-            bias_backup = self.bias.to(devices.cpu, copy=True)
+            bias_backup = store_weights_backup(self.bias)
         else:
             bias_backup = None

         # Unlike weight which always has value, some modules don't have bias.
         # Only report if bias is not None and current bias are not unchanged.
         if bias_backup is not None and current_names != ():
             raise RuntimeError("no backup bias found and current bias are not unchanged")
+
         self.network_bias_backup = bias_backup

     if current_names != wanted_names:
         network_restore_weights_from_backup(self)

     for net in loaded_networks:
         module = net.modules.get(network_layer_name, None)
-        if module is not None and hasattr(self, 'weight'):
+        if module is not None and hasattr(self, 'weight') and not isinstance(module, modules.models.sd3.mmdit.QkvLinear):
             try:
                 with torch.no_grad():
                     if getattr(self, 'fp16_weight', None) is None:
@@ -479,6 +515,24 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn

             continue

+        if isinstance(self, modules.models.sd3.mmdit.QkvLinear) and module_q and module_k and module_v:
+            try:
+                with torch.no_grad():
+                    # Send "real" orig_weight into MHA's lora module
+                    qw, kw, vw = self.weight.chunk(3, 0)
+                    updown_q, _ = module_q.calc_updown(qw)
+                    updown_k, _ = module_k.calc_updown(kw)
+                    updown_v, _ = module_v.calc_updown(vw)
+                    del qw, kw, vw
+                    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+                    self.weight += updown_qkv
+
+            except RuntimeError as e:
+                logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+            continue
+
         if module is None:
             continue

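For a fused QkvLinear the per-projection LoRA deltas are computed against the q, k and v slices (chunk along dim 0) and stacked back with vstack before being added to the fused weight. A toy illustration of that update with plain tensors; fake_calc_updown is a hypothetical stand-in for the module's calc_updown and simply returns a delta of matching shape:

```python
import torch

dim = 8
fused_weight = torch.zeros(3 * dim, dim)   # stands in for QkvLinear.weight


def fake_calc_updown(orig_weight):         # stand-in for module_q/k/v.calc_updown
    return torch.ones_like(orig_weight), None


with torch.no_grad():
    qw, kw, vw = fused_weight.chunk(3, 0)  # three (dim, dim) slices for q, k, v
    updown_q, _ = fake_calc_updown(qw)
    updown_k, _ = fake_calc_updown(kw)
    updown_v, _ = fake_calc_updown(vw)
    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])  # back to (3*dim, dim)
    fused_weight += updown_qkv

print(fused_weight.shape, fused_weight.mean().item())  # torch.Size([24, 8]) 1.0
```
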

modules/api/api.py (+1 -1)

@@ -113,7 +113,7 @@ def encode_pil_to_base64(image):
             image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)

         elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
-            if image.mode == "RGBA":
+            if image.mode in ("RGBA", "P"):
                 image = image.convert("RGB")
             parameters = image.info.get('parameters', None)
             exif_bytes = piexif.dump({
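JPEG cannot encode palette ("P") or alpha ("RGBA") images directly, so the extra case converts palette-mode images to RGB before saving, the same way RGBA was already handled. A small standalone sketch of the guard using Pillow:

```python
import io
from PIL import Image

image = Image.new("P", (16, 16))   # palette-mode image; saving it as JPEG would fail

buf = io.BytesIO()
if image.mode in ("RGBA", "P"):
    image = image.convert("RGB")   # same conversion the API now applies
image.save(buf, format="JPEG")

print(image.mode, len(buf.getvalue()) > 0)  # RGB True
```
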

modules/call_queue.py (+17 -8)

@@ -47,6 +47,22 @@ def f(*args, **kwargs):


 def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
+    @wraps(func)
+    def f(*args, **kwargs):
+        try:
+            res = func(*args, **kwargs)
+        finally:
+            shared.state.skipped = False
+            shared.state.interrupted = False
+            shared.state.stopping_generation = False
+            shared.state.job_count = 0
+            shared.state.job = ""
+        return res
+
+    return wrap_gradio_call_no_job(f, extra_outputs, add_stats)
+
+
+def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
     @wraps(func)
     def f(*args, extra_outputs_array=extra_outputs, **kwargs):
         run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
@@ -66,9 +82,6 @@ def f(*args, extra_outputs_array=extra_outputs, **kwargs):
             arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
             errors.report(f"{message}\n{arg_str}", exc_info=True)

-            shared.state.job = ""
-            shared.state.job_count = 0
-
             if extra_outputs_array is None:
                 extra_outputs_array = [None, '']

@@ -77,11 +90,6 @@ def f(*args, extra_outputs_array=extra_outputs, **kwargs):

         devices.torch_gc()

-        shared.state.skipped = False
-        shared.state.interrupted = False
-        shared.state.stopping_generation = False
-        shared.state.job_count = 0
-
         if not add_stats:
             return tuple(res)

@@ -123,3 +131,4 @@ def f(*args, extra_outputs_array=extra_outputs, **kwargs):
         return tuple(res)

     return f
+
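The refactor splits the old wrapper in two: wrap_gradio_call now only resets the shared job/interrupt state in a finally block (so the reset also happens when the call raises) and delegates stats and error handling to wrap_gradio_call_no_job, which callers that do not manage a job can use directly. A minimal sketch of the pattern; wrap_call, wrap_call_no_job and the state object are stand-ins for the real names in modules.call_queue and modules.shared:

```python
from functools import wraps
from types import SimpleNamespace

state = SimpleNamespace(skipped=False, interrupted=False, job_count=0, job="")


def wrap_call_no_job(func):
    @wraps(func)
    def f(*args, **kwargs):
        return func(*args, **kwargs)   # stats / error handling would live here
    return f


def wrap_call(func):
    @wraps(func)
    def f(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:                       # reset job state even if func raises
            state.skipped = False
            state.interrupted = False
            state.job_count = 0
            state.job = ""
    return wrap_call_no_job(f)


@wrap_call
def generate():
    state.job = "txt2img"
    state.job_count = 1
    return "done"

print(generate(), state.job, state.job_count)  # "done", job reset to "" and job_count to 0
```
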

modules/infotext_utils.py (+4 -3)

@@ -146,18 +146,19 @@ def connect_paste_params_buttons():
         destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)

         if binding.source_image_component and destination_image_component:
+            need_send_dementions = destination_width_component and binding.tabname != 'inpaint'
             if isinstance(binding.source_image_component, gr.Gallery):
-                func = send_image_and_dimensions if destination_width_component else image_from_url_text
+                func = send_image_and_dimensions if need_send_dementions else image_from_url_text
                 jsfunc = "extract_image_from_gallery"
             else:
-                func = send_image_and_dimensions if destination_width_component else lambda x: x
+                func = send_image_and_dimensions if need_send_dementions else lambda x: x
                 jsfunc = None

             binding.paste_button.click(
                 fn=func,
                 _js=jsfunc,
                 inputs=[binding.source_image_component],
-                outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
+                outputs=[destination_image_component, destination_width_component, destination_height_component] if need_send_dementions else [destination_image_component],
                 show_progress=False,
             )

modules/launch_utils.py (-1)

@@ -446,7 +446,6 @@ def prepare_environment():
         exit(0)


-
 def configure_for_tests():
     if "--api" not in sys.argv:
         sys.argv.append("--api")

modules/models/sd3/mmdit.py (+4 -1)

@@ -175,6 +175,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 #################################################################################


+class QkvLinear(torch.nn.Linear):
+    pass
+
 def split_qkv(qkv, head_dim):
     qkv = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, -1, head_dim).movedim(2, 0)
     return qkv[0], qkv[1], qkv[2]
@@ -202,7 +205,7 @@ def __init__(
         self.num_heads = num_heads
         self.head_dim = dim // num_heads

-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device)
+        self.qkv = QkvLinear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device)
         if not pre_only:
             self.proj = nn.Linear(dim, dim, dtype=dtype, device=device)
         assert attn_mode in self.ATTENTION_MODES
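QkvLinear adds no behaviour over nn.Linear; it is an empty subclass used as a type tag so the LoRA code can pick out fused qkv projections with isinstance and treat them differently from ordinary linear layers. A tiny standalone demonstration of the marker-type idea:

```python
import torch
from torch import nn


class QkvLinear(nn.Linear):
    # Empty marker subclass: identical behaviour, distinguishable type.
    pass


proj = nn.Linear(8, 8)
qkv = QkvLinear(8, 24)

print(isinstance(qkv, nn.Linear), isinstance(qkv, QkvLinear), isinstance(proj, QkvLinear))
# True True False
```
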
