Commit 394ffa7

Merge branch 'release_candidate'
2 parents baf6946 + dbc88c9

102 files changed: +3379 -995 lines


.eslintrc.js
+6 -3

@@ -50,13 +50,14 @@ module.exports = {
     globals: {
         //script.js
         gradioApp: "readonly",
+        executeCallbacks: "readonly",
+        onAfterUiUpdate: "readonly",
+        onOptionsChanged: "readonly",
         onUiLoaded: "readonly",
         onUiUpdate: "readonly",
-        onOptionsChanged: "readonly",
         uiCurrentTab: "writable",
-        uiElementIsVisible: "readonly",
         uiElementInSight: "readonly",
-        executeCallbacks: "readonly",
+        uiElementIsVisible: "readonly",
         //ui.js
         opts: "writable",
         all_gallery_buttons: "readonly",
@@ -84,5 +85,7 @@ module.exports = {
         // imageviewer.js
         modalPrevImage: "readonly",
         modalNextImage: "readonly",
+        // token-counters.js
+        setupTokenCounters: "readonly",
     }
 };

.github/ISSUE_TEMPLATE/bug_report.yml
+19 -2

@@ -43,8 +43,8 @@ body:
   - type: input
     id: commit
     attributes:
-      label: Commit where the problem happens
-      description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
+      label: Version or Commit where the problem happens
+      description: "Which webui version or commit are you running ? (Do not write *Latest Version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Version: v1.2.3** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)"
     validations:
       required: true
   - type: dropdown
@@ -80,6 +80,23 @@ body:
       - AMD GPUs (RX 5000 below)
       - CPU
       - Other GPUs
+  - type: dropdown
+    id: cross_attention_opt
+    attributes:
+      label: Cross attention optimization
+      description: What cross attention optimization are you using, Settings -> Optimizations -> Cross attention optimization
+      multiple: false
+      options:
+        - Automatic
+        - xformers
+        - sdp-no-mem
+        - sdp
+        - Doggettx
+        - V1
+        - InvokeAI
+        - "None "
+    validations:
+      required: true
   - type: dropdown
     id: browsers
     attributes:

CHANGELOG.md
+57

@@ -1,3 +1,60 @@
+## 1.4.0
+
+### Features:
+ * zoom controls for inpainting
+ * run basic torch calculation at startup in parallel to reduce the performance impact of first generation (see the sketch after this diff)
+ * option to pad prompt/neg prompt to be same length
+ * remove taming_transformers dependency
+ * custom k-diffusion scheduler settings
+ * add an option to show selected settings in main txt2img/img2img UI
+ * sysinfo tab in settings
+ * infer styles from prompts when pasting params into the UI
+ * an option to control the behavior of the above
+
+### Minor:
+ * bump Gradio to 3.32.0
+ * bump xformers to 0.0.20
+ * Add option to disable token counters
+ * tooltip fixes & optimizations
+ * make it possible to configure filename for the zip download
+ * `[vae_filename]` pattern for filenames
+ * Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
+ * change UI reorder setting to multiselect
+ * read version info from CHANGELOG.md if git version info is not available
+ * link footer API to Wiki when API is not active
+ * persistent conds cache (opt-in optimization)
+
+### Extensions:
+ * After installing extensions, webui properly restarts the process rather than reloads the UI
+ * Added VAE listing to web API. Via: /sdapi/v1/sd-vae
+ * custom unet support
+ * Add onAfterUiUpdate callback
+ * refactor EmbeddingDatabase.register_embedding() to allow unregistering
+ * add before_process callback for scripts
+ * add ability for alwayson scripts to specify section and let user reorder those sections
+
+### Bug Fixes:
+ * Fix dragging text to prompt
+ * fix incorrect quoting for infotext values with colon in them
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
+ * Fix s_min_uncond default type int
+ * Fix for #10643 (Inpainting mask sometimes not working)
+ * fix bad styling for thumbs view in extra networks #10639
+ * fix for empty list of optimizations #10605
+ * small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
+ * fix --ui-debug-mode exit
+ * patch GitPython to not use leaky persistent processes
+ * fix duplicate Cross attention optimization after UI reload
+ * torch.cuda.is_available() check for SdOptimizationXformers
+ * fix hires fix using wrong conds in second pass if using Loras
+ * handle exception when parsing generation parameters from png info
+ * fix upcast attention dtype error
+ * forcing Torch Version to 1.13.1 for RX 5000 series GPUs
+ * split mask blur into X and Y components, patch Outpainting MK2 accordingly
+ * don't die when a LoRA is a broken symlink
+ * allow activation of Generate Forever during generation
+
+
 ## 1.3.2
 
 ### Bug Fixes:
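Note: the "run basic torch calculation at startup in parallel" feature above amounts to warming torch up in a background thread before the first generation. A minimal sketch of the idea, not the webui's actual implementation; the function name and tensor sizes here are illustrative:

import threading
import torch


def warmup_torch():
    # A tiny tensor op forces torch (and CUDA, if available) to finish its
    # lazy initialization, so the first real generation doesn't pay that cost.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.zeros((1, 1), device=device)
    _ = x * x + x


# Started as a daemon thread so model loading and UI construction
# proceed in parallel with the warmup.
threading.Thread(target=warmup_torch, daemon=True).start()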

extensions-builtin/LDSR/scripts/ldsr_model.py
+2 -6

@@ -1,12 +1,10 @@
 import os
-import sys
-import traceback
 
 from basicsr.utils.download_util import load_file_from_url
 
 from modules.upscaler import Upscaler, UpscalerData
 from ldsr_model_arch import LDSR
-from modules import shared, script_callbacks
+from modules import shared, script_callbacks, errors
 import sd_hijack_autoencoder  # noqa: F401
 import sd_hijack_ddpm_v1  # noqa: F401
 
@@ -51,10 +49,8 @@ def load_model(self, path: str):
 
         try:
             return LDSR(model, yaml)
-
         except Exception:
-            print("Error importing LDSR:", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
+            errors.report("Error importing LDSR", exc_info=True)
             return None
 
     def do_upscale(self, img, path):
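Note: the diff above replaces per-call-site print/traceback pairs with a shared errors.report helper. A sketch of what such a helper might look like, as an illustration of the pattern rather than the actual contents of modules/errors.py:

import sys
import traceback


def report(message: str, *, exc_info: bool = False):
    # Send the message to stderr; when exc_info is set, append the current
    # exception's traceback so every caller formats errors identically.
    print(message, file=sys.stderr)
    if exc_info:
        print(traceback.format_exc(), file=sys.stderr)

With a helper like this, each except block shrinks to the single errors.report(...) call seen above, and the formatting lives in one place.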

extensions-builtin/LDSR/sd_hijack_autoencoder.py
+3 -2

@@ -10,7 +10,7 @@
 from torch.optim.lr_scheduler import LambdaLR
 
 from ldm.modules.ema import LitEma
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
 from ldm.modules.diffusionmodules.model import Encoder, Decoder
 from ldm.util import instantiate_from_config
 
@@ -91,8 +91,9 @@ def init_from_ckpt(self, path, ignore_keys=None):
             del sd[k]
         missing, unexpected = self.load_state_dict(sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
+        if missing:
             print(f"Missing Keys: {missing}")
+        if unexpected:
             print(f"Unexpected Keys: {unexpected}")
 
     def on_train_batch_end(self, *args, **kwargs):
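Note: `if missing:` works because empty Python collections are falsy, so for a list it is equivalent to `if len(missing) > 0:` and is the form PEP 8 recommends; the same substitution recurs in several files in this commit. A short demonstration with a made-up key name:

missing = []
unexpected = ["first_stage_model.encoder.conv_in.weight"]  # illustrative key

if missing:  # empty list is falsy: branch skipped
    print(f"Missing Keys: {missing}")
if unexpected:  # non-empty list is truthy: branch taken
    print(f"Unexpected Keys: {unexpected}")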

extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
+2 -2

@@ -195,9 +195,9 @@ def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
             sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
+        if missing:
             print(f"Missing Keys: {missing}")
-        if len(unexpected) > 0:
+        if unexpected:
             print(f"Unexpected Keys: {unexpected}")
 
     def q_mean_variance(self, x_start, t):
extensions-builtin/LDSR/vqvae_quantize.py (new file)
+147

# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE./

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits is False, "Only for interface compatible with Gumbel"
        assert return_logits is False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z

        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
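Note: a quick smoke test of the vendored class, assuming the file is importable as vqvae_quantize (as the sd_hijack_autoencoder.py diff above suggests); the sizes are arbitrary:

import torch
from vqvae_quantize import VectorQuantizer2

# Codebook of 512 entries, each 64-dimensional, with the usual commitment weight.
quantizer = VectorQuantizer2(n_e=512, e_dim=64, beta=0.25)

# Latents are (batch, channels, height, width); channels must equal e_dim.
z = torch.randn(1, 64, 8, 8)
z_q, loss, (perplexity, min_encodings, indices) = quantizer(z)

assert z_q.shape == z.shape          # quantization preserves the input shape
assert indices.numel() == 1 * 8 * 8  # one codebook index per spatial position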

extensions-builtin/Lora/extra_networks_lora.py
+2 -2

@@ -9,14 +9,14 @@ def __init__(self):
     def activate(self, p, params_list):
         additional = shared.opts.sd_lora
 
-        if additional != "None" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
+        if additional != "None" and additional in lora.available_loras and not any(x for x in params_list if x.items[0] == additional):
             p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
 
         names = []
         multipliers = []
         for params in params_list:
-            assert len(params.items) > 0
+            assert params.items
 
             names.append(params.items[0])
             multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
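Note: `not any(generator)` short-circuits at the first match, whereas `len([listcomp]) == 0` always builds the full intermediate list first. A standalone illustration, with plain lists standing in for the ExtraNetworkParams objects used above:

params_list = [["style_lora", 0.8], ["detail_lora", 1.0]]  # stand-ins for ExtraNetworkParams
additional = "detail_lora"

# Old form: materializes the whole filtered list just to test its length.
not_yet_added = len([x for x in params_list if x[0] == additional]) == 0

# New form: stops scanning as soon as one element matches.
not_yet_added = not any(x for x in params_list if x[0] == additional)

print(not_yet_added)  # False: "detail_lora" is already present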

extensions-builtin/Lora/lora.py
+7 -3

@@ -219,7 +219,7 @@ def load_lora(name, lora_on_disk):
     else:
         raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
 
-    if len(keys_failed_to_match) > 0:
+    if keys_failed_to_match:
         print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")
 
     return lora
@@ -267,7 +267,7 @@ def load_loras(names, multipliers=None):
         lora.multiplier = multipliers[i] if multipliers else 1.0
         loaded_loras.append(lora)
 
-    if len(failed_to_load_loras) > 0:
+    if failed_to_load_loras:
         sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
 
 
@@ -448,7 +448,11 @@ def list_available_loras():
             continue
 
         name = os.path.splitext(os.path.basename(filename))[0]
-        entry = LoraOnDisk(name, filename)
+        try:
+            entry = LoraOnDisk(name, filename)
+        except OSError:  # should catch FileNotFoundError and PermissionError etc.
+            errors.report(f"Failed to load LoRA {name} from {filename}", exc_info=True)
+            continue
 
         available_loras[name] = entry
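Note: the try/except OSError around LoraOnDisk makes the listing resilient: a broken symlink or unreadable file is reported and skipped instead of aborting the whole scan (compare "don't die when a LoRA is a broken symlink" in the changelog). A self-contained sketch of the same pattern, with hypothetical filenames:

import os

candidate_files = ["missing.safetensors", "broken_symlink.safetensors"]  # hypothetical
available = {}

for filename in candidate_files:
    name = os.path.splitext(os.path.basename(filename))[0]
    try:
        size = os.path.getsize(filename)  # raises OSError for missing or broken entries
    except OSError:
        print(f"Failed to load LoRA {name} from {filename}")
        continue  # skip just this entry; keep scanning the rest
    available[name] = size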

extensions-builtin/Lora/ui_extra_networks_lora.py
+3 -1

@@ -13,7 +13,7 @@ def refresh(self):
         lora.list_available_loras()
 
     def list_items(self):
-        for name, lora_on_disk in lora.available_loras.items():
+        for index, (name, lora_on_disk) in enumerate(lora.available_loras.items()):
             path, ext = os.path.splitext(lora_on_disk.filename)
 
             alias = lora_on_disk.get_alias()
@@ -27,6 +27,8 @@ def list_items(self):
                 "prompt": json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
                 "local_preview": f"{path}.{shared.opts.samples_format}",
                 "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
+                "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
+
             }
 
     def allowed_directories_for_previews(self):
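Note: the added "sort_keys" entry precomputes one value per sort mode, with 'default' preserving listing order via enumerate. A sketch of how such keys can drive sorting; the 'name' key is an assumption about what get_sort_keys returns:

items = [
    {"name": "b_lora", "sort_keys": {"default": 0, "name": "b_lora"}},
    {"name": "a_lora", "sort_keys": {"default": 1, "name": "a_lora"}},
]

# Whatever sort mode the user picks, the UI sorts on that precomputed key.
by_default = sorted(items, key=lambda x: x["sort_keys"]["default"])
by_name = sorted(items, key=lambda x: x["sort_keys"]["name"])

print([x["name"] for x in by_default])  # ['b_lora', 'a_lora'] (listing order)
print([x["name"] for x in by_name])     # ['a_lora', 'b_lora'] (alphabetical)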
