
Commit 2197ab8

Revert "Precompute is_sdxl_inpaint flag (AUTOMATIC1111#4)" (AUTOMATIC1111#5)
This reverts commit b7b2bdc.
1 parent b7b2bdc · commit 2197ab8

3 files changed: +22 −22

modules/processing.py (+17 −11)

```diff
@@ -115,17 +115,20 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
 
     else:
-        if sd_model.model.is_sdxl_inpaint:
-            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-            image_conditioning = images_tensor_to_samples(image_conditioning,
-                                                          approximation_indexes.get(opts.sd_vae_encode_method))
+        sd = sd_model.model.state_dict()
+        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+                image_conditioning = images_tensor_to_samples(image_conditioning,
+                                                              approximation_indexes.get(opts.sd_vae_encode_method))
 
-            # Add the fake full 1s mask to the first dimension.
-            image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-            image_conditioning = image_conditioning.to(x.dtype)
+                # Add the fake full 1s mask to the first dimension.
+                image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+                image_conditioning = image_conditioning.to(x.dtype)
 
-            return image_conditioning
+                return image_conditioning
 
         # Dummy zero conditioning if we're not using inpainting or unclip models.
         # Still takes up a bit of memory, but no encoder call.
@@ -387,8 +390,11 @@ def img2img_image_conditioning(self, source_image, latent_image, image_mask=None
         if self.sampler.conditioning_key == "crossattn-adm":
             return self.unclip_image_conditioning(source_image)
 
-        if self.sampler.model_wrap.inner_model.model.is_sdxl_inpaint:
-            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        sd = self.sampler.model_wrap.inner_model.model.state_dict()
+        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+        if diffusion_model_input is not None:
+            if diffusion_model_input.shape[1] == 9:
+                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
 
         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
```
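In both hunks the reverted `is_sdxl_inpaint` attribute is replaced by an inline probe of the checkpoint weights: an SDXL inpainting UNet takes 9 input channels (4 for the noised latent, 4 for the masked-image latent, 1 for the mask) instead of the usual 4, and that count is readable from the input-channel dimension of the first convolution's weight. A minimal sketch of the detection, where `is_inpaint_unet` and `fake_sd` are illustrative names rather than identifiers from the codebase:

```python
import torch

def is_inpaint_unet(state_dict: dict) -> bool:
    # Conv2d weights are shaped (out_channels, in_channels, kH, kW), so an
    # inpainting UNet shows up as in_channels == 9 on its first convolution.
    weight = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
    return weight is not None and weight.shape[1] == 9

# Fabricated example: a 9-channel first conv, as an SDXL inpainting checkpoint has.
fake_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}
print(is_inpaint_unet(fake_sd))  # True
```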

modules/sd_models.py (−7)

```diff
@@ -380,13 +380,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
     model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
     model.is_sd1 = not model.is_sdxl and not model.is_sd2
     model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
-    # Set is_sdxl_inpaint flag.
-    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
-    model.is_sdxl_inpaint = (
-        model.is_sdxl and
-        diffusion_model_input is not None and
-        diffusion_model_input.shape[1] == 9
-    )
     if model.is_sdxl:
         sd_models_xl.extend_sdxl(model)
 
```
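The deleted block above is the load-time half of the reverted optimization: it ran the same 9-channel test once in `load_model_weights` and cached the result on the model object, roughly as sketched below (a restatement of the removed lines; `model` and `state_dict` are the function's own parameters):

```python
# Evaluate the 9-channel test once at weight-load time and cache it on the
# model, so call sites read a boolean instead of touching the state dict.
diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
model.is_sdxl_inpaint = (
    model.is_sdxl
    and diffusion_model_input is not None
    and diffusion_model_input.shape[1] == 9
)
```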
modules/sd_models_xl.py (+5 −4)

```diff
@@ -35,10 +35,11 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:
 
 
 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
-    """WARNING: This function is called once per denoising iteration. DO NOT add
-    expensive functionc calls such as `model.state_dict`. """
-    if self.model.is_sdxl_inpaint:
-        x = torch.cat([x] + cond['c_concat'], dim=1)
+    sd = self.model.state_dict()
+    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
+    if diffusion_model_input is not None:
+        if diffusion_model_input.shape[1] == 9:
+            x = torch.cat([x] + cond['c_concat'], dim=1)
 
     return self.model(x, t, cond)
 
```
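The restored concatenation in `apply_model` is what actually feeds the 9-channel UNet: the 4-channel noised latent is stacked along the channel dimension with the 5-channel `c_concat` conditioning (1 mask channel plus the 4-channel masked-image latent). Note the removed docstring's warning still applies: `apply_model` runs once per denoising iteration, so the reinstated `state_dict()` lookup now happens on every step. A shape-only sketch with fabricated tensors:

```python
import torch

# Fabricated shapes for illustration: batch of 2, 128x128 latent.
x = torch.randn(2, 4, 128, 128)                     # noised latent
cond = {'c_concat': [torch.randn(2, 5, 128, 128)]}  # mask + masked-image latent
x = torch.cat([x] + cond['c_concat'], dim=1)
print(x.shape)  # torch.Size([2, 9, 128, 128]) -- matches the 9-channel first conv
```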