Commit 9a5c689

Merge pull request #4 from 920232796/master
add hash and fix undo hijack bug
2 parents a25dfeb + 965fc5a

File tree: 4 files changed, +12 -7 lines

launch.py (+5 -5)

@@ -234,11 +234,11 @@ def prepare_enviroment():
     os.makedirs(dir_repos, exist_ok=True)
 
-    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", )
-    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", )
-    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", )
-    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", )
-    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", )
+    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
+    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
 
     if not is_installed("lpips"):
         run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")

modules/sd_hijack.py (+5 -1)

@@ -112,7 +112,11 @@ def flatten(el):
         self.layers = flatten(m)
 
     def undo_hijack(self, m):
-        if type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
+
+        if shared.text_model_name == "XLMR-Large":
+            m.cond_stage_model = m.cond_stage_model.wrapped
+
+        elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
             m.cond_stage_model = m.cond_stage_model.wrapped
 
         model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
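The bug being fixed: when the alternative XLM-R text encoder is active, cond_stage_model is wrapped in a class that is not sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords, so the old type() check never matched and undo_hijack left the wrapper in place. The fix branches on shared.text_model_name first. The underlying wrap/unwrap pattern, shown here with hypothetical stand-in names rather than the repository's actual classes, is simply:

# Illustrative sketch of the hijack round-trip; every name here is a
# hypothetical stand-in.
class HijackWrapper:
    def __init__(self, wrapped):
        self.wrapped = wrapped  # keep a handle to the original module

    def __call__(self, *args, **kwargs):
        # custom prompt handling would go here
        return self.wrapped(*args, **kwargs)

class Model:
    pass

model = Model()
model.cond_stage_model = lambda prompt: prompt                  # stand-in encoder
model.cond_stage_model = HijackWrapper(model.cond_stage_model)  # hijack
model.cond_stage_model = model.cond_stage_model.wrapped         # undo

Undoing only works if every wrapper type is recognized, which is why the XLM-R case needed its own branch.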

modules/shared.py (+1 -1)

@@ -22,7 +22,7 @@
 sd_model_file = os.path.join(script_path, 'model.ckpt')
 default_sd_model_file = sd_model_file
 parser = argparse.ArgumentParser()
-parser.add_argument("--config", type=str, default="configs/altdiffusion/ad-inference.yaml", help="path to config which constructs model",)
+parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), help="path to config which constructs model",)
 parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
 parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
 parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))

v2-inference.yaml → v2-inference-v.yaml (renamed, +1)

@@ -2,6 +2,7 @@ model:
   base_learning_rate: 1.0e-4
   target: ldm.models.diffusion.ddpm.LatentDiffusion
   params:
+    parameterization: "v"
     linear_start: 0.00085
     linear_end: 0.0120
     num_timesteps_cond: 1
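The file is also renamed from v2-inference.yaml to v2-inference-v.yaml, matching the added setting: parameterization: "v" switches LatentDiffusion from the default epsilon-prediction objective to v-prediction, which the Stable Diffusion 2.x 768-v checkpoints are trained with. For reference, the v target from Salimans & Ho's progressive-distillation formulation combines the noise and the clean latent; a quick sketch:

import torch

def v_target(x0, noise, alpha_t, sigma_t):
    # v-prediction target: v = alpha_t * eps - sigma_t * x0.
    # Under epsilon-prediction the network would instead be trained to
    # output `noise` itself.
    return alpha_t * noise - sigma_t * x0

# Toy usage with scalar schedule coefficients.
x0, noise = torch.randn(4), torch.randn(4)
v = v_target(x0, noise, alpha_t=0.8, sigma_t=0.6)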
