
Commit 6fbfd19

Author: deforum (committed)
fix dev branch by reverting
1 parent 93fd972 commit 6fbfd19

File tree: 7 files changed (+120, -496 lines)

Deforum_Stable_Diffusion.ipynb (+25, -79)
@@ -6,8 +6,8 @@
 "id": "c442uQJ_gUgy"
 },
 "source": [
-"# **Deforum Stable Diffusion v0.4 (DEV)**\n",
-"[Stable Diffusion](https://github.com/CompVis/stable-diffusion) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer and the [Stability.ai](https://stability.ai/) Team. [K Diffusion](https://github.com/crowsonkb/k-diffusion) by [Katherine Crowson](https://twitter.com/RiversHaveWings). You need to get the ckpt file and put it on your Google Drive first to use this. It can be downloaded from [HuggingFace](https://huggingface.co/CompVis/stable-diffusion).\n",
+"# **Deforum Stable Diffusion v0.4**\n",
+"[Stable Diffusion](https://github.com/CompVis/stable-diffusion) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Bj\u00f6rn Ommer and the [Stability.ai](https://stability.ai/) Team. [K Diffusion](https://github.com/crowsonkb/k-diffusion) by [Katherine Crowson](https://twitter.com/RiversHaveWings). You need to get the ckpt file and put it on your Google Drive first to use this. It can be downloaded from [HuggingFace](https://huggingface.co/CompVis/stable-diffusion).\n",
 "\n",
 "Notebook by [deforum](https://discord.gg/upmXXsrwZc)"
 ]
@@ -39,8 +39,8 @@
 {
 "cell_type": "code",
 "metadata": {
-"id": "TxIOPT0G5Lx1",
-"cellView": "form"
+"cellView": "form",
+"id": "TxIOPT0G5Lx1"
 },
 "source": [
 "#@markdown **Model and Output Paths**\n",
@@ -96,7 +96,7 @@
 " all_process = [\n",
 " ['pip', 'install', 'torch==1.12.1+cu113', 'torchvision==0.13.1+cu113', '--extra-index-url', 'https://download.pytorch.org/whl/cu113'],\n",
 " ['pip', 'install', 'omegaconf==2.2.3', 'einops==0.4.1', 'pytorch-lightning==1.7.4', 'torchmetrics==0.9.3', 'torchtext==0.13.1', 'transformers==4.21.2', 'kornia==0.6.7'],\n",
-" ['git', 'clone', 'https://github.com/deforum/stable-diffusion/tree/dev'],\n",
+" ['git', 'clone', 'https://github.com/deforum/stable-diffusion'],\n",
 " ['pip', 'install', '-e', 'git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers'],\n",
 " ['pip', 'install', '-e', 'git+https://github.com/openai/CLIP.git@main#egg=clip'],\n",
 " ['pip', 'install', 'accelerate', 'ftfy', 'jsonmerge', 'matplotlib', 'resize-right', 'timm', 'torchdiffeq'],\n",
@@ -169,13 +169,6 @@
 "from ldm.models.diffusion.ddim import DDIMSampler\n",
 "from ldm.models.diffusion.plms import PLMSSampler\n",
 "\n",
-"def patch_conv(cls):\n",
-" init = cls.__init__\n",
-" def __init__(self, *args, **kwargs):\n",
-" return init(self, *args, **kwargs, padding_mode='circular')\n",
-" cls.__init__ = __init__\n",
-" \n",
-"\n",
 "def sanitize(prompt):\n",
 " whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n",
 " tmp = ''.join(filter(whitelist.__contains__, prompt))\n",
@@ -452,9 +445,7 @@
 " s = np.max(np.append(s,1.0))\n",
 " torch.clamp_(img, -1*s, s)\n",
 " torch.FloatTensor.div_(img, s)\n",
-" #if seamless:\n",
-" #print(\">> changed to seamless tiling mode\")\n",
-" # patch_conv(torch.nn.Conv2d)\n",
+"\n",
 " # Callback for samplers in the k-diffusion repo, called thus:\n",
 " # callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})\n",
 " def k_callback_(args_dict):\n",
@@ -467,9 +458,6 @@
 " is_masked = torch.logical_and(mask >= mask_schedule[args_dict['i']], mask != 0 )\n",
 " new_img = init_noise * torch.where(is_masked,1,0) + args_dict['x'] * torch.where(is_masked,0,1)\n",
 " args_dict['x'].copy_(new_img)\n",
-" #if seamless:\n",
-" #print(\">> changed to seamless tiling mode\")\n",
-" #patch_conv(torch.nn.Conv2d)\n",
 "\n",
 " # Function that is called on the image (img) and step (i) at each step\n",
 " def img_callback_(img, i):\n",
@@ -561,8 +549,6 @@
 " os.makedirs(args.outdir, exist_ok=True)\n",
 "\n",
 " sampler = PLMSSampler(model) if args.sampler == 'plms' else DDIMSampler(model)\n",
-" if seamless:\n",
-" print(\">> changed to seamless tiling mode\")\n",
 " model_wrap = CompVisDenoiser(model)\n",
 " batch_size = args.n_samples\n",
 " prompt = args.prompt\n",
@@ -705,31 +691,21 @@
 {
 "cell_type": "code",
 "metadata": {
-"id": "CIUJ7lWI4v53",
-"cellView": "form"
+"cellView": "form",
+"id": "CIUJ7lWI4v53"
 },
 "source": [
-"#@markdown **Select Model**\n",
-"model_config = \"custom\" #@param [\"custom\",\"v1-inference.yaml\"]\n",
+"#@markdown **Select and Load Model**\n",
+"\n",
+"model_config = \"v1-inference.yaml\" #@param [\"custom\",\"v1-inference.yaml\"]\n",
 "model_checkpoint = \"sd-v1-4.ckpt\" #@param [\"custom\",\"sd-v1-4-full-ema.ckpt\",\"sd-v1-4.ckpt\",\"sd-v1-3-full-ema.ckpt\",\"sd-v1-3.ckpt\",\"sd-v1-2-full-ema.ckpt\",\"sd-v1-2.ckpt\",\"sd-v1-1-full-ema.ckpt\",\"sd-v1-1.ckpt\"]\n",
-"custom_config_path = \"/content/stable-diffusion/configs/stable-diffusion/v1-inference.yaml\" #@param {type:\"string\"}\n",
+"custom_config_path = \"\" #@param {type:\"string\"}\n",
 "custom_checkpoint_path = \"\" #@param {type:\"string\"}\n",
 "\n",
 "load_on_run_all = True #@param {type: 'boolean'}\n",
 "half_precision = True # check\n",
 "check_sha256 = True #@param {type:\"boolean\"}\n",
 "\n",
-"#@markdown **Textual Inversion**\n",
-"use_textual_inversion = \n",
-"embedding_type = \".bin\" #@param [\".bin\",\".pt\"]\n",
-"embedding_path= \"/content/drive/MyDrive/AI/models/character-pingu.bin\" #@param {type:\"string\"}\n",
-"\n",
-"#@markdown **Enable Seamless Animations**\n",
-"seamless = False #@param {type:\"boolean\"}\n",
-"if seamless:\n",
-" #print(\">> changed to seamless tiling mode\")\n",
-" patch_conv(torch.nn.Conv2d)\n",
-"\n",
 "model_map = {\n",
 " \"sd-v1-4-full-ema.ckpt\": {'sha256': '14749efc0ae8ef0329391ad4436feb781b402f4fece4883c7ad8d10556d8a36a'},\n",
 " \"sd-v1-4.ckpt\": {'sha256': 'fe4efff1e174c627256e44ec2991ba279b3816e364b49f9be2abc0b3ff3f8556'},\n",
@@ -741,25 +717,6 @@
 " \"sd-v1-1.ckpt\": {'sha256': '86cd1d3ccb044d7ba8db743d717c9bac603c4043508ad2571383f954390f3cea'}\n",
 "}\n",
 "\n",
-"\n",
-"import shutil\n",
-"\n",
-"originalpt = r'/content/stable-diffusion/ldm/modules/embedding_managerpt.py'\n",
-"originalbin = r'/content/stable-diffusion/ldm/modules/embedding_managerbin.py'\n",
-"\n",
-"if embedding_type == \".pt\":\n",
-" file_path = \"/content/stable-diffusion/ldm/modules/embedding_manager.py\"\n",
-" if os.path.isfile(file_path):\n",
-" os.remove(file_path)\n",
-" shutil.copyfile(originalpt, file_path)\n",
-" print('using .pt embedding')\n",
-"elif embedding_type == \".bin\":\n",
-" file_path = \"/content/stable-diffusion/ldm/modules/embedding_manager.py\"\n",
-" if os.path.isfile(file_path):\n",
-" os.remove(file_path)\n",
-" shutil.copyfile(originalbin, file_path)\n",
-" print('using .bin embedding')\n",
-"\n",
 "# config path\n",
 "ckpt_config_path = custom_config_path if model_config == \"custom\" else os.path.join(models_path, model_config)\n",
 "if os.path.exists(ckpt_config_path):\n",
@@ -793,8 +750,6 @@
 "if ckpt_valid:\n",
 " print(f\"Using ckpt: {ckpt_path}\")\n",
 "\n",
-"#@markdown **Load Model**\n",
-"\n",
 "def load_model_from_config(config, ckpt, verbose=False, device='cuda', half_precision=True):\n",
 " map_location = \"cuda\" #@param [\"cpu\", \"cuda\"]\n",
 " print(f\"Loading model from {ckpt}\")\n",
@@ -803,9 +758,6 @@
 " print(f\"Global Step: {pl_sd['global_step']}\")\n",
 " sd = pl_sd[\"state_dict\"]\n",
 " model = instantiate_from_config(config.model)\n",
-" if embedding_path is not None:\n",
-" model.embedding_manager.load(embedding_path)\n",
-" device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
 " m, u = model.load_state_dict(sd, strict=False)\n",
 " if len(m) > 0 and verbose:\n",
 " print(\"missing keys:\")\n",
@@ -822,13 +774,8 @@
 " return model\n",
 "\n",
 "if load_on_run_all and ckpt_valid:\n",
-" if seamless:\n",
-" print(\">> changed to seamless tiling mode\")\n",
-" patch_conv.__init__(torch.nn.Conv2d)\n",
 " local_config = OmegaConf.load(f\"{ckpt_config_path}\")\n",
 " model = load_model_from_config(local_config, f\"{ckpt_path}\", half_precision=half_precision)\n",
-" if embedding_path is not None:\n",
-" model.embedding_manager.load(embedding_path)\n",
 " device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
 " model = model.to(device)"
 ],
@@ -856,15 +803,15 @@
 {
 "cell_type": "code",
 "metadata": {
-"id": "8HJN2TE3vh-J",
-"cellView": "form"
+"cellView": "form",
+"id": "8HJN2TE3vh-J"
 },
 "source": [
 "\n",
 "def DeforumAnimArgs():\n",
 "\n",
 " #@markdown ####**Animation:**\n",
-" animation_mode = '3D' #@param ['None', '2D', '3D', 'Video Input', 'Interpolation'] {type:'string'}\n",
+" animation_mode = 'None' #@param ['None', '2D', '3D', 'Video Input', 'Interpolation'] {type:'string'}\n",
 " max_frames = 1000 #@param {type:\"number\"}\n",
 " border = 'wrap' #@param ['wrap', 'replicate'] {type:'string'}\n",
 "\n",
@@ -988,16 +935,16 @@
 "source": [
 "\n",
 "prompts = [\n",
-" \"a beautiful forest by Asher Brown Durand, * trending on Artstation\", #the first prompt I want\n",
-" \"a beautiful portrait of a woman by Artgerm, * trending on Artstation\", #the second prompt I want\n",
+" \"a beautiful forest by Asher Brown Durand, trending on Artstation\", #the first prompt I want\n",
+" \"a beautiful portrait of a woman by Artgerm, trending on Artstation\", #the second prompt I want\n",
 " #\"the third prompt I don't want it I commented it with an\",\n",
 "]\n",
 "\n",
 "animation_prompts = {\n",
-" 0: \" * \",\n",
-" 20: \"a beautiful banana * , trending on Artstation\",\n",
-" 30: \"a beautiful coconut * , trending on Artstation\",\n",
-" 40: \"a beautiful durian * , trending on Artstation\",\n",
+" 0: \"a beautiful apple, trending on Artstation\",\n",
+" 20: \"a beautiful banana, trending on Artstation\",\n",
+" 30: \"a beautiful coconut, trending on Artstation\",\n",
+" 40: \"a beautiful durian, trending on Artstation\",\n",
 "}"
 ],
 "outputs": [],
@@ -1019,7 +966,6 @@
 "cellView": "form"
 },
 "source": [
-"#@markdown **Load Settings**\n",
 "override_settings_with_file = False #@param {type:\"boolean\"}\n",
 "custom_settings_file = \"/content/drive/MyDrive/Settings.txt\"#@param {type:\"string\"}\n",
 "\n",
@@ -1049,7 +995,7 @@
 " filename_format = \"{timestring}_{index}_{prompt}.png\" #@param [\"{timestring}_{index}_{seed}.png\",\"{timestring}_{index}_{prompt}.png\"]\n",
 " seed_behavior = \"iter\" #@param [\"iter\",\"fixed\",\"random\"]\n",
 " make_grid = False #@param {type:\"boolean\"}\n",
-" grid_rows = 1 #@param \n",
+" grid_rows = 2 #@param \n",
 " outdir = get_output_folder(output_path, batch_name)\n",
 "\n",
 " #@markdown **Init Settings**\n",
@@ -1550,8 +1496,8 @@
 {
 "cell_type": "code",
 "metadata": {
-"id": "no2jP8HTMBM0",
-"cellView": "form"
+"cellView": "form",
+"id": "no2jP8HTMBM0"
 },
 "source": [
 "skip_video_for_run_all = True #@param {type: 'boolean'}\n",
@@ -1626,5 +1572,5 @@
 }
 },
 "nbformat": 4,
-"nbformat_minor": 0
+"nbformat_minor": 4
 }

0 commit comments
