michaeltrs committed on
Commit cb25637
1 Parent(s): ab30b8d

Re-add files with LFS tracking

checkpoints/lora30k/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae276908f80efd67416c36860781cb8cfe84d94812ef09afa3fd4eef4e60d8da
+ size 12732240
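
These three lines are a Git LFS pointer, not the weights themselves: they record the blob's SHA-256 and its size in bytes. A quick local check can confirm the object was actually materialized after checkout rather than left as a pointer stub. A minimal sketch, assuming the repo-relative path shown above:

import hashlib, os

path = "checkpoints/lora30k/pytorch_lora_weights.safetensors"

# Compare against the size and oid recorded in the pointer file.
assert os.path.getsize(path) == 12732240, "size mismatch: file may still be an LFS pointer stub"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "ae276908f80efd67416c36860781cb8cfe84d94812ef09afa3fd4eef4e60d8da"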
gen_w_lora.py ADDED
@@ -0,0 +1,65 @@
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+
+ pipe_id = "stabilityai/stable-diffusion-2-1"
+ # Earlier experiment checkpoints, kept for reference:
+ # checkpoint_dir = "/home/michaila/Projects/github/diffusers/examples/text_to_image/sd-2-1-train-finetune-LoRA-test5/checkpoint-2800/"
+ # checkpoint_dir = "/home/michaila/Projects/github/diffusers/examples/text_to_image/sd-2-1-train-finetune-wText-LoRA-lr1e5-r8/checkpoint-15500/"
+ # checkpoint_dir = "/home/michaila/Projects/github/diffusers/examples/text_to_image/sd-2-1-train-finetune-LoRA-ffhq-easyportr-2/checkpoint-100/"
+ # checkpoint_dir = "/home/michaila/Projects/github/diffusers/examples/text_to_image/sd-2-1-train-finetune-wText-LoRA-EasyPortait_lr1e5-r8/checkpoint-22000/"
+ # checkpoint_dir = "/home/michaila/Projects/github/diffusers/examples/text_to_image/sd-2-1-train-finetune-wText-LoRA-FFHQ-EasyPortrait_lr1e5-r8_768/checkpoint-30000/"
+ checkpoint_dir = "checkpoints/lora30k"
+
+ pipe = StableDiffusionPipeline.from_pretrained(pipe_id, torch_dtype=torch.float16).to("cuda")
+
+ # One-call alternative:
+ # pipe.load_lora_weights(checkpoint_dir, weight_name="pytorch_lora_weights.safetensors")
+ # Load the LoRA state dict explicitly and inject it into both the UNet and the text encoder.
+ state_dict, network_alphas = StableDiffusionPipeline.lora_state_dict(
+     checkpoint_dir,  # path to the trained LoRA output_dir
+     weight_name="pytorch_lora_weights.safetensors",
+ )
+ pipe.load_lora_into_unet(state_dict, network_alphas, pipe.unet, adapter_name="test_lora")
+ pipe.load_lora_into_text_encoder(state_dict, network_alphas, pipe.text_encoder, adapter_name="test_lora")
+ pipe.set_adapters(["test_lora"], adapter_weights=[1.0])
+
+
+ def generate(prompt, negprompt="", steps=50, seed=1):
+     """Generate one image and save it under checkpoint_dir, named after the prompt."""
+     lora_scale = 1.0
+     image = pipe(
+         prompt,
+         negative_prompt=negprompt,
+         num_inference_steps=steps,
+         cross_attention_kwargs={"scale": lora_scale},
+         generator=torch.manual_seed(seed),
+     ).images[0]
+     image.save(f"{checkpoint_dir}/{'_'.join(prompt.replace('.', ' ').split(' '))}.png")
+
+
+ # Example prompts from earlier runs:
+ # prompt = "a color photo of a 30 year old man with a sad expression, beard, very little hair, a slightly open mouth, his eyes look directly at the camera."
+ # prompt = "a color photo of a 30 year old man with a sad expression, beard, very little hair, a fully open mouth, his eyes look directly at the camera."
+ # prompt = "a 50 year old asian woman with a neutral expression, little hair, a slightly open mouth and visible teeth."
+ # prompt = "a 50 year old asian woman smiling."
+ # prompt = "a 20 year old white man with slightly open mouth, visible teeth. His tongue is out, clearly visible."
+ # prompt = "A baby with fully closed mouth."
+ # prompt = "A 25 year old female with long, blonde hair, green eyes and neutral expression looking at the camera."
+ # prompt = "A black african female with long, straight blond hair and happy expression."
+ # prompt = "A black female with blonde hair."
+ # prompt = "An attractive blond male"
+ # prompt = "A happy 55 year old black woman with a hat, sunglasses, earrings and visible teeth. High resolution, sharp image."
+ prompt = "A happy 25 year old woman with blond hair. Her head is looking significantly to the right."
+
+ negprompt = ""  # e.g. "bad teeth"
+
+ generate(prompt, negprompt=negprompt, steps=50, seed=200)
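
On recent diffusers releases, the explicit lora_state_dict / load_lora_into_unet / load_lora_into_text_encoder sequence above can usually be collapsed into the single call that the script's commented-out line hints at. Whether load_lora_weights accepts adapter_name depends on the installed version, so treat this as a sketch rather than a drop-in replacement:

pipe.load_lora_weights(checkpoint_dir, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_lora")
pipe.set_adapters(["test_lora"], adapter_weights=[1.0])
# Optionally bake the adapter into the base weights for slightly faster inference:
pipe.fuse_lora(lora_scale=1.0)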
main.py ADDED
@@ -0,0 +1,14 @@
+ import torch
+ from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
+
+ model_id = "stabilityai/stable-diffusion-2-1"
+
+ # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+ pipe = pipe.to("cuda")
+
+ prompt = "a photo of an astronaut riding a horse on mars"
+ image = pipe(prompt).images[0]
+
+ image.save("astronaut_rides_horse.png")
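
main.py samples with a fresh random seed each run, so outputs vary. To make runs comparable, as gen_w_lora.py does with torch.manual_seed, a seeded generator can be passed to the pipeline. A minimal sketch; the step count of 25 is an assumption that pairs well with DPM-Solver++, not something the script sets:

generator = torch.Generator(device="cuda").manual_seed(200)  # hypothetical seed for illustration
image = pipe(prompt, num_inference_steps=25, generator=generator).images[0]
image.save("astronaut_rides_horse_seed200.png")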