tianweiy committed
Commit 76cc5d7 · 1 Parent(s): 552f138

update readme

Files changed (1): README.md (+26 -4)
README.md CHANGED
@@ -30,9 +30,9 @@ Our 1-step Text-to-Image demo is hosted at [DMD2-1step](https://cc2622c0c132346c
 
 We can use the standard diffusers pipeline:
 
-#### 4-step generation
+#### 4-step UNet generation
 
-```.bash
+```python
 import torch
 from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
 from huggingface_hub import hf_hub_download
@@ -51,9 +51,31 @@ prompt="a photo of a cat"
 image=pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0, timesteps=[999, 749, 499, 249]).images[0]
 ```
 
-#### 1-step generation
+#### 4-step LoRA generation
 
-```.bash
+```python
+import torch
+from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
+base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+repo_name = "tianweiy/DMD2"
+ckpt_name = "dmd2_sdxl_4step_lora_fp16.bin"
+# Load model.
+pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to("cuda")
+pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
+pipe.fuse_lora()
+
+pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+prompt="a photo of a cat"
+
+# LCMScheduler's default timesteps are different from the ones we used for training
+image=pipe(prompt=prompt, num_inference_steps=4, guidance_scale=0, timesteps=[999, 749, 499, 249]).images[0]
+```
+
+#### 1-step UNet generation
+
+```python
 import torch
 from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
 from huggingface_hub import hf_hub_download
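
Both UNet snippets are truncated here by the diff's hunk context, which cuts off after the import lines. For reference, below is a minimal self-contained sketch of the 1-step UNet path consistent with the snippets above; the checkpoint name `dmd2_sdxl_1step_unet_fp16.bin` and the single sampling timestep `399` are assumptions based on the DMD2 release, not part of this diff.

```python
# Hedged sketch of the 1-step UNet example elided by the hunk context.
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base_model_id = "stabilityai/stable-diffusion-xl-base-1.0"
repo_name = "tianweiy/DMD2"
ckpt_name = "dmd2_sdxl_1step_unet_fp16.bin"  # assumed checkpoint name, not shown in this diff

# Build the distilled UNet from the base model's config and load the DMD2 weights into it.
unet = UNet2DConditionModel.from_config(
    UNet2DConditionModel.load_config(base_model_id, subfolder="unet")
).to("cuda", torch.float16)
unet.load_state_dict(load_file(hf_hub_download(repo_name, ckpt_name), device="cuda"))

# Swap the distilled UNet into the SDXL pipeline and match the training noise schedule.
pipe = DiffusionPipeline.from_pretrained(
    base_model_id, unet=unet, torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

prompt = "a photo of a cat"
# One denoising step at the assumed training timestep.
image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0, timesteps=[399]).images[0]
```

A 4-step UNet call would follow the same loading pattern with the 4-step checkpoint (presumably `dmd2_sdxl_4step_unet_fp16.bin`) and the `num_inference_steps=4, timesteps=[999, 749, 499, 249]` call shown in the diff.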