anzorq and multimodalart (HF staff) committed
Commit 890944a
Parent: a735af2

Pre-load all models in RAM (#12)


- Pre-load all models in RAM (317aaa9d4e78f4ba8b98cd19a1bc411f9f433867)


Co-authored-by: Multimodal AI art <[email protected]>

Files changed (1): app.py +24 -8
app.py CHANGED
@@ -1,5 +1,6 @@
 from diffusers import StableDiffusionPipeline
 from diffusers import StableDiffusionImg2ImgPipeline
+from diffusers import AutoencoderKL, UNet2DConditionModel
 import gradio as gr
 import torch
 
@@ -34,9 +35,14 @@ prompt_prefixes = {
 }
 
 current_model = models[0]
-pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
-if torch.cuda.is_available():
-    pipe = pipe.to("cuda")
+pipes = []
+vae = AutoencoderKL.from_pretrained(current_model, subfolder="vae", torch_dtype=torch.float16)
+for model in models:
+    unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16)
+    pipe = StableDiffusionPipeline.from_pretrained(model, unet=unet, vae=vae, torch_dtype=torch.float16)
+    pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model, unet=unet, vae=vae, torch_dtype=torch.float16)
+    pipes.append({"name": model, "pipeline": pipe, "pipeline_i2i": pipe_i2i})
+
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
@@ -54,10 +60,14 @@ def img_to_img(model, prompt, neg_prompt, guidance, steps, width, height, genera
     global current_model
     global pipe
     if model != current_model:
-        current_model = model
-        pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
-
-        if torch.cuda.is_available():
+        current_model = model
+        pipe = pipe.to("cpu")
+
+        for pipe_dict in pipes:
+            if(pipe_dict["name"] == current_model):
+                pipe = pipe_dict["pipeline"]
+
+        if torch.cuda.is_available():
             pipe = pipe.to("cuda")
 
     prompt = prompt_prefixes[current_model] + prompt
@@ -69,6 +79,7 @@ def img_to_img(model, prompt, neg_prompt, guidance, steps, width, height, genera
         width=width,
         height=height,
         generator=generator).images[0]
+
     return image
 
 def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
@@ -77,9 +88,13 @@ def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width,
     global pipe
     if model != current_model:
         current_model = model
-        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
-
-        if torch.cuda.is_available():
+        pipe = pipe.to("cpu")
+
+        for pipe_dict in pipes:
+            if(pipe_dict["name"] == current_model):
+                pipe = pipe_dict["pipeline_i2i"]
+
+        if torch.cuda.is_available():
             pipe = pipe.to("cuda")
 
     prompt = prompt_prefixes[current_model] + prompt
@@ -95,6 +110,7 @@ def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width,
         width=width,
        height=height,
         generator=generator).images[0]
+
     return image
 
 
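For readers skimming the diff, here is a minimal, self-contained sketch of the pattern this commit adopts, assuming a hypothetical one-entry model list (the Space iterates over its real `models` roster): load every pipeline once at startup so the weights stay resident in CPU RAM, then move only the selected pipeline to the GPU per request.

# Sketch only: the "pre-load all models in RAM" pattern in isolation.
# The model id below is a placeholder, not the Space's actual list.
import torch
from diffusers import StableDiffusionPipeline

model_ids = ["runwayml/stable-diffusion-v1-5"]  # hypothetical entry

# Build every pipeline once at startup; the fp16 weights then stay
# resident in CPU RAM instead of being reloaded on every model switch.
pipes = {
    m: StableDiffusionPipeline.from_pretrained(m, torch_dtype=torch.float16)
    for m in model_ids
}

def activate(name, previous=None):
    # Demote the previously active pipeline to CPU, then promote the
    # requested one, so only one model occupies VRAM at a time.
    if previous is not None:
        previous.to("cpu")
    pipe = pipes[name]
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")  # only the active model uses GPU memory
    return pipe

The commit additionally shares a single AutoencoderKL across all pipelines and loads only a per-model UNet2DConditionModel, which trims the RAM cost of keeping everything resident; the trade-off is a slower startup in exchange for near-instant model switching.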