gokaygokay committed
Commit c677fd7 · verified · 1 Parent(s): 9654b1c

Update app.py

Files changed (1)
  1. app.py +1 -48
app.py CHANGED
@@ -20,7 +20,6 @@ from PIL Image
 import numpy as np
 from transformers import AutoProcessor, AutoModelForCausalLM, pipeline
 import requests
-from RealESRGAN import RealESRGAN
 
 
 from unittest.mock import patch
@@ -125,25 +124,10 @@ florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base',
 enhancer_medium = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance", device=device)
 enhancer_long = pipeline("summarization", model="gokaygokay/Lamini-Prompt-Enchance-Long", device=device)
 
-class LazyRealESRGAN:
-    def __init__(self, device, scale):
-        self.device = device
-        self.scale = scale
-        self.model = None
-
-    def load_model(self):
-        if self.model is None:
-            self.model = RealESRGAN(self.device, scale=self.scale)
-            self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
-
-    def predict(self, img):
-        self.load_model()
-        return self.model.predict(img)
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
-lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
 
 # Florence caption function
 def florence_caption(image):
@@ -179,27 +163,13 @@ def enhance_prompt(input_prompt, model_choice):
 
     return enhanced_text
 
-def upscale_image(image, scale):
-    # Ensure image is a PIL Image object
-    if not isinstance(image, Image.Image):
-        if isinstance(image, np.ndarray):
-            image = Image.fromarray(image)
-        else:
-            raise ValueError("Input must be a PIL Image or a numpy array")
 
-    if scale == 2:
-        return lazy_realesrgan_x2.predict(image)
-    elif scale == 4:
-        return lazy_realesrgan_x4.predict(image)
-    else:
-        return image
 
 @spaces.GPU(duration=120)
 def generate_image(model_choice, additional_positive_prompt, additional_negative_prompt, height, width, num_inference_steps,
                    guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler, clip_skip,
                    use_florence2, use_medium_enhancer, use_long_enhancer,
                    use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
-                   use_upscaler, upscale_factor,
                    input_image=None, progress=gr.Progress(track_tqdm=True)):
 
     # Select the appropriate pipe based on the model choice
@@ -267,18 +237,6 @@ def generate_image(model_choice, additional_positive_prompt, additional_negative
             generator=torch.Generator(pipe.device).manual_seed(seed)
         ).images
 
-        if use_upscaler:
-            print("Upscaling images")
-            upscaled_images = []
-            for i, img in enumerate(images):
-                print(f"Upscaling image {i+1}")
-                if not isinstance(img, Image.Image):
-                    print(f"Converting image {i+1} to PIL Image")
-                    img = Image.fromarray(np.uint8(img))
-                upscaled_img = upscale_image(img, upscale_factor)
-                upscaled_images.append(upscaled_img)
-            images = upscaled_images
-
         print("Returning results")
         return images, seed, full_positive_prompt, full_negative_prompt
     except Exception as e:
@@ -330,10 +288,6 @@ with gr.Blocks(theme='bethecloud/storj_theme') as demo:
                 use_medium_enhancer = gr.Checkbox(label="Use Medium Prompt Enhancer", value=False)
                 use_long_enhancer = gr.Checkbox(label="Use Long Prompt Enhancer", value=False)
 
-            with gr.Accordion("Upscaler Settings", open=False):
-                use_upscaler = gr.Checkbox(label="Use Upscaler", value=False)
-                upscale_factor = gr.Radio(label="Upscale Factor", choices=[2, 4], value=2)
-
            generate_btn = gr.Button("Generate Image")
 
            with gr.Accordion("Prefix and Suffix Settings", open=True):
@@ -372,7 +326,6 @@ with gr.Blocks(theme='bethecloud/storj_theme') as demo:
            guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler,
            clip_skip, use_florence2, use_medium_enhancer, use_long_enhancer,
            use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
-           use_upscaler, upscale_factor,
            input_image
        ],
        outputs=[output_gallery, seed_used, full_positive_prompt_used, full_negative_prompt_used]
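
For context, this commit drops the lazy Real-ESRGAN upscaling path: the LazyRealESRGAN wrapper, the x2/x4 instances, the upscale_image helper, and the use_upscaler / upscale_factor controls. The sketch below consolidates those deleted fragments into one self-contained, runnable unit. It is reconstructed only from the removed lines, plus the imports needed to run on its own, and it assumes the third-party RealESRGAN package is installed and that weight files exist locally under models/upscalers/, as the pre-commit code expected.

import torch
import numpy as np
from PIL import Image
from RealESRGAN import RealESRGAN  # third-party package imported by the pre-commit app.py


class LazyRealESRGAN:
    """Wrap a Real-ESRGAN model and defer weight loading until first use."""

    def __init__(self, device, scale):
        self.device = device
        self.scale = scale
        self.model = None  # created lazily in load_model()

    def load_model(self):
        if self.model is None:
            self.model = RealESRGAN(self.device, scale=self.scale)
            # Path and download=False come from the removed code: weights are expected locally.
            self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)

    def predict(self, img):
        self.load_model()
        return self.model.predict(img)


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)


def upscale_image(image, scale):
    # Accept either a PIL Image or a numpy array, as the removed helper did.
    if not isinstance(image, Image.Image):
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        else:
            raise ValueError("Input must be a PIL Image or a numpy array")

    if scale == 2:
        return lazy_realesrgan_x2.predict(image)
    elif scale == 4:
        return lazy_realesrgan_x4.predict(image)
    return image  # unsupported factor: return the image unchanged

Because both wrappers start with model set to None, neither the x2 nor the x4 weights are loaded onto the GPU until the first predict() call, which is what the deleted use_upscaler branch in generate_image relied on.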
 