ford442 committed
Commit 925e298 · verified · 1 Parent(s): 931ae00

Update app.py

Files changed (1)
  1. app.py +13 -20
app.py CHANGED
@@ -5,7 +5,6 @@
 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 # copies of the Software, and to permit persons to whom the Software is
 import spaces
-
 import os
 import random
 import uuid
@@ -23,10 +22,8 @@ import gc
 import time
 import datetime
 #from diffusers.schedulers import AysSchedules
-
 from gradio import themes
 from hidiffusion import apply_hidiffusion, remove_hidiffusion
-
 import gc
 
 torch.backends.cuda.matmul.allow_tf32 = False
@@ -37,7 +34,6 @@ torch.backends.cudnn.deterministic = False
 #torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 # torch.backends.cuda.preferred_linalg_library="cusolver"
-
 torch.set_float32_matmul_precision("highest")
 
 FTP_HOST = "1ink.us"
@@ -50,7 +46,6 @@ DESCRIPTIONXX = """
 """
 
 examples = [
-
     "Many apples splashed with drops of water within a fancy bowl 4k, hdr --v 6.0 --style raw",
     "A profile photo of a dog, brown background, shot on Leica M6 --ar 128:85 --v 6.0 --style raw",
 ]
@@ -60,12 +55,8 @@ MODEL_OPTIONS = {
 }
 
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
-ENABLE_CPU_OFFLOAD = 0
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
 style_list = [
     {
         "name": "3840 x 2160",
@@ -95,15 +86,7 @@ STYLE_NAMES = list(styles.keys())
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 #sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
-
-def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
-    if style_name in styles:
-        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
-    else:
-        p, n = styles[DEFAULT_STYLE_NAME]
-    if not negative:
-        negative = ""
-    return p.replace("{prompt}", positive), n + negative
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 def load_and_prepare_model(model_id):
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
@@ -200,8 +183,15 @@ def upload_to_ftp(filename):
         print(f"Uploaded {filename} to FTP server")
     except Exception as e:
         print(f"FTP upload error: {e}")
-
-def save_image(img):
+
+def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+    if style_name in styles:
+        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+    else:
+        p, n = styles[DEFAULT_STYLE_NAME]
+    if not negative:
+        negative = ""
+    return p.replace("{prompt}", positive), n + negativedef save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name,optimize=False,compress_level=0)
     return unique_name
@@ -246,6 +236,7 @@ def generate_30(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = False
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -302,6 +293,7 @@ def generate_60(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = True
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -358,6 +350,7 @@ def generate_90(
     num_images: int = 1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
+    torch.backends.cudnn.benchmark = True
     torch.cuda.empty_cache()
     gc.collect()
     global models
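A note on the relocated helpers: as rendered, the last added line of the upload_to_ftp hunk has the apply_style return statement and the def save_image(img): header run together on one line; if the committed file really contains that, it will not parse. A minimal sketch of the presumably intended layout, reusing only the bodies already shown in this diff:

def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    # Fall back to DEFAULT_STYLE_NAME when the requested style is unknown.
    if style_name in styles:
        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    else:
        p, n = styles[DEFAULT_STYLE_NAME]
    if not negative:
        negative = ""
    return p.replace("{prompt}", positive), n + negative

def save_image(img):
    # Keep PNG writing fast by skipping optimization and extra compression.
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name, optimize=False, compress_level=0)
    return unique_name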
 
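Incidentally, dict.get already supplies the fallback, so the if/else inside apply_style is redundant; an equivalent shorter form, assuming the same styles dict and DEFAULT_STYLE_NAME (a suggestion only, not part of this commit):

def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    # styles.get returns the default entry whenever style_name is missing.
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + (negative or "")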
 
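One more observation on the added torch.backends.cudnn.benchmark assignments: the flag is process-global, so with generate_30 setting it to False and generate_60/generate_90 setting it to True, whichever path ran last leaves its value behind for everything else in the process. If that ever matters, one way to scope it per call is a small context manager; the cudnn_benchmark helper below is hypothetical, not something the app defines:

import contextlib

import torch

@contextlib.contextmanager
def cudnn_benchmark(enabled: bool):
    # Set the global cuDNN autotuner flag for the duration of the block, then restore it.
    previous = torch.backends.cudnn.benchmark
    torch.backends.cudnn.benchmark = enabled
    try:
        yield
    finally:
        torch.backends.cudnn.benchmark = previous

# Usage inside a generation path ("pipe" stands in for the app's pipeline object):
# with cudnn_benchmark(True):
#     images = pipe(prompt, num_inference_steps=60).images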