ford442 committed on
Commit
7bb3c7f
·
verified ·
1 Parent(s): ccc256f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -10
app.py CHANGED
@@ -4,8 +4,8 @@
4
  # in the Software without restriction, including without limitation the rights
5
  # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
6
  # copies of the Software, and to permit persons to whom the Software is
7
- import spaces
8
 
 
9
  import os
10
  import random
11
  import uuid
@@ -17,7 +17,6 @@ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
17
  from typing import Tuple
18
  from transformers import AutoTokenizer, AutoModelForCausalLM
19
 
20
-
21
  css = '''
22
  .gradio-container{max-width: 570px !important}
23
  h1{text-align:center}
@@ -45,7 +44,7 @@ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
45
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
46
  BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
47
 
48
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
49
 
50
  style_list = [
51
  {
@@ -100,13 +99,7 @@ def load_and_prepare_model(model_id):
100
  add_watermarker=False,
101
  ).to(device)
102
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
103
-
104
- if USE_TORCH_COMPILE:
105
- pipe.compile()
106
-
107
- if ENABLE_CPU_OFFLOAD:
108
- pipe.enable_model_cpu_offload()
109
-
110
  return pipe
111
 
112
  # Preload and compile both models
 
4
  # in the Software without restriction, including without limitation the rights
5
  # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
6
  # copies of the Software, and to permit persons to whom the Software is
 
7
 
8
+ import spaces
9
  import os
10
  import random
11
  import uuid
 
17
  from typing import Tuple
18
  from transformers import AutoTokenizer, AutoModelForCausalLM
19
 
 
20
  css = '''
21
  .gradio-container{max-width: 570px !important}
22
  h1{text-align:center}
 
44
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
45
  BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
46
 
47
+ device = torch.device("cpu")
48
 
49
  style_list = [
50
  {
 
99
  add_watermarker=False,
100
  ).to(device)
101
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
102
+
 
 
 
 
 
 
103
  return pipe
104
 
105
  # Preload and compile both models