vilarin committed on
Commit
275bb26
1 Parent(s): 2a7c5e8

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import torch
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 from huggingface_hub import hf_hub_download
+import spaces
 from PIL import Image
 import requests
 from translatepy import Translator
@@ -28,19 +29,20 @@ JS = """function () {
 }
 }"""
 
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
 # Load VAE component
 vae = AutoencoderKL.from_pretrained(
     vae_model,
+    torch_dtype=torch.float16
 )
 
 # Ensure model and scheduler are initialized in GPU-enabled function
-pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae).to(device)
+if torch.cuda.is_available():
+    pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16).to("cuda")
 
 
 
 # Function
+@spaces.GPU()
 def generate_image(
     prompt,
     negative="low quality",