HikariDawn777 committed on
Commit 8dea111 · Parent(s): 8553e9e
Files changed (2):
  1. app.py +3 -1
  2. requirements.txt +3 -3
app.py CHANGED

```diff
@@ -26,6 +26,7 @@ import collections
 import numpy as np
 import gradio as gr
 from PIL import Image
+import spaces
 
 import torch
 from pathlib import Path
@@ -131,6 +132,7 @@ def get_points(img, original_image, sel_pix, evt: gr.SelectData):
     return [img if isinstance(img, np.ndarray) else np.array(img), original_image]
 
 
+@spaces.GPU(duration=120)
 def gesturenet_inference(ref_image, prompt, selected_points):
 
     # Check some paramter, must have prompt and selected points
@@ -178,7 +180,7 @@ def gesturenet_inference(ref_image, prompt, selected_points):
         project_config = ProjectConfiguration(project_dir=config["output_dir"], logging_dir=Path(config["output_dir"], config["logging_name"])),
     )
     print("device is ", accelerator.device)
-
+
     feature_extractor = CLIPImageProcessor.from_pretrained(
         config["pretrained_model_name_or_path"], subfolder="feature_extractor", revision=None
     ) # This instance has now weight, they are just seeting file
```
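The app.py change is the standard ZeroGPU hookup: `import spaces` plus the `@spaces.GPU(duration=120)` decorator ask Hugging Face Spaces to attach a GPU to `gesturenet_inference` for at most 120 seconds per call. Below is a minimal sketch of that pattern, assuming the `spaces` package that ships on ZeroGPU Spaces; the `generate` function and its body are illustrative, not from this repo:

```python
import gradio as gr
import spaces  # Hugging Face ZeroGPU helper, preinstalled on Spaces
import torch

@spaces.GPU(duration=120)  # hold a GPU for at most 120 s per invocation
def generate(prompt: str) -> str:
    # On ZeroGPU hardware, CUDA is only guaranteed inside the decorated call.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"[{device}] {prompt}"

demo = gr.Interface(fn=generate, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```

Outside a Space the decorator is effectively a no-op, so the same code still runs locally on CPU or a regular GPU.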
requirements.txt CHANGED

```diff
@@ -1,7 +1,7 @@
 # Non-strict version lib
-# torch==2.0.1
-# torchaudio==2.0.1
-# torchvision
+torch==2.0.1
+torchaudio==2.0.1
+torchvision
 opencv-python
 transformers
 accelerate
```
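Uncommenting the three torch pins means the Space build now installs torch 2.0.1, torchaudio 2.0.1, and torchvision explicitly (Spaces install requirements.txt automatically at build time), presumably to guarantee a torch stack the model and ZeroGPU runtime are known to work with rather than relying on whatever the base image provides.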