first commit
.DS_Store ADDED
Binary file (6.15 kB).
README.md CHANGED
@@ -10,4 +10,4 @@ pinned: false
 license: apache-2.0
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,27 @@
+import torch
+import cv2
+import gradio as gr
+import numpy as np
+
+
+# Use GPU if available
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+else:
+    device = torch.device("cpu")
+
+def manipulate_image(img, text_queries, score_threshold):
+    return img
+
+
+description = """
+"""
+
+demo = gr.Interface(
+    manipulate_image,
+    inputs=[gr.Image(), "text", gr.Slider(0, 1, value=0.1)],
+    outputs="image",
+    title="Text-guided image manipulation with StyleMC",
+    description=description,
+)
+demo.launch()
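The committed manipulate_image is only a placeholder: it returns the input image unchanged regardless of the text queries or the score threshold. As a hedged sketch of how the text queries could eventually be consumed (requirements.txt below pins OpenAI's CLIP, which text-guided manipulation methods such as StyleMC build on), the helper below encodes them into normalized CLIP text embeddings. The model variant "ViT-B/32" and the function encode_text_queries are illustrative assumptions, not part of this commit.

# Illustrative sketch, not part of app.py: turn the text_queries string into
# L2-normalized CLIP text embeddings using the clip package pinned below.
import torch
import clip

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_model, _ = clip.load("ViT-B/32", device=device)  # model choice is an assumption

def encode_text_queries(text_queries: str) -> torch.Tensor:
    # Split a comma-separated query string, tokenize, and encode with CLIP.
    queries = [q.strip() for q in text_queries.split(",") if q.strip()]
    tokens = clip.tokenize(queries).to(device)
    with torch.no_grad():
        features = clip_model.encode_text(tokens)
    # Normalize so the embeddings can be compared by cosine similarity.
    return features / features.norm(dim=-1, keepdim=True)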
requirements.txt ADDED
@@ -0,0 +1,50 @@
+click==8.0.3
+clip @ git+https://github.com/openai/CLIP.git@40f5484c1c74edd83cb9cf687c6ab92b28d8b656
+colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1602866480661/work
+ftfy==6.1.1
+google-auth==1.35.0
+google-auth-oauthlib==0.4.6
+google-pasta==0.2.0
+h5py==2.10.0
+imageio==2.13.2
+imageio-ffmpeg==0.4.3
+Jinja2==3.0.3
+joblib==1.1.0
+jsonschema==4.4.0
+jupyter-client==7.1.1
+jupyter-core==4.9.1
+keras==2.7.0
+Keras-Preprocessing==1.1.2
+kiwisolver==1.3.2
+lpips==0.1.4
+mat4py==0.5.0
+matplotlib==3.5.1
+networkx==2.7.1
+ninja==1.10.2.3
+notebook==6.4.7
+numba==0.55.1
+numpy==1.21.4
+oauthlib==3.1.1
+opencv-python==4.5.4.60
+pandas==1.3.5
+Pillow
+protobuf==3.19.3
+psutil==5.8.0
+PyYAML @ file:///Users/runner/miniforge3/conda-bld/pyyaml_1636139902075/work
+requests==2.26.0
+scikit-image==0.19.2
+scikit-learn==1.0.2
+scipy==1.4.1
+tensorboard==2.2.2
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorflow==2.2.0
+torch==1.10.0
+torchaudio==0.10.0
+torchdiffeq==0.2.2
+torchvision==0.11.1
+tornado==6.1
+tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1632160078689/work
+urllib3==1.26.7
+validators==0.18.2
+Werkzeug==2.0.2
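For running app.py outside the Space, a small sanity check can confirm that a local environment resolves to the pinned versions above. The snippet is a hypothetical helper, not part of the commit; note that gradio itself is not pinned here, so it is presumably supplied by the Space's SDK configuration.

# Hypothetical sanity check, not part of the commit: print the versions of the
# core pinned packages and whether CUDA is visible to torch.
import cv2
import numpy
import torch

print("torch:", torch.__version__)        # pinned to 1.10.0 above
print("numpy:", numpy.__version__)        # pinned to 1.21.4 above
print("opencv:", cv2.__version__)         # opencv-python pinned to 4.5.4.60 above
print("CUDA available:", torch.cuda.is_available())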