Commit · 8e0a0ad
Parent(s): d98a4ef
fixing deprecated functions in gradio
Files changed:
- README.md +1 -0
- app.py +10 -7
- requirements.txt +2 -1
README.md CHANGED
@@ -6,6 +6,7 @@ colorTo: pink
 sdk: gradio
 app_file: app.py
 pinned: false
+python_version: 3.10
 ---
 
 # Configuration
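The added `python_version` key tells the Spaces builder which Python interpreter to use for the Gradio SDK runtime. For orientation, the front matter after this commit would look roughly as follows; the `title`, `emoji`, and `colorFrom` values are placeholders, since only the lines from `colorTo: pink` down appear in the hunk:

```yaml
---
title: CLIP image search   # placeholder, not shown in the diff
emoji: 🖼️                   # placeholder, not shown in the diff
colorFrom: blue            # placeholder, not shown in the diff
colorTo: pink
sdk: gradio
app_file: app.py
pinned: false
python_version: 3.10
---
```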
app.py CHANGED
@@ -12,12 +12,12 @@ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
 
-
-
 #Open the precomputed embeddings
 emb_filename = 'unsplash-25k-photos-embeddings.pkl'
 with open(emb_filename, 'rb') as fIn:
-    img_names, img_emb = pickle.load(fIn)
+    img_names, img_emb = pickle.load(fIn)
+    #print(f'img_emb: {print(img_emb)}')
+    #print(f'img_names: {print(img_names)}')
 
 
 def search_text(query, top_k=1):
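The substance of this hunk is the pair of commented-out debug prints around the embedding load. Incidentally, those debug lines nest `print()` inside an f-string, so uncommenting them would interpolate `None`; `print(f'img_emb: {img_emb}')` is the usual form. A minimal sketch of what the load yields, assuming the pickle holds a `(filenames, embeddings)` tuple as the unpacking suggests:

```python
import pickle

emb_filename = 'unsplash-25k-photos-embeddings.pkl'
with open(emb_filename, 'rb') as fIn:
    # Assumed layout: (list of image filenames, matrix of CLIP image embeddings)
    img_names, img_emb = pickle.load(fIn)

print(len(img_names))  # one filename per embedding row (~25k for this dataset)
print(img_emb.shape)   # e.g. (N, 512) if stored as an array; ViT-B/32 embeds to 512 dims
```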
@@ -41,9 +41,10 @@ def search_text(query, top_k=1):
 
     image=[]
     for hit in hits:
-        print(img_names[hit['corpus_id']])
+        #print(img_names[hit['corpus_id']])
        object = Image.open(os.path.join("photos/", img_names[hit['corpus_id']]))
        image.append(object)
+        #print(f'array length is: {len(image)}')
 
     return image
 
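The `hits` iterated here come from lines 24-40, which this diff does not show (the loop variable `object` also shadows the builtin; harmless, but renaming it would be cleaner). A hedged sketch of how `hits` is typically produced with this stack; the `'corpus_id'` key matches the output format of `sentence_transformers.util.semantic_search`, though the actual app may differ:

```python
import torch
from sentence_transformers import util

def search_text(query, top_k=1):
    # tokenizer, model, and img_emb are the globals defined at the top of app.py.
    inputs = tokenizer([query], padding=True, return_tensors="pt")
    with torch.no_grad():
        query_emb = model.get_text_features(**inputs)  # CLIP text embedding

    # Cosine-similarity search over the precomputed image embeddings.
    # Returns one list per query of {'corpus_id': int, 'score': float} dicts.
    hits = util.semantic_search(query_emb, img_emb, top_k=top_k)[0]
    return hits
```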
@@ -53,11 +54,13 @@ iface = gr.Interface(
     description = "Gradio Demo fo CLIP model. \n This demo is based on assessment for the 🤗 Huggingface course 2. \n To use it, simply write which image you are looking for. Read more at the links below.",
     article = "You find more information about this demo on my ✨ github repository [marcelcastrobr](https://github.com/marcelcastrobr/huggingface_course2)",
     fn=search_text,
-    inputs=[gr.
+    inputs=[gr.Textbox(lines=4,
         label="Write what you are looking for in an image...",
         placeholder="Text Here..."),
-    gr.
-    outputs=gr.
+    gr.Slider(0, 5, step=1)],
+    outputs=[gr.Gallery(
+        label="Generated images", show_label=False, elem_id="gallery"
+    ).style(grid=[2], height="auto")]
     ,examples=[[("Dog in the beach"), 2],
     [("Paris during night."), 1],
     [("A cute kangaroo"), 5],
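The removed lines above are truncated by the diff viewer (each breaks off right after `gr.`); given the commit message, they presumably used the Gradio 2.x `gr.inputs.*` / `gr.outputs.*` namespaces that Gradio 3 dropped. A sketch of the migration this commit performs, with the 2.x side reconstructed as an assumption rather than read from the diff:

```python
import gradio as gr

# Gradio 2.x style (deprecated) -- an assumption about the truncated lines:
#   inputs=[gr.inputs.Textbox(lines=4, label="...", placeholder="Text Here..."),
#           gr.inputs.Slider(0, 5, step=1)]
#   outputs=...  # not recoverable from the diff; some 2.x multi-image output

# Gradio 3.x style, as added by this commit: components move to the top-level
# namespace and a Gallery renders the list of PIL images search_text returns.
inputs = [
    gr.Textbox(lines=4,
               label="Write what you are looking for in an image...",
               placeholder="Text Here..."),
    gr.Slider(0, 5, step=1),
]
outputs = [
    gr.Gallery(label="Generated images", show_label=False, elem_id="gallery")
      .style(grid=[2], height="auto")  # .style() is the 3.x layout hook
]
```

Note that `.style()` was itself deprecated in later releases (Gradio 4.x takes `columns=` directly in the `Gallery` constructor), which is one reason to constrain the dependency, as noted under requirements.txt below.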
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 transformers
 sentence-transformers
-torch
+torch
+gradio
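Since the app now targets the Gradio 3.x component API, the unpinned `gradio` line added here can break again on the next major release. A possible hardening, not part of this commit, is to constrain the version; the exact bound below is illustrative:

```text
transformers
sentence-transformers
torch
gradio<4  # illustrative constraint: keeps the 3.x API this commit targets
```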