Commit 80dc74c by jacopoteneggi
1 Parent(s): 24fbc2e

Update
Files changed:
- README.md +12 -10
- app.py +54 -0
- app_lib/__init__.py +0 -0
- app_lib/__pycache__/__init__.cpython-310.pyc +0 -0
- app_lib/__pycache__/main.cpython-310.pyc +0 -0
- app_lib/__pycache__/user_input.cpython-310.pyc +0 -0
- app_lib/__pycache__/utils.cpython-310.pyc +0 -0
- app_lib/main.py +51 -0
- app_lib/user_input.py +83 -0
- app_lib/utils.py +14 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,13 +1,15 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: I Bet You Did Not Mean That
+emoji: 🔎
+colorFrom: blue
+colorTo: indigo
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.25.0
 app_file: app.py
-
-
-
-
-
+datasets:
+- jacopoteneggi/IBYDMT
+preload_from_hub:
+- laion/CLIP-ViT-B-32-laion2B-s34B-b79K open_clip_config.json,open_clip_pytorch_model.bin
+pinned: true
+license: cc-by-nc-4.0
+---
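Note: the new preload_from_hub entry asks the Space to cache the listed LAION CLIP checkpoint files at build time. For reference, the same two files could be fetched manually with huggingface_hub (a minimal sketch, not part of the commit):

    from huggingface_hub import hf_hub_download

    # Download the files README.md asks the Space to preload.
    for filename in ["open_clip_config.json", "open_clip_pytorch_model.bin"]:
        path = hf_hub_download(
            repo_id="laion/CLIP-ViT-B-32-laion2B-s34B-b79K",
            filename=filename,
        )
        print(path)  # local cache path of the downloaded file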
app.py
ADDED
@@ -0,0 +1,54 @@
+import numpy as np
+import open_clip
+import streamlit as st
+import torch
+
+from app_lib.main import main
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+st.set_page_config(
+    layout="wide",
+    initial_sidebar_state=st.session_state.get("sidebar_state", "collapsed"),
+)
+st.session_state.sidebar_state = "collapsed"
+st.markdown(
+    """
+    <style>
+        textarea {
+            font-family: monospace !important;
+        }
+        input {
+            font-family: monospace !important;
+        }
+    </style>
+    """,
+    unsafe_allow_html=True,
+)
+
+st.markdown(
+    """
+    # I Bet You Did Not Mean That
+
+    Official HF Space for the paper [*I Bet You Did Not Mean That: Testing Semantic Importance via Betting*](https://arxiv.org/pdf/2405.19146), by [Jacopo Teneggi](https://jacopoteneggi.github.io) and [Jeremias Sulam](https://sites.google.com/view/jsulam).
+
+    ---
+    """,
+)
+
+
+def load_clip():
+    model, _, preprocess = open_clip.create_model_and_transforms(
+        "hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
+    )
+    tokenizer = open_clip.get_tokenizer("hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
+
+
+def test(
+    image, class_name, concepts, cardinality, model_name, dataset_name="imagenette"
+):
+    print("test!")  # placeholder: the testing procedure is not implemented in this commit
+
+
+if __name__ == "__main__":
+    main()
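Note: as committed, load_clip() builds the model, preprocessing transform, and tokenizer but returns nothing, and test() is still a stub (it is also the function app_lib/main.py's Test button calls, without importing it). A minimal sketch of how the loaded open_clip model could drive a zero-shot prediction, assuming load_clip() is amended to return (model, preprocess, tokenizer); the helper name zero_shot_probs is hypothetical:

    import torch

    def zero_shot_probs(model, preprocess, tokenizer, image, class_names):
        # Embed the image and one "a photo of a ..." prompt per class.
        image_input = preprocess(image).unsqueeze(0)
        text_input = tokenizer([f"a photo of a {c}" for c in class_names])
        with torch.no_grad():
            image_features = model.encode_image(image_input)
            text_features = model.encode_text(text_input)
        # Normalize, then softmax the scaled cosine similarities over classes.
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        return (100.0 * image_features @ text_features.T).softmax(dim=-1)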
app_lib/__init__.py
ADDED
File without changes

app_lib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (147 Bytes)

app_lib/__pycache__/main.cpython-310.pyc
ADDED
Binary file (1.38 kB)

app_lib/__pycache__/user_input.cpython-310.pyc
ADDED
Binary file (2.74 kB)

app_lib/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (513 Bytes)
app_lib/main.py
ADDED
@@ -0,0 +1,51 @@
+import streamlit as st
+
+from app_lib.user_input import (
+    get_cardinality,
+    get_class_name,
+    get_concepts,
+    get_image,
+    get_model_name,
+)
+
+
+def main():
+    columns = st.columns([0.40, 0.60])
+
+    with columns[0]:
+        model_name = get_model_name()
+
+        row1 = st.columns(2)
+        row2 = st.columns(2)
+
+        with row1[0]:
+            image = get_image()
+            st.image(image, use_column_width=True)
+        with row1[1]:
+            class_name, class_ready, class_error = get_class_name()
+            concepts, concepts_ready, concepts_error = get_concepts()
+            cardinality = get_cardinality(concepts, concepts_ready)
+
+        with row2[0]:
+            change_image_button = st.button("Change Image", use_container_width=True)
+            if change_image_button:
+                st.session_state.sidebar_state = "expanded"
+                st.experimental_rerun()
+        with row2[1]:
+            ready = class_ready and concepts_ready
+
+            error_message = ""
+            if class_error is not None:
+                error_message += f"- {class_error}\n"
+            if concepts_error is not None:
+                error_message += f"- {concepts_error}\n"
+
+            test_button = st.button(
+                "Test",
+                help=None if ready else error_message,
+                use_container_width=True,
+                disabled=not ready,
+            )
+
+    if test_button:
+        test(image, class_name, concepts, cardinality, model_name)
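The Change Image button relies on a common Streamlit idiom: st.set_page_config only takes effect at the top of a script run, so the desired sidebar state is stashed in st.session_state and a rerun is forced. A standalone sketch of the pattern, using the same keys as app.py:

    import streamlit as st

    # set_page_config reads the state stored by the *previous* run.
    st.set_page_config(
        initial_sidebar_state=st.session_state.get("sidebar_state", "collapsed")
    )
    st.session_state.sidebar_state = "collapsed"  # default for the next run

    if st.button("Change Image"):
        st.session_state.sidebar_state = "expanded"  # survives the rerun
        st.experimental_rerun()  # restart the script so the sidebar opens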
app_lib/user_input.py
ADDED
@@ -0,0 +1,83 @@
+import streamlit as st
+from PIL import Image
+from streamlit_image_select import image_select
+
+from app_lib.utils import SUPPORTED_MODELS
+
+
+def _validate_class_name(class_name):
+    if class_name is None:
+        return (False, "Class name cannot be empty.")
+    if class_name.strip() == "":
+        return (False, "Class name cannot be empty.")
+    return (True, None)
+
+
+def _validate_concepts(concepts):
+    if len(concepts) < 3:
+        return (False, "You must provide at least 3 concepts")
+    if len(concepts) > 10:
+        return (False, "Maximum 10 concepts allowed")
+    return (True, None)
+
+
+def get_model_name():
+    return st.selectbox(
+        "Choose a model to test",
+        options=SUPPORTED_MODELS,
+        help="Name of the vision-language model to test the predictions of.",
+    )
+
+
+def get_image():
+    with st.sidebar:
+        uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
+        image = uploaded_file or image_select(
+            label="or select one",
+            images=[
+                "assets/ace.jpg",
+                "assets/ace.jpg",
+                "assets/ace.jpg",
+                "assets/ace.jpg",
+            ],
+        )
+        return Image.open(image)
+
+
+def get_class_name():
+    class_name = st.text_input(
+        "Class to test",
+        help="Name of the class to build the zero-shot CLIP classifier with.",
+        value="cat",
+    )
+
+    class_ready, class_error = _validate_class_name(class_name)
+    return class_name, class_ready, class_error
+
+
+def get_concepts():
+    concepts = st.text_area(
+        "Concepts to test (max 10)",
+        help="List of concepts to test the predictions of the model with. Write one concept per line.",
+        height=160,
+        value="piano\ncute\nwhiskers\nmusic\nwild",
+    )
+    concepts = concepts.split("\n")
+    concepts = [concept.strip() for concept in concepts]
+    concepts = [concept for concept in concepts if concept != ""]
+    concepts = list(set(concepts))
+
+    concepts_ready, concepts_error = _validate_concepts(concepts)
+    return concepts, concepts_ready, concepts_error
+
+
+def get_cardinality(concepts, concepts_ready):
+    return st.slider(
+        "Size of conditioning set",
+        help="The number of concepts to condition model predictions on.",
+        min_value=1,
+        max_value=max(2, len(concepts) - 1),
+        value=1,
+        step=1,
+        disabled=not concepts_ready,
+    )
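One detail in get_concepts(): list(set(concepts)) removes duplicates but does not preserve the order the concepts were typed in. A quick illustration, with dict.fromkeys as an order-preserving alternative (not used in the commit):

    concepts = ["piano", "cute", "piano", "wild"]
    print(list(set(concepts)))            # arbitrary order, e.g. ['wild', 'piano', 'cute']
    print(list(dict.fromkeys(concepts)))  # input order kept: ['piano', 'cute', 'wild']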
app_lib/utils.py
ADDED
@@ -0,0 +1,14 @@
+from huggingface_hub import hf_hub_download
+
+supported_models_path = hf_hub_download(
+    repo_id="jacopoteneggi/IBYDMT",
+    filename="supported_models.txt",
+    repo_type="dataset",
+)
+
+SUPPORTED_MODELS = []
+with open(supported_models_path, "r") as f:
+    for line in f:
+        line = line.strip()
+        model_name, _ = line.split(",")
+        SUPPORTED_MODELS.append(model_name)
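utils.py assumes each line of supported_models.txt holds exactly two comma-separated fields, model name first; the second field is discarded. A hypothetical entry (the real file lives in the jacopoteneggi/IBYDMT dataset) parsed the same way:

    line = "open_clip:ViT-B-32,laion2b_s34b_b79k\n"  # hypothetical entry
    model_name, _ = line.strip().split(",")
    print(model_name)  # -> "open_clip:ViT-B-32"
    # Note: a blank or comma-free line would make the unpacking raise ValueError.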
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+streamlit-image-select
+clip @ git+https://github.com/openai/CLIP@main
+open_clip_torch