Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -11,6 +11,7 @@ import torch.nn as nn
 from transformers import CLIPModel, AutoModel
 from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
 from torchvision.transforms.functional import InterpolationMode
+from torchvision import transforms
 
 from huggingface_hub import hf_hub_download
 from safetensors.torch import load_model
@@ -126,7 +127,8 @@ image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch
 
 def predict_sentiment(text, image):
     print(text, image)
-    image =
+    image = torchvision.io.read_image(image)
+    # image = transforms.ToTensor()(image).unsqueeze(0)
 
     text_inputs = tokenizer(
         text,