"""Find your apparel effortlessly. Just describe your apparel and get the relevant recommendations with links!"""
import base64
import logging
import os
import pickle
import zipfile
from pathlib import Path
from typing import Any, Dict, List, Tuple

import gradio as gr
import torch
import wandb
from multilingual_clip import pt_multilingual_clip
from PIL.Image import Image
from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer
os.environ["CUDA_VISIBLE_DEVICES"] = "" # do not use GPU
logging.basicConfig(level=logging.INFO)
DEFAULT_APPLICATION_NAME = "fashion-aggregator"
APP_DIR = Path(__file__).resolve().parent # what is the directory for this application?
LOGO = APP_DIR / "temp.jpg" # path to a small image for display in browser tab and social media
README = APP_DIR / "README.md" # path to an app readme file in HTML/markdown
DEFAULT_PORT = 11700
EMBEDDINGS_DIR = "artifacts/img-embeddings"
EMBEDDINGS_FILE = os.path.join(EMBEDDINGS_DIR, "embeddings.pkl")
RAW_PHOTOS_DIR = "artifacts/raw-photos"
# Download image embeddings and raw photos
wandb.login(key=os.getenv("wandb"))  # read the API key from an environment secret rather than hardcoding it
api = wandb.Api()
artifact_embeddings = api.artifact("ryparmar/fashion-aggregator/unimoda-images:v1")
artifact_embeddings.download(EMBEDDINGS_DIR)
artifact_raw_photos = api.artifact("ryparmar/fashion-aggregator/unimoda-raw-images:v1")
artifact_raw_photos.download("artifacts")
with zipfile.ZipFile("artifacts/unimoda.zip", "r") as zip_ref:
    zip_ref.extractall(RAW_PHOTOS_DIR)
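
# NOTE: embeddings.pkl is unpickled below as a (image_names, image_embeddings) pair,
# i.e. a list of image file names plus the matching matrix of precomputed CLIP image embeddings.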

class TextEncoder:
    """Encodes the given text."""

    def __init__(self, model_path="M-CLIP/XLM-Roberta-Large-Vit-B-32"):
        self.model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)

    @torch.no_grad()
    def encode(self, query: str) -> torch.Tensor:
        """Predict/infer the text embedding for a given query."""
        query_emb = self.model.forward([query], self.tokenizer)
        return query_emb
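
# Illustrative usage (not executed here); the embedding width depends on the M-CLIP checkpoint:
#   text_encoder = TextEncoder()
#   query_emb = text_encoder.encode("red summer dress")  # torch.Tensor of shape (1, embedding_dim)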

class ImageEncoder:
    """Encodes the given image."""

    def __init__(self, model_path="clip-ViT-B-32"):
        self.model = SentenceTransformer(model_path)

    @torch.no_grad()
    def encode(self, image: Image) -> torch.Tensor:
        """Predict/infer the image embedding for a given image."""
        image_emb = self.model.encode([image], convert_to_tensor=True, show_progress_bar=False)
        return image_emb
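
# Illustrative usage (not executed here; "example.jpg" is a hypothetical local file):
#   from PIL import Image as PILImage
#   image_encoder = ImageEncoder()
#   image_emb = image_encoder.encode(PILImage.open("example.jpg"))  # torch.Tensor of shape (1, embedding_dim)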

class Retriever:
    """Retrieves relevant images for a given text embedding."""

    def __init__(self, image_embeddings_path=None):
        self.text_encoder = TextEncoder()
        self.image_encoder = ImageEncoder()
        with open(image_embeddings_path, "rb") as file:
            self.image_names, self.image_embeddings = pickle.load(file)
        self.image_names = [
            img_name.replace("fashion-aggregator/fashion_aggregator/data/photos/", "")
            for img_name in self.image_names
        ]
        print("Images:", len(self.image_names))

    @torch.no_grad()
    def predict(self, text_query: str, k: int = 10) -> List[Any]:
        """Return the top-k relevant items for a given text query."""
        query_emb = self.text_encoder.encode(text_query)
        relevant_images = util.semantic_search(query_emb, self.image_embeddings, top_k=k)[0]
        return relevant_images

    @torch.no_grad()
    def search_images(self, text_query: str, k: int = 6) -> Dict[str, List[Any]]:
        """Return the paths and scores of the top-k relevant images for a given text query."""
        images = self.predict(text_query, k)
        paths_and_scores = {"path": [], "score": []}
        for img in images:
            paths_and_scores["path"].append(os.path.join(RAW_PHOTOS_DIR, self.image_names[img["corpus_id"]]))
            paths_and_scores["score"].append(img["score"])
        return paths_and_scores
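
# Illustrative standalone usage, bypassing the Gradio UI (not executed here):
#   retriever = Retriever(image_embeddings_path=EMBEDDINGS_FILE)
#   results = retriever.search_images("floral maxi dress", k=3)
#   # results maps "path" to the top-3 image paths and "score" to their cosine-similarity scores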

class PredictorBackend:
    """Interface to a backend that serves predictions.

    To communicate with a backend accessible via a URL, provide the url kwarg.
    Otherwise, runs a predictor locally.
    """

    def __init__(self, url=None):
        if url is not None:
            self.url = url
            self._predict = self._predict_from_endpoint  # NOTE: _predict_from_endpoint is not defined in this file
        else:
            model = Retriever(image_embeddings_path=EMBEDDINGS_FILE)
            self._predict = model.predict
            self._search_images = model.search_images

    def run(self, text: str):
        pred, metrics = self._predict_with_metrics(text)
        self._log_inference(pred, metrics)
        return pred

    def _predict_with_metrics(self, text: str) -> Tuple[List[str], Dict[str, float]]:
        paths_and_scores = self._search_images(text)
        metrics = {"mean_score": sum(paths_and_scores["score"]) / len(paths_and_scores["score"])}
        return paths_and_scores["path"], metrics

    def _log_inference(self, pred, metrics):
        for key, value in metrics.items():
            logging.info(f"METRIC {key} {value}")
        logging.info(f"PRED >begin\n{pred}\nPRED >end")
predictor = PredictorBackend()
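
# Illustrative usage (not executed here):
#   image_paths = predictor.run("denim jacket")  # logs METRIC mean_score ... and returns the top image paths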

# Read the logo image file and encode it as base64 for inline display in the description below
with open("./1001epochs.png", "rb") as f:
    image_data = f.read()
image_base64 = base64.b64encode(image_data).decode("utf-8")
allow_flagging = "never"
title = f"""
<h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
-webkit-text-fill-color: transparent; text-align: center;">
Fashion Aggregator
</h2>
"""
description = f"""
<div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
<p style="font-size: 18px; color: #4AAAFF; text-align: center;">
Discover your perfect apparel effortlessly. Simply describe what you're looking for!
</p>
<div style="display: flex; align-items: center; margin-bottom: 0px;">
<img src='data:image/jpeg;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
<p style="font-size: 14px; color: #555;">
Disclaimer: The purpose of this application is solely for demonstration. 1001epochs does not claim ownership for the results. Contact: [email protected] for full solution.
</p>
</div>
</div>
"""

frontend = gr.Interface(
    fn=predictor.run,
    inputs=gr.Textbox(label="Item Description", placeholder="Enter item description here"),
    outputs=gr.Gallery(label="Relevant Items"),
    title=title,
    description=description,
    cache_examples=False,  # should we cache those inputs for faster inference? slows down start
    allow_flagging=allow_flagging,  # should we show users the option to "flag" outputs?
    flagging_options=["incorrect", "offensive", "other"],  # what options do users have for feedback?
)
frontend.launch(
    # server_name="0.0.0.0",  # make server accessible, binding all interfaces  # noqa: S104
    # server_port=DEFAULT_PORT,  # set a port to bind to, failing if unavailable
    # share=False,  # should we create a (temporary) public link on https://gradio.app?
)