Commit 12f775f by Sirreajohn
1 Parent(s): 551e207

uploaded gradio app
Files changed:
- __pycache__/models.cpython-39.pyc +0 -0
- app.py +51 -0
- class_names_to_idx.pkl +3 -0
- examples/108310.jpg +0 -0
- examples/1203702.jpg +0 -0
- examples/2572488.jpg +0 -0
- examples/296426.jpg +0 -0
- examples/511818.jpg +0 -0
- models.py +15 -0
- models/ViT_16_base_101_classes_pretrained_custom_head.pth +3 -0
- models/effnet_b2_101_classes_pretrained_custom_head.pth +3 -0
__pycache__/models.cpython-39.pyc
ADDED
Binary file (792 Bytes)
app.py
ADDED
@@ -0,0 +1,51 @@
import time
import pickle as pkl
from pathlib import Path

import gradio as gr
import torch

from models import *

# example images bundled with the Space, listed below the upload widget
examples = [[str(path)] for path in Path("examples").glob("*")]

# mapping of class index -> class name for the 101 food classes
with open('class_names_to_idx.pkl', 'rb') as fp:
    class_idx_to_names = pkl.load(fp)

def predict_one(model, transforms, image, device, class_idx_to_names):
    """Run a single image through the model and time the forward pass."""
    model.eval()
    model = model.to(device)
    with torch.inference_mode():

        start_time = time.perf_counter()
        image_transformed = transforms(image).unsqueeze(dim=0).to(device)

        y_logits = model(image_transformed)
        y_preds = torch.softmax(y_logits, dim=1)

        end_time = time.perf_counter()

    predictions = {class_idx_to_names[index]: x.item() for index, x in enumerate(y_preds[0])}

    return predictions, end_time - start_time

def predict(image, model_choice):
    # default to the lighter EfficientNet-B2 model when nothing is selected
    if model_choice is None or model_choice == "effnet_b2":
        model, transforms = get_effnet_b2()
    else:
        model, transforms = get_vit_16_base_transformer()

    predictions, time_taken = predict_one(model, transforms, image, "cpu", class_idx_to_names)
    return predictions, time_taken


title = "Food Recognition ππ"
desc = "A dual-model app featuring an EfficientNet-B2 feature extractor and a Vision Transformer, now bigger than ever with 101 classes."

demo = gr.Interface(fn=predict,
                    inputs=[gr.Image(type="pil", label="upload a JPEG or PNG"),
                            gr.Radio(["effnet_b2", "ViT (Vision Transformer)"], label="choose model (defaults to effnet_b2)")],
                    outputs=[gr.Label(num_top_classes=5, label="predictions"),
                             gr.Number(label="Prediction Time in seconds")],
                    examples=examples,
                    title=title,
                    description=desc)

demo.launch(debug=False)
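For local debugging, the prediction path can be exercised without launching the Gradio UI. The snippet below is a minimal sketch, not part of this commit: the file name sanity_check.py and the choice of example image are assumptions, and it simply reuses get_effnet_b2 from models.py and the pickled index-to-name mapping the same way app.py does.

# sanity_check.py -- hypothetical helper, not included in this commit.
# Runs one bundled example image through the EfficientNet-B2 path without Gradio.
# Assumes the repo root is the working directory and the LFS checkpoints are present.
import pickle as pkl

import torch
from PIL import Image

from models import get_effnet_b2

with open("class_names_to_idx.pkl", "rb") as fp:
    class_idx_to_names = pkl.load(fp)

model, transforms = get_effnet_b2()
model.eval()

image = Image.open("examples/108310.jpg")
with torch.inference_mode():
    probs = torch.softmax(model(transforms(image).unsqueeze(dim=0)), dim=1)[0]

# print the five most likely classes
top5 = sorted(((class_idx_to_names[i], p.item()) for i, p in enumerate(probs)),
              key=lambda pair: pair[1], reverse=True)[:5]
for name, prob in top5:
    print(f"{name}: {prob:.3f}")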
class_names_to_idx.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:299cd0f46194deac09e3487c45b93acf02c991f09417cea83149ab5ff0cbc3a5
size 1604
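class_names_to_idx.pkl is loaded by app.py into class_idx_to_names and indexed by integer, so it presumably holds a dict mapping class index to class name for the 101 food classes (the 1.6 kB size is consistent with that). The generation script is not in this commit; the sketch below shows one way such a mapping could be rebuilt from torchvision's Food101 dataset, at the cost of downloading the full dataset just to read its class list.

# make_class_map.py -- hypothetical script, not included in this commit.
# Rebuilds an index -> class-name mapping of the kind app.py expects.
# Note: Food101(download=True) fetches the whole ~5 GB dataset just for .classes.
import pickle as pkl

from torchvision.datasets import Food101

classes = Food101(root="data", split="train", download=True).classes  # 101 class names
class_idx_to_names = {idx: name for idx, name in enumerate(classes)}

with open("class_names_to_idx.pkl", "wb") as fp:
    pkl.dump(class_idx_to_names, fp)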
examples/108310.jpg
ADDED
examples/1203702.jpg
ADDED
examples/2572488.jpg
ADDED
examples/296426.jpg
ADDED
examples/511818.jpg
ADDED
models.py
ADDED
@@ -0,0 +1,15 @@
import torch
from torchvision.models import ViT_B_16_Weights, EfficientNet_B2_Weights


def get_vit_16_base_transformer():
    # the checkpoint stores the whole fine-tuned module, so torch.load restores it directly;
    # forward slashes keep the path valid on the Linux Space host, and map_location="cpu"
    # lets a checkpoint that was saved on a GPU load on CPU-only hardware
    vit_b_16_model = torch.load("models/ViT_16_base_101_classes_pretrained_custom_head.pth",
                                map_location="cpu")
    vit_b_16_transforms = ViT_B_16_Weights.DEFAULT.transforms()

    return vit_b_16_model, vit_b_16_transforms

def get_effnet_b2():
    eff_net_b2_model = torch.load("models/effnet_b2_101_classes_pretrained_custom_head.pth",
                                  map_location="cpu")
    eff_net_b2_transforms = EfficientNet_B2_Weights.DEFAULT.transforms()

    return eff_net_b2_model, eff_net_b2_transforms
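The loaders above assume each .pth file stores a complete fine-tuned module (not just a state_dict), since torch.load is called without rebuilding the architecture. The training code is not part of this commit; the sketch below shows one plausible way such a checkpoint could be produced, with the head layout, dropout value, and backbone-freezing strategy being assumptions rather than anything confirmed by the repo.

# train_sketch.py -- hypothetical reconstruction, not included in this commit.
import torch
from torch import nn
from torchvision.models import efficientnet_b2, EfficientNet_B2_Weights

NUM_CLASSES = 101  # Food-101 has 101 classes

model = efficientnet_b2(weights=EfficientNet_B2_Weights.DEFAULT)
for param in model.features.parameters():  # freeze the pretrained backbone (assumed)
    param.requires_grad = False
model.classifier = nn.Sequential(          # custom head for 101 classes (assumed layout)
    nn.Dropout(p=0.3, inplace=True),
    nn.Linear(in_features=1408, out_features=NUM_CLASSES),
)

# ... fine-tune the head on Food-101 here ...

# Saving the whole module (not just the state_dict) is what lets models.py
# restore it with a bare torch.load().
torch.save(model, "models/effnet_b2_101_classes_pretrained_custom_head.pth")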
models/ViT_16_base_101_classes_pretrained_custom_head.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97648542d2e7c33c5467761db056c936de506c73db0723381e2681fa44d2f667
size 343593351
models/effnet_b2_101_classes_pretrained_custom_head.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:638192c9b6e0f533e2a514739c903d564b6f0563bb270213e3160e166ece5def
size 31924415