Amith Adiraju
committed on
Commit
·
4a4298a
1
Parent(s):
8dc6fb5
Changed folder structure to align with Hugging Face Streamlit Spaces, which requires a flat layout.
Browse files
src/app.py → app.py
RENAMED
File without changes
|
{src → inference}/__init__.py
RENAMED
File without changes
|
{src/inference → inference}/config.py
RENAMED
File without changes
|
{src/inference → inference}/preprocess_image.py
RENAMED
File without changes
|
{src/inference → inference}/translate.py
RENAMED
File without changes
|
src/inference/__init__.py
DELETED
File without changes
|
src/main.py
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
import streamlit as st

from inference.translate import (
    extract_filter_img,
    transcribe_menu_model,
    load_models
)

from inference.config import DEBUG_MODE
from PIL import Image
import time

# Streamlit app: upload a menu photo, OCR-extract its text, and have an LLM
# transcribe/explain the menu items.
st.title("Image Upload and Processing")


# Load the open-source text detector plus the LLM (tokenizer + summarizer)
# used to explain menu items. NOTE(review): indentation/line breaks of this
# deleted file were reconstructed from a diff view — confirm against history.
text_extractor, \
item_tokenizer, item_summarizer = load_models(item_summarizer = "google/flan-t5-large")

# Streamlit widget that accepts an image file from any device.
uploaded_file = st.file_uploader("Choose an image...",
                                 type=["jpg", "jpeg", "png"])


if uploaded_file is not None:
    menu_image = Image.open(uploaded_file)

    # Preview is opt-in so the page stays compact.
    if st.checkbox('Show Uploaded Image'):
        st.image(menu_image,
                 caption='Uploaded Image',
                 use_column_width=True)

    # All heavy work happens only after an explicit submit.
    if st.button("Submit"):

        status_extract = st.empty()
        status_extract.write("Pre-processing and extracting text out of your image ....")
        t_filter_start = time.perf_counter()
        # OCR + filtering pass over the uploaded image.
        filtered_text = extract_filter_img(menu_image, text_extractor)
        t_filter_end = time.perf_counter()

        status_llm = st.empty()
        status_llm.write("All pre-processing done, transcribing your menu items now ....")
        t_llm_start = time.perf_counter()
        translated_text_dict = transcribe_menu_model(menu_texts=filtered_text,
                                                     text_tokenizer=item_tokenizer,
                                                     text_summarizer=item_summarizer
                                                     )

        status_done = st.empty()
        status_done.write("Done transcribing ... ")
        t_llm_end = time.perf_counter()

        # Clear the transient status placeholders before the final banner.
        status_extract.empty(); status_llm.empty(); status_done.empty()
        st.success("Image processed successfully! " )

        if DEBUG_MODE:
            filter_time_sec = t_filter_end - t_filter_start
            llm_time_sec = t_llm_end - t_llm_start
            total_time_sec = filter_time_sec + llm_time_sec

            st.write("Time took to extract and filter text {}".format(filter_time_sec))
            st.write("Time took to summarize by LLM {}".format(llm_time_sec))
            st.write('Overall time taken in seconds: {}'.format(total_time_sec))

        st.table(translated_text_dict)
|
71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/requirements.txt
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
sentencepiece==0.2.0
|
2 |
-
transformers==4.44.2
|
3 |
-
streamlit==1.37.1
|
4 |
-
pandas==2.2.2
|
5 |
-
altair
|
6 |
-
easyocr==1.6.2
|
7 |
-
matplotlib==3.7.1
|
8 |
-
numpy==1.24.2
|
9 |
-
Pillow==9.5.0
|
10 |
-
nltk==3.9.1
|
11 |
-
torch==2.1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|