UniquePratham committed
Commit • bac8e56
1 Parent(s): 58c599f
Update app.py
app.py CHANGED
@@ -10,18 +10,13 @@ import tempfile
 import os
 import re
 import json
+import base64
 from groq import Groq
 
 # Page configuration
 st.set_page_config(page_title="DualTextOCRFusion", page_icon="π", layout="wide")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Directories for images and results
-IMAGES_DIR = "images"
-RESULTS_DIR = "results"
-os.makedirs(IMAGES_DIR, exist_ok=True)
-os.makedirs(RESULTS_DIR, exist_ok=True)
-
 # Load Surya OCR Models (English + Hindi)
 det_processor, det_model = load_det_processor(), load_det_model()
 det_model.to(device)
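The hunk context shows only part of the import block; the visible code also relies on names defined above line 10, which this commit does not touch. A minimal sketch of what those earlier lines presumably include (an assumption inferred from the calls to st.*, torch.cuda, Image.open, and tempfile below):

# Presumed top-of-file imports for app.py (not part of this commit's diff):
import tempfile             # appears in the hunk context above

import streamlit as st      # st.set_page_config, st.sidebar, st.columns, ...
import torch                # torch.cuda.is_available() drives device selection
from PIL import Image       # Image.open(...) on the uploaded file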
@@ -56,12 +51,18 @@ def clean_extracted_text(text):
 
 # Polish the text using a model
 def polish_text_with_ai(cleaned_text):
-    prompt = f"Remove unwanted spaces between and inside words to join incomplete words, creating a meaningful sentence in either Hindi, English, or Hinglish without altering any words from the given extracted text. Then, return the corrected text with adjusted spaces."
+    prompt = f"Remove unwanted spaces between and inside words to join incomplete words, creating a meaningful sentence in either Hindi, English, or Hinglish without altering any words from the given extracted text. Then, return the corrected text with adjusted spaces, keeping it as close to the original as possible."
     client = Groq(api_key="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")
     chat_completion = client.chat.completions.create(
         messages=[
-            {
-                ...
+            {
+                "role": "system",
+                "content": "You are a pedantic sentence corrector. Remove extra spaces between and within words to make the sentence meaningful in English, Hindi, or Hinglish, according to the context of the sentence, without changing any words."
+            },
+            {
+                "role": "user",
+                "content": prompt,
+            }
         ],
         model="gemma2-9b-it",
     )
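The hunk ends before polish_text_with_ai returns, so the diff does not show how the model's reply is read. With the Groq Python SDK, the generated text sits on the first choice of the completion object. A minimal sketch of how the function might finish, reading the key from a GROQ_API_KEY environment variable instead of hardcoding it (the variable name is an assumption, not something this commit sets up):

import os
from groq import Groq

def polish_text_with_ai_sketch(cleaned_text):
    # Sketch only: read the key from the environment rather than embedding it
    # in app.py as the committed code does.
    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system",
             "content": "Fix spacing between and within words without changing any words."},
            # Note: the committed code sends only the fixed instruction string as the
            # user message; this sketch passes the extracted text itself instead.
            {"role": "user", "content": cleaned_text},
        ],
        model="gemma2-9b-it",
    )
    # The Groq client mirrors the OpenAI interface: the reply text is on the
    # first choice's message.
    return chat_completion.choices[0].message.content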
@@ -103,35 +104,56 @@ model_choice = st.sidebar.selectbox("Select OCR Model:", ("GOT_CPU", "GOT_GPU",
 
 # Upload Section
 uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])
-
-
-if ...
-    ...
+
+# Input from clipboard
+if st.sidebar.button("Paste from Clipboard"):
+    try:
+        clipboard_data = st.experimental_get_clipboard()
+        if clipboard_data:
+            image_data = base64.b64decode(clipboard_data)
+            uploaded_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
+            uploaded_file.write(image_data)
+            uploaded_file.seek(0)
+    except Exception as e:
+        st.sidebar.warning(f"Clipboard data is not an image or an error occurred: {str(e)}")
+
+# Input from camera
+camera_file = st.sidebar.camera_input("Capture from Camera")
+if camera_file:
+    uploaded_file = camera_file
+
+# Predict button
+predict_button = st.sidebar.button("Predict")
+
+# Main columns
+col1, col2 = st.columns([2, 1])
+
+# Display image preview
+if uploaded_file:
+    image = Image.open(uploaded_file)
+    with col1:
+        col1.image(image, caption='Uploaded Image', use_column_width=False, width=300)
+
+    # Save uploaded image to 'images' folder
+    images_dir = 'images'
+    os.makedirs(images_dir, exist_ok=True)
+    image_path = os.path.join(images_dir, uploaded_file.name)
+    with open(image_path, 'wb') as f:
+        f.write(uploaded_file.getvalue())
+
+    # Check if the result already exists
+    results_dir = 'results'
+    os.makedirs(results_dir, exist_ok=True)
+    result_path = os.path.join(results_dir, f"{uploaded_file.name}_result.json")
+
+    # Handle predictions
+    if predict_button:
+        if os.path.exists(result_path):
+            with open(result_path, 'r') as f:
+                result_data = json.load(f)
+                extracted_text = result_data["polished_text"]
         else:
             with st.spinner("Processing..."):
-                image = Image.open(image_path).convert("RGB")
-
                 if model_choice == "GOT_CPU":
                     got_model, tokenizer = init_got_model()
                     extracted_text = extract_text_got(image_path, got_model, tokenizer)
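The new upload flow saves the image under images/ and caches each OCR result as results/<filename>_result.json, so pressing Predict on a previously processed file skips inference. Keying the cache by file name collides when two different images share a name; a hedged alternative, not part of this commit, keys it by a content hash (cached_result_path and load_cached_text are hypothetical helpers):

import hashlib
import json
import os

def cached_result_path(file_bytes, results_dir="results"):
    # Hypothetical helper: derive the cache key from the image bytes so that
    # identically named but different images get separate cache entries.
    digest = hashlib.sha256(file_bytes).hexdigest()[:16]
    os.makedirs(results_dir, exist_ok=True)
    return os.path.join(results_dir, f"{digest}_result.json")

def load_cached_text(result_path):
    # Return the cached polished text if present, otherwise None.
    if os.path.exists(result_path):
        with open(result_path, "r") as f:
            return json.load(f)["polished_text"]
    return None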
@@ -150,26 +172,35 @@ if uploaded_file or clipboard_text:
                     text_list = re.findall(r"text='(.*?)'", str(predictions[0]))
                     extracted_text = ' '.join(text_list)
 
+                # Clean and polish extracted text
                 cleaned_text = clean_extracted_text(extracted_text)
                 polished_text = polish_text_with_ai(cleaned_text) if model_choice in ["GOT_CPU", "GOT_GPU"] else cleaned_text
 
-                # Save
-                ...
-                # Display image preview and text
-                if image_path:
-                    with col1:
-                        col1.image(image_path, caption='Uploaded Image', use_column_width=False, width=300)
+                # Save results to JSON file
+                result_data = {"polished_text": polished_text}
+                with open(result_path, 'w') as f:
+                    json.dump(result_data, f)
 
+        # Display extracted text
         st.subheader("Extracted Text (Cleaned & Polished)")
-        st.markdown(
-            ...
+        st.markdown(extracted_text, unsafe_allow_html=True)
+
+        # Search functionality
+        def update_search():
+            if search_query:
+                highlighted_text = highlight_text(extracted_text, search_query)
+                st.session_state["highlighted_result"] = highlighted_text
+            else:
+                st.session_state["highlighted_result"] = extracted_text
+
+        search_query = st.text_input(
+            "Search in extracted text:",
+            key="search_query",
+            placeholder="Type to search...",
+            on_change=update_search,
+            disabled=not uploaded_file
+        )
+
+        if "highlighted_result" in st.session_state:
             st.markdown("### Highlighted Search Results:")
-            st.markdown(
+            st.markdown(st.session_state["highlighted_result"], unsafe_allow_html=True)