TheKnight115 committed: Update app.py
app.py
CHANGED
@@ -1,358 +1,90 @@
 import streamlit as st
-import
-import numpy as np
-import tempfile
-import time
-from ultralytics import YOLO
-from huggingface_hub import hf_hub_download
-from email.mime.text import MIMEText
-from email.mime.multipart import MIMEMultipart
-from email.mime.base import MIMEBase
-from email import encoders
 import os
-import
-
-
-import re
-import torch
-
-# Email credentials
-FROM_EMAIL = "[email protected]"
-EMAIL_PASSWORD = "cawxqifzqiwjufde"  # App-Specific Password
-TO_EMAIL = "[email protected]"
-SMTP_SERVER = 'smtp.gmail.com'
-SMTP_PORT = 465
-
-# Arabic dictionary for converting license plate text
-arabic_dict = {
-    "0": "٠", "1": "١", "2": "٢", "3": "٣", "4": "٤", "5": "٥",
-    "6": "٦", "7": "٧", "8": "٨", "9": "٩", "A": "ا", "B": "ب",
-    "J": "ح", "D": "د", "R": "ر", "S": "س", "X": "ص", "T": "ط",
-    "E": "ع", "G": "ق", "K": "ك", "L": "ل", "Z": "م", "N": "ن",
-    "H": "ه", "U": "و", "V": "ي", " ": " "
-}
-
-# Color mapping for different classes
-class_colors = {
-    0: (0, 255, 0),    # Green (Helmet)
-    1: (255, 0, 0),    # Blue (License Plate)
-    2: (0, 0, 255),    # Red (MotorbikeDelivery)
-    3: (255, 255, 0),  # Cyan (MotorbikeSport)
-    4: (255, 0, 255),  # Magenta (No Helmet)
-    5: (0, 255, 255),  # Yellow (Person)
-}
-
-# Load the OCR model
-processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR2_0", trust_remote_code=True)
-model_ocr = AutoModel.from_pretrained("stepfun-ai/GOT-OCR2_0", trust_remote_code=True).to('cuda')
-
-# Define lane area coordinates (example coordinates)
-red_lane = np.array([[2, 1583], [1, 1131], [1828, 1141], [1912, 1580]], np.int32)
-
-# YOLO inference function
-def run_yolo(image):
-    results = model(image)
-    return results
-
-
-# Function to process YOLO results and draw bounding boxes
-def process_results(results, image):
-    boxes = results[0].boxes
-    for box in boxes:
-        x1, y1, x2, y2 = map(int, box.xyxy[0])
-        conf = box.conf[0]
-        cls = int(box.cls[0])
-        label = model.names[cls]
-        color = class_colors.get(cls, (255, 255, 255))
-
-        # Draw rectangle and label
-        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
-        cv2.putText(image, f"{label} {conf:.2f}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
-    return image
-
-
-# Process uploaded images
-def process_image(uploaded_file):
-    image = np.array(cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1))
-    results = run_yolo(image)
-    processed_image = process_results(results, image)
-    processed_image_rgb = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
-    st.image(processed_image_rgb, caption='Detected Image', use_column_width=True)
-
-    # Create a download button for the processed image
-    im_pil = Image.fromarray(processed_image_rgb)
-    im_pil.save("processed_image.png")
-    with open("processed_image.png", "rb") as file:
-        btn = st.download_button(
-            label="Download Processed Image",
-            data=file,
-            file_name="processed_image.png",
-            mime="image/png"
-        )
-
-# Process and save uploaded videos
-@st.cache_data
-# Define the function to process the video
-def process_video_and_save(uploaded_file):
-    # Path for Arabic font
-    font_path = "alfont_com_arial-1.ttf"
-
-    # Paths for saving violation images
-    violation_image_path = 'violation.jpg'
-
-    # Track emails already sent to avoid duplicate emails
-    sent_emails = {}
-
-    # Dictionary to track violations per license plate
-    violations_dict = {}
-
-    # Paths for saving violation images and videos
-    video_path = "uploaded_video.mp4"
-    output_video_path = 'output_violation.mp4'
-
-    # Save the uploaded video file to this path
-    with open(video_path, "wb") as f:
-        f.write(uploaded_file.getbuffer())
-
-    cap = cv2.VideoCapture(video_path)
-
-    if not cap.isOpened():
-        st.error("Error opening video file.")
-        return None
-
-    # Codec and output settings
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    fps = cap.get(cv2.CAP_PROP_FPS)
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
-
-    margin_y = 50
-
-    # Process frames
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break  # End of video
-
-        # Draw the red lane rectangle on each frame
-        cv2.polylines(frame, [red_lane], isClosed=True, color=(0, 0, 255), thickness=3)  # Red lane
-
-        # Perform detection using YOLO on the current frame
-        results = model.track(frame)
-
-        # Process each detection in the results
-        for box in results[0].boxes:
-            x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())  # Bounding box coordinates
-            label = model.names[int(box.cls)]  # Class name (MotorbikeDelivery, Helmet, etc.)
-            color = (255, 0, 0)  # Use a fixed color for bounding boxes
-            confidence = box.conf[0].item()
-
-            # Initialize flags and variables for the violations
-            helmet_violation = False
-            lane_violation = False
-            violation_type = []
-
-            # Draw bounding box around detected object
-            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3)  # 3 is the thickness of the rectangle
-
-            # Add label to the box (e.g., 'MotorbikeDelivery')
-            cv2.putText(frame, f'{label}: {confidence:.2f}', (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
-
-            # Detect MotorbikeDelivery
-            if label == 'MotorbikeDelivery' and confidence >= 0.4:
-                motorbike_crop = frame[max(0, y1 - margin_y):y2, x1:x2]
-                delivery_center = ((x1 + x2) // 2, (y2))
-                in_red_lane = cv2.pointPolygonTest(red_lane, delivery_center, False)
-                if in_red_lane >= 0:
-                    lane_violation = True
-                    violation_type.append("In Red Lane")
-
-                # Perform detection within the cropped motorbike region
-                sub_results = model(motorbike_crop)
-
-                for result in sub_results[0].boxes:
-                    sub_x1, sub_y1, sub_x2, sub_y2 = map(int, result.xyxy[0].cpu().numpy())  # Bounding box coordinates
-                    sub_label = model.names[int(result.cls)]
-                    sub_color = (255, 0, 0)  # Red color for the bounding box of sub-objects
-
-                    # Draw bounding box around sub-detected objects (No_Helmet, License_plate, etc.)
-                    cv2.rectangle(motorbike_crop, (sub_x1, sub_y1), (sub_x2, sub_y2), sub_color, 2)
-                    cv2.putText(motorbike_crop, sub_label, (sub_x1, sub_y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, sub_color, 2)
-
-                    if sub_label == 'No_Helmet':
-                        helmet_violation = True
-                        violation_type.append("No Helmet")
-                        continue
-                    if sub_label == 'License_plate':
-                        license_crop = motorbike_crop[sub_y1:sub_y2, sub_x1:sub_x2]
-
-                        # Apply OCR if a violation is detected
-                        if helmet_violation or lane_violation:
-                            # Perform OCR on the license plate
-                            cv2.imwrite(violation_image_path, frame)
-                            license_plate_pil = Image.fromarray(cv2.cvtColor(license_crop, cv2.COLOR_BGR2RGB))
-                            temp_image_path = 'license_plate.png'
-                            license_plate_pil.save(temp_image_path)
-                            license_plate_text = model_ocr.chat(processor, temp_image_path, ocr_type='ocr')
-                            filtered_text = filter_license_plate_text(license_plate_text)
-                            # Check if the license plate is already detected and saved
-                            if filtered_text:
-                                # Add the license plate and its violations to the violations dictionary
-                                if filtered_text not in violations_dict:
-                                    violations_dict[filtered_text] = violation_type  # {"1234AB": [no_Helmet, In_red_Lane]}
-                                    send_email(filtered_text, violation_image_path, ', '.join(violation_type))
-                                else:
-                                    # Update the violations for the license plate if new ones are found
-                                    current_violations = set(violations_dict[filtered_text])  # no helmet
-                                    new_violations = set(violation_type)  # red lane, no helmet
-                                    updated_violations = list(current_violations | new_violations)  # red_lane, no helmet
-
-                                    # If new violations are found, update and send email
-                                    if updated_violations != violations_dict[filtered_text]:
-                                        violations_dict[filtered_text] = updated_violations
-                                        send_email(filtered_text, violation_image_path, ', '.join(updated_violations))
-
-                                # Draw OCR text (English and Arabic) on the original frame
-                                arabic_text = convert_to_arabic(filtered_text)
-                                frame = draw_text_pil(frame, filtered_text, (x1, y2 + 30), font_path, font_size=30, color=(255, 255, 255))
-                                frame = draw_text_pil(frame, arabic_text, (x1, y2 + 60), font_path, font_size=30, color=(0, 255, 0))
-
-        # Write the processed frame to the output video
-        out.write(frame)
-
-    # Release resources when done
-    cap.release()
-    out.release()
-    if not os.path.exists(output_video_path):
-        st.error("Error: Processed video was not created.")
-    return output_video_path  # Return the path of the processed video
-
-
-# Live video feed processing
-def live_video_feed():
-    stframe = st.empty()
-    video = cv2.VideoCapture(0)
-
-    if not video.isOpened():
-        st.error("Unable to access the webcam.")
-        return
-
-    while True:
-        ret, frame = video.read()
-        if not ret:
-            st.error("Failed to capture frame.")
-            break
-
-        # Run YOLO on the captured frame
-        results = run_yolo(frame)
-        annotated_frame = process_results(results, frame)
-        annotated_frame_rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
-
-        # Display the frame with detections
-        stframe.image(annotated_frame_rgb, channels="RGB", use_column_width=True)
-
-        if st.button("Stop"):
-            break
-
-    video.release()
-    st.stop()
-
-
-# Function to filter license plate text
-def filter_license_plate_text(license_plate_text):
-    license_plate_text = re.sub(r'[^A-Z0-9]+', "", license_plate_text)
-    match = re.search(r'(\d{3,4})\s*([A-Z]{2})', license_plate_text)
-    return f"{match.group(1)} {match.group(2)}" if match else None
-
-
-# Function to convert license plate text to Arabic
-def convert_to_arabic(license_plate_text):
-    return "".join(arabic_dict.get(char, char) for char in license_plate_text)
-
-
-# Function to send email notification with image attachment
-def send_email(license_text, violation_image_path, violation_type):
-    if violation_type == 'no_helmet':
-        subject = 'تنبيه مخالفة: عدم ارتداء خوذة'
-        body = f"لعدم ارتداء الخوذة ({license_text}) تم تغريم دراجة نارية التي تحمل لوحة."
-    elif violation_type == 'in_red_lane':
-        subject = 'تنبيه مخالفة: دخول المسار الأيسر'
-        body = f"لدخولها المسار الأيسر ({license_text}) تم تغريم دراجة نارية التي تحمل لوحة."
-    elif violation_type == 'no_helmet_in_red_lane':
-        subject = 'تنبيه مخالفة: عدم ارتداء خوذة ودخول المسار الأيسر'
-        body = f"لعدم ارتداء الخوذة ولدخولها المسار الأيسر ({license_text}) تم تغريم دراجة نارية التي تحمل لوحة."
-
-    # ...
-    msg['From'] = FROM_EMAIL
-    msg['To'] = TO_EMAIL
-    msg['Subject'] = subject
-    msg.attach(MIMEText(body, 'plain'))
-
-    with open(violation_image_path, 'rb') as attachment_file:
-        part = MIMEBase('application', 'octet-stream')
-        part.set_payload(attachment_file.read())
-        encoders.encode_base64(part)
-        part.add_header('Content-Disposition', f'attachment; filename={os.path.basename(violation_image_path)}')
-        msg.attach(part)
-
-    # ...
-    print("Email with attachment sent successfully!")
-
-
-# ...
-    draw = ImageDraw.Draw(img_pil)
-# ...
-
-        )
-
-    elif input_type == "Live Feed":
-        live_video_feed()
-
-
-if __name__ == "__main__":
-    main()
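The removed send_email builds a MIMEMultipart message and attaches the violation image, but the creation of msg and the actual SMTP send are not visible in this rendering (the "# ..." elisions above). Given the SMTP_SERVER = 'smtp.gmail.com' and SMTP_PORT = 465 constants defined at the top of the old file, the missing send step was most likely an implicit-SSL call along these lines; this is a hedged reconstruction, not the author's exact code:

    import smtplib

    # Hedged sketch of the send step cut off in the diff above;
    # port 465 implies implicit TLS, hence smtplib.SMTP_SSL.
    with smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT) as server:
        server.login(FROM_EMAIL, EMAIL_PASSWORD)
        server.sendmail(FROM_EMAIL, TO_EMAIL, msg.as_string())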
 import streamlit as st
+from processor import process_video, process_image
 import os
+from PIL import Image
+import tempfile
+import cv2

+st.set_page_config(page_title="Traffic Violation Detection", layout="wide")

+st.title("🚦 Traffic Violation Detection App")

+# Sidebar for selection
+st.sidebar.title("Choose an Option")
+option = st.sidebar.radio("Select the processing type:", ("Image", "Video", "Live Camera"))

+if option == "Image":
+    st.header("🖼️ Image Processing")
+    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

+    if uploaded_file is not None:
+        # Save the uploaded image to a temporary file
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_image:
+            temp_image.write(uploaded_file.read())
+            temp_image_path = temp_image.name
+
+        # Display the uploaded image
+        st.image(uploaded_file, caption='Uploaded Image.', use_column_width=True)
+
+        # Process the image
+        if st.button("Process Image"):
+            with st.spinner("Processing..."):
+                font_path = "fonts/alfont_com_arial-1.ttf"  # Update the path as needed
+                processed_image = process_image(temp_image_path, font_path)
+                if processed_image is not None:
+                    # Convert the processed image to RGB
+                    processed_image_rgb = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
+                    st.image(processed_image_rgb, caption='Processed Image.', use_column_width=True)
+
+                    # Save processed image to a temporary file
+                    result_image = Image.fromarray(processed_image_rgb)
+                    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp:
+                        result_image.save(tmp.name)
+                        tmp_path = tmp.name
+
+                    # Download button
+                    with open(tmp_path, "rb") as file:
+                        btn = st.download_button(
+                            label="📥 Download Processed Image",
+                            data=file,
+                            file_name="processed_image.jpg",
+                            mime="image/jpeg"
+                        )
+                else:
+                    st.error("Failed to process the image.")
+
+elif option == "Video":
+    st.header("🎥 Video Processing")
+    video_files = [f for f in os.listdir("videos") if f.endswith(('.mp4', '.avi', '.mov'))]

+    if not video_files:
+        st.warning("No predefined videos found in the 'videos/' directory.")
+    else:
+        selected_video = st.selectbox("Select a video to process:", video_files)
+        video_path = os.path.join("videos", selected_video)
+
+        st.video(video_path)
+
+        if st.button("Process Video"):
+            with st.spinner("Processing..."):
+                font_path = "fonts/alfont_com_arial-1.ttf"  # Update the path as needed
+                processed_video_path = process_video(video_path, font_path)
+                if processed_video_path and os.path.exists(processed_video_path):
+                    st.success("Video processed successfully!")
+                    st.video(processed_video_path)
+
+                    # Provide download button
+                    with open(processed_video_path, "rb") as file:
+                        btn = st.download_button(
+                            label="📥 Download Processed Video",
+                            data=file,
+                            file_name="processed_video.mp4",
+                            mime="video/mp4"
+                        )
+                else:
+                    st.error("Failed to process the video.")
+
+elif option == "Live Camera":
+    st.header("📷 Live Camera Processing")
+    st.warning("Live camera processing is currently not supported in this app due to Streamlit limitations.")
+    st.info("Consider running the live camera processing separately using your existing script.")
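The new app.py pushes all detection, OCR, and annotation work into a separate processor module that is not part of this commit. For the calls above to type-check, processor.py would need to expose roughly the interface below. This is a sketch under stated assumptions: the function names come from the import line, but the signatures, return conventions, and bodies here are placeholders, not the author's implementation.

    # processor.py (interface sketch only, not the module from the repository)
    from typing import Optional
    import cv2
    import numpy as np

    def process_image(image_path: str, font_path: str) -> Optional[np.ndarray]:
        """Run detection/OCR on one image and return the annotated frame in BGR,
        or None if the image cannot be read or processing fails."""
        frame = cv2.imread(image_path)
        if frame is None:
            return None
        # ... YOLO detection, violation checks, and text drawing would go here ...
        return frame

    def process_video(video_path: str, font_path: str) -> Optional[str]:
        """Process a video frame by frame and return the path of the annotated
        output video, or None on failure."""
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return None
        cap.release()
        # ... per-frame processing and cv2.VideoWriter output would go here ...
        return "output_violation.mp4"  # placeholder output path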