TheKnight115 committed: Update processor.py

processor.py CHANGED (+106 -10)
@@ -196,31 +196,127 @@ def process_image(image_path, font_path, violation_image_path='violation.jpg'):
     processed = process_frame(frame, font_path, violation_image_path)
     return processed

-def process_video(video_path
-
+def process_video(video_path):
+    # Paths for saving violation images
+    violation_image_path = '/kaggle/working/violation.jpg'
+
+    # Track emails already sent to avoid duplicate emails
+    sent_emails = {}
+
+    # Dictionary to track violations per license plate
+    violations_dict = {}
+
+    # Open video file
     cap = cv2.VideoCapture(video_path)
+
+    # Check if the video file opened successfully
     if not cap.isOpened():
         print("Error opening video file")
         return None

+    # Define codec and output video settings
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    output_video_path = 'output_violation.mp4'
+    output_video_path = '/kaggle/working/output_violation.mp4'
     fps = cap.get(cv2.CAP_PROP_FPS)
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # Frame width
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # Frame height
     out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

+    margin_y = 50
+
+    # Process the video frame by frame
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
-            break
+            break  # End of video
+
+        # Draw the red lane polygon on each frame
+        cv2.polylines(frame, [red_lane], isClosed=True, color=(0, 0, 255), thickness=3)  # Red lane
+
+        # Perform detection using YOLO on the current frame
+        results = model.track(frame)
+
+        # Process each detection in the results
+        for box in results[0].boxes:
+            x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())  # Bounding box coordinates
+            label = model.names[int(box.cls)]  # Class name (MotorbikeDelivery, Helmet, etc.)
+            color = class_colors[int(box.cls)]
+            confidence = box.conf[0].item()
+
+            # Initialize flags and variables for the violations
+            helmet_violation = False
+            lane_violation = False
+            violation_type = []
+
+            # Draw bounding box around detected object
+            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3)

-
-
+            # Add label to the box (e.g., 'MotorbikeDelivery')
+            cv2.putText(frame, f'{label}: {confidence:.2f}', (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

-
-
+            # Detect MotorbikeDelivery
+            if label == 'MotorbikeDelivery' and confidence >= 0.4:
+                motorbike_crop = frame[max(0, y1 - margin_y):y2, x1:x2]
+                delivery_center = ((x1 + x2) // 2, (y2))
+                in_red_lane = cv2.pointPolygonTest(red_lane, delivery_center, False)
+                if in_red_lane >= 0:
+                    lane_violation = True
+                    violation_type.append("In Red Lane")

+                # Perform detection within the cropped motorbike region
+                sub_results = model(motorbike_crop)
+
+                for result in sub_results[0].boxes:
+                    sub_x1, sub_y1, sub_x2, sub_y2 = map(int, result.xyxy[0].cpu().numpy())  # Bounding box coordinates
+                    sub_label = model.names[int(result.cls)]
+                    sub_color = (255, 0, 0)  # Red color for the bounding box of sub-objects
+
+                    # Draw bounding box around sub-detected objects (No_Helmet, License_plate, etc.)
+                    cv2.rectangle(motorbike_crop, (sub_x1, sub_y1), (sub_x2, sub_y2), sub_color, 2)
+                    cv2.putText(motorbike_crop, sub_label, (sub_x1, sub_y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, sub_color, 2)
+
+                    if sub_label == 'No_Helmet':
+                        helmet_violation = True
+                        violation_type.append("No Helmet")
+                        continue
+                    if sub_label == 'License_plate':
+                        license_crop = motorbike_crop[sub_y1:sub_y2, sub_x1:sub_x2]
+
+                # Apply OCR if a violation is detected
+                if helmet_violation or lane_violation:
+                    # Perform OCR on the license plate
+                    cv2.imwrite(violation_image_path, frame)
+                    license_plate_pil = Image.fromarray(cv2.cvtColor(license_crop, cv2.COLOR_BGR2RGB))
+                    temp_image_path = '/kaggle/working/license_plate.png'
+                    license_plate_pil.save(temp_image_path)
+                    license_plate_text = model_ocr.chat(processor, temp_image_path, ocr_type='ocr')
+                    filtered_text = filter_license_plate_text(license_plate_text)
+
+                    if filtered_text:
+                        # Track violations for the license plate
+                        if filtered_text not in violations_dict:
+                            violations_dict[filtered_text] = violation_type
+                            send_email(filtered_text, violation_image_path, ', '.join(violation_type))
+                        else:
+                            # Update violations if new ones are found
+                            current_violations = set(violations_dict[filtered_text])
+                            new_violations = set(violation_type)
+                            updated_violations = list(current_violations | new_violations)
+
+                            if updated_violations != violations_dict[filtered_text]:
+                                violations_dict[filtered_text] = updated_violations
+                                send_email(filtered_text, violation_image_path, ', '.join(updated_violations))
+
+                        # Draw OCR text (English and Arabic) on the original frame
+                        arabic_text = convert_to_arabic(filtered_text)
+                        frame = draw_text_pil(frame, filtered_text, (x1, y2 + 30), font_path, font_size=30, color=(255, 255, 255))
+                        frame = draw_text_pil(frame, arabic_text, (x1, y2 + 60), font_path, font_size=30, color=(0, 255, 0))
+
+        # Write the processed frame to the output video
+        out.write(frame)
+
+    # Release resources when done
     cap.release()
     out.release()
+
     return output_video_path
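
For orientation, here is a minimal usage sketch of the new process_video entry point. It is not part of the commit: it assumes processor.py defines model, model_ocr, processor, red_lane, class_colors, and font_path at module level (the function references them without taking them as parameters), along with the helpers filter_license_plate_text, convert_to_arabic, draw_text_pil, and send_email; the input path below is a placeholder.

# Hypothetical usage sketch (assumption, not part of this commit).
# process_video() returns the output video path on success, or None if the
# input video cannot be opened, as in the diff above.
if __name__ == '__main__':
    input_video = '/kaggle/working/input.mp4'  # placeholder input path
    result_path = process_video(input_video)
    if result_path:
        print(f'Annotated video written to {result_path}')
    else:
        print('Could not open the input video')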