import gradio as gr
from dotenv import load_dotenv
from roboflow import Roboflow
import tempfile
import os
import shutil
import cv2
from dds_cloudapi_sdk import Config, Client
from dds_cloudapi_sdk.tasks.dinox import DinoxTask
from dds_cloudapi_sdk.tasks.types import DetectionTarget
from dds_cloudapi_sdk import TextPrompt
import subprocess

# ========== Configuration ==========
load_dotenv()

# Roboflow config
rf_api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))

# DINO-X config
DINOX_API_KEY = os.getenv("DINO_X_API_KEY")
DINOX_PROMPT = "beverage . bottle . cans . mixed box"  # Customize to match competitor products, e.g. "food . drink"

# Model initialization
rf = Roboflow(api_key=rf_api_key)
project = rf.workspace(workspace).project(project_name)
yolo_model = project.version(model_version).model

dinox_config = Config(DINOX_API_KEY)
dinox_client = Client(dinox_config)

# ========== Combined Detection ==========
def detect_combined(image):
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_path = temp_file.name

    try:
        # ========== [1] YOLO: Nestlé product detection (per class) ==========
        yolo_pred = yolo_model.predict(temp_path, confidence=50, overlap=80).json()

        # Count Nestlé detections per class
        nestle_class_count = {}
        nestle_boxes = []
        for pred in yolo_pred['predictions']:
            class_name = pred['class']
            nestle_class_count[class_name] = nestle_class_count.get(class_name, 0) + 1
            nestle_boxes.append((pred['x'], pred['y'], pred['width'], pred['height']))

        total_nestle = sum(nestle_class_count.values())

        # ========== [2] DINO-X: competitor detection ==========
        image_url = dinox_client.upload_file(temp_path)
        task = DinoxTask(
            image_url=image_url,
            prompts=[TextPrompt(text=DINOX_PROMPT)],
            bbox_threshold=0.4,
            targets=[DetectionTarget.BBox]
        )
        dinox_client.run_task(task)
        dinox_pred = task.result.objects

        # Filter and count competitor products
        competitor_class_count = {}
        competitor_boxes = []
        for obj in dinox_pred:
            dinox_box = obj.bbox
            # Skip objects already detected by YOLO (overlap check)
            if not is_overlap(dinox_box, nestle_boxes):
                class_name = obj.category.strip().lower()  # Normalize the class name
                competitor_class_count[class_name] = competitor_class_count.get(class_name, 0) + 1
                competitor_boxes.append({
                    "class": class_name,
                    "box": dinox_box,
                    "confidence": obj.score
                })

        total_competitor = sum(competitor_class_count.values())

        # ========== [3] Format output ==========
        result_text = "Product Nestle\n\n"
        for class_name, count in nestle_class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal Products Nestle: {total_nestle}\n\n"
        if competitor_class_count:
            result_text += f"Total Unclassified Products: {total_competitor}\n"  # Total only, not per class
        else:
            result_text += "No Unclassified Products detected\n"

        # ========== [4] Visualization ==========
        img = cv2.imread(temp_path)

        # Nestlé products (green)
        for pred in yolo_pred['predictions']:
            x, y, w, h = pred['x'], pred['y'], pred['width'], pred['height']
            cv2.rectangle(img, (int(x - w/2), int(y - h/2)), (int(x + w/2), int(y + h/2)), (0, 255, 0), 2)
            cv2.putText(img, pred['class'], (int(x - w/2), int(y - h/2 - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)
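        # Note on coordinate conventions: the YOLO predictions above use
        # center-based boxes (x, y, width, height), while DINO-X returns
        # corner-based boxes (x1, y1, x2, y2), which is why the two drawing
        # loops unpack their boxes differently.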
        # Competitor products (red), relabeled 'unclassified'
        unclassified_classes = ["beverage", "cans", "bottle", "mixed box"]
        for comp in competitor_boxes:
            x1, y1, x2, y2 = comp['box']
            # Relabel any class matching the list above with a generic name
            display_name = "unclassified" if any(
                key in comp['class'] for key in unclassified_classes
            ) else comp['class']
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
            cv2.putText(img, f"{display_name} {comp['confidence']:.2f}", (int(x1), int(y1 - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)

        output_path = "/tmp/combined_output.jpg"
        cv2.imwrite(output_path, img)
        return output_path, result_text

    except Exception as e:
        # Return no image on failure: the temp file is deleted in the finally
        # block, so its path would be stale by the time Gradio reads it.
        return None, f"Error: {str(e)}"
    finally:
        os.remove(temp_path)


def is_overlap(box1, boxes2, threshold=0.3):
    # Check whether box1 (x_min, y_min, x_max, y_max) overlaps any
    # center-format box in boxes2 by more than `threshold` of box1's area
    x1_min, y1_min, x1_max, y1_max = box1
    for b2 in boxes2:
        x2, y2, w2, h2 = b2
        x2_min = x2 - w2 / 2
        x2_max = x2 + w2 / 2
        y2_min = y2 - h2 / 2
        y2_max = y2 + h2 / 2

        # Compute the intersection area
        dx = min(x1_max, x2_max) - max(x1_min, x2_min)
        dy = min(y1_max, y2_max) - max(y1_min, y2_min)
        if dx >= 0 and dy >= 0:
            area_overlap = dx * dy
            area_box1 = (x1_max - x1_min) * (y1_max - y1_min)
            if area_overlap / area_box1 > threshold:
                return True
    return False


# ========== Video Detection ==========
def convert_video_to_mp4(input_path, output_path):
    try:
        subprocess.run(
            ['ffmpeg', '-i', input_path, '-vcodec', 'libx264', '-acodec', 'aac', output_path],
            check=True
        )
        return output_path, None
    except subprocess.CalledProcessError as e:
        return None, f"Error converting video: {e}"


def detect_objects_in_video(video_path):
    temp_output_path = "/tmp/output_video.mp4"
    converted_path = "/tmp/converted_input.mp4"  # Separate path so we never read and write the same file
    temp_frames_dir = tempfile.mkdtemp()
    frame_count = 0

    try:
        # Convert the video to MP4 if necessary
        if not video_path.endswith(".mp4"):
            video_path, err = convert_video_to_mp4(video_path, converted_path)
            if not video_path:
                # Gradio expects a single video output here, so report the error on stdout
                print(f"Video conversion error: {err}")
                return None

        # Open the video and set up the output writer
        video = cv2.VideoCapture(video_path)
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_size = (frame_width, frame_height)

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(temp_output_path, fourcc, frame_rate, frame_size)

        while True:
            ret, frame = video.read()
            if not ret:
                break

            # Save the frame temporarily for prediction
            frame_path = os.path.join(temp_frames_dir, f"frame_{frame_count}.jpg")
            cv2.imwrite(frame_path, frame)

            # Run predictions on the current frame
            predictions = yolo_model.predict(frame_path, confidence=50, overlap=80).json()

            # Track detections in the current frame
            current_detections = {}
            for prediction in predictions['predictions']:
                class_name = prediction['class']
                x, y, w, h = prediction['x'], prediction['y'], prediction['width'], prediction['height']

                # Build a unique ID for each detection from its bounding box
                object_id = f"{class_name}_{x}_{y}_{w}_{h}"
                if object_id not in current_detections:
                    current_detections[object_id] = class_name

                # Draw the bounding box and label
                cv2.rectangle(frame, (int(x - w/2), int(y - h/2)), (int(x + w/2), int(y + h/2)), (0, 255, 0), 2)
                cv2.putText(frame, class_name, (int(x - w/2), int(y - h/2 - 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
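            # Counting is per frame only: object IDs are rebuilt from raw box
            # coordinates on every frame, so no identity is tracked across
            # frames and the same physical product is re-counted each frame.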
            # Aggregate per-class counts for this frame
            object_counts = {}
            for detection_id in current_detections:
                class_name = current_detections[detection_id]
                object_counts[class_name] = object_counts.get(class_name, 0) + 1

            # Build the display text for the counts
            count_text = ""
            total_product_count = 0
            for class_name, count in object_counts.items():
                count_text += f"{class_name}: {count}\n"
                total_product_count += count
            count_text += f"\nTotal Product: {total_product_count}"

            # Overlay the counts text onto the frame
            y_offset = 20
            for line in count_text.split("\n"):
                cv2.putText(frame, line, (10, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                y_offset += 30  # Move down for the next line

            # Write the processed frame to the output video
            output_video.write(frame)
            frame_count += 1

        video.release()
        output_video.release()

        return temp_output_path

    except Exception as e:
        print(f"An error occurred: {e}")
        return None
    finally:
        # Clean up the temporary frame images
        shutil.rmtree(temp_frames_dir, ignore_errors=True)


# ========== Gradio Interface ==========
with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate")) as iface:
    gr.Markdown("""

# NESTLE - STOCK COUNTING

""") with gr.Row(): with gr.Column(): input_image = gr.Image(type="pil", label="Input Image") detect_image_button = gr.Button("Detect Image") output_image = gr.Image(label="Detect Object") output_text = gr.Textbox(label="Counting Object") detect_image_button.click(fn=detect_combined, inputs=input_image, outputs=[output_image, output_text]) with gr.Column(): input_video = gr.Video(label="Input Video") detect_video_button = gr.Button("Detect Video") output_video = gr.Video(label="Output Video") detect_video_button.click(fn=detect_objects_in_video, inputs=input_video, outputs=[output_video]) iface.launch()