import gradio as gr
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
# Load the pre-trained model.
# NOTE: the model file must be available on the runtime that serves the app;
# a hard-coded local Windows path (e.g. C:\Users\...\Downloads\...) does not
# exist on Spaces, and its backslashes are also invalid escape sequences in a
# plain Python string. Keep the .keras file next to app.py and load it with a
# relative path.
model = load_model("klasifikasi_jerawat_model_klmpk2.keras")
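# Alternative (a sketch, not part of the original app): if the model is stored
# in a Hugging Face model repo rather than in the Space itself, it can be
# fetched with huggingface_hub. The repo_id below is a placeholder.
# from huggingface_hub import hf_hub_download
# model_path = hf_hub_download(
#     repo_id="<username>/klasifikasi-jerawat",  # hypothetical repo id
#     filename="klasifikasi_jerawat_model_klmpk2.keras",
# )
# model = load_model(model_path)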
# Define acne labels
acne_labels = {
    0: 'papules',
    1: 'nodules',
    2: 'pustules',
    3: 'comedones'
}
# Define the detection function
def detect_acne(image, threshold=0.5):
    # Resize and preprocess the image (Gradio passes an RGB NumPy array)
    image_resized = cv2.resize(image, (224, 224))
    input_data = preprocess_input(np.expand_dims(image_resized, axis=0))

    # Model prediction
    predictions = model.predict(input_data)

    # Keep every class whose score exceeds the threshold
    detections = []
    for i, prediction in enumerate(predictions[0]):
        if prediction > threshold:
            detections.append({
                'class': acne_labels[i],
                'confidence': float(prediction)
            })
    # Annotate the image if any class passed the threshold
    annotated_image = image.copy()
    if detections:
        height, width, _ = image.shape
        for detection in detections:
            # Simulated bounding box: the classifier does not localize acne,
            # so a fixed central box is drawn
            xmin, ymin = int(width * 0.25), int(height * 0.25)
            xmax, ymax = int(width * 0.75), int(height * 0.75)
            color = (203, 0, 203)  # purple-red
            cv2.rectangle(annotated_image, (xmin, ymin), (xmax, ymax), color, 2)
        return annotated_image, f"Detected acne: {detections}"
    else:
        return annotated_image, "No acne detected. Congrats!"
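# Optional local sanity check (a sketch, not part of the original app):
# running the detector on a blank image before launching the UI confirms that
# the model loads and the preprocessing pipeline runs end to end.
# dummy = np.zeros((480, 640, 3), dtype=np.uint8)
# _, message = detect_acne(dummy)
# print(message)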
# Define the Gradio interface
interface = gr.Interface(
    fn=detect_acne,
    inputs=gr.Image(type="numpy", label="Upload an image"),  # input image
    outputs=[
        gr.Image(type="numpy", label="Annotated Image"),  # annotated output image
        gr.Textbox(label="Detection Result")              # detection summary text
    ]
)

# Launch the app
interface.launch()
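# When testing outside Spaces, a temporary public link can be requested instead
# (assumption: standard Gradio behaviour, not something the original app uses):
# interface.launch(share=True)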