File size: 6,186 Bytes
e267b1d
 
f01686c
25cc30d
e267b1d
 
5aaefae
f01686c
b1e655a
a66e03f
5aaefae
 
b1e655a
5aaefae
 
 
b1e655a
 
5aaefae
 
 
b1e655a
5aaefae
 
 
 
 
 
 
 
 
 
f9f191a
 
56dafcf
5aaefae
56dafcf
 
 
 
 
 
 
 
 
 
 
 
f9f191a
 
e267b1d
 
 
5aaefae
e267b1d
 
 
 
 
e45b47d
5aaefae
 
 
 
 
 
 
 
 
 
f01686c
 
5aaefae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ca06180
56dafcf
5aaefae
f01686c
 
5aaefae
 
bb3c537
f01686c
 
 
 
5aaefae
f01686c
 
 
 
 
 
 
 
a66e03f
5aaefae
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import streamlit as st
from ultralytics import YOLO
from PIL import Image
from transformers import AutoModelForImageClassification, AutoImageProcessor
import requests
from io import BytesIO
import numpy as np
import cv2
import concurrent.futures
from classification import hii
# Categories dictionary: maps each top-level garment category to the
# subcategory labels emitted by the segmentation model.
categories_dict = {
    "UpperBody": ["top", "t-shirt", "sweatshirt", "blouse", "sweater", "cardigan", "jacket", "vest"],
    "Lowerbody": ["pants", "shorts", "skirt"],
    "Wholebody": ["coat", "dress", "jumpsuit", "cape"],
    "Head": ["glasses", "hat", "headband", "head covering", "hair accessory"],
    "Neck": ["tie", "neckline"],
    "Arms and Hands": ["glove", "watch","sleeve"],
    "Waist": ["belt"],
    "Legs and Feet": ["leg warmer", "tights", "stockings", "sock", "shoe"],
    "Others": ["bag", "wallet", "scarf", "umbrella"],
    "Garment parts": ["hood", "collar", "lapel", "epaulette","pocket"],
    "Closures": ["buckle", "zipper"],
    "Decorations": ["applique", "bead", "bow", "flower", "fringe", "ribbon", "rivet", "ruffle", "sequin", "tassel"]
}

# Reverse lookup table built once at import time, so each find_category call
# is an O(1) dict lookup instead of a scan over every category list.
_category_by_subcategory = {
    sub: category
    for category, subs in categories_dict.items()
    for sub in subs
}

def find_category(subcategory):
    """Return the top-level category name for *subcategory*.

    Returns the sentinel string "Subcategory not found." for unknown
    labels — callers display the return value directly, so the original
    interface (string in, string out) is preserved.
    """
    return _category_by_subcategory.get(subcategory, "Subcategory not found.")

# Load models and processor only once per browser session: Streamlit reruns
# this script on every interaction, so the heavyweight model loads are cached
# in st.session_state behind the 'models_loaded' flag.
if 'models_loaded' not in st.session_state:
    # YOLO localization/segmentation model (weights expected at ./best.pt)
    st.session_state.segment_model = YOLO("best.pt")
    # Shared image preprocessor for all the ConvNeXtV2 classifiers below
    st.session_state.image_processor = AutoImageProcessor.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-topwear")
    # Upper-body garment classifier
    st.session_state.top_wear_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-topwear")
    # Lower-body garment classifier
    st.session_state.bottomwear_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-bottomwear")
    # Full-body garment classifier (coats, dresses, jumpsuits, ...)
    st.session_state.fullwear = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-fullwear")
    # Attribute classifiers applied to full-wear and top-wear crops
    st.session_state.pattern_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-pattern-rgb")
    st.session_state.print_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-print")
    st.session_state.sleeve_length_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-sleeve-length")
    # Flag so subsequent script reruns skip all of the loads above
    st.session_state.models_loaded = True

# Streamlit app UI: fetch an image by URL, run YOLO segmentation, draw the
# detections, and display each cropped garment with its mapped category.
st.title("Clothing Classification Pipeline")
url = st.sidebar.text_input("Paste image URL here...")

if url:
    try:
        # Timeout prevents the Streamlit script from hanging forever on a
        # dead host (requests.get has no default timeout).
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            # Force 3-channel RGB: palette/RGBA/grayscale source images would
            # otherwise produce arrays that break the cv2 drawing calls below
            # and contradict the channels="RGB" display hint.
            image = Image.open(BytesIO(response.content)).convert("RGB")
            st.sidebar.image(image.resize((200, 200)), caption="Uploaded Image", use_column_width=False)

            # Convert image to a numpy array for the YOLO model
            image_np = np.array(image)

            # Perform localization inference
            results = st.session_state.segment_model(image_np)

            # Copy the original so boxes/labels don't pollute the crop source
            output_image = image_np.copy()

            cropped_images = []  # (crop, caption) pairs for the grid below

            # Visualize the segmentation results
            for result in results:
                boxes = result.boxes  # Bounding boxes
                classes = result.names  # Class names of the detected objects

                for i, box in enumerate(boxes):
                    box_coords = box.xyxy[0].cpu().numpy().astype(int)
                    x1, y1, x2, y2 = box_coords

                    # Draw the bounding box on the annotated image
                    cv2.rectangle(output_image, (x1, y1), (x2, y2), color=(0, 255, 0), thickness=2)

                    # Class label and confidence score for this detection
                    class_label = classes[box.cls[0].int().item()]
                    confidence = box.conf[0].item()

                    # Label text drawn just above the box
                    label_text = f'{class_label}: {confidence:.2f}'
                    cv2.putText(output_image, label_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                    # Crop from the clean image (not the annotated copy)
                    cropped_image = image_np[y1:y2, x1:x2].copy()
                    cropped_image = cv2.resize(cropped_image, (200, 200))  # Resize cropped image
                    # Map the model's subcategory label to a top-level category
                    category_name = find_category(class_label)

                    cropped_images.append((cropped_image, f'Class: {category_name}, Confidence: {confidence:.2f}'))

            # Display the annotated image with bounding boxes and labels
            st.sidebar.image(output_image, caption="Segmented Image", channels="RGB", use_column_width=True)

            # Display cropped images row-wise in a fixed-width grid
            num_columns = 3  # Number of columns per row
            num_rows = (len(cropped_images) + num_columns - 1) // num_columns  # ceil division

            for i in range(num_rows):
                cols = st.columns(num_columns)
                for j in range(num_columns):
                    idx = i * num_columns + j
                    if idx < len(cropped_images):
                        cropped_image, title = cropped_images[idx]
                        with cols[j]:
                            st.image(cropped_image, caption=title, use_column_width=True)
            st.write(hii())
        else:
            st.write("URL Invalid...!")
    except Exception as e:
        # Top-level UI boundary: surface the error to the user rather than
        # crashing the Streamlit script.
        st.write(f"An error occurred: {e}")