vishalkatheriya18 committed
Commit b6666d0
1 Parent(s): 0764de0

Update app.py

Files changed (1)
  1. app.py +5 -128
app.py CHANGED
@@ -1,128 +1,3 @@
- # import streamlit as st
- # from ultralytics import YOLO
- # from PIL import Image
- # from transformers import AutoModelForImageClassification, AutoImageProcessor
- # import requests
- # from io import BytesIO
- # import numpy as np
- # import cv2
- # import concurrent.futures
- # from classification import hii,pipes
- # # Categories dictionary
- # categories_dict = {
- #     "UpperBody": ["top", "t-shirt", "sweatshirt", "blouse", "sweater", "cardigan", "jacket", "vest"],
- #     "Lowerbody": ["pants", "shorts", "skirt"],
- #     "Wholebody": ["coat", "dress", "jumpsuit", "cape"],
- #     "Head": ["glasses", "hat", "headband", "head covering", "hair accessory"],
- #     "Neck": ["tie", "neckline", "collar"],
- #     "Arms and Hands": ["glove", "watch","sleeve"],
- #     "Waist": ["belt"],
- #     "Legs and Feet": ["leg warmer", "tights", "stockings", "sock", "shoe"],
- #     "Others": ["bag", "wallet", "scarf", "umbrella"],
- #     "Garment parts": ["hood", "lapel", "epaulette","pocket"],
- #     "Closures": ["buckle", "zipper"],
- #     "Decorations": ["applique", "bead", "bow", "flower", "fringe", "ribbon", "rivet", "ruffle", "sequin", "tassel"]
- # }
-
- # def find_category(subcategory):
- #     for category, subcategories in categories_dict.items():
- #         if subcategory in subcategories:
- #             return category
- #     return "Subcategory not found."
-
- # # Load models and processor only once using Streamlit session state
- # if 'models_loaded' not in st.session_state:
- #     #localization model
- #     st.session_state.segment_model = YOLO("best.pt")
- #     #image preprocessor
- #     st.session_state.image_processor = AutoImageProcessor.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-topwear")
- #     #top wear
- #     st.session_state.top_wear_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-topwear")
- #     #bootomwear
- #     st.session_state.bottomwear_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-bottomwear")
- #     #fullwear
- #     st.session_state.fullwear = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-fullwear")
- #     #for fullwear and top wear
- #     st.session_state.pattern_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-pattern-rgb")
- #     st.session_state.print_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-print")
- #     st.session_state.sleeve_length_model = AutoModelForImageClassification.from_pretrained("vishalkatheriya18/convnextv2-tiny-1k-224-finetuned-sleeve-length")
- #     st.session_state.models_loaded = True
-
- # # Streamlit app UI
- # st.title("Clothing Classification Pipeline")
- # url = st.sidebar.text_input("Paste image URL here...")
-
- # if url:
- #     try:
- #         response = requests.get(url)
- #         if response.status_code == 200:
- #             image = Image.open(BytesIO(response.content))
- #             st.sidebar.image(image.resize((200, 200)), caption="Uploaded Image", use_column_width=False)
-
- #             # Convert image to numpy array for YOLO model
- #             image_np = np.array(image)
-
- #             # Perform inference
- #             results = st.session_state.segment_model(image_np)
-
- #             # Create a copy of the original image to draw bounding boxes and labels
- #             output_image = image_np.copy()
-
- #             cropped_images = [] # List to hold cropped images and their titles
-
- #             # Visualize the segmentation results
- #             for result in results:
- #                 boxes = result.boxes # Bounding boxes
- #                 classes = result.names # Class names of the detected objects
-
- #                 for i, box in enumerate(boxes):
- #                     box_coords = box.xyxy[0].cpu().numpy().astype(int)
- #                     x1, y1, x2, y2 = box_coords
-
- #                     # Draw the bounding box on the original image
- #                     cv2.rectangle(output_image, (x1, y1), (x2, y2), color=(0, 255, 0), thickness=2)
-
- #                     # Get the class label and confidence score for the object
- #                     class_label = classes[box.cls[0].int().item()]
- #                     confidence = box.conf[0].item()
-
- #                     # Prepare the label text with class and confidence
- #                     label_text = f'{class_label}: {confidence:.2f}'
-
- #                     # Put text label on the original image
- #                     cv2.putText(output_image, label_text, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
-
- #                     # Crop the image based on the bounding box
- #                     cropped_images = image_np[y1:y2, x1:x2].copy()
- #                     cropped_image = cv2.resize(cropped_images, (200, 200)) # Resize cropped image
- #                     category_name = find_category(class_label) #categories name is here
- #                     st.write(">>>>>>>>>",category_name)
- #                     st.write(pipes(cropped_images,category_name))
- #                     # Add cropped image and its title to the list
- #                     cropped_images.append((cropped_image, f'Class: {category_name}, Confidence: {confidence:.2f}'))
-
- #             # Display the original image with bounding boxes and labels
- #             st.sidebar.image(output_image, caption="Segmented Image", channels="RGB", use_column_width=True)
-
- #             # Display cropped images row-wise
- #             num_columns = 3 # Number of columns per row
- #             num_rows = (len(cropped_images) + num_columns - 1) // num_columns # Calculate the number of rows
-
- #             for i in range(num_rows):
- #                 cols = st.columns(num_columns)
- #                 for j in range(num_columns):
- #                     idx = i * num_columns + j
- #                     if idx < len(cropped_images):
- #                         cropped_image, title = cropped_images[idx]
- #                         with cols[j]:
- #                             st.image(cropped_image, caption=title, use_column_width=True)
- #             st.write(hii())
-
- #         else:
- #             st.write("URL Invalid...!")
- #     except Exception as e:
- #         st.write(f"An error occurred: {e}")
-
  import streamlit as st
  from ultralytics import YOLO
  from PIL import Image
@@ -187,7 +62,7 @@ if url:
 
              # Convert image to numpy array for YOLO model
              image_np = np.array(image)
-
+             outputs={}
              # Perform inference
              results = st.session_state.segment_model(image_np)
 
@@ -223,8 +98,9 @@ if url:
                      cropped_image_resized = cv2.resize(cropped_image, (200, 200)) # Resize cropped image
                      category_name = find_category(class_label)
 
-                     st.write(f"Detected category: {category_name}")
-                     st.write(pipes(cropped_image, category_name))
+                     # st.write(f"Detected category: {category_name}")
+                     outputs[category_name]=pipes(cropped_image, category_name)
+                     # st.write(pipes(cropped_image, category_name))
 
                      # Add cropped image and its title to the list
                      cropped_images_list.append((cropped_image_resized, f'Class: {category_name}, Confidence: {confidence:.2f}'))
@@ -246,6 +122,7 @@ if url:
                              st.image(cropped_image, caption=title, use_column_width=True)
 
              st.write(hii())
+             st.json(outputs)
 
          else:
              st.write("URL Invalid...!")
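For context, the net effect of this commit is to stop writing each classification result inline and instead collect the pipes() output for every detected garment into an outputs dict keyed by category, which is rendered once with st.json(outputs) after the image grid. Below is a minimal sketch of that collect-then-render pattern; the fake_pipes helper and the sample detections list are illustrative placeholders, not code from this repo.

# Sketch (assumed names): aggregate per-detection results, then render once.
import streamlit as st

def fake_pipes(cropped_image, category_name):
    # Stand-in for classification.pipes(); the real function classifies the crop.
    return {"category": category_name, "pattern": "solid", "confidence": 0.91}

# Pretend the detector produced these (class_label, category) pairs.
detections = [("t-shirt", "UpperBody"), ("pants", "Lowerbody")]

outputs = {}
for class_label, category_name in detections:
    outputs[category_name] = fake_pipes(None, category_name)  # crop omitted in this sketch

st.json(outputs)  # one structured dump instead of a st.write per detection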