DawnC committed
Commit 387ed22 · 1 Parent(s): cc515c0

Delete app.py

Files changed (1)
  1. app.py +0 -635
app.py DELETED
@@ -1,635 +0,0 @@
- import os
- import numpy as np
- import torch
- import torch.nn as nn
- import gradio as gr
- import time
- import traceback
- import spaces
- import timm
- from torchvision.ops import nms, box_iou
- import torch.nn.functional as F
- from torchvision import transforms
- from PIL import Image, ImageDraw, ImageFont, ImageFilter
- from breed_health_info import breed_health_info
- from breed_noise_info import breed_noise_info
- from dog_database import get_dog_description
- from scoring_calculation_system import UserPreferences
- from recommendation_html_format import format_recommendation_html, get_breed_recommendations
- from history_manager import UserHistoryManager
- from search_history import create_history_tab, create_history_component
- from styles import get_css_styles
- from breed_detection import create_detection_tab
- from breed_comparison import create_comparison_tab
- from breed_recommendation import create_recommendation_tab
- from html_templates import (
-     format_description_html,
-     format_single_dog_result,
-     format_multiple_breeds_result,
-     format_unknown_breed_message,
-     format_not_dog_message,
-     format_hint_html,
-     format_multi_dog_container,
-     format_breed_details_html,
-     get_color_scheme,
-     get_akc_breeds_link
- )
- from urllib.parse import quote
- from ultralytics import YOLO
- from functools import wraps
-
-
- history_manager = UserHistoryManager()
-
- dog_breeds = ["Afghan_Hound", "African_Hunting_Dog", "Airedale", "American_Staffordshire_Terrier",
-               "Appenzeller", "Australian_Terrier", "Bedlington_Terrier", "Bernese_Mountain_Dog", "Bichon_Frise",
-               "Blenheim_Spaniel", "Border_Collie", "Border_Terrier", "Boston_Bull", "Bouvier_Des_Flandres",
-               "Brabancon_Griffon", "Brittany_Spaniel", "Cardigan", "Chesapeake_Bay_Retriever",
-               "Chihuahua", "Dachshund", "Dandie_Dinmont", "Doberman", "English_Foxhound", "English_Setter",
-               "English_Springer", "EntleBucher", "Eskimo_Dog", "French_Bulldog", "German_Shepherd",
-               "German_Short-Haired_Pointer", "Gordon_Setter", "Great_Dane", "Great_Pyrenees",
-               "Greater_Swiss_Mountain_Dog", "Havanese", "Ibizan_Hound", "Irish_Setter", "Irish_Terrier",
-               "Irish_Water_Spaniel", "Irish_Wolfhound", "Italian_Greyhound", "Japanese_Spaniel",
-               "Kerry_Blue_Terrier", "Labrador_Retriever", "Lakeland_Terrier", "Leonberg", "Lhasa",
-               "Maltese_Dog", "Mexican_Hairless", "Newfoundland", "Norfolk_Terrier", "Norwegian_Elkhound",
-               "Norwich_Terrier", "Old_English_Sheepdog", "Pekinese", "Pembroke", "Pomeranian",
-               "Rhodesian_Ridgeback", "Rottweiler", "Saint_Bernard", "Saluki", "Samoyed",
-               "Scotch_Terrier", "Scottish_Deerhound", "Sealyham_Terrier", "Shetland_Sheepdog", "Shiba_Inu",
-               "Shih-Tzu", "Siberian_Husky", "Staffordshire_Bullterrier", "Sussex_Spaniel",
-               "Tibetan_Mastiff", "Tibetan_Terrier", "Walker_Hound", "Weimaraner",
-               "Welsh_Springer_Spaniel", "West_Highland_White_Terrier", "Yorkshire_Terrier",
-               "Affenpinscher", "Basenji", "Basset", "Beagle", "Black-and-Tan_Coonhound", "Bloodhound",
-               "Bluetick", "Borzoi", "Boxer", "Briard", "Bull_Mastiff", "Cairn", "Chow", "Clumber",
-               "Cocker_Spaniel", "Collie", "Curly-Coated_Retriever", "Dhole", "Dingo",
-               "Flat-Coated_Retriever", "Giant_Schnauzer", "Golden_Retriever", "Groenendael", "Keeshond",
-               "Kelpie", "Komondor", "Kuvasz", "Malamute", "Malinois", "Miniature_Pinscher",
-               "Miniature_Poodle", "Miniature_Schnauzer", "Otterhound", "Papillon", "Pug", "Redbone",
-               "Schipperke", "Silky_Terrier", "Soft-Coated_Wheaten_Terrier", "Standard_Poodle",
-               "Standard_Schnauzer", "Toy_Poodle", "Toy_Terrier", "Vizsla", "Whippet",
-               "Wire-Haired_Fox_Terrier"]
-
-
- class MultiHeadAttention(nn.Module):
-
-     def __init__(self, in_dim, num_heads=8):
-         """
-         Initializes the MultiHeadAttention module.
-         Args:
-             in_dim (int): Dimension of the input features.
-             num_heads (int): Number of attention heads. Defaults to 8.
-         """
-         super().__init__()
-         self.num_heads = num_heads
-         self.head_dim = max(1, in_dim // num_heads)  # Dimension per head
-         self.scaled_dim = self.head_dim * num_heads  # Total dimension across all heads
-         self.fc_in = nn.Linear(in_dim, self.scaled_dim)  # Project input to scaled_dim
-         self.query = nn.Linear(self.scaled_dim, self.scaled_dim)  # Query projection
-         self.key = nn.Linear(self.scaled_dim, self.scaled_dim)  # Key projection
-         self.value = nn.Linear(self.scaled_dim, self.scaled_dim)  # Value projection
-         self.fc_out = nn.Linear(self.scaled_dim, in_dim)  # Project output back to in_dim
-
-     def forward(self, x):
-         """
-         Forward pass for the multi-head attention mechanism.
-         Args:
-             x (Tensor): Input tensor of shape (batch_size, in_dim).
-         Returns:
-             Tensor: Output tensor after applying the attention mechanism.
-         """
-         N = x.shape[0]  # Batch size
-         x = self.fc_in(x)  # Project input to scaled_dim
-         q = self.query(x).view(N, self.num_heads, self.head_dim)  # Compute queries
-         k = self.key(x).view(N, self.num_heads, self.head_dim)    # Compute keys
-         v = self.value(x).view(N, self.num_heads, self.head_dim)  # Compute values
-
-         # Calculate attention scores: dot product between queries and keys
-         energy = torch.einsum("nqd,nkd->nqk", [q, k])
-         attention = F.softmax(energy / (self.head_dim ** 0.5), dim=2)  # Softmax with scaling
-
-         # Compute the weighted sum of values over the key axis
-         out = torch.einsum("nqk,nkd->nqd", [attention, v])
-         out = out.reshape(N, self.scaled_dim)  # Concatenate all heads
-         out = self.fc_out(out)  # Project back to the original input dimension
-         return out
-
-
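- # Usage sketch (illustrative, not from the original file): the block maps a flat
- # feature vector back to its original dimensionality, e.g.
- #     attn = MultiHeadAttention(in_dim=1024, num_heads=8)
- #     out = attn(torch.randn(4, 1024))  # out.shape == (4, 1024)
-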
- class BaseModel(nn.Module):
-
-     def __init__(self, num_classes, device='cuda' if torch.cuda.is_available() else 'cpu'):
-         super().__init__()
-         self.device = device
-
-         # 1. Initialize the backbone; num_classes=0 removes the classifier layer
-         self.backbone = timm.create_model(
-             'convnextv2_base',
-             pretrained=True,
-             num_classes=0
-         )
-
-         # 2. Run a dummy input through the backbone to determine the actual feature dimension
-         with torch.no_grad():  # No need to compute gradients
-             dummy_input = torch.randn(1, 3, 224, 224)  # Example input
-             features = self.backbone(dummy_input)
-
-             if len(features.shape) > 2:  # If features are multi-dimensional
-                 features = features.mean([-2, -1])  # Apply global average pooling
-
-             self.feature_dim = features.shape[1]  # Correct feature dimension
-
-         print(f"Feature Dimension from V2 backbone: {self.feature_dim}")
-
-         # 3. Set up the multi-head attention layer
-         self.num_heads = max(1, min(8, self.feature_dim // 64))
-         self.attention = MultiHeadAttention(self.feature_dim, num_heads=self.num_heads)
-
-         # 4. Set up the classifier
-         self.classifier = nn.Sequential(
-             nn.LayerNorm(self.feature_dim),
-             nn.Dropout(0.3),
-             nn.Linear(self.feature_dim, num_classes)
-         )
-
-     def forward(self, x):
-         """
-         The forward pass combines the ConvNeXt V2 backbone features with the
-         multi-head attention mechanism.
-         Args:
-             x (Tensor): Input image tensor with shape [batch_size, channels, height, width]
-         Returns:
-             Tuple[Tensor, Tensor]: Classification logits and attention features.
-         """
-         x = x.to(self.device)
-
-         # 1. Extract base features
-         features = self.backbone(x)
-
-         # 2. Process feature dimensions: if they are
-         #    [batch_size, channels, height, width], convert to [batch_size, channels]
-         if len(features.shape) > 2:
-             features = features.mean([-2, -1])  # Global average pooling
-
-         # 3. Apply the attention mechanism
-         attended_features = self.attention(features)
-
-         # 4. Final classification
-         logits = self.classifier(attended_features)
-
-         return logits, attended_features
-
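- # Shape contract, as a sketch (illustrative): the model returns a
- # (logits, features) tuple, e.g. for a batch of four 224x224 RGB images:
- #     model = BaseModel(num_classes=len(dog_breeds), device='cpu')
- #     logits, feats = model(torch.randn(4, 3, 224, 224))
- #     # logits.shape == (4, len(dog_breeds)); feats.shape == (4, model.feature_dim)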
-
- class ModelManager:
-     """
-     Model manager: handles model initialization, device management, and
-     resource control (CPU/GPU).
-     """
-     _instance = None
-     _initialized = False
-     _yolo_model = None
-     _breed_model = None
-     _device = None
-
-     def __new__(cls):
-         if cls._instance is None:
-             cls._instance = super().__new__(cls)
-         return cls._instance
-
-     def __init__(self):
-         # Avoid re-initialization; the device is set up the first time
-         # an instance is created
-         if not ModelManager._initialized:
-             self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-             ModelManager._initialized = True
-
-     @property
-     def device(self):
-         """
-         Provides access to the device, ensuring it is initialized when needed.
-         """
-         if self._device is None:
-             self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-         return self._device
-
-     @property
-     def yolo_model(self):
-         """
-         Lazily initializes the YOLO model; the instance is created only on
-         first use.
-         """
-         if self._yolo_model is None:
-             self._yolo_model = YOLO('yolov8x.pt')
-         return self._yolo_model
-
-     @property
-     def breed_model(self):
-         """
-         Lazily initializes the breed classification model; it is created and
-         moved to the correct device only on first use.
-         """
-         if self._breed_model is None:
-             self._breed_model = BaseModel(
-                 num_classes=len(dog_breeds),
-                 device=self.device
-             ).to(self.device)
-
-             checkpoint = torch.load(
-                 'ConvNextV2Base_best_model_dog.pth',
-                 map_location=self.device  # Ensure the checkpoint loads onto the correct device
-             )
-             self._breed_model.load_state_dict(checkpoint['base_model'], strict=False)
-             self._breed_model.eval()
-         return self._breed_model
-
-
- model_manager = ModelManager()
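-
- # Because ModelManager overrides __new__, every call returns the same instance,
- # so all tabs share one YOLO detector and one breed classifier:
- #     assert ModelManager() is model_manager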
-
-
- # Image preprocessing function
- def preprocess_image(image):
-     # If the image is a numpy.ndarray, convert it to a PIL.Image
-     if isinstance(image, np.ndarray):
-         image = Image.fromarray(image)
-
-     # Use torchvision.transforms to process the image
-     transform = transforms.Compose([
-         transforms.Resize((224, 224)),
-         transforms.ToTensor(),
-         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-     ])
-
-     return transform(image).unsqueeze(0)
-
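- # Usage sketch (illustrative): a single PIL image becomes a batched tensor
- # ready for the model:
- #     tensor = preprocess_image(Image.new('RGB', (640, 480)))
- #     # tensor.shape == torch.Size([1, 3, 224, 224])
-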
- @spaces.GPU
- def predict_single_dog(image):
-     """
-     Predicts the dog breed using only the classifier.
-     Args:
-         image: PIL Image or numpy array
-     Returns:
-         tuple: (top1_prob, topk_breeds, relative_probs)
-     """
-     image_tensor = preprocess_image(image).to(model_manager.device)
-
-     with torch.no_grad():
-         # Get model outputs; the model returns a (logits, features) tuple,
-         # so take the first element (only the logits are needed here)
-         logits = model_manager.breed_model(image_tensor)[0]
-         probs = F.softmax(logits, dim=1)
-
-         # Classifier prediction
-         top5_prob, top5_idx = torch.topk(probs, k=5)
-         breeds = [dog_breeds[idx.item()] for idx in top5_idx[0]]
-         probabilities = [prob.item() for prob in top5_prob[0]]
-
-         # Calculate relative probabilities (using only the top three)
-         sum_probs = sum(probabilities[:3])
-         relative_probs = [f"{(prob/sum_probs * 100):.2f}%" for prob in probabilities[:3]]
-
-         # Debug output
-         print("\nClassifier Predictions:")
-         for breed, prob in zip(breeds[:5], probabilities[:5]):
-             print(f"{breed}: {prob:.4f}")
-
-     return probabilities[0], breeds[:3], relative_probs
-
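- # Worked example of the relative-probability step (illustrative numbers): if the
- # top-3 softmax scores are 0.40, 0.20 and 0.20, sum_probs is 0.80 and
- # relative_probs becomes ["50.00%", "25.00%", "25.00%"].
-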
- @spaces.GPU
- def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.3):
-     """
-     Detect dogs in the image with the YOLO model.
-     Only objects classified as dogs (class 16) are kept, and each is tagged
-     with its status.
-
-     Args:
-         image: PIL Image
-         conf_threshold: confidence threshold for YOLO detection
-         iou_threshold: IoU threshold for non-maximum suppression
-
-     Returns:
-         list: detected dogs, where each element is a
-               (cropped_image, confidence, box, is_dog) tuple
-     """
-     results = model_manager.yolo_model(image, conf=conf_threshold,
-                                        iou=iou_threshold)[0]
-
-     dogs = []
-     boxes = []
-
-     # Only process objects classified as dogs
-     for box in results.boxes:
-         class_id = box.cls.item()
-         if class_id == 16:  # Class 16 is 'dog' in the COCO dataset
-             xyxy = box.xyxy[0].tolist()
-             confidence = box.conf.item()
-             boxes.append((xyxy, confidence, True))  # Include the is_dog flag
-
-     if not boxes:
-         # If no dog is detected, return the whole image flagged as not a dog
-         return [(image, 1.0, [0, 0, image.width, image.height], False)]
-
-     nms_boxes = non_max_suppression(boxes, iou_threshold)
-     detected_objects = []
-
-     # Process each detected dog
-     for box, confidence, is_dog in nms_boxes:
-         x1, y1, x2, y2 = box
-         w, h = x2 - x1, y2 - y1
-         # Slightly expand the bounding box to include the whole dog
-         x1 = max(0, x1 - w * 0.01)
-         y1 = max(0, y1 - h * 0.01)
-         x2 = min(image.width, x2 + w * 0.01)
-         y2 = min(image.height, y2 + h * 0.01)
-         cropped_image = image.crop((x1, y1, x2, y2))
-         detected_objects.append((cropped_image, confidence, [x1, y1, x2, y2], is_dog))
-
-     return detected_objects
-
- def non_max_suppression(boxes, iou_threshold):
-     keep = []
-     boxes = sorted(boxes, key=lambda x: x[1], reverse=True)
-     while boxes:
-         current = boxes.pop(0)
-         keep.append(current)
-         boxes = [box for box in boxes if calculate_iou(current[0], box[0]) < iou_threshold]
-     return keep
-
-
- def calculate_iou(box1, box2):
-     x1 = max(box1[0], box2[0])
-     y1 = max(box1[1], box2[1])
-     x2 = min(box1[2], box2[2])
-     y2 = min(box1[3], box2[3])
-
-     intersection = max(0, x2 - x1) * max(0, y2 - y1)
-     area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
-     area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
-
-     iou = intersection / float(area1 + area2 - intersection)
-     return iou
-
-
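- # Worked IoU example (illustrative numbers): for box1 = [0, 0, 10, 10] and
- # box2 = [5, 5, 15, 15], the intersection is 5 * 5 = 25 and the union is
- # 100 + 100 - 25 = 175, so calculate_iou returns 25 / 175 ≈ 0.143; that is
- # below the default 0.3 threshold, so non_max_suppression keeps both boxes.
-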
- def create_breed_comparison(breed1: str, breed2: str) -> dict:
-     breed1_info = get_dog_description(breed1)
-     breed2_info = get_dog_description(breed2)
-
-     # Normalized value mapping
-     value_mapping = {
-         'Size': {'Small': 1, 'Medium': 2, 'Large': 3, 'Giant': 4},
-         'Exercise_Needs': {'Low': 1, 'Moderate': 2, 'High': 3, 'Very High': 4},
-         'Care_Level': {'Low': 1, 'Moderate': 2, 'High': 3},
-         'Grooming_Needs': {'Low': 1, 'Moderate': 2, 'High': 3}
-     }
-
-     comparison_data = {
-         breed1: {},
-         breed2: {}
-     }
-
-     for breed, info in [(breed1, breed1_info), (breed2, breed2_info)]:
-         comparison_data[breed] = {
-             'Size': value_mapping['Size'].get(info['Size'], 2),  # Defaults to Medium
-             'Exercise_Needs': value_mapping['Exercise_Needs'].get(info['Exercise Needs'], 2),  # Defaults to Moderate
-             'Care_Level': value_mapping['Care_Level'].get(info['Care Level'], 2),
-             'Grooming_Needs': value_mapping['Grooming_Needs'].get(info['Grooming Needs'], 2),
-             'Good_with_Children': info['Good with Children'] == 'Yes',
-             'Original_Data': info
-         }
-
-     return comparison_data
-
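- # Example of the mapping (illustrative): a breed whose info reads Size='Large',
- # 'Exercise Needs'='High', 'Care Level'='Moderate', 'Grooming Needs'='Low' maps
- # to {'Size': 3, 'Exercise_Needs': 3, 'Care_Level': 2, 'Grooming_Needs': 1, ...};
- # unmapped values fall back to 2 via dict.get.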
-
- @spaces.GPU
- def predict(image):
-     """
-     Main prediction function: handles dog detection and breed identification.
-     It combines YOLO object detection with a dedicated breed classification
-     model, using two-stage detection so that non-dog objects are skipped.
-
-     Args:
-         image: PIL Image or numpy array
-
-     Returns:
-         tuple: (html_output, annotated_image, initial_state)
-     """
-     if image is None:
-         return format_hint_html("Please upload an image to start."), None, None
-
-     try:
-         if isinstance(image, np.ndarray):
-             image = Image.fromarray(image)
-
-         # Detect dogs in the image
-         dogs = detect_multiple_dogs(image)
-         color_scheme = get_color_scheme(len(dogs) == 1)
-
-         # Prepare the annotated image
-         annotated_image = image.copy()
-         draw = ImageDraw.Draw(annotated_image)
-
-         try:
-             font = ImageFont.truetype("arial.ttf", 24)
-         except Exception:
-             font = ImageFont.load_default()
-
-         dogs_info = ""
-
-         # Process each detected object
-         for i, (cropped_image, detection_confidence, box, is_dog) in enumerate(dogs):
-             color = color_scheme if len(dogs) == 1 else color_scheme[i % len(color_scheme)]
-
-             # Draw the bounding box and label
-             draw.rectangle(box, outline=color, width=4)
-             label = f"Dog {i+1}" if is_dog else f"Object {i+1}"
-             label_bbox = draw.textbbox((0, 0), label, font=font)
-             label_width = label_bbox[2] - label_bbox[0]
-             label_height = label_bbox[3] - label_bbox[1]
-
-             # Draw the label background and text
-             label_x = box[0] + 5
-             label_y = box[1] + 5
-             draw.rectangle(
-                 [label_x - 2, label_y - 2, label_x + label_width + 4, label_y + label_height + 4],
-                 fill='white',
-                 outline=color,
-                 width=2
-             )
-             draw.text((label_x, label_y), label, fill=color, font=font)
-
-             try:
-                 # First check whether the object is a dog
-                 if not is_dog:
-                     dogs_info += format_not_dog_message(color, i+1)
-                     continue
-
-                 # If it is a dog, run breed prediction
-                 top1_prob, topk_breeds, relative_probs = predict_single_dog(cropped_image)
-                 combined_confidence = detection_confidence * top1_prob
-
-                 # Choose the output format based on confidence
-                 if combined_confidence < 0.15:
-                     dogs_info += format_unknown_breed_message(color, i+1)
-                 elif top1_prob >= 0.4:
-                     breed = topk_breeds[0]
-                     description = get_dog_description(breed)
-                     if description is None:
-                         description = {
-                             "Name": breed,
-                             "Size": "Unknown",
-                             "Exercise Needs": "Unknown",
-                             "Grooming Needs": "Unknown",
-                             "Care Level": "Unknown",
-                             "Good with Children": "Unknown",
-                             "Description": f"Identified as {breed.replace('_', ' ')}"
-                         }
-                     dogs_info += format_single_dog_result(breed, description, color)
-                 else:
-                     dogs_info += format_multiple_breeds_result(
-                         topk_breeds,
-                         relative_probs,
-                         color,
-                         i+1,
-                         lambda breed: get_dog_description(breed) or {
-                             "Name": breed,
-                             "Size": "Unknown",
-                             "Exercise Needs": "Unknown",
-                             "Grooming Needs": "Unknown",
-                             "Care Level": "Unknown",
-                             "Good with Children": "Unknown",
-                             "Description": f"Identified as {breed.replace('_', ' ')}"
-                         }
-                     )
-             except Exception as e:
-                 print(f"Error formatting results for dog {i+1}: {str(e)}")
-                 dogs_info += format_unknown_breed_message(color, i+1)
-
-         # Wrap the final HTML output
-         html_output = format_multi_dog_container(dogs_info)
-
-         # Prepare the initial state
-         initial_state = {
-             "dogs_info": dogs_info,
-             "image": annotated_image,
-             "is_multi_dog": len(dogs) > 1,
-             "html_output": html_output
-         }
-
-         return html_output, annotated_image, initial_state
-
-     except Exception as e:
-         error_msg = f"An error occurred: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
-         print(error_msg)
-         return format_hint_html(error_msg), None, None
-
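- # Decision-flow example (illustrative numbers): with detection_confidence = 0.8
- # and top1_prob = 0.5, combined_confidence is 0.4, so the single-breed branch
- # runs (top1_prob >= 0.4); with top1_prob = 0.1 the combined score 0.08 falls
- # below 0.15 and the unknown-breed message is shown instead.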
-
- def show_details_html(choice, previous_output, initial_state):
-     """
-     Generate a detailed HTML view for a selected breed.
-
-     Args:
-         choice: str, selected breed option
-         previous_output: str, previous HTML output
-         initial_state: dict, current state information
-
-     Returns:
-         tuple: (html_output, gradio_update, updated_state)
-     """
-     if not choice:
-         return previous_output, gr.update(visible=True), initial_state
-
-     try:
-         breed = choice.split("More about ")[-1]
-         description = get_dog_description(breed)
-         html_output = format_breed_details_html(description, breed)
-
-         # Update state
-         initial_state["current_description"] = html_output
-         initial_state["original_buttons"] = initial_state.get("buttons", [])
-
-         return html_output, gr.update(visible=True), initial_state
-
-     except Exception as e:
-         error_msg = f"An error occurred while showing details: {e}"
-         print(error_msg)
-         return format_hint_html(error_msg), gr.update(visible=True), initial_state
-
- def main():
-     with gr.Blocks(css=get_css_styles()) as iface:
-
-         gr.HTML("""
-             <header style='text-align: center; padding: 20px; margin-bottom: 20px;'>
-                 <h1 style='font-size: 2.5em; margin-bottom: 10px; color: #2D3748;'>
-                     🐾 PawMatch AI
-                 </h1>
-                 <h2 style='font-size: 1.2em; font-weight: normal; color: #4A5568; margin-top: 5px;'>
-                     Your Smart Dog Breed Guide
-                 </h2>
-                 <div style='width: 50px; height: 3px; background: linear-gradient(90deg, #4299e1, #48bb78); margin: 15px auto;'></div>
-                 <p style='color: #718096; font-size: 0.9em;'>
-                     Powered by AI • Breed Recognition • Smart Matching • Companion Guide
-                 </p>
-             </header>
-         """)
-
-         # Create the history component instance first (without creating its tab yet)
-         history_component = create_history_component()
-
-         with gr.Tabs():
-             # 1. Breed detection tab
-             example_images = [
-                 'Border_Collie.jpg',
-                 'Golden_Retriever.jpeg',
-                 'Saint_Bernard.jpeg',
-                 'Samoyed.jpeg',
-                 'French_Bulldog.jpeg'
-             ]
-             detection_components = create_detection_tab(predict, example_images)
-
-             # 2. Breed comparison tab
-             comparison_components = create_comparison_tab(
-                 dog_breeds=dog_breeds,
-                 get_dog_description=get_dog_description,
-                 breed_health_info=breed_health_info,
-                 breed_noise_info=breed_noise_info
-             )
-
-             # 3. Breed recommendation tab
-             recommendation_components = create_recommendation_tab(
-                 UserPreferences=UserPreferences,
-                 get_breed_recommendations=get_breed_recommendations,
-                 format_recommendation_html=format_recommendation_html,
-                 history_component=history_component
-             )
-
-             # 4. Finally, create the history tab
-             create_history_tab(history_component)
-
-         # Footer
-         gr.HTML('''
-         <div style="
-             display: flex;
-             align-items: center;
-             justify-content: center;
-             gap: 20px;
-             padding: 20px 0;
-         ">
-             <p style="
-                 font-family: 'Arial', sans-serif;
-                 font-size: 14px;
-                 font-weight: 500;
-                 letter-spacing: 2px;
-                 background: linear-gradient(90deg, #555, #007ACC);
-                 -webkit-background-clip: text;
-                 -webkit-text-fill-color: transparent;
-                 margin: 0;
-                 text-transform: uppercase;
-                 display: inline-block;
-             ">EXPLORE THE CODE →</p>
-             <a href="https://github.com/Eric-Chung-0511/Learning-Record/tree/main/Data%20Science%20Projects/PawMatchAI" style="text-decoration: none;">
-                 <img src="https://img.shields.io/badge/GitHub-PawMatch_AI-007ACC?logo=github&style=for-the-badge">
-             </a>
-         </div>
-         ''')
-
-     return iface
-
-
- if __name__ == "__main__":
-     iface = main()
-     iface.launch()