tzmartin committed · unverified
Commit d768084 · 1 Parent(s): 5b129df

feat($app): initial push

Files changed (4)
  1. .gitattributes +1 -0
  2. README.md +1 -1
  3. app.py +70 -0
  4. requirements.txt +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sample.jpeg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: Crowd Counter
- emoji: 🐢
+ emoji: 👨‍👩‍👦‍👦 ∑
  colorFrom: gray
  colorTo: red
  sdk: gradio
app.py ADDED
@@ -0,0 +1,70 @@
+ import gradio as gr
+ import cv2
+ import numpy as np
+ import torch
+ import kornia as K
+ from kornia.core import Tensor
+ from kornia.contrib import FaceDetector, FaceDetectorResult, FaceKeypoint
+
+
+ def draw_keypoint(img: np.ndarray, det: FaceDetectorResult, kpt_type: FaceKeypoint) -> np.ndarray:
+     # Draw one facial keypoint (eye, nose, or mouth corner) as a small circle.
+     kpt = det.get_keypoint(kpt_type).int().tolist()
+     return cv2.circle(img, kpt, 2, (255, 0, 0), 2)
+
+
+ def detect(img_raw):
+     # preprocess: bail out early if no valid 3-channel image was provided
+     if img_raw is None or len(img_raw.shape) != 3:
+         return img_raw, 0
+
+     # HxWxC uint8 numpy image -> 1xCxHxW float tensor
+     img = K.image_to_tensor(img_raw, keepdim=False)
+     img = K.color.bgr_to_rgb(img.float())
+
+     # create the detector and find the faces!
+     face_detection = FaceDetector()
+
+     with torch.no_grad():
+         dets = face_detection(img)
+     dets = [FaceDetectorResult(o) for o in dets[0]]
+
+     img_vis = img_raw.copy()
+
+     vis_threshold = 0.8
+
+     for b in dets:
+         if b.score < vis_threshold:
+             continue
+
+         # Draw face bounding box
+         img_vis = cv2.rectangle(img_vis, b.top_left.int().tolist(), b.bottom_right.int().tolist(), (0, 255, 0), 4)
+         # Draw keypoints
+         img_vis = draw_keypoint(img_vis, b, FaceKeypoint.EYE_LEFT)
+         img_vis = draw_keypoint(img_vis, b, FaceKeypoint.EYE_RIGHT)
+         img_vis = draw_keypoint(img_vis, b, FaceKeypoint.NOSE)
+         img_vis = draw_keypoint(img_vis, b, FaceKeypoint.MOUTH_LEFT)
+         img_vis = draw_keypoint(img_vis, b, FaceKeypoint.MOUTH_RIGHT)
+
+     return img_vis, len(dets)
+
+
+ title = "Crowd Counter"
+ description = "<p style='text-align: center'>This is a Gradio demo for crowd counting using Kornia's Face Detection model.</p><p style='text-align: center'>To use it, simply upload your image, or click one of the examples to load them.</p>"
+ article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Kornia Docs</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://kornia.readthedocs.io/en/latest/applications/face_detection.html' target='_blank'>Kornia Face Detection Tutorial</a></p>"
+ outputs = [gr.outputs.Image(type="pil", label="Output Image"), gr.outputs.Textbox(label="Total (Head) Count")]
+
+ examples = ['sample.jpeg']
+
+ face = gr.Interface(
+     detect,
+     gr.inputs.Image(type="numpy"),
+     outputs=outputs,
+     examples=examples,
+     title=title,
+     description=description,
+     article=article,
+     live=True,
+     allow_flagging="never"
+ )
+
+ face.launch()
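The Space exposes this pipeline only through the Gradio UI. For a quick local sanity check, the sketch below repeats the same Kornia calls that `detect()` makes, applied to the bundled `sample.jpeg`; it is an illustrative assumption rather than part of the commit (the file path, the 0.8 threshold, and the `dets[0]` indexing simply mirror the code above, and `FaceDetector` may download pretrained weights on first use).

```python
# Hypothetical local smoke test mirroring detect() in app.py (not part of the commit).
import cv2
import torch
import kornia as K
from kornia.contrib import FaceDetector, FaceDetectorResult

img_raw = cv2.imread("sample.jpeg")                      # HxWx3 uint8, BGR order
img = K.image_to_tensor(img_raw, keepdim=False).float()  # 1x3xHxW float tensor
img = K.color.bgr_to_rgb(img)

detector = FaceDetector()
with torch.no_grad():
    dets = detector(img)
results = [FaceDetectorResult(o) for o in dets[0]]

# Count detections above the same confidence threshold the app draws.
count = sum(1 for r in results if r.score >= 0.8)
print(f"faces above threshold: {count} (raw detections: {len(results)})")
```

The detection steps are duplicated here rather than imported from `app.py` because importing that module runs `face.launch()` at import time, which would start the Gradio server and block the script.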
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ kornia
+ opencv-python
+ torch
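requirements.txt omits gradio and numpy, which works on Hugging Face Spaces: the Gradio dependency is provisioned from the `sdk: gradio` field in README.md, and numpy is pulled in transitively by opencv-python and torch. A small, hypothetical sanity check (not part of the commit) that confirms every module app.py imports resolves in such an environment:

```python
# Hypothetical environment check: verify that all modules app.py imports
# are importable in the Space's runtime and print their versions.
import importlib

for name in ("gradio", "cv2", "numpy", "torch", "kornia"):
    module = importlib.import_module(name)
    print(f"{name}: {module.__version__}")
```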