Spaces:
Runtime error
Runtime error
kkowenn
committed on
Commit
·
925cf9e
1
Parent(s):
32498ef
Add Streamlit app
Browse files- app.py +130 -0
- graph_opt.pb +3 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
import cv2 as cv
import numpy as np

# Paths to your files
model_path = "graph_opt.pb"

# Load the TensorFlow model
# NOTE(review): readNetFromTensorflow typically raises cv.error itself when the
# file is missing; the empty() check below additionally catches a net that
# loaded but is unusable.
net = cv.dnn.readNetFromTensorflow(model_path)
if net.empty():
    raise FileNotFoundError(f"Model file not found or cannot be loaded: {model_path}")

# Keypoint name -> heatmap channel index produced by the pose graph
# (18 body parts + background = 19 channels, matching the out[:, :19] slice
# taken in poseDetector).
BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
              "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
              "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
              "LEye": 15, "REar": 16, "LEar": 17, "Background": 18}

# Adjust POSE_PAIRS to include only wrist to elbow parts
POSE_PAIRS = [["RElbow", "RWrist"], ["LElbow", "LWrist"]]

# Network input resolution fed to blobFromImage, and the minimum heatmap
# confidence for a keypoint to be accepted.
width = 368
height = 368
inWidth = width
inHeight = height
thr = 0.2
|
26 |
+
|
27 |
+
def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
    """Alpha-blend ``img_overlay`` onto ``img`` in place at position ``pos``.

    ``pos`` is the (x, y) of the overlay's top-left corner and may lie partly
    or fully outside ``img``; the overlapping rectangle is clipped on both
    sides. ``alpha_mask`` is a 2-D array of per-pixel opacities in [0, 1]
    matching the overlay's height and width. Returns None; ``img`` is
    modified in place.
    """
    left, top = pos

    # Destination rectangle, clipped to the bounds of the target image.
    dst_y0 = max(0, top)
    dst_y1 = min(img.shape[0], top + img_overlay.shape[0])
    dst_x0 = max(0, left)
    dst_x1 = min(img.shape[1], left + img_overlay.shape[1])

    # Matching source rectangle inside the overlay.
    src_y0 = max(0, -top)
    src_y1 = min(img_overlay.shape[0], img.shape[0] - top)
    src_x0 = max(0, -left)
    src_x1 = min(img_overlay.shape[1], img.shape[1] - left)

    # Overlay entirely off-screen: nothing to draw.
    if dst_y0 >= dst_y1 or dst_x0 >= dst_x1 or src_y0 >= src_y1 or src_x0 >= src_x1:
        return

    dst_region = img[dst_y0:dst_y1, dst_x0:dst_x1]
    src_region = img_overlay[src_y0:src_y1, src_x0:src_x1]
    alpha = alpha_mask[src_y0:src_y1, src_x0:src_x1, np.newaxis]

    # Standard "over" compositing, written back through the view into img.
    dst_region[:] = alpha * src_region + (1.0 - alpha) * dst_region
|
45 |
+
|
46 |
+
def poseDetector(frame, overlay_img):
    """Estimate pose keypoints in ``frame`` and draw ``overlay_img`` (a BGRA
    tattoo image) along each detected forearm, modifying ``frame`` in place.

    Parameters:
        frame: BGR image from the webcam (H x W x 3 uint8).
        overlay_img: tattoo image as loaded by cv.imdecode with
            IMREAD_UNCHANGED; must carry an alpha channel to be composited.

    Returns the (possibly annotated) frame.
    """
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]

    net.setInput(cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight),
                                      (127.5, 127.5, 127.5), swapRB=True, crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # keep only the 19 keypoint heatmap channels

    assert (len(BODY_PARTS) == out.shape[1])

    # Best-scoring location per body part; None when below the confidence
    # threshold, so downstream code can skip missing joints.
    points = []
    for i in range(len(BODY_PARTS)):
        heatMap = out[0, i, :, :]
        _, conf, _, point = cv.minMaxLoc(heatMap)
        # Heatmap coordinates -> frame pixel coordinates.
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]
        points.append((int(x), int(y)) if conf > thr else None)

    for pair in POSE_PAIRS:
        partFrom, partTo = pair
        assert (partFrom in BODY_PARTS)
        assert (partTo in BODY_PARTS)

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]

        if not (points[idFrom] and points[idTo]):
            continue

        dx = points[idTo][0] - points[idFrom][0]
        dy = points[idTo][1] - points[idFrom][1]
        angle = np.degrees(np.arctan2(dy, dx))
        adjusted_angle = angle + 270  # align the tattoo with the forearm axis

        # Scale the tattoo to half the forearm length / half its own height.
        length = int(np.hypot(dx, dy))
        reduced_length = int(length * 0.5)
        target_height = int(overlay_img.shape[0] * 0.5)
        # Bug fix: cv.resize raises on a zero-sized target, which happened
        # whenever elbow and wrist (nearly) coincided — skip instead.
        if reduced_length <= 0 or target_height <= 0:
            continue
        overlay_resized = cv.resize(overlay_img, (reduced_length, target_height))

        # Bug fix: compositing below reads channel 3; a PNG decoded without
        # an alpha plane (3-channel) would crash here — skip such images.
        if overlay_resized.ndim != 3 or overlay_resized.shape[2] < 4:
            continue

        M = cv.getRotationMatrix2D((overlay_resized.shape[1] / 2, overlay_resized.shape[0] / 2),
                                   adjusted_angle, 1)
        overlay_rotated = cv.warpAffine(overlay_resized, M,
                                        (overlay_resized.shape[1], overlay_resized.shape[0]),
                                        flags=cv.INTER_LINEAR,
                                        borderMode=cv.BORDER_CONSTANT,
                                        borderValue=(0, 0, 0, 0))

        # Center the rotated tattoo on the wrist keypoint.
        wrist_position = points[idTo]
        position = (int(wrist_position[0] - overlay_rotated.shape[1] / 2),
                    int(wrist_position[1] - overlay_rotated.shape[0] / 2))

        alpha_mask = overlay_rotated[:, :, 3] / 255.0
        overlay_image_alpha(frame, overlay_rotated[:, :, :3], position, alpha_mask)

    # Removed dead `t, _ = net.getPerfProfile()` — its result was discarded.
    return frame
|
95 |
+
|
96 |
+
def main():
    """Streamlit entry point: stream the default webcam and composite an
    uploaded PNG tattoo onto each detected forearm."""
    st.title("Webcam Stream with Pose Detection and Tattoo Overlay")

    st.sidebar.header("Upload Tattoo Image")
    uploaded_tattoo_img = st.sidebar.file_uploader("Upload Tattoo Image", type=["png"])

    # Bug fix: the original assigned tattoo_img only inside this branch, so
    # ticking 'Run' before uploading raised NameError at poseDetector(...).
    tattoo_img = None
    if uploaded_tattoo_img:
        # IMREAD_UNCHANGED preserves the PNG alpha channel needed for blending.
        tattoo_img = cv.imdecode(np.frombuffer(uploaded_tattoo_img.read(), np.uint8),
                                 cv.IMREAD_UNCHANGED)

    run = st.checkbox('Run')

    if run and tattoo_img is None:
        st.warning("Please upload a tattoo image (PNG) before running.")
        return

    # Access the webcam (0 is the default camera)
    cap = cv.VideoCapture(0)

    if not cap.isOpened():
        st.error("Unable to access the camera")
        return

    stframe = st.empty()

    try:
        while run:
            ret, frame = cap.read()
            if not ret:
                st.error("Failed to capture image")
                break

            frame = poseDetector(frame, tattoo_img)
            frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)  # OpenCV BGR -> Streamlit RGB
            stframe.image(frame, channels="RGB")
    finally:
        # Release the camera even if an error escapes the loop.
        cap.release()
|
128 |
+
|
129 |
+
# Run the Streamlit app only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
graph_opt.pb
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:51f84ea82b3d0143dd4cf362e018c125e832c64771908ee8766fbd8b0328008d
|
3 |
+
size 7804434
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
streamlit
opencv-python
numpy
|