Spaces:
Running
Running
Zai
committed on
Commit
·
dafe57a
1
Parent(s):
7ba388d
testing space
Browse files- .gitignore +34 -0
- requirements.txt +4 -0
- space.py +33 -10
- untitled.txt +0 -0
.gitignore
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
*.pyc
|
3 |
+
__pycache__/
|
4 |
+
|
5 |
+
# Virtual environments
|
6 |
+
venv/
|
7 |
+
env/
|
8 |
+
*.env/
|
9 |
+
|
10 |
+
# Jupyter Notebook
|
11 |
+
.ipynb_checkpoints/
|
12 |
+
|
13 |
+
# Model artifacts and data files
|
14 |
+
*.h5
|
15 |
+
*.pkl
|
16 |
+
*.npy
|
17 |
+
*.npz
|
18 |
+
*.csv
|
19 |
+
|
20 |
+
# Logs
|
21 |
+
*.log
|
22 |
+
|
23 |
+
# Ignore data directory if you store large datasets
|
24 |
+
data/
|
25 |
+
|
26 |
+
# Ignore model outputs
|
27 |
+
models/
|
28 |
+
|
29 |
+
# Ignore environment configuration files
|
30 |
+
.env
|
31 |
+
|
32 |
+
# Ignore system files
|
33 |
+
.DS_Store
|
34 |
+
Thumbs.db
|
requirements.txt
CHANGED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torch
|
2 |
+
streamlit
|
3 |
+
opencv-python
|
4 |
+
numpy
|
space.py
CHANGED
@@ -1,31 +1,54 @@
|
|
1 |
import torch
|
2 |
import streamlit as st
|
3 |
-
|
4 |
-
|
|
|
5 |
|
6 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
7 |
|
8 |
-
model = Headshot()
|
9 |
|
10 |
-
|
|
|
|
|
|
|
11 |
|
12 |
def main():
|
13 |
st.title("Headshot simulator")
|
14 |
|
15 |
-
video_source = st.sidebar.radio("Select video source:", ("Webcam", "Upload","Demo"))
|
16 |
|
17 |
if video_source == "Webcam":
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
elif video_source == "Demo":
|
21 |
-
# prediction,image = sample()
|
22 |
pass
|
23 |
else:
|
24 |
uploaded_file = st.sidebar.file_uploader("Choose a video file", type=["mp4", "avi"])
|
25 |
if uploaded_file is not None:
|
26 |
file_bytes = uploaded_file.read()
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
if __name__ == "__main__":
|
31 |
-
main()
|
|
|
1 |
import os
import tempfile

import cv2
import numpy as np
import torch
import streamlit as st

# from headshot import Headshot
|
6 |
|
7 |
# Pick the compute device once at import time: GPU if CUDA is available, else CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# model = Headshot()  # NOTE(review): disabled — the Headshot import above is also commented out.
|
10 |
|
11 |
+
def detect_faces(frame):
    """Return face bounding boxes for *frame* as a list of (x, y, w, h) tuples.

    Placeholder: real detection is not wired up yet, so a single fixed
    dummy box is returned regardless of the input frame.
    """
    dummy_box = (100, 100, 200, 200)
    return [dummy_box]
|
15 |
|
16 |
def _stream_frames(cap):
    """Read frames from *cap* until exhausted, draw face boxes, and show each in Streamlit.

    Always releases the capture handle, even if Streamlit interrupts the loop.
    """
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            for x, y, w, h in detect_faces(frame):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # OpenCV decodes frames as BGR; Streamlit expects RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            st.image(frame, channels="RGB")
    finally:
        # BUG FIX: the original never released the camera/file handle.
        cap.release()


def main():
    """Streamlit entry point: stream video from the chosen source with face boxes drawn."""
    st.title("Headshot simulator")

    video_source = st.sidebar.radio("Select video source:", ("Webcam", "Upload", "Demo"))

    if video_source == "Webcam":
        _stream_frames(cv2.VideoCapture(0))
    elif video_source == "Demo":
        # prediction, image = sample()
        pass
    else:
        uploaded_file = st.sidebar.file_uploader("Choose a video file", type=["mp4", "avi"])
        if uploaded_file is not None:
            file_bytes = uploaded_file.read()
            # BUG FIX: cv2.VideoCapture(uploaded_file.name) cannot work — the
            # upload lives only in memory; `uploaded_file.name` is the client's
            # filename, not a path on this machine (and file_bytes was never
            # used). Persist the bytes to a temp file and open that path.
            suffix = os.path.splitext(uploaded_file.name)[1] or ".mp4"
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
                tmp.write(file_bytes)
                tmp_path = tmp.name
            try:
                _stream_frames(cv2.VideoCapture(tmp_path))
            finally:
                os.unlink(tmp_path)  # clean up the temp copy once playback ends
|
52 |
|
53 |
# Script entry point (launched via `streamlit run space.py`).
if __name__ == "__main__":
    main()
|
untitled.txt
DELETED
File without changes
|