added dockerfile build
Files changed:
- Dockerfile +22 -0
- app.py +6 -4
- docker-compose.yml +15 -0
Dockerfile (ADDED)
@@ -0,0 +1,22 @@
+FROM python:3.10-slim
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV LANG C.UTF-8
+ENV GRADIO_SERVER_NAME 0.0.0.0
+
+RUN useradd -m -u 1000 user
+WORKDIR /home/user/app
+
+RUN :\
+    && apt-get update -y \
+    && apt-get install -y --no-install-recommends ffmpeg \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN --mount=target=requirements.txt,source=requirements.txt :\
+    && pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+EXPOSE 7860
+
+ENTRYPOINT ["python", "app.py"]
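A note on ENV GRADIO_SERVER_NAME 0.0.0.0: when launch() is called without an explicit server_name, Gradio falls back to this environment variable, so the app binds to all interfaces and is reachable through the published container port. A minimal sketch of the equivalent explicit call, using a hypothetical echo interface that is not part of this Space:

    import os
    import gradio as gr

    def echo(text: str) -> str:
        # Hypothetical stand-in for the Space's real inference() function.
        return text

    demo = gr.Interface(fn=echo, inputs="text", outputs="text")

    # Equivalent to relying on GRADIO_SERVER_NAME and EXPOSE 7860 in the Dockerfile:
    # bind to 0.0.0.0 inside the container (localhost otherwise) on port 7860.
    demo.launch(
        server_name=os.environ.get("GRADIO_SERVER_NAME", "127.0.0.1"),
        server_port=7860,
    )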
app.py (CHANGED)
@@ -88,8 +88,8 @@ def inference(video):
 
     temp_dir = tempfile.mkdtemp()
     temp_directories.append(temp_dir)
-    output_composition = temp_dir + "/matted_video.mp4"
 
+    output_composition = temp_dir + "/matted_video.mp4"
     convert_video(
         model,  # The loaded model, can be on any device (cpu or cuda).
         input_source=video,  # A video file or an image sequence directory.
@@ -112,7 +112,9 @@ if __name__ == "__main__":
     temp_directories = []
     atexit.register(cleanup_temp_directories)
 
-    model = torch.hub.load(
+    model = torch.hub.load(
+        "PeterL1n/RobustVideoMatting", "mobilenetv3", trust_repo=True
+    )
 
     if torch.cuda.is_available():
         free_memory = get_free_memory_gb()
@@ -130,7 +132,7 @@
             "Gradio demo for Robust Video Matting. To use it, simply upload your video, or click one of the examples to load them. Read more at the links below."
         )
         with gr.Row():
-            inp = gr.Video(label="Input Video")
+            inp = gr.Video(label="Input Video", sources=["upload"], include_audio=True)
             out = gr.Video(label="Output Video")
         btn = gr.Button("Run")
         btn.click(inference, inputs=inp, outputs=out)
@@ -144,5 +146,5 @@ if __name__ == "__main__":
     )
 
     block.queue(
-        api_open=False, max_size=5,
+        api_open=False, max_size=5, default_concurrency_limit=concurrency_count
     ).launch()
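The gr.Video(..., sources=["upload"], include_audio=True) and queue(default_concurrency_limit=...) arguments appear to track the Gradio 4 API, which replaced the older source= and concurrency_count= parameters. The diff also references a get_free_memory_gb() helper without showing its body; a hypothetical sketch of such a helper, assuming it reports free memory on the current CUDA device, might look like this:

    import torch

    def get_free_memory_gb() -> float:
        # Hypothetical implementation; the actual helper in app.py is not shown
        # in this diff. torch.cuda.mem_get_info() returns (free, total) in bytes
        # for the current CUDA device.
        free_bytes, _total_bytes = torch.cuda.mem_get_info()
        return free_bytes / 1024 ** 3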
docker-compose.yml (ADDED)
@@ -0,0 +1,15 @@
+version: '3.8'
+services:
+  video-matting:
+    build: .
+    restart: unless-stopped
+    shm_size: 8g
+    ports:
+      - "7860:7860"
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              capabilities: [gpu]
+              count: all
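With both files in place, the container can typically be built and started locally with docker compose up --build. The deploy.resources.reservations.devices block asks Compose to reserve all NVIDIA GPUs, which requires the NVIDIA Container Toolkit on the host, and shm_size: 8g enlarges the container's shared memory segment well beyond Docker's 64 MB default.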