Roop works
- .gitignore +160 -0
- app.py +3 -7
- requirements.txt +1 -4
- roop/core.py +8 -59
- roop/processors/frame/core.py +3 -11
- roop/processors/frame/face_enhancer.py +0 -75
- roop/processors/frame/face_swapper.py +3 -32
- roop/ui.json +0 -158
- roop/ui.py +0 -231
- run.py +0 -6
.gitignore
ADDED
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
app.py
CHANGED
@@ -1,14 +1,10 @@
 import numpy as np
 import gradio as gr
+from roop import core
+
 
 def sepia(input_img1, input_img2):
-    sepia_filter = np.array([
-        [0.393, 0.769, 0.189],
-        [0.349, 0.686, 0.168],
-        [0.272, 0.534, 0.131]
-    ])
-    sepia_img = input_img1.dot(sepia_filter.T)
-    sepia_img /= sepia_img.max()
+    sepia_img = core.run(input_img1, input_img2)
     return sepia_img
 
 demo = gr.Interface(fn=sepia, inputs=[gr.Image(shape=(200, 200)), gr.Image(shape=(200, 200))], outputs=["image"])
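
With this change app.py no longer computes a sepia filter at all; the Gradio callback simply forwards both uploaded images to roop.core.run and returns whatever frame comes back. A minimal, hypothetical smoke test of that wiring (not part of the commit; file names are placeholders) could look like:

# Hypothetical smoke test for the new app.py wiring (not in this commit).
import cv2
from roop import core

source_img = cv2.imread('source.jpg')   # placeholder: image containing the face to use
target_img = cv2.imread('target.jpg')   # placeholder: image to paste the face into

# Note: Gradio hands the callback RGB arrays while cv2 works in BGR, so a real
# caller may want cv2.cvtColor() on the way in and out.
result = core.run(source_img, target_img)
if result is not None:
    cv2.imwrite('output.png', result)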
requirements.txt
CHANGED
@@ -5,8 +5,6 @@ opencv-python==4.7.0.72
 onnx==1.14.0
 insightface==0.7.3
 psutil==5.9.5
-tk==0.1.0
-customtkinter==5.1.3
 pillow==9.5.0
 torch==2.0.1+cu118; sys_platform != 'darwin'
 torch==2.0.1; sys_platform == 'darwin'
@@ -19,5 +17,4 @@ tensorflow==2.13.0rc1; sys_platform == 'darwin'
 tensorflow==2.12.0; sys_platform != 'darwin'
 opennsfw2==0.10.2
 protobuf==4.23.2
-tqdm==4.65.0
-gfpgan==1.3.8
+tqdm==4.65.0
roop/core.py
CHANGED
@@ -19,10 +19,10 @@ import tensorflow
 
 import roop.globals
 import roop.metadata
-import roop.ui as ui
 from roop.predicter import predict_image, predict_video
 from roop.processors.frame.core import get_frame_processors_modules
 from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
+from roop.typing import Frame, Face
 
 if 'ROCMExecutionProvider' in roop.globals.execution_providers:
     del torch
@@ -32,12 +32,11 @@ warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')
 
 
 def parse_args() -> None:
-    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
     program = argparse.ArgumentParser()
     program.add_argument('-s', '--source', help='select an source image', dest='source_path')
     program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
     program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
-    program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper', 'face_enhancer'], nargs='+')
+    program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper'], nargs='+')
     program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
     program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
     program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
@@ -157,65 +156,19 @@ def pre_check() -> bool:
 
 def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
     print(f'[{scope}] {message}')
-    if not roop.globals.headless:
-        ui.update_status(message)
 
 
-def start() -> None:
+def start(img1, img2) -> Frame:
     for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
         if not frame_processor.pre_start():
             return
     # process image to image
-    if has_image_extension(roop.globals.target_path):
-        if predict_image(roop.globals.target_path):
-            destroy()
-        shutil.copy2(roop.globals.target_path, roop.globals.output_path)
-        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-            update_status('Progressing...', frame_processor.NAME)
-            frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
-            release_resources()
-        if is_image(roop.globals.target_path):
-            update_status('Processing to image succeed!')
-        else:
-            update_status('Processing to image failed!')
-        return
-    # process image to videos
-    if predict_video(roop.globals.target_path):
-        destroy()
-    update_status('Creating temp resources...')
-    create_temp(roop.globals.target_path)
-    update_status('Extracting frames...')
-    extract_frames(roop.globals.target_path)
-    temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
     for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
         update_status('Progressing...', frame_processor.NAME)
-        frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
+        of = frame_processor.process_image(img1, img2)
         release_resources()
-    # handles fps
-    if roop.globals.keep_fps:
-        update_status('Detecting fps...')
-        fps = detect_fps(roop.globals.target_path)
-        update_status(f'Creating video with {fps} fps...')
-        create_video(roop.globals.target_path, fps)
-    else:
-        update_status('Creating video with 30.0 fps...')
-        create_video(roop.globals.target_path)
-    # handle audio
-    if roop.globals.keep_audio:
-        if roop.globals.keep_fps:
-            update_status('Restoring audio...')
-        else:
-            update_status('Restoring audio might cause issues as fps are not kept...')
-        restore_audio(roop.globals.target_path, roop.globals.output_path)
-    else:
-        move_temp(roop.globals.target_path, roop.globals.output_path)
-    # clean and validate
-    clean_temp(roop.globals.target_path)
-    if is_video(roop.globals.target_path):
-        update_status('Processing to video succeed!')
-    else:
-        update_status('Processing to video failed!')
-
+    return of
+
 
 def destroy() -> None:
     if roop.globals.target_path:
@@ -223,7 +176,7 @@ def destroy() -> None:
         quit()
 
 
-def run() -> None:
+def run(img1, img2) -> Frame:
     parse_args()
     if not pre_check():
         return
@@ -231,8 +184,4 @@ def run() -> None:
         if not frame_processor.pre_check():
             return
     limit_resources()
-    if roop.globals.headless:
-        start()
-    else:
-        window = ui.init(start, destroy)
-        window.mainloop()
+    return start(img1, img2)
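
The net effect of the core.py changes is that start() and run() now operate on two in-memory frames and return a frame, instead of reading roop.globals paths, extracting video frames, and writing files to disk. Roughly, and glossing over argument parsing and resource limits, the new flow amounts to the sketch below (an illustration of the idea, not the literal committed code):

# Rough paraphrase of the new in-memory pipeline in roop/core.py (illustrative only).
import roop.globals
from roop.processors.frame.core import get_frame_processors_modules

def swap(img1, img2):
    output = None
    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
        if not frame_processor.pre_start():
            return None
        # every processor is now expected to expose process_image(frame, frame) -> frame
        output = frame_processor.process_image(img1, img2)
    return output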
roop/processors/frame/core.py
CHANGED
@@ -12,8 +12,7 @@ FRAME_PROCESSORS_INTERFACE = [
     'pre_check',
     'pre_start',
     'process_frame',
-    'process_image',
-    'process_video'
+    'process_image'
 ]
 
 
@@ -22,6 +21,7 @@ def load_frame_processor_module(frame_processor: str) -> Any:
         frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
         for method_name in FRAME_PROCESSORS_INTERFACE:
            if not hasattr(frame_processor_module, method_name):
+                print(frame_processor_module, method_name)
                sys.exit()
     except ImportError:
         sys.exit()
@@ -45,12 +45,4 @@ def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_f
             future = executor.submit(process_frames, source_path, [path], progress)
             futures.append(future)
         for future in futures:
-            future.result()
-
-
-def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
-    progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
-    total = len(frame_paths)
-    with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
-        progress.set_postfix({'execution_providers': roop.globals.execution_providers, 'threads': roop.globals.execution_threads, 'memory': roop.globals.max_memory})
-        multi_process_frame(source_path, frame_paths, process_frames, progress)
+            future.result()
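
FRAME_PROCESSORS_INTERFACE now only requires pre_check, pre_start, process_frame and process_image, and the added print makes it obvious which method is missing when a processor module fails the interface check. For reference, a minimal module that would pass the trimmed check might look like this (an illustrative stub, not part of the repository):

# roop/processors/frame/example.py -- hypothetical stub satisfying the trimmed interface.
from typing import Any

NAME = 'ROOP.EXAMPLE'

def pre_check() -> bool:
    return True            # e.g. download model weights here

def pre_start() -> bool:
    return True            # e.g. validate inputs here

def process_frame(source_face: Any, temp_frame: Any) -> Any:
    return temp_frame      # identity processor: leaves the frame untouched

def process_image(source_frame: Any, target_frame: Any) -> Any:
    return process_frame(None, target_frame)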
roop/processors/frame/face_enhancer.py
DELETED
@@ -1,75 +0,0 @@
-from typing import Any, List
-import cv2
-import threading
-import gfpgan
-
-import roop.globals
-import roop.processors.frame.core
-from roop.core import update_status
-from roop.face_analyser import get_one_face
-from roop.typing import Frame, Face
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
-FACE_ENHANCER = None
-THREAD_SEMAPHORE = threading.Semaphore()
-THREAD_LOCK = threading.Lock()
-NAME = 'ROOP.FACE-ENHANCER'
-
-
-def pre_check() -> bool:
-    download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth'])
-    return True
-
-
-def pre_start() -> bool:
-    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-        update_status('Select an image or video for target path.', NAME)
-        return False
-    return True
-
-
-def get_face_enhancer() -> Any:
-    global FACE_ENHANCER
-
-    with THREAD_LOCK:
-        if FACE_ENHANCER is None:
-            model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
-            # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-            FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1)  # type: ignore[attr-defined]
-    return FACE_ENHANCER
-
-
-def enhance_face(temp_frame: Frame) -> Frame:
-    with THREAD_SEMAPHORE:
-        _, _, temp_frame = get_face_enhancer().enhance(
-            temp_frame,
-            paste_back=True
-        )
-    return temp_frame
-
-
-def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
-    target_face = get_one_face(temp_frame)
-    if target_face:
-        temp_frame = enhance_face(temp_frame)
-    return temp_frame
-
-
-def process_frames(source_path: str, temp_frame_paths: List[str], progress: Any = None) -> None:
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(None, temp_frame)
-        cv2.imwrite(temp_frame_path, result)
-        if progress:
-            progress.update(1)
-
-
-def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    target_frame = cv2.imread(target_path)
-    result = process_frame(None, target_frame)
-    cv2.imwrite(output_path, result)
-
-
-def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
roop/processors/frame/face_swapper.py
CHANGED
@@ -22,15 +22,6 @@ def pre_check() -> bool:
 
 
 def pre_start() -> bool:
-    if not is_image(roop.globals.source_path):
-        update_status('Select an image for source path.', NAME)
-        return False
-    elif not get_one_face(cv2.imread(roop.globals.source_path)):
-        update_status('No face in source path detected.', NAME)
-        return False
-    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-        update_status('Select an image or video for target path.', NAME)
-        return False
     return True
 
 
@@ -61,26 +52,6 @@ def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
     return temp_frame
 
 
-def process_frames(source_path: str, temp_frame_paths: List[str], progress: Any = None) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        try:
-            result = process_frame(source_face, temp_frame)
-            cv2.imwrite(temp_frame_path, result)
-        except Exception as exception:
-            print(exception)
-            pass
-        if progress:
-            progress.update(1)
-
-
-def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    source_face = get_one_face(cv2.imread(source_path))
-    target_frame = cv2.imread(target_path)
-    result = process_frame(source_face, target_frame)
-    cv2.imwrite(output_path, result)
-
-
-def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
+def process_image(source_frame: Frame, target_frame: Frame) -> Frame:
+    source_face = get_one_face(source_frame)
+    return process_frame(source_face, target_frame)
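
face_swapper.process_image now takes the source and target frames directly and returns the swapped frame instead of reading and writing files; the path-based process_frames/process_video helpers are gone along with the pre_start path checks. A hedged usage sketch of the new in-memory API (file names are placeholders, not from the commit):

# Hypothetical direct use of the new in-memory face_swapper API (not in this commit).
import cv2
from roop.processors.frame import face_swapper

source_frame = cv2.imread('face.jpg')    # placeholder path: face to use
target_frame = cv2.imread('scene.jpg')   # placeholder path: image to modify

if face_swapper.pre_check():             # downloads the swapper model if missing
    swapped = face_swapper.process_image(source_frame, target_frame)
    cv2.imwrite('swapped.png', swapped)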
roop/ui.json
DELETED
@@ -1,158 +0,0 @@
-{
-  "CTk": {
-    "fg_color": ["gray95", "gray10"]
-  },
-  "CTkToplevel": {
-    "fg_color": ["gray95", "gray10"]
-  },
-  "CTkFrame": {
-    "corner_radius": 6,
-    "border_width": 0,
-    "fg_color": ["gray90", "gray13"],
-    "top_fg_color": ["gray85", "gray16"],
-    "border_color": ["gray65", "gray28"]
-  },
-  "CTkButton": {
-    "corner_radius": 6,
-    "border_width": 0,
-    "fg_color": ["#3a7ebf", "#1f538d"],
-    "hover_color": ["#325882", "#14375e"],
-    "border_color": ["#3E454A", "#949A9F"],
-    "text_color": ["#DCE4EE", "#DCE4EE"],
-    "text_color_disabled": ["gray74", "gray60"]
-  },
-  "CTkLabel": {
-    "corner_radius": 0,
-    "fg_color": "transparent",
-    "text_color": ["gray14", "gray84"]
-  },
-  "CTkEntry": {
-    "corner_radius": 6,
-    "border_width": 2,
-    "fg_color": ["#F9F9FA", "#343638"],
-    "border_color": ["#979DA2", "#565B5E"],
-    "text_color": ["gray14", "gray84"],
-    "placeholder_text_color": ["gray52", "gray62"]
-  },
-  "CTkCheckbox": {
-    "corner_radius": 6,
-    "border_width": 3,
-    "fg_color": ["#3a7ebf", "#1f538d"],
-    "border_color": ["#3E454A", "#949A9F"],
-    "hover_color": ["#325882", "#14375e"],
-    "checkmark_color": ["#DCE4EE", "gray90"],
-    "text_color": ["gray14", "gray84"],
-    "text_color_disabled": ["gray60", "gray45"]
-  },
-  "CTkSwitch": {
-    "corner_radius": 1000,
-    "border_width": 3,
-    "button_length": 0,
-    "fg_color": ["#939BA2", "#4A4D50"],
-    "progress_color": ["#3a7ebf", "#1f538d"],
-    "button_color": ["gray36", "#D5D9DE"],
-    "button_hover_color": ["gray20", "gray100"],
-    "text_color": ["gray14", "gray84"],
-    "text_color_disabled": ["gray60", "gray45"]
-  },
-  "CTkRadiobutton": {
-    "corner_radius": 1000,
-    "border_width_checked": 6,
-    "border_width_unchecked": 3,
-    "fg_color": ["#3a7ebf", "#1f538d"],
-    "border_color": ["#3E454A", "#949A9F"],
-    "hover_color": ["#325882", "#14375e"],
-    "text_color": ["gray14", "gray84"],
-    "text_color_disabled": ["gray60", "gray45"]
-  },
-  "CTkProgressBar": {
-    "corner_radius": 1000,
-    "border_width": 0,
-    "fg_color": ["#939BA2", "#4A4D50"],
-    "progress_color": ["#3a7ebf", "#1f538d"],
-    "border_color": ["gray", "gray"]
-  },
-  "CTkSlider": {
-    "corner_radius": 1000,
-    "button_corner_radius": 1000,
-    "border_width": 6,
-    "button_length": 0,
-    "fg_color": ["#939BA2", "#4A4D50"],
-    "progress_color": ["gray40", "#AAB0B5"],
-    "button_color": ["#3a7ebf", "#1f538d"],
-    "button_hover_color": ["#325882", "#14375e"]
-  },
-  "CTkOptionMenu": {
-    "corner_radius": 6,
-    "fg_color": ["#3a7ebf", "#1f538d"],
-    "button_color": ["#325882", "#14375e"],
-    "button_hover_color": ["#234567", "#1e2c40"],
-    "text_color": ["#DCE4EE", "#DCE4EE"],
-    "text_color_disabled": ["gray74", "gray60"]
-  },
-  "CTkComboBox": {
-    "corner_radius": 6,
-    "border_width": 2,
-    "fg_color": ["#F9F9FA", "#343638"],
-    "border_color": ["#979DA2", "#565B5E"],
-    "button_color": ["#979DA2", "#565B5E"],
-    "button_hover_color": ["#6E7174", "#7A848D"],
-    "text_color": ["gray14", "gray84"],
-    "text_color_disabled": ["gray50", "gray45"]
-  },
-  "CTkScrollbar": {
-    "corner_radius": 1000,
-    "border_spacing": 4,
-    "fg_color": "transparent",
-    "button_color": ["gray55", "gray41"],
-    "button_hover_color": ["gray40", "gray53"]
-  },
-  "CTkSegmentedButton": {
-    "corner_radius": 6,
-    "border_width": 2,
-    "fg_color": ["#979DA2", "gray29"],
-    "selected_color": ["#3a7ebf", "#1f538d"],
-    "selected_hover_color": ["#325882", "#14375e"],
-    "unselected_color": ["#979DA2", "gray29"],
-    "unselected_hover_color": ["gray70", "gray41"],
-    "text_color": ["#DCE4EE", "#DCE4EE"],
-    "text_color_disabled": ["gray74", "gray60"]
-  },
-  "CTkTextbox": {
-    "corner_radius": 6,
-    "border_width": 0,
-    "fg_color": ["gray100", "gray20"],
-    "border_color": ["#979DA2", "#565B5E"],
-    "text_color": ["gray14", "gray84"],
-    "scrollbar_button_color": ["gray55", "gray41"],
-    "scrollbar_button_hover_color": ["gray40", "gray53"]
-  },
-  "CTkScrollableFrame": {
-    "label_fg_color": ["gray80", "gray21"]
-  },
-  "DropdownMenu": {
-    "fg_color": ["gray90", "gray20"],
-    "hover_color": ["gray75", "gray28"],
-    "text_color": ["gray14", "gray84"]
-  },
-  "CTkFont": {
-    "macOS": {
-      "family": "Avenir",
-      "size": 12,
-      "weight": "normal"
-    },
-    "Windows": {
-      "family": "Corbel",
-      "size": 12,
-      "weight": "normal"
-    },
-    "Linux": {
-      "family": "Montserrat",
-      "size": 12,
-      "weight": "normal"
-    }
-  },
-  "RoopDonate": {
-    "text_color": ["gray74", "gray60"]
-  }
-}
roop/ui.py
DELETED
@@ -1,231 +0,0 @@
-import os
-import webbrowser
-import customtkinter as ctk
-from typing import Callable, Tuple
-import cv2
-from PIL import Image, ImageOps
-
-import roop.globals
-import roop.metadata
-from roop.face_analyser import get_one_face
-from roop.capturer import get_video_frame, get_video_frame_total
-from roop.predicter import predict_frame
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import is_image, is_video, resolve_relative_path
-
-ROOT = None
-ROOT_HEIGHT = 700
-ROOT_WIDTH = 600
-
-PREVIEW = None
-PREVIEW_MAX_HEIGHT = 700
-PREVIEW_MAX_WIDTH = 1200
-
-RECENT_DIRECTORY_SOURCE = None
-RECENT_DIRECTORY_TARGET = None
-RECENT_DIRECTORY_OUTPUT = None
-
-preview_label = None
-preview_slider = None
-source_label = None
-target_label = None
-status_label = None
-
-
-def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
-    global ROOT, PREVIEW
-
-    ROOT = create_root(start, destroy)
-    PREVIEW = create_preview(ROOT)
-
-    return ROOT
-
-
-def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
-    global source_label, target_label, status_label
-
-    ctk.deactivate_automatic_dpi_awareness()
-    ctk.set_appearance_mode('system')
-    ctk.set_default_color_theme(resolve_relative_path('ui.json'))
-
-    root = ctk.CTk()
-    root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
-    root.title(f'{roop.metadata.name} {roop.metadata.version}')
-    root.configure()
-    root.protocol('WM_DELETE_WINDOW', lambda: destroy())
-
-    source_label = ctk.CTkLabel(root, text=None)
-    source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
-
-    target_label = ctk.CTkLabel(root, text=None)
-    target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
-
-    source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
-    source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
-
-    target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
-    target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
-
-    keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
-    keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
-    keep_fps_checkbox.place(relx=0.1, rely=0.6)
-
-    keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
-    keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
-    keep_frames_switch.place(relx=0.1, rely=0.65)
-
-    keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
-    keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
-    keep_audio_switch.place(relx=0.6, rely=0.6)
-
-    many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
-    many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
-    many_faces_switch.place(relx=0.6, rely=0.65)
-
-    start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
-    start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
-
-    stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
-    stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
-
-    preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
-    preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
-
-    status_label = ctk.CTkLabel(root, text=None, justify='center')
-    status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
-
-    donate_label = ctk.CTkLabel(root, text='Become a GitHub Sponsor', justify='center', cursor='hand2')
-    donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
-    donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
-    donate_label.bind('<Button>', lambda event: webbrowser.open('https://github.com/sponsors/s0md3v'))
-
-    return root
-
-
-def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
-    global preview_label, preview_slider
-
-    preview = ctk.CTkToplevel(parent)
-    preview.withdraw()
-    preview.title('Preview')
-    preview.configure()
-    preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview())
-    preview.resizable(width=False, height=False)
-
-    preview_label = ctk.CTkLabel(preview, text=None)
-    preview_label.pack(fill='both', expand=True)
-
-    preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value))
-
-    return preview
-
-
-def update_status(text: str) -> None:
-    status_label.configure(text=text)
-    ROOT.update()
-
-
-def select_source_path() -> None:
-    global RECENT_DIRECTORY_SOURCE
-
-    PREVIEW.withdraw()
-    source_path = ctk.filedialog.askopenfilename(title='select an source image', initialdir=RECENT_DIRECTORY_SOURCE)
-    if is_image(source_path):
-        roop.globals.source_path = source_path
-        RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
-        image = render_image_preview(roop.globals.source_path, (200, 200))
-        source_label.configure(image=image)
-    else:
-        roop.globals.source_path = None
-        source_label.configure(image=None)
-
-
-def select_target_path() -> None:
-    global RECENT_DIRECTORY_TARGET
-
-    PREVIEW.withdraw()
-    target_path = ctk.filedialog.askopenfilename(title='select an target image or video', initialdir=RECENT_DIRECTORY_TARGET)
-    if is_image(target_path):
-        roop.globals.target_path = target_path
-        RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
-        image = render_image_preview(roop.globals.target_path, (200, 200))
-        target_label.configure(image=image)
-    elif is_video(target_path):
-        roop.globals.target_path = target_path
-        RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
-        video_frame = render_video_preview(target_path, (200, 200))
-        target_label.configure(image=video_frame)
-    else:
-        roop.globals.target_path = None
-        target_label.configure(image=None)
-
-
-def select_output_path(start: Callable[[], None]) -> None:
-    global RECENT_DIRECTORY_OUTPUT
-
-    if is_image(roop.globals.target_path):
-        output_path = ctk.filedialog.asksaveasfilename(title='save image output file', defaultextension='.png', initialfile='output.png', initialdir=RECENT_DIRECTORY_OUTPUT)
-    elif is_video(roop.globals.target_path):
-        output_path = ctk.filedialog.asksaveasfilename(title='save video output file', defaultextension='.mp4', initialfile='output.mp4', initialdir=RECENT_DIRECTORY_OUTPUT)
-    else:
-        output_path = None
-    if output_path:
-        roop.globals.output_path = output_path
-        RECENT_DIRECTORY_OUTPUT = os.path.dirname(roop.globals.output_path)
-        start()
-
-
-def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
-    image = Image.open(image_path)
-    if size:
-        image = ImageOps.fit(image, size, Image.LANCZOS)
-    return ctk.CTkImage(image, size=image.size)
-
-
-def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: int = 0) -> ctk.CTkImage:
-    capture = cv2.VideoCapture(video_path)
-    if frame_number:
-        capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
-    has_frame, frame = capture.read()
-    if has_frame:
-        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-        if size:
-            image = ImageOps.fit(image, size, Image.LANCZOS)
-        return ctk.CTkImage(image, size=image.size)
-    capture.release()
-    cv2.destroyAllWindows()
-
-
-def toggle_preview() -> None:
-    if PREVIEW.state() == 'normal':
-        PREVIEW.withdraw()
-    elif roop.globals.source_path and roop.globals.target_path:
-        init_preview()
-        update_preview()
-        PREVIEW.deiconify()
-
-
-def init_preview() -> None:
-    if is_image(roop.globals.target_path):
-        preview_slider.pack_forget()
-    if is_video(roop.globals.target_path):
-        video_frame_total = get_video_frame_total(roop.globals.target_path)
-        preview_slider.configure(to=video_frame_total)
-        preview_slider.pack(fill='x')
-        preview_slider.set(0)
-
-
-def update_preview(frame_number: int = 0) -> None:
-    if roop.globals.source_path and roop.globals.target_path:
-        temp_frame = get_video_frame(roop.globals.target_path, frame_number)
-        if predict_frame(temp_frame):
-            quit()
-        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
-            temp_frame = frame_processor.process_frame(
-                get_one_face(cv2.imread(roop.globals.source_path)),
-                temp_frame
-            )
-        image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
-        image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS)
-        image = ctk.CTkImage(image, size=image.size)
-        preview_label.configure(image=image)
run.py
DELETED
@@ -1,6 +0,0 @@
-#!/usr/bin/env python3
-
-from roop import core
-
-if __name__ == '__main__':
-    core.run()
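
With run.py removed there is no CLI entry point left; app.py and its Gradio demo object are now the only way in. On a Hugging Face Space the demo is launched automatically; to run it locally one would presumably append a launch call to app.py, for example (an assumption, not part of the commit):

# Possible local entry point appended to app.py (assumption, not in this commit).
if __name__ == '__main__':
    demo.launch()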