update
app.py CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 """Demo app for https://github.com/ziqihuangg/ReVersion.
 The code in this repo is partly adapted from the following repository:
-https://
+https://github.com/ziqihuangg/ReVersion
 
 S-Lab License 1.0
 
@@ -39,7 +39,14 @@ from inference import inference_fn
 
 
 TITLE = '# ReVersion'
-DESCRIPTION = '''
+DESCRIPTION = '''
+This is a demo for **ReVersion: Diffusion-Based Relation Inversion from Images**
+<br>
+[[Paper](https://arxiv.org/abs/2303.13495)] |
+[[Project Page](https://ziqihuangg.github.io/projects/reversion.html)] |
+[[GitHub Code](https://github.com/ziqihuangg/ReVersion)] |
+[[Video](https://www.youtube.com/watch?v=pkal3yjyyKQ)]
+<br>
 It is recommended to upgrade to GPU in Settings after duplicating this space to use it.
 <a href="https://huggingface.co/spaces/Ziqi/ReVersion?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
 '''
@@ -47,25 +54,17 @@ DETAILDESCRIPTION='''
 ReVersion
 '''
 DETAILDESCRIPTION='''
-
+We propose a new task, **Relation Inversion**: Given a few exemplar images, where a relation co-exists in every image, we aim to find a relation prompt **\<R>** to capture this interaction, and apply the relation to new entities to synthesize new scenes.
 <center>
-<img src="https://huggingface.co/spaces/Ziqi/ReVersion/teaser.jpg" width="800" align="center" >
+<img src="https://huggingface.co/spaces/Ziqi/ReVersion/resolve/main/teaser.jpg" width="800" align="center" >
 </center>
 '''
-
-# Custom Diffusion allows you to fine-tune text-to-image diffusion models, such as Stable Diffusion, given a few images of a new concept (~4-20).
-# We fine-tune only a subset of model parameters, namely key and value projection matrices, in the cross-attention layers and the modifier token used to represent the object.
-# This also reduces the extra storage for each additional concept to 75MB. Our method also allows you to use a combination of concepts. There's still limitations on which compositions work. For more analysis please refer to our [website](https://www.cs.cmu.edu/~custom-diffusion/).
-# <center>
-# <img src="https://huggingface.co/spaces/nupurkmr9/custom-diffusion/resolve/main/method.jpg" width="600" align="center" >
-# </center>
-# '''
+
 
 ORIGINAL_SPACE_ID = 'Ziqi/ReVersion'
 SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
-
-
-# '''
+
+
 if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
     SETTINGS = f'<a href="https://huggingface.co/spaces/{SPACE_ID}/settings">Settings</a>'
 
@@ -88,22 +87,6 @@ def show_warning(warning_text: str) -> gr.Blocks:
     return demo
 
 
-def update_output_files() -> dict:
-    paths = sorted(pathlib.Path('results').glob('*.bin'))
-    paths = [path.as_posix() for path in paths]  # type: ignore
-    return gr.update(value=paths or None)
-
-def find_weight_files() -> list[str]:
-    curr_dir = pathlib.Path(__file__).parent
-    paths = sorted(curr_dir.rglob('*.bin'))
-    paths = [path for path in paths if '.lfs' not in str(path)]
-    return [path.relative_to(curr_dir).as_posix() for path in paths]
-
-
-def reload_custom_diffusion_weight_list() -> dict:
-    return gr.update(choices=find_weight_files())
-
-
 def create_inference_demo(func: inference_fn) -> gr.Blocks:
     with gr.Blocks() as demo:
         with gr.Row():
@@ -141,17 +124,10 @@ def create_inference_demo(func: inference_fn) -> gr.Blocks:
                                       value=50)
                 run_button = gr.Button('Generate')
 
-                # gr.Markdown('''
-                # - Models with names starting with "custom-diffusion-models/" are the pretrained models provided in the [original repo](https://github.com/adobe-research/custom-diffusion), and the ones with names starting with "results/delta.bin" are your trained models.
-                # - After training, you can press "Reload Weight List" button to load your trained model names.
-                # - Increase number of steps in Other parameters for better samples qualitatively.
-                # ''')
             with gr.Column():
                 result = gr.Image(label='Result')
 
-
-        # inputs=None,
-        # outputs=weight_name)
+
         prompt.submit(fn=func,
                       inputs=[
                           model_id,
@@ -185,8 +161,6 @@ else:
     print('*** Now using %s.'%('cpu'))
 
 with gr.Blocks(css='style.css') as demo:
-    # if os.getenv('IS_SHARED_UI'):
-    #     show_warning(SHARED_UI_WARNING)
     if not torch.cuda.is_available():
         show_warning(CUDA_NOT_AVAILABLE_WARNING)
 
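
For reference, the environment checks that appear as unchanged context in the hunks above (the SPACE_ID comparison and the torch.cuda.is_available() guard) can be exercised on their own. The following is a minimal sketch, not the Space's actual app.py: the Markdown warning texts and the queue().launch() call are illustrative assumptions, while ORIGINAL_SPACE_ID, SPACE_ID, and the Settings link mirror the context lines shown in the diff.

#!/usr/bin/env python
# Minimal sketch of the Space/GPU checks shown as context in the diff above.
# The warning strings and the queue().launch() call are assumptions for
# illustration; the real app.py builds the full ReVersion inference UI.
import os

import gradio as gr
import torch

ORIGINAL_SPACE_ID = 'Ziqi/ReVersion'
SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)

with gr.Blocks() as demo:
    if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
        # Running inside a user's duplicate of the Space: link to its Settings
        # page, where the hardware can be upgraded to a GPU.
        settings = f'<a href="https://huggingface.co/spaces/{SPACE_ID}/settings">Settings</a>'
        gr.Markdown(f'This Space was duplicated. Upgrade to a GPU in {settings} for usable inference speed.')
    if not torch.cuda.is_available():
        # Placeholder for the app's CUDA_NOT_AVAILABLE_WARNING branch.
        gr.Markdown('**Warning:** no GPU detected; generation will run on CPU and be very slow.')

if __name__ == '__main__':
    demo.queue().launch()

Run locally without SYSTEM=spaces set, the duplicated-Space branch is simply skipped and only the CPU warning can trigger.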