noam committed on
Commit
10f82aa
·
1 Parent(s): 34cd442

canceled general option to save space

Browse files
Files changed (1) hide show
  1. app.py +68 -65
app.py CHANGED
@@ -27,17 +27,17 @@ Paint by Inpaint: Learning to Add Image Objects by Removing Them First</a>
27
  description = """
28
  <p style="text-align: center;">
29
  Gradio demo for <strong>Paint by Inpaint: Learning to Add Image Objects by Removing Them First</strong>, visit our <a href='https://rotsteinnoam.github.io/Paint-by-Inpaint/' target='_blank'>project page</a>. <br>
30
- The demo is both for models trained for image object addition using the <a href='https://huggingface.co/datasets/paint-by-inpaint/PIPE' target='_blank'>PIPE dataset</a> along with models trained with other datasets that are meant for general editing. <br>
31
  </p>
32
  """
33
 
34
  # Base models
35
  object_addition_base_model_id = "paint-by-inpaint/add-base"
36
- general_editing_base_model_id = "paint-by-inpaint/general-base"
37
 
38
  # MagicBrush finetuned models
39
  object_addition_finetuned_model_id = "paint-by-inpaint/add-finetuned-mb"
40
- general_editing_finetuned_model_id = "paint-by-inpaint/general-finetuned-mb"
41
 
42
  device = "cuda" if torch.cuda.is_available() else "cpu"
43
  dtype = torch.float16 if "cuda" in device else torch.float32
@@ -49,8 +49,8 @@ def load_model(model_id):
49
  pipe_object_addition_base = load_model(object_addition_base_model_id)
50
  pipe_object_addition_finetuned = load_model(object_addition_finetuned_model_id)
51
 
52
- pipe_general_editing_base = load_model(general_editing_base_model_id)
53
- pipe_general_editing_finetuned = load_model(general_editing_finetuned_model_id)
54
 
55
  @spaces.GPU(duration=15)
56
  def generate(
@@ -99,8 +99,9 @@ with gr.Blocks(css=".compact-box .gr-row { margin-bottom: 5px; } .compact-box .g
99
  </div>
100
  """.format(description=description))
101
 
102
- with gr.Tabs():
103
- with gr.Tab("Object Addition"):
 
104
  with gr.Row():
105
  with gr.Column():
106
  input_image = gr.Image(label="Input Image", type="pil", interactive=True)
@@ -116,7 +117,8 @@ with gr.Blocks(css=".compact-box .gr-row { margin-bottom: 5px; } .compact-box .g
116
 
117
  with gr.Group(elem_id="compact-box"):
118
  with gr.Row():
119
- steps = gr.Number(value=50, precision=0, label="Steps", interactive=True)
 
120
 
121
  with gr.Column():
122
  with gr.Row():
@@ -130,6 +132,7 @@ with gr.Blocks(css=".compact-box .gr-row { margin-bottom: 5px; } .compact-box .g
130
  )
131
 
132
  with gr.Row():
 
133
  text_cfg_scale = gr.Number(value=7.5, label="Text CFG", interactive=True)
134
  image_cfg_scale = gr.Number(value=1.5, label="Image CFG", interactive=True)
135
 
@@ -160,65 +163,65 @@ with gr.Blocks(css=".compact-box .gr-row { margin-bottom: 5px; } .compact-box .g
160
  outputs=[steps, randomize_seed, seed, text_cfg_scale, image_cfg_scale, edited_image],
161
  )
162
 
163
- with gr.Tab("General Editing"):
164
- with gr.Row():
165
- with gr.Column():
166
- input_image_editing = gr.Image(label="Input Image", type="pil", interactive=True)
167
- instruction_editing = gr.Textbox(lines=1, label="Editing Instruction", interactive=True, max_lines=1, placeholder="Enter editing instruction here")
168
-
169
- model_choice_editing = gr.Radio(
170
- ["Base-Model", "Finetuned-MB-Model"],
171
- value="Base-Model",
172
- type="index",
173
- label="Choose Model",
174
- interactive=True,
175
- )
176
-
177
- with gr.Group(elem_id="compact-box"):
178
- with gr.Row():
179
- steps_editing = gr.Number(value=50, precision=0, label="Steps", interactive=True)
180
 
181
- with gr.Column():
182
- with gr.Row():
183
- seed_editing = gr.Number(value=2024, precision=0, label="Seed", interactive=True)
184
- randomize_seed_editing = gr.Radio(
185
- ["Fix Seed", "Randomize Seed"],
186
- value="Randomize Seed",
187
- type="index",
188
- show_label=False,
189
- interactive=True,
190
- )
191
 
192
- with gr.Row():
193
- text_cfg_scale_editing = gr.Number(value=7.5, label="Text CFG", interactive=True)
194
- image_cfg_scale_editing = gr.Number(value=1.5, label="Image CFG", interactive=True)
195
-
196
- with gr.Row():
197
- generate_button_editing = gr.Button("Generate")
198
- reset_button_editing = gr.Button("Reset")
199
-
200
- with gr.Column():
201
- edited_image_editing = gr.Image(label="Edited Image", type="pil", interactive=False)
202
-
203
- generate_button_editing.click(
204
- fn=lambda *args: generate(*args, task_type="general_editing"),
205
- inputs=[
206
- input_image_editing,
207
- instruction_editing,
208
- model_choice_editing,
209
- steps_editing,
210
- randomize_seed_editing,
211
- seed_editing,
212
- text_cfg_scale_editing,
213
- image_cfg_scale_editing,
214
- ],
215
- outputs=[seed_editing, text_cfg_scale_editing, image_cfg_scale_editing, edited_image_editing],
216
- )
217
- reset_button_editing.click(
218
- fn=reset,
219
- inputs=[],
220
- outputs=[steps_editing, randomize_seed_editing, seed_editing, text_cfg_scale_editing, image_cfg_scale_editing, edited_image_editing],
221
- )
222
 
223
  gr.Markdown(help_text)
224
 
 
27
  description = """
28
  <p style="text-align: center;">
29
  Gradio demo for <strong>Paint by Inpaint: Learning to Add Image Objects by Removing Them First</strong>, visit our <a href='https://rotsteinnoam.github.io/Paint-by-Inpaint/' target='_blank'>project page</a>. <br>
30
+ The demo involves two models: one trained for image object addition using the <a href='https://huggingface.co/datasets/paint-by-inpaint/PIPE' target='_blank'>PIPE dataset</a>, and another model further fine-tuned on the MagicBrush dataset.
31
  </p>
32
  """
33
 
34
  # Base models
35
  object_addition_base_model_id = "paint-by-inpaint/add-base"
36
+ # general_editing_base_model_id = "paint-by-inpaint/general-base"
37
 
38
  # MagicBrush finetuned models
39
  object_addition_finetuned_model_id = "paint-by-inpaint/add-finetuned-mb"
40
+ # general_editing_finetuned_model_id = "paint-by-inpaint/general-finetuned-mb"
41
 
42
  device = "cuda" if torch.cuda.is_available() else "cpu"
43
  dtype = torch.float16 if "cuda" in device else torch.float32
 
49
  pipe_object_addition_base = load_model(object_addition_base_model_id)
50
  pipe_object_addition_finetuned = load_model(object_addition_finetuned_model_id)
51
 
52
+ # pipe_general_editing_base = load_model(general_editing_base_model_id)
53
+ # pipe_general_editing_finetuned = load_model(general_editing_finetuned_model_id)
54
 
55
  @spaces.GPU(duration=15)
56
  def generate(
 
99
  </div>
100
  """.format(description=description))
101
 
102
+ # with gr.Tabs():
103
+ # with gr.Tab("Object Addition"):
104
+ if 1:
105
  with gr.Row():
106
  with gr.Column():
107
  input_image = gr.Image(label="Input Image", type="pil", interactive=True)
 
117
 
118
  with gr.Group(elem_id="compact-box"):
119
  with gr.Row():
120
+ with gr.Column():
121
+ steps = gr.Number(value=50, precision=0, label="Steps", interactive=True)
122
 
123
  with gr.Column():
124
  with gr.Row():
 
132
  )
133
 
134
  with gr.Row():
135
+
136
  text_cfg_scale = gr.Number(value=7.5, label="Text CFG", interactive=True)
137
  image_cfg_scale = gr.Number(value=1.5, label="Image CFG", interactive=True)
138
 
 
163
  outputs=[steps, randomize_seed, seed, text_cfg_scale, image_cfg_scale, edited_image],
164
  )
165
 
166
+ # with gr.Tab("General Editing"):
167
+ # with gr.Row():
168
+ # with gr.Column():
169
+ # input_image_editing = gr.Image(label="Input Image", type="pil", interactive=True)
170
+ # instruction_editing = gr.Textbox(lines=1, label="Editing Instruction", interactive=True, max_lines=1, placeholder="Enter editing instruction here")
171
+
172
+ # model_choice_editing = gr.Radio(
173
+ # ["Base-Model", "Finetuned-MB-Model"],
174
+ # value="Base-Model",
175
+ # type="index",
176
+ # label="Choose Model",
177
+ # interactive=True,
178
+ # )
179
+
180
+ # with gr.Group(elem_id="compact-box"):
181
+ # with gr.Row():
182
+ # steps_editing = gr.Number(value=50, precision=0, label="Steps", interactive=True)
183
 
184
+ # with gr.Column():
185
+ # with gr.Row():
186
+ # seed_editing = gr.Number(value=2024, precision=0, label="Seed", interactive=True)
187
+ # randomize_seed_editing = gr.Radio(
188
+ # ["Fix Seed", "Randomize Seed"],
189
+ # value="Randomize Seed",
190
+ # type="index",
191
+ # show_label=False,
192
+ # interactive=True,
193
+ # )
194
 
195
+ # with gr.Row():
196
+ # text_cfg_scale_editing = gr.Number(value=7.5, label="Text CFG", interactive=True)
197
+ # image_cfg_scale_editing = gr.Number(value=1.5, label="Image CFG", interactive=True)
198
+
199
+ # with gr.Row():
200
+ # generate_button_editing = gr.Button("Generate")
201
+ # reset_button_editing = gr.Button("Reset")
202
+
203
+ # with gr.Column():
204
+ # edited_image_editing = gr.Image(label="Edited Image", type="pil", interactive=False)
205
+
206
+ # generate_button_editing.click(
207
+ # fn=lambda *args: generate(*args, task_type="general_editing"),
208
+ # inputs=[
209
+ # input_image_editing,
210
+ # instruction_editing,
211
+ # model_choice_editing,
212
+ # steps_editing,
213
+ # randomize_seed_editing,
214
+ # seed_editing,
215
+ # text_cfg_scale_editing,
216
+ # image_cfg_scale_editing,
217
+ # ],
218
+ # outputs=[seed_editing, text_cfg_scale_editing, image_cfg_scale_editing, edited_image_editing],
219
+ # )
220
+ # reset_button_editing.click(
221
+ # fn=reset,
222
+ # inputs=[],
223
+ # outputs=[steps_editing, randomize_seed_editing, seed_editing, text_cfg_scale_editing, image_cfg_scale_editing, edited_image_editing],
224
+ # )
225
 
226
  gr.Markdown(help_text)
227