Ziqi committed on
Commit 6015068
1 Parent(s): 7185c8b
Files changed (1)
  1. app.py +24 -18
app.py CHANGED
@@ -29,16 +29,16 @@ from inference import inference_fn
 # from uploader import upload
 
 
-def parse_args() -> argparse.Namespace:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--theme', type=str)
-    parser.add_argument('--share', action='store_true')
-    parser.add_argument('--port', type=int)
-    parser.add_argument('--disable-queue',
-                        dest='enable_queue',
-                        action='store_false')
-    return parser.parse_args()
+# def parse_args() -> argparse.Namespace:
+#     parser = argparse.ArgumentParser()
+#     parser.add_argument('--device', type=str, default='cpu')
+#     parser.add_argument('--theme', type=str)
+#     parser.add_argument('--share', action='store_true')
+#     parser.add_argument('--port', type=int)
+#     parser.add_argument('--disable-queue',
+#                         dest='enable_queue',
+#                         action='store_false')
+#     return parser.parse_args()
 
 
 TITLE = '# ReVersion'
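
Note: the commit comments out the CLI parser rather than deleting it. A Hugging Face Space starts the app as python app.py with no custom flags, so parse_args() would only ever return its defaults; hard-coding the behavior presumably avoids carrying dead options. If configuration were still wanted, environment variables could stand in for the removed flags. A minimal sketch, where every variable name (DEVICE, SHARE, PORT, DISABLE_QUEUE) is hypothetical and not part of this commit:

    import os

    # Hypothetical env-var replacement for the removed argparse options.
    CONFIG = {
        'device': os.environ.get('DEVICE', 'cpu'),
        'share': os.environ.get('SHARE', '0') == '1',
        'port': int(os.environ['PORT']) if 'PORT' in os.environ else None,
        'enable_queue': os.environ.get('DISABLE_QUEUE', '0') != '1',
    }
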
@@ -176,9 +176,13 @@ def create_inference_demo(func: inference_fn) -> gr.Blocks:
     return demo
 
 
-args = parse_args()
-args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-print('*** Now using %s.'%(args.device))
+# args = parse_args()
+# args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+# print('*** Now using %s.'%(args.device))
+if torch.cuda.is_available():
+    print('*** Now using %s.'%('cuda'))
+else:
+    print('*** Now using %s.'%('cpu'))
 
 with gr.Blocks(css='style.css') as demo:
     # if os.getenv('IS_SHARED_UI'):
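
Note: the replacement block only logs which device is available; nothing stores the result, so downstream code must do its own device selection. If the string were needed later, the commented-out one-liner pattern still works without argparse. A minimal sketch (the local name device is illustrative, not in the committed file):

    import torch

    # Select the device once, then log it.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('*** Now using %s.' % device)
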
@@ -194,10 +198,12 @@ with gr.Blocks(css='style.css') as demo:
     with gr.TabItem('Relation-Specific Text-to-Image Generation'):
         create_inference_demo(inference_fn)
 
-demo.launch(
-    enable_queue=args.enable_queue,
-    server_port=args.port,
-    share=args.share
-)
+demo.queue(default_enabled=False).launch(share=False)
+
+# demo.launch(
+#     enable_queue=args.enable_queue,
+#     server_port=args.port,
+#     share=args.share
+# )
 # demo.queue(default_enabled=False).launch(server_port=args.port, share=args.share)
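
Note: queue configuration moves from launch(enable_queue=...) to Blocks.queue(), matching the Gradio 3 API, where enable_queue in launch() was deprecated in favor of calling .queue() on the Blocks object. server_port and share are dropped because a Space supplies its own port and public URL. A minimal standalone sketch of the new call shape (the Markdown content is a placeholder):

    import gradio as gr

    with gr.Blocks() as demo:
        gr.Markdown('# ReVersion')

    # Configure the queue on the Blocks object, then launch;
    # default_enabled=False leaves events unqueued unless they opt in.
    demo.queue(default_enabled=False).launch(share=False)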