flow3rdown committed
Commit 16bab0d · 1 Parent(s): 360ca92
Files changed (3):
  1. app.py +107 -39
  2. examples/inland_lake.png +0 -0
  3. examples/qinghai_lake.png +0 -0
app.py CHANGED
@@ -1,10 +1,108 @@
import gradio as gr

- def single_inference():
-     pass
+ # Placeholder inference callbacks: each one currently just echoes one of its inputs.
+ def single_inference_iit(head_img, head_id, tail_img, tail_id, question_txt, question_id):
+     return question_txt
+
+ def single_inference_tti(head_txt, head_id, tail_txt, tail_id, question_img, question_id):
+     return head_txt
+
+ def blended_inference_iti(head_img, head_id, tail_txt, tail_id, question_img, question_id):
+     return tail_txt
+
+ def single_tab_iit():
+     with gr.Column():
+         gr.Markdown(""" $(I_h, I_t) : (T_q, ?)$
+         """)
+         with gr.Row():
+             with gr.Column():
+                 head_image = gr.Image(type='pil', label="Head Image")
+                 head_ent = gr.Textbox(lines=1, label="Head Entity")
+             with gr.Column():
+                 tail_image = gr.Image(type='pil', label="Tail Image")
+                 tail_ent = gr.Textbox(lines=1, label="Tail Entity")
+             with gr.Column():
+                 question_text = gr.Textbox(lines=1, label="Question Name")
+                 question_ent = gr.Textbox(lines=1, label="Question Entity")
+
+         submit_btn = gr.Button("Submit")
+         output_text = gr.Textbox(label="Output")
+
+         submit_btn.click(fn=single_inference_iit,
+                          inputs=[head_image, head_ent, tail_image, tail_ent, question_text, question_ent],
+                          outputs=[output_text])
+
+         examples=[['examples/qinghai_lake.png', 'Q201294', 'examples/inland_lake.png', 'Q31805992', "campaign", 'Q18812548']]
+         ex = gr.Examples(
+             examples=examples,
+             fn=single_inference_iit,
+             inputs=[head_image, head_ent, tail_image, tail_ent, question_text, question_ent],
+             outputs=[output_text],
+             cache_examples=True,
+             run_on_click=True
+         )
+
+ def single_tab_tti():
+     with gr.Column():
+         gr.Markdown(""" $(T_h, T_t) : (I_q, ?)$
+         """)
+         with gr.Row():
+             with gr.Column():
+                 head_text = gr.Textbox(lines=1, label="Head Name")
+                 head_ent = gr.Textbox(lines=1, label="Head Entity")
+             with gr.Column():
+                 tail_text = gr.Textbox(lines=1, label="Tail Name")
+                 tail_ent = gr.Textbox(lines=1, label="Tail Entity")
+             with gr.Column():
+                 question_image = gr.Image(type='pil', label="Question Image")
+                 question_ent = gr.Textbox(lines=1, label="Question Entity")
+         submit_btn = gr.Button("Submit")
+         output_text = gr.Textbox(label="Output")
+
+         submit_btn.click(fn=single_inference_tti,
+                          inputs=[head_text, head_ent, tail_text, tail_ent, question_image, question_ent],
+                          outputs=[output_text])
+
+         examples=[['qinghai_lake', 'Q201294', 'inland_lake', 'Q31805992', 'examples/qinghai_lake.png', 'Q18812548']]
+         ex = gr.Examples(
+             examples=examples,
+             fn=single_inference_tti,
+             inputs=[head_text, head_ent, tail_text, tail_ent, question_image, question_ent],
+             outputs=[output_text],
+             cache_examples=True,
+             run_on_click=True
+         )
+
+ def blended_tab_iti():
+     with gr.Column():
+         gr.Markdown(""" $(I_h, T_t) : (I_q, ?)$
+         """)
+         with gr.Row():
+             with gr.Column():
+                 head_image = gr.Image(type='pil', label="Head Image")
+                 head_ent = gr.Textbox(lines=1, label="Head Entity")
+             with gr.Column():
+                 tail_txt = gr.Textbox(lines=1, label="Tail Name")
+                 tail_ent = gr.Textbox(lines=1, label="Tail Entity")
+             with gr.Column():
+                 question_image = gr.Image(type='pil', label="Question Image")
+                 question_ent = gr.Textbox(lines=1, label="Question Entity")
+         submit_btn = gr.Button("Submit")
+         output_text = gr.Textbox(label="Output")
+
+         submit_btn.click(fn=blended_inference_iti,
+                          inputs=[head_image, head_ent, tail_txt, tail_ent, question_image, question_ent],
+                          outputs=[output_text])
+
+         examples=[['examples/qinghai_lake.png', 'Q201294', 'inland_lake', 'Q31805992', 'examples/inland_lake.png', 'Q18812548']]
+         ex = gr.Examples(
+             examples=examples,
+             fn=blended_inference_iti,
+             inputs=[head_image, head_ent, tail_txt, tail_ent, question_image, question_ent],
+             outputs=[output_text],
+             cache_examples=True,
+             run_on_click=True
+         )

- def blended_inference():
-     pass

TITLE = """MKG Analogy"""

@@ -13,43 +111,13 @@ with gr.Blocks() as block:
    gr.HTML(TITLE)

    with gr.Tab("Single Analogical Reasoning"):
-         with gr.Column():
-             gr.Markdown(""" $(I_h, I_t) : (T_q, ?)$
-             """)
-             with gr.Row():
-                 with gr.Column():
-                     head_image = gr.Image(type='pil', label="Head Image")
-                     head_ent = gr.Textbox(lines=1, label="Head Entity")
-                 with gr.Column():
-                     tail_image = gr.Image(type='pil', label="Tail Image")
-                     tail_ent = gr.Textbox(lines=1, label="Tail Entity")
-                 with gr.Column():
-                     question_text = gr.Textbox(lines=1, label="Question Name")
-                     question_ent = gr.Textbox(lines=1, label="Question Entity")
-             submit_btn = gr.Button("Submit")
-             output_text = gr.Textbox(label="Output")
-
-             # examples=[['example01.jpg', MODELS[0], 'best'], ['example02.jpg', MODELS[0], 'best']]
-             # ex = gr.Examples(
-             #     examples=examples,
-             #     fn=image_to_prompt,
-             #     inputs=[input_image, input_model, input_mode],
-             #     outputs=[output_text, share_button, community_icon, loading_icon],
-             #     cache_examples=True,
-             #     run_on_click=True
-             # )
-             # ex.dataset.headers = [""]
+         single_tab_iit()
+         single_tab_tti()

    with gr.Tab("Blended Analogical Reasoning"):
-         pass
-
+         blended_tab_iti()
+
    # gr.HTML(ARTICLE)
-
-     # submit_btn.click(
-     #     fn=image_to_prompt,
-     #     inputs=[input_image, input_model, input_mode],
-     #     outputs=[output_text, share_button, community_icon, loading_icon]
-     # )
-     # share_button.click(None, [], [], _js=None)
+

block.queue(max_size=64).launch(enable_queue=True)
examples/inland_lake.png ADDED
examples/qinghai_lake.png ADDED
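
For readers skimming the diff, the pattern this commit introduces condenses to the short sketch below: a placeholder inference callback wired to a Submit button inside one gr.Blocks tab. This is a minimal, runnable sketch under the assumption that only Gradio is installed; the callback simply echoes the question text, standing in for the MKG analogy model (which this commit does not include), and while the component names mirror app.py, the snippet is not the Space's actual code.

import gradio as gr

def single_inference_iit(head_img, head_id, tail_img, tail_id, question_txt, question_id):
    # Stand-in for the real analogy model: echo the question text back to the UI.
    return question_txt

with gr.Blocks() as demo:
    gr.HTML("""MKG Analogy""")
    with gr.Tab("Single Analogical Reasoning"):
        gr.Markdown("$(I_h, I_t) : (T_q, ?)$")
        with gr.Row():
            with gr.Column():
                head_image = gr.Image(type='pil', label="Head Image")
                head_ent = gr.Textbox(lines=1, label="Head Entity")
            with gr.Column():
                tail_image = gr.Image(type='pil', label="Tail Image")
                tail_ent = gr.Textbox(lines=1, label="Tail Entity")
            with gr.Column():
                question_text = gr.Textbox(lines=1, label="Question Name")
                question_ent = gr.Textbox(lines=1, label="Question Entity")
        submit_btn = gr.Button("Submit")
        output_text = gr.Textbox(label="Output")
        # Route the six inputs through the callback and display its return value.
        submit_btn.click(fn=single_inference_iit,
                         inputs=[head_image, head_ent, tail_image, tail_ent, question_text, question_ent],
                         outputs=[output_text])

if __name__ == "__main__":
    demo.queue(max_size=64).launch()

Swapping the echo body of single_inference_iit for a real model call is the only change needed to make such a tab functional; the Gradio wiring above stays the same.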