Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ def load_or_create_model_and_embeddings(model_name, data_file, output_dir):
     if os.path.exists(model_path) and os.path.exists(embeddings_path):
         print("載入已保存的模型和嵌入...")
         model = SentenceTransformer(model_path)
-        embeddings = torch.load(embeddings_path
+        embeddings = torch.load(embeddings_path)
         with open(data_file, 'r', encoding='utf-8') as f:
             data = json.load(f)
     else:
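Note: the change here only closes the `torch.load(...)` call. A minimal sketch of that cache-load step, assuming the embeddings tensor was written with `torch.save(embeddings, embeddings_path)` in the `else:` branch (not shown in this hunk); the `map_location` argument is an optional extra, not part of the commit:

import torch

def load_cached_embeddings(embeddings_path):
    # Force the tensor onto CPU so the load also works on CPU-only hardware,
    # even if the embeddings were computed and saved on a GPU.
    return torch.load(embeddings_path, map_location="cpu")
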
@@ -32,7 +32,7 @@ def load_or_create_model_and_embeddings(model_name, data_file, output_dir):
     return model, embeddings, data
 
 # 設置參數
-model_name = '
+model_name = 'sentence-transformers/all-MiniLM-L6-v2'
 data_file = 'labeled_cti_data.json'
 output_dir = '.'
 
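Note: the previously truncated assignment now pins an explicit checkpoint. A quick standalone sanity check of that model name (not part of the commit); the sample sentence is made up for illustration:

from sentence_transformers import SentenceTransformer

# all-MiniLM-L6-v2 produces 384-dimensional sentence embeddings.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
vecs = model.encode(["APT29 used spear-phishing emails to deliver malware."], normalize_embeddings=True)
print(vecs.shape)  # (1, 384)
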
@@ -55,8 +55,7 @@ def semantic_search(query, top_k=3):
         results.append({
             'text': data[idx]['text'],
             'similarity_score': 1 - distances[0][i] / 2,
-            'entity_groups': get_entity_groups(data[idx]['entities'])
-            'entities': data[idx]['entities']
+            'entity_groups': get_entity_groups(data[idx]['entities'])
         })
     return results
 
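Note on the unchanged `'similarity_score': 1 - distances[0][i] / 2` line: if the index behind `semantic_search` is a FAISS `IndexFlatL2` over L2-normalized embeddings (an assumption; the index setup is outside this hunk), the returned distances are squared Euclidean distances, and for unit vectors ||a - b||^2 = 2 - 2*cos(a, b), so `1 - d/2` recovers the cosine similarity. A small numeric check:

import numpy as np

a = np.random.randn(384); a /= np.linalg.norm(a)
b = np.random.randn(384); b /= np.linalg.norm(b)

squared_l2 = np.sum((a - b) ** 2)              # what IndexFlatL2 reports for unit vectors
cosine = float(a @ b)                          # true cosine similarity
print(np.isclose(1 - squared_l2 / 2, cosine))  # True
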
@@ -64,63 +63,24 @@ def search_and_format(query):
     results = semantic_search(query)
     formatted_results = ""
     for i, result in enumerate(results, 1):
-        formatted_results += f"
-        formatted_results += "
-
-        words = result['text'].split()
-
-        color_map = {
-            'PERSON': 'lightpink',
-            'ORG': 'lightblue',
-            'PLACE': 'lightyellow',
-            'TECHNOLOGY': 'lightgreen',
-            'MALWARE': 'plum',
-            'ATTACK': 'peachpuff'
-        }
-
-        formatted_text = []
-        for word in words:
-            found = False
-            for entity in result['entities']:
-                if word in entity['word']:
-                    color = color_map.get(entity['entity_group'], 'lightgray')
-                    formatted_word = f'<span style="background-color: {color};">{word} <sup>{entity["entity_group"]}</sup></span>'
-                    formatted_text.append(formatted_word)
-                    found = True
-                    break
-            if not found:
-                formatted_text.append(word)
-
-        formatted_results += ' '.join(formatted_text) + "<br><br>"
-        formatted_results += f"<strong>相似度分數:</strong> {result['similarity_score']:.4f}<br><br>"
-
+        formatted_results += f"{i}. 相似度分數: {result['similarity_score']:.4f}\n"
+        formatted_results += f" 情資: {result['text']}\n"
+        formatted_results += f" 命名實體: {', '.join(result['entity_groups'])}\n\n"
     return formatted_results
 
-def
-    """將音檔資料轉錄為文字"""
-    # 顯示載入動畫
-    query_input.update(value="正在轉錄中...")
-
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
-        temp_audio.write(audio_data)
-        temp_audio_path = temp_audio.name
-
-    transcription = transcribe_audio(temp_audio_path)
-
-    os.remove(temp_audio_path)
-
-    # 更新查詢框
-    query_input.update(value=transcription)
-
-def transcribe_audio(audio_path):
-    """使用 OpenAI Whisper API 轉錄音檔"""
+def transcribe_audio(audio):
     try:
-
+        # 將音頻文件上傳到Whisper API
+        with open(audio, "rb") as audio_file:
             transcript = openai.Audio.transcribe("whisper-1", audio_file)
             return transcript.text
     except Exception as e:
         return f"轉錄時發生錯誤: {str(e)}"
 
+def audio_to_search(audio):
+    transcription = transcribe_audio(audio)
+    search_results = search_and_format(transcription)
+    return search_results, transcription, transcription
 
 # 示例問題
 example_queries = [
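Note: `openai.Audio.transcribe("whisper-1", audio_file)` is the pre-1.0 interface of the openai Python package, so the Space presumably pins an openai 0.x release in its requirements. For reference only (not part of the commit), the equivalent call against openai >= 1.0 would look like the sketch below; `transcribe_audio_v1` is a hypothetical name:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def transcribe_audio_v1(audio_path):
    try:
        # Stream the local audio file to the Whisper transcription endpoint.
        with open(audio_path, "rb") as audio_file:
            transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
        return transcript.text
    except Exception as e:
        return f"轉錄時發生錯誤: {str(e)}"

The new `audio_to_search` helper returns the transcription twice because the `audio_input.change(...)` wiring in the next hunk maps its three return values onto `[output, transcription_output, query_input]`.
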
@@ -138,47 +98,40 @@ example_queries = [
 
 # 自定義 CSS
 custom_css = """
-
-.
-.
-.
-.
-.examples-grid {display: grid; grid-template-columns: repeat(auto-fill, minmax(200px, 1fr)); gap: 10px; margin-top: 20px;}
-.example-button {width: 100%;}
-span sup {font-size: 0.7em; font-weight: bold;}
-/* 新增的樣式 */
-.small-button {padding: 5px 10px; font-size: 0.9em;}
+.container {display: flex; flex-direction: row;}
+.input-column {flex: 1; padding-right: 20px;}
+.output-column {flex: 2;}
+.examples-list {display: flex; flex-wrap: wrap; gap: 10px;}
+.examples-list > * {flex-basis: calc(50% - 5px);}
 """
 
 # 創建Gradio界面
 with gr.Blocks(css=custom_css) as iface:
     gr.Markdown("# AskCTI")
-    gr.Markdown("
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            query_input = gr.Textbox(lines=
+    gr.Markdown("輸入查詢或使用語音輸入以查詢相關情資威脅情報,將顯示前3個最相關的結果。")
+
+    with gr.Row(equal_height=True):
+        with gr.Column(scale=1, min_width=300):
+            query_input = gr.Textbox(lines=3, label="文字查詢")
             with gr.Row():
-                submit_btn = gr.Button("查詢"
-
-
-
+                submit_btn = gr.Button("查詢")
+            audio_input = gr.Audio(type="filepath", label="語音輸入")
+
             gr.Markdown("### 範例查詢")
-
-
-
-
-
-
-
-
+            for i in range(0, len(example_queries), 2):
+                with gr.Row():
+                    for j in range(2):
+                        if i + j < len(example_queries):
+                            gr.Button(example_queries[i+j]).click(
+                                lambda x: x, inputs=[gr.Textbox(value=example_queries[i+j], visible=False)], outputs=[query_input]
+                            )
+
+        with gr.Column(scale=2):
+            output = gr.Textbox(lines=20, label="查詢結果")
+            transcription_output = gr.Textbox(lines=3, label="語音轉錄結果")
 
     submit_btn.click(search_and_format, inputs=[query_input], outputs=[output])
-    audio_input.change(
-        fn=audio_to_text, # 直接呼叫 audio_to_text 函數
-        inputs=[audio_input],
-        outputs=[query_input] # 將轉錄結果輸出到 query_input
-    )
+    audio_input.change(audio_to_search, inputs=[audio_input], outputs=[output, transcription_output, query_input])
 
 # 啟動Gradio界面
 iface.launch()
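Note on the example-button wiring added above: `gr.Textbox(value=example_queries[i+j], visible=False)` is evaluated while each button is being built, so every button captures its own query string; the hidden Textbox exists only to feed the identity lambda. A lighter-weight alternative (not part of the commit) captures the string in a lambda default argument instead, assuming it is placed inside the same `with gr.Blocks(...)` context where `example_queries` and `query_input` are defined:

for i in range(0, len(example_queries), 2):
    with gr.Row():
        for q in example_queries[i:i + 2]:
            # inputs=None means the callback is invoked with no arguments;
            # the default argument q pins the query text for this button.
            gr.Button(q).click(lambda q=q: q, inputs=None, outputs=[query_input])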