awacke1 committed on
Commit
7639607
·
verified ·
1 Parent(s): 00a95fe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -67
app.py CHANGED
@@ -39,7 +39,6 @@ title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
39
  helpURL = 'https://huggingface.co/awacke1'
40
  bugURL = 'https://huggingface.co/spaces/awacke1'
41
  icons = '🚲🏆'
42
-
43
  st.set_page_config(
44
  page_title=title,
45
  page_icon=icons,
@@ -51,27 +50,29 @@ st.set_page_config(
51
  'About': title
52
  }
53
  )
54
-
55
- # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
56
  load_dotenv()
57
-
58
- # OpenAI setup
59
  openai.api_key = os.getenv('OPENAI_API_KEY')
60
  if openai.api_key == None:
61
  openai.api_key = st.secrets['OPENAI_API_KEY']
62
-
63
  openai_client = OpenAI(
64
  api_key=os.getenv('OPENAI_API_KEY'),
65
  organization=os.getenv('OPENAI_ORG_ID')
66
  )
67
-
68
- # 3.🚲BikeAI🏆 Claude setup
69
  anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
70
  if anthropic_key == None:
71
  anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
72
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
 
 
 
 
 
 
 
 
73
 
74
- # 4.🚲BikeAI🏆 Initialize session states
 
75
  if 'transcript_history' not in st.session_state:
76
  st.session_state.transcript_history = []
77
  if "chat_history" not in st.session_state:
@@ -83,17 +84,8 @@ if "messages" not in st.session_state:
83
  if 'last_voice_input' not in st.session_state:
84
  st.session_state.last_voice_input = ""
85
 
86
- # 5. 🚲BikeAI🏆 HuggingFace AI setup
87
- API_URL = os.getenv('API_URL')
88
- HF_KEY = os.getenv('HF_KEY')
89
- MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
90
- MODEL2 = "openai/whisper-small.en"
91
- headers = {
92
- "Authorization": f"Bearer {HF_KEY}",
93
- "Content-Type": "application/json"
94
- }
95
 
96
- # 6. 🚲BikeAI🏆 Custom CSS
97
  st.markdown("""
98
  <style>
99
  .main {
@@ -134,7 +126,8 @@ st.markdown("""
134
  """, unsafe_allow_html=True)
135
 
136
 
137
- # 7. Helper Functions
 
138
  def generate_filename(prompt, file_type):
139
  """Generate a safe filename using the prompt and file type."""
140
  central = pytz.timezone('US/Central')
@@ -142,11 +135,6 @@ def generate_filename(prompt, file_type):
142
  replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
143
  safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
144
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
145
-
146
-
147
-
148
-
149
- # 8. Function to create and save a file (and avoid the black hole of lost data 🕳)
150
  def create_file(filename, prompt, response, should_save=True):
151
  if not should_save:
152
  return
@@ -163,8 +151,8 @@ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, s
163
  else:
164
  f.write(prompt + "\n\n" + content if prompt else content)
165
  return filename
166
-
167
 
 
168
  def get_download_link(file_path):
169
  """Create download link for file."""
170
  with open(file_path, "rb") as file:
@@ -172,6 +160,7 @@ def get_download_link(file_path):
172
  b64 = base64.b64encode(contents).decode()
173
  return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
174
 
 
175
  @st.cache_resource
176
  def SpeechSynthesis(result):
177
  """HTML5 Speech Synthesis."""
@@ -204,9 +193,7 @@ def process_image(image_input, user_prompt):
204
  if isinstance(image_input, str):
205
  with open(image_input, "rb") as image_file:
206
  image_input = image_file.read()
207
-
208
  base64_image = base64.b64encode(image_input).decode("utf-8")
209
-
210
  response = openai_client.chat.completions.create(
211
  model=st.session_state["openai_model"],
212
  messages=[
@@ -220,7 +207,6 @@ def process_image(image_input, user_prompt):
220
  ],
221
  temperature=0.0,
222
  )
223
-
224
  return response.choices[0].message.content
225
 
226
  def process_audio(audio_input, text_input=''):
@@ -228,18 +214,14 @@ def process_audio(audio_input, text_input=''):
228
  if isinstance(audio_input, str):
229
  with open(audio_input, "rb") as file:
230
  audio_input = file.read()
231
-
232
  transcription = openai_client.audio.transcriptions.create(
233
  model="whisper-1",
234
  file=audio_input,
235
  )
236
-
237
  st.session_state.messages.append({"role": "user", "content": transcription.text})
238
-
239
  with st.chat_message("assistant"):
240
  st.markdown(transcription.text)
241
  SpeechSynthesis(transcription.text)
242
-
243
  filename = generate_filename(transcription.text, "wav")
244
  create_and_save_file(audio_input, "wav", transcription.text, True)
245
 
@@ -259,14 +241,12 @@ def process_video(video_path, seconds_per_frame=1):
259
  break
260
  _, buffer = cv2.imencode(".jpg", frame)
261
  base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
262
-
263
  video.release()
264
  return base64Frames, None
265
 
266
  def process_video_with_gpt(video_input, user_prompt):
267
  """Process video with GPT-4 vision."""
268
  base64Frames, _ = process_video(video_input)
269
-
270
  response = openai_client.chat.completions.create(
271
  model=st.session_state["openai_model"],
272
  messages=[
@@ -291,7 +271,6 @@ def extract_urls(text):
291
  abs_link_matches = abs_link_pattern.findall(text)
292
  pdf_link_matches = pdf_link_pattern.findall(text)
293
  title_matches = title_pattern.findall(text)
294
-
295
  # markdown with the extracted fields
296
  markdown_text = ""
297
  for i in range(len(date_matches)):
@@ -305,17 +284,14 @@ def extract_urls(text):
305
  markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
306
  markdown_text += "---\n\n"
307
  return markdown_text
308
-
309
  except:
310
  st.write('.')
311
  return ''
312
 
313
 
314
  def search_arxiv(query):
315
-
316
  st.write("Performing AI Lookup...")
317
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
318
-
319
  result1 = client.predict(
320
  prompt=query,
321
  llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -324,7 +300,6 @@ def search_arxiv(query):
324
  )
325
  st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
326
  st.markdown(result1)
327
-
328
  result2 = client.predict(
329
  prompt=query,
330
  llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
@@ -335,7 +310,6 @@ def search_arxiv(query):
335
  st.markdown(result2)
336
  combined_result = f"{result1}\n\n{result2}"
337
  return combined_result
338
-
339
  #return responseall
340
 
341
 
@@ -365,7 +339,6 @@ def perform_ai_lookup(query):
365
  Question = '### 🔎 ' + query + '\r\n' # Format for markdown display with links
366
  References = response1[0]
367
  ReferenceLinks = extract_urls(References)
368
-
369
  RunSecondQuery = True
370
  results=''
371
  if RunSecondQuery:
@@ -382,7 +355,6 @@ def perform_ai_lookup(query):
382
  # Restructure results to follow format of Question, Answer, References, ReferenceLinks
383
  results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
384
  st.markdown(results)
385
-
386
  st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
387
  end_time = time.strftime("%Y-%m-%d %H:%M:%S")
388
  start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
@@ -391,8 +363,6 @@ def perform_ai_lookup(query):
391
  st.write(f"Start time: {start_time}")
392
  st.write(f"Finish time: {end_time}")
393
  st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
394
-
395
-
396
  filename = generate_filename(query, "md")
397
  create_file(filename, query, results)
398
  return results
@@ -402,10 +372,8 @@ def process_with_gpt(text_input):
402
  """Process text with GPT-4o."""
403
  if text_input:
404
  st.session_state.messages.append({"role": "user", "content": text_input})
405
-
406
  with st.chat_message("user"):
407
  st.markdown(text_input)
408
-
409
  with st.chat_message("assistant"):
410
  completion = openai_client.chat.completions.create(
411
  model=st.session_state["openai_model"],
@@ -417,7 +385,6 @@ def process_with_gpt(text_input):
417
  )
418
  return_text = completion.choices[0].message.content
419
  st.write("GPT-4o: " + return_text)
420
-
421
  #filename = generate_filename(text_input, "md")
422
  filename = generate_filename("GPT-4o: " + return_text, "md")
423
  create_file(filename, text_input, return_text)
@@ -427,10 +394,8 @@ def process_with_gpt(text_input):
427
  def process_with_claude(text_input):
428
  """Process text with Claude."""
429
  if text_input:
430
-
431
  with st.chat_message("user"):
432
  st.markdown(text_input)
433
-
434
  with st.chat_message("assistant"):
435
  response = claude_client.messages.create(
436
  model="claude-3-sonnet-20240229",
@@ -441,11 +406,9 @@ def process_with_claude(text_input):
441
  )
442
  response_text = response.content[0].text
443
  st.write("Claude: " + response_text)
444
-
445
  #filename = generate_filename(text_input, "md")
446
  filename = generate_filename("Claude: " + response_text, "md")
447
  create_file(filename, text_input, response_text)
448
-
449
  st.session_state.chat_history.append({
450
  "user": text_input,
451
  "claude": response_text
@@ -467,8 +430,6 @@ def create_zip_of_files(files):
467
  zipf.write(file)
468
  return zip_name
469
 
470
-
471
-
472
  def get_media_html(media_path, media_type="video", width="100%"):
473
  """Generate HTML for media player."""
474
  media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
@@ -490,9 +451,7 @@ def get_media_html(media_path, media_type="video", width="100%"):
490
  def create_media_gallery():
491
  """Create the media gallery interface."""
492
  st.header("🎬 Media Gallery")
493
-
494
  tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
495
-
496
  with tabs[0]:
497
  image_files = glob.glob("*.png") + glob.glob("*.jpg")
498
  if image_files:
@@ -502,13 +461,11 @@ def create_media_gallery():
502
  with cols[idx % num_cols]:
503
  img = Image.open(image_file)
504
  st.image(img, use_container_width=True)
505
-
506
  # Add GPT vision analysis option
507
  if st.button(f"Analyze {os.path.basename(image_file)}"):
508
  analysis = process_image(image_file,
509
  "Describe this image in detail and identify key elements.")
510
  st.markdown(analysis)
511
-
512
  with tabs[1]:
513
  audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
514
  for audio_file in audio_files:
@@ -518,7 +475,6 @@ def create_media_gallery():
518
  with open(audio_file, "rb") as f:
519
  transcription = process_audio(f)
520
  st.write(transcription)
521
-
522
  with tabs[2]:
523
  video_files = glob.glob("*.mp4")
524
  for video_file in video_files:
@@ -530,29 +486,23 @@ def create_media_gallery():
530
  st.markdown(analysis)
531
 
532
 
533
-
534
  def display_file_manager():
535
  """Display file management sidebar with guaranteed unique button keys."""
536
  st.sidebar.title("📁 File Management")
537
-
538
  all_files = glob.glob("*.md")
539
  all_files.sort(reverse=True)
540
-
541
  if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
542
  for file in all_files:
543
  os.remove(file)
544
  st.rerun()
545
-
546
  if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
547
  zip_file = create_zip_of_files(all_files)
548
  st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
549
-
550
  # Create unique keys using file attributes
551
  for idx, file in enumerate(all_files):
552
  # Get file stats for unique identification
553
  file_stat = os.stat(file)
554
  unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
555
-
556
  col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
557
  with col1:
558
  if st.button("🌐", key=f"view_{unique_id}"):
@@ -570,8 +520,6 @@ def display_file_manager():
570
  st.rerun()
571
 
572
 
573
-
574
-
575
  # Speech Recognition HTML Component
576
  speech_recognition_html = """
577
  <!DOCTYPE html>
 
39
  helpURL = 'https://huggingface.co/awacke1'
40
  bugURL = 'https://huggingface.co/spaces/awacke1'
41
  icons = '🚲🏆'
 
42
  st.set_page_config(
43
  page_title=title,
44
  page_icon=icons,
 
50
  'About': title
51
  }
52
  )
 
 
53
  load_dotenv()
 
 
54
  openai.api_key = os.getenv('OPENAI_API_KEY')
55
  if openai.api_key == None:
56
  openai.api_key = st.secrets['OPENAI_API_KEY']
 
57
  openai_client = OpenAI(
58
  api_key=os.getenv('OPENAI_API_KEY'),
59
  organization=os.getenv('OPENAI_ORG_ID')
60
  )
 
 
61
  anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
62
  if anthropic_key == None:
63
  anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
64
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
65
+ API_URL = os.getenv('API_URL')
66
+ HF_KEY = os.getenv('HF_KEY')
67
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
68
+ MODEL2 = "openai/whisper-small.en"
69
+ headers = {
70
+ "Authorization": f"Bearer {HF_KEY}",
71
+ "Content-Type": "application/json"
72
+ }
73
 
74
+
75
+ # 2.🚲BikeAI🏆 Initialize session states
76
  if 'transcript_history' not in st.session_state:
77
  st.session_state.transcript_history = []
78
  if "chat_history" not in st.session_state:
 
84
  if 'last_voice_input' not in st.session_state:
85
  st.session_state.last_voice_input = ""
86
 
 
 
 
 
 
 
 
 
 
87
 
88
+ # 3. 🚲BikeAI🏆 Custom CSS
89
  st.markdown("""
90
  <style>
91
  .main {
 
126
  """, unsafe_allow_html=True)
127
 
128
 
129
+
130
+ # create and save a file (and avoid the black hole of lost data 🕳)
131
  def generate_filename(prompt, file_type):
132
  """Generate a safe filename using the prompt and file type."""
133
  central = pytz.timezone('US/Central')
 
135
  replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
136
  safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
137
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
 
 
 
 
138
  def create_file(filename, prompt, response, should_save=True):
139
  if not should_save:
140
  return
 
151
  else:
152
  f.write(prompt + "\n\n" + content if prompt else content)
153
  return filename
 
154
 
155
+ # Load a file, base64 it, return as link
156
  def get_download_link(file_path):
157
  """Create download link for file."""
158
  with open(file_path, "rb") as file:
 
160
  b64 = base64.b64encode(contents).decode()
161
  return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
162
 
163
+ # Speech Synth Browser Style
164
  @st.cache_resource
165
  def SpeechSynthesis(result):
166
  """HTML5 Speech Synthesis."""
 
193
  if isinstance(image_input, str):
194
  with open(image_input, "rb") as image_file:
195
  image_input = image_file.read()
 
196
  base64_image = base64.b64encode(image_input).decode("utf-8")
 
197
  response = openai_client.chat.completions.create(
198
  model=st.session_state["openai_model"],
199
  messages=[
 
207
  ],
208
  temperature=0.0,
209
  )
 
210
  return response.choices[0].message.content
211
 
212
  def process_audio(audio_input, text_input=''):
 
214
  if isinstance(audio_input, str):
215
  with open(audio_input, "rb") as file:
216
  audio_input = file.read()
 
217
  transcription = openai_client.audio.transcriptions.create(
218
  model="whisper-1",
219
  file=audio_input,
220
  )
 
221
  st.session_state.messages.append({"role": "user", "content": transcription.text})
 
222
  with st.chat_message("assistant"):
223
  st.markdown(transcription.text)
224
  SpeechSynthesis(transcription.text)
 
225
  filename = generate_filename(transcription.text, "wav")
226
  create_and_save_file(audio_input, "wav", transcription.text, True)
227
 
 
241
  break
242
  _, buffer = cv2.imencode(".jpg", frame)
243
  base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
 
244
  video.release()
245
  return base64Frames, None
246
 
247
  def process_video_with_gpt(video_input, user_prompt):
248
  """Process video with GPT-4 vision."""
249
  base64Frames, _ = process_video(video_input)
 
250
  response = openai_client.chat.completions.create(
251
  model=st.session_state["openai_model"],
252
  messages=[
 
271
  abs_link_matches = abs_link_pattern.findall(text)
272
  pdf_link_matches = pdf_link_pattern.findall(text)
273
  title_matches = title_pattern.findall(text)
 
274
  # markdown with the extracted fields
275
  markdown_text = ""
276
  for i in range(len(date_matches)):
 
284
  markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
285
  markdown_text += "---\n\n"
286
  return markdown_text
 
287
  except:
288
  st.write('.')
289
  return ''
290
 
291
 
292
  def search_arxiv(query):
 
293
  st.write("Performing AI Lookup...")
294
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
 
295
  result1 = client.predict(
296
  prompt=query,
297
  llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
 
300
  )
301
  st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
302
  st.markdown(result1)
 
303
  result2 = client.predict(
304
  prompt=query,
305
  llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
 
310
  st.markdown(result2)
311
  combined_result = f"{result1}\n\n{result2}"
312
  return combined_result
 
313
  #return responseall
314
 
315
 
 
339
  Question = '### 🔎 ' + query + '\r\n' # Format for markdown display with links
340
  References = response1[0]
341
  ReferenceLinks = extract_urls(References)
 
342
  RunSecondQuery = True
343
  results=''
344
  if RunSecondQuery:
 
355
  # Restructure results to follow format of Question, Answer, References, ReferenceLinks
356
  results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
357
  st.markdown(results)
 
358
  st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
359
  end_time = time.strftime("%Y-%m-%d %H:%M:%S")
360
  start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
 
363
  st.write(f"Start time: {start_time}")
364
  st.write(f"Finish time: {end_time}")
365
  st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
 
 
366
  filename = generate_filename(query, "md")
367
  create_file(filename, query, results)
368
  return results
 
372
  """Process text with GPT-4o."""
373
  if text_input:
374
  st.session_state.messages.append({"role": "user", "content": text_input})
 
375
  with st.chat_message("user"):
376
  st.markdown(text_input)
 
377
  with st.chat_message("assistant"):
378
  completion = openai_client.chat.completions.create(
379
  model=st.session_state["openai_model"],
 
385
  )
386
  return_text = completion.choices[0].message.content
387
  st.write("GPT-4o: " + return_text)
 
388
  #filename = generate_filename(text_input, "md")
389
  filename = generate_filename("GPT-4o: " + return_text, "md")
390
  create_file(filename, text_input, return_text)
 
394
  def process_with_claude(text_input):
395
  """Process text with Claude."""
396
  if text_input:
 
397
  with st.chat_message("user"):
398
  st.markdown(text_input)
 
399
  with st.chat_message("assistant"):
400
  response = claude_client.messages.create(
401
  model="claude-3-sonnet-20240229",
 
406
  )
407
  response_text = response.content[0].text
408
  st.write("Claude: " + response_text)
 
409
  #filename = generate_filename(text_input, "md")
410
  filename = generate_filename("Claude: " + response_text, "md")
411
  create_file(filename, text_input, response_text)
 
412
  st.session_state.chat_history.append({
413
  "user": text_input,
414
  "claude": response_text
 
430
  zipf.write(file)
431
  return zip_name
432
 
 
 
433
  def get_media_html(media_path, media_type="video", width="100%"):
434
  """Generate HTML for media player."""
435
  media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
 
451
  def create_media_gallery():
452
  """Create the media gallery interface."""
453
  st.header("🎬 Media Gallery")
 
454
  tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
 
455
  with tabs[0]:
456
  image_files = glob.glob("*.png") + glob.glob("*.jpg")
457
  if image_files:
 
461
  with cols[idx % num_cols]:
462
  img = Image.open(image_file)
463
  st.image(img, use_container_width=True)
 
464
  # Add GPT vision analysis option
465
  if st.button(f"Analyze {os.path.basename(image_file)}"):
466
  analysis = process_image(image_file,
467
  "Describe this image in detail and identify key elements.")
468
  st.markdown(analysis)
 
469
  with tabs[1]:
470
  audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
471
  for audio_file in audio_files:
 
475
  with open(audio_file, "rb") as f:
476
  transcription = process_audio(f)
477
  st.write(transcription)
 
478
  with tabs[2]:
479
  video_files = glob.glob("*.mp4")
480
  for video_file in video_files:
 
486
  st.markdown(analysis)
487
 
488
 
 
489
  def display_file_manager():
490
  """Display file management sidebar with guaranteed unique button keys."""
491
  st.sidebar.title("📁 File Management")
 
492
  all_files = glob.glob("*.md")
493
  all_files.sort(reverse=True)
 
494
  if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
495
  for file in all_files:
496
  os.remove(file)
497
  st.rerun()
 
498
  if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
499
  zip_file = create_zip_of_files(all_files)
500
  st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
 
501
  # Create unique keys using file attributes
502
  for idx, file in enumerate(all_files):
503
  # Get file stats for unique identification
504
  file_stat = os.stat(file)
505
  unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
 
506
  col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
507
  with col1:
508
  if st.button("🌐", key=f"view_{unique_id}"):
 
520
  st.rerun()
521
 
522
 
 
 
523
  # Speech Recognition HTML Component
524
  speech_recognition_html = """
525
  <!DOCTYPE html>