awacke1 committed on
Commit 84b7583 · verified · 1 Parent(s): 1a6391f

Create app.py

Files changed (1)
  1. app.py +608 -0
app.py ADDED
@@ -0,0 +1,608 @@
#!/usr/bin/env python3

import os
import re
import glob
import json
import base64
import zipfile
import random
import requests
import openai
from PIL import Image
from urllib.parse import quote

import streamlit as st
import streamlit.components.v1 as components

# If you do model inference via huggingface_hub:
# from huggingface_hub import InferenceClient


########################################################################################
# 1) GLOBAL CONFIG & PLACEHOLDERS
########################################################################################
BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

PromptPrefix = "AI-Search: "
PromptPrefix2 = "AI-Refine: "
PromptPrefix3 = "AI-JS: "

roleplaying_glossary = {
    "Core Rulebooks": {
        "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],
        "GURPS": ["Basic Set Characters", "Basic Set Campaigns"]
    },
    "Campaigns & Adventures": {
        "Pathfinder": ["Rise of the Runelords", "Curse of the Crimson Throne"]
    }
}

transhuman_glossary = {
    "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
    "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
}

def process_text(text):
    """🕵️ process_text: detective style—prints lines to Streamlit for debugging."""
    st.write(f"process_text called with: {text}")

def search_arxiv(text):
    """🔭 search_arxiv: pretend to search ArXiv, just prints debug."""
    st.write(f"search_arxiv called with: {text}")

def SpeechSynthesis(text):
    """🗣 Simple logging for text-to-speech placeholders."""
    st.write(f"SpeechSynthesis called with: {text}")

def process_image(image_file, prompt):
    """📷 Simple placeholder for image AI pipeline."""
    return f"[process_image placeholder] {image_file} => {prompt}"

def process_video(video_file, seconds_per_frame):
    """🎞 Simple placeholder for video AI pipeline."""
    st.write(f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame")

API_URL = "https://huggingface-inference-endpoint-placeholder"
API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

@st.cache_resource
def InferenceLLM(prompt):
    """🔮 Stub returning mock response for 'prompt'."""
    return f"[InferenceLLM placeholder response to prompt: {prompt}]"

########################################################################################
# 2) GLOSSARY & FILE UTILITY
########################################################################################
@st.cache_resource
def display_glossary_entity(k):
    """
    Creates multiple link emojis for a single entity.
    Each link might point to /?q=..., /?q=<prefix>..., or external sites.
    """
    search_urls = {
        "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
        "🃏Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
        "📚PyCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix2)}",
        "🔬JSCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix3)}",
        "📖": lambda x: f"https://en.wikipedia.org/wiki/{quote(x)}",
        "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
        "🔎": lambda x: f"https://www.bing.com/search?q={quote(x)}",
        "🎥": lambda x: f"https://www.youtube.com/results?search_query={quote(x)}",
        "🐦": lambda x: f"https://twitter.com/search?q={quote(x)}",
    }
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
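# Illustrative only (not part of the original logic): for k == "Mermaid",
# display_glossary_entity renders a markdown line roughly like
#   **Mermaid** <small>[🚀🌌ArXiv](/?q=Mermaid) [📖](https://en.wikipedia.org/wiki/Mermaid) ...</small>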

def display_content_or_image(query):
    """
    If 'query' is in transhuman_glossary or there's an image matching 'images/<query>.png',
    show it. Otherwise warn.
    """
    for category, term_list in transhuman_glossary.items():
        for term in term_list:
            if query.lower() in term.lower():
                st.subheader(f"Found in {category}:")
                st.write(term)
                return True
    image_path = f"images/{query}.png"
    if os.path.exists(image_path):
        st.image(image_path, caption=f"Image for {query}")
        return True
    st.warning("No matching content or image found.")
    return False

def clear_query_params():
    """Clear all query parameters from the URL via st.query_params.clear()."""
    st.query_params.clear()
    st.info("Query parameters cleared.")

########################################################################################
# 3) FILE-HANDLING (MD files, etc.)
########################################################################################
def load_file(file_path):
    """Load file contents as UTF-8 text, or return empty on error."""
    try:
        with open(file_path, "r", encoding='utf-8') as f:
            return f.read()
    except Exception:
        return ""

@st.cache_resource
def create_zip_of_files(files):
    """Combine multiple local .md files into a single .zip for user to download."""
    zip_name = "Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

@st.cache_resource
def get_zip_download_link(zip_file):
    """Return an <a> link to download the given zip_file (base64-encoded)."""
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'

def get_table_download_link(file_path):
    """
    Create a base64 data-URI download link for a single local file.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            data = file.read()
        b64 = base64.b64encode(data.encode()).decode()
        file_name = os.path.basename(file_path)
        ext = os.path.splitext(file_name)[1]
        mime_map = {
            '.txt': 'text/plain',
            '.py': 'text/plain',
            '.xlsx': 'text/plain',
            '.csv': 'text/plain',
            '.htm': 'text/html',
            '.md': 'text/markdown',
            '.wav': 'audio/wav'
        }
        mime_type = mime_map.get(ext, 'application/octet-stream')
        return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    except Exception:
        return ''

def get_file_size(file_path):
    """Get file size in bytes."""
    return os.path.getsize(file_path)

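# Illustrative (hypothetical) output of get_table_download_link("notes.md"):
#   <a href="data:text/markdown;base64,..." target="_blank" download="notes.md">notes.md</a>
# The base64 payload is the file contents; the browser downloads it directly from the data URI.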
def FileSidebar():
    """
    Renders .md files, providing open/view/delete/run logic in the sidebar.
    """
    all_files = glob.glob("*.md")
    # Exclude short-named or special files if needed:
    all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)

    Files1, Files2 = st.sidebar.columns(2)
    with Files1:
        if st.button("🗑 Delete All"):
            for file in all_files:
                os.remove(file)
            st.rerun()
    with Files2:
        if st.button("⬇️ Download"):
            zip_file = create_zip_of_files(all_files)
            st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

    file_contents = ''
    file_name = ''
    next_action = ''

    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
        with col1:
            if st.button("🌐", key="md_"+file):
                file_contents = load_file(file)
                file_name = file
                next_action = 'md'
                st.session_state['next_action'] = next_action
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="open_"+file):
                file_contents = load_file(file)
                file_name = file
                next_action = 'open'
                st.session_state['lastfilename'] = file
                st.session_state['filename'] = file
                st.session_state['filetext'] = file_contents
                st.session_state['next_action'] = next_action
        with col4:
            if st.button("▶️", key="read_"+file):
                file_contents = load_file(file)
                file_name = file
                next_action = 'search'
                st.session_state['next_action'] = next_action
        with col5:
            if st.button("🗑", key="delete_"+file):
                os.remove(file)
                st.rerun()

    if file_contents:
        if next_action == 'open':
            open1, open2 = st.columns([0.8, 0.2])
            with open1:
                file_name_input = st.text_input('File Name:', file_name, key='file_name_input')
                file_content_area = st.text_area('File Contents:', file_contents, height=300, key='file_content_area')
                if st.button('💾 Save File'):
                    with open(file_name_input, 'w', encoding='utf-8') as f:
                        f.write(file_content_area)
                    st.markdown(f'Saved {file_name_input} successfully.')
        elif next_action == 'search':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
            user_prompt = PromptPrefix2 + file_contents
            st.markdown(user_prompt)
            if st.button('🔍Re-Code'):
                search_arxiv(file_contents)
        elif next_action == 'md':
            st.markdown(file_contents)
            SpeechSynthesis(file_contents)
            if st.button("🔍Run"):
                st.write("Running GPT logic placeholder...")

########################################################################################
# 4) SCORING / GLOSSARIES
########################################################################################
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)

def generate_key(label, header, idx):
    return f"{header}_{label}_{idx}_key"

def update_score(key, increment=1):
    """
    Track a 'score' for each glossary item or term, saved in JSON per key.
    """
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
    else:
        score_data = {"clicks": 0, "score": 0}
    score_data["clicks"] += increment
    score_data["score"] += increment
    with open(score_file, "w") as file:
        json.dump(score_data, file)
    return score_data["score"]

def load_score(key):
    file_path = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(file_path):
        with open(file_path, "r") as file:
            score_data = json.load(file)
        return score_data["score"]
    return 0
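# For reference (illustrative, derived from the code above): each file under scores/
# holds a small JSON blob shaped like {"clicks": 3, "score": 3}. A call such as
#   update_score("core_rulebooks_gurps_basic_set_characters")
# bumps both counters by 1 and returns the new score.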

def display_buttons_with_scores(num_columns_text):
    """
    Show glossary items as clickable buttons that increment a 'score'.
    """
    game_emojis = {
        "Dungeons and Dragons": "🐉",
        "Call of Cthulhu": "🐙",
        "GURPS": "🎲",
        "Pathfinder": "🗺️",
        "Kindred of the East": "🌅",
        "Changeling": "🍃",
    }
    topic_emojis = {
        "Core Rulebooks": "📚",
        "Maps & Settings": "🗺️",
        "Game Mechanics & Tools": "⚙️",
        "Monsters & Adversaries": "👹",
        "Campaigns & Adventures": "📜",
        "Creatives & Assets": "🎨",
        "Game Master Resources": "🛠️",
        "Lore & Background": "📖",
        "Character Development": "🧍",
        "Homebrew Content": "🔧",
        "General Topics": "🌍",
    }

    for category, games in roleplaying_glossary.items():
        category_emoji = topic_emojis.get(category, "🔍")
        st.markdown(f"## {category_emoji} {category}")
        for game, terms in games.items():
            game_emoji = game_emojis.get(game, "🎮")
            for term in terms:
                key = f"{category}_{game}_{term}".replace(' ', '_').lower()
                score_val = load_score(key)
                if st.button(f"{game_emoji} {category} {game} {term} {score_val}", key=key):
                    newscore = update_score(key.replace('?', ''))
                    st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")

########################################################################################
# 5) IMAGES & VIDEOS
########################################################################################

def display_images_and_wikipedia_summaries(num_columns=4):
    """Display .png images in a grid, referencing the name as a 'keyword'."""
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")
        return

    image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
    cols = st.columns(num_columns)
    col_index = 0
    for image_file in image_files_sorted:
        with cols[col_index % num_columns]:
            try:
                image = Image.open(image_file)
                st.image(image, use_column_width=True)
                k = image_file.split('.')[0]
                display_glossary_entity(k)
                image_text_input = st.text_input(f"Prompt for {image_file}", key=f"image_prompt_{image_file}")
                if image_text_input:
                    response = process_image(image_file, image_text_input)
                    st.markdown(response)
            except Exception:
                st.write(f"Could not open {image_file}")
        col_index += 1

def display_videos_and_links(num_columns=4):
    """Displays all .mp4/.webm in a grid, plus text input for prompts."""
    video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
    if not video_files:
        st.write("No MP4 or WEBM videos found in the current directory.")
        return

    video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
    cols = st.columns(num_columns)
    col_index = 0
    for video_file in video_files_sorted:
        with cols[col_index % num_columns]:
            k = video_file.split('.')[0]
            st.video(video_file, format='video/mp4', start_time=0)
            display_glossary_entity(k)
            video_text_input = st.text_input(f"Video Prompt for {video_file}", key=f"video_prompt_{video_file}")
            if video_text_input:
                try:
                    seconds_per_frame = 10
                    process_video(video_file, seconds_per_frame)
                except ValueError:
                    st.error("Invalid input for seconds per frame!")
        col_index += 1

########################################################################################
# 6) MERMAID
########################################################################################

def generate_mermaid_html(mermaid_code: str) -> str:
    """
    Returns HTML that centers the Mermaid diagram, loading from a CDN.
    """
    return f"""
<html>
<head>
    <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
    <style>
        .centered-mermaid {{
            display: flex;
            justify-content: center;
            margin: 20px auto;
        }}
        .mermaid {{
            max-width: 800px;
        }}
    </style>
</head>
<body>
    <div class="mermaid centered-mermaid">
        {mermaid_code}
    </div>
    <script>
        mermaid.initialize({{ startOnLoad: true }});
    </script>
</body>
</html>
"""
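# Minimal usage sketch (main() below does exactly this, using the rewritten diagram source):
#   components.html(generate_mermaid_html(DEFAULT_MERMAID), height=400, scrolling=True)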

def append_model_param(url: str, model_selected: bool) -> str:
    """
    If the user checked 'Append ?model=1', append model=1 to the URL,
    using '?' or '&' depending on whether the URL already has a query string.
    """
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

def inject_base_url(url: str) -> str:
    """
    If a link does not start with http, prepend BASE_URL
    so it becomes an absolute link to huggingface.co/spaces/...
    """
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"

# We use 2-parameter click lines for Mermaid 11.4.1 compatibility:
DEFAULT_MERMAID = r"""
flowchart LR
    U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\nExtract Info]
    click U "/?q=U" _self
    click LLM "/?q=LLM%20Agent%20Extract%20Info" _blank

    LLM -- "Query 🔍" --> HS[Hybrid Search 🔎\nVector+NER+Lexical]
    click HS "/?q=Hybrid%20Search%20Vector%20NER%20Lexical" _blank

    HS -- "Reason 🤔" --> RE[Reasoning Engine 🛠️\nNeuralNetwork+Medical]
    click RE "/?q=R" _blank

    RE -- "Link 📡" --> KG((Knowledge Graph 📚\nOntology+GAR+RAG))
    click KG "/?q=K" _blank
"""

########################################################################################
# 7) MAIN UI
########################################################################################

def main():
    st.set_page_config(page_title="Mermaid + Two-Parameter Click + LetterMap", layout="wide")

    # 1) Provide a letter map:
    letter_map = {
        "K": "Knowledge Graph Ontology, GAR, and RAG",
        "R": "Reasoning Engine NeuralNetwork Medical",
        "U": "User Node Something",
        "HS": "Hybrid Search Vector NER Lexical",  # if you want multiple letters
        "LLM Agent Extract Info": "LLM Agent expansions etc."
    }

    # st.query_params values are plain strings (not lists), so read them directly.
    query_params = st.query_params
    q_val = (query_params.get('q') or query_params.get('query') or '').strip()
    if q_val:
        # 2) If q_val is in letter_map, display that line
        if q_val in letter_map:
            expanded_line = letter_map[q_val]
            st.write(f"**Expanded**: {expanded_line}")
        else:
            # fallback to normal "AI-Search: q_val"
            search_payload = PromptPrefix + q_val
            st.markdown(search_payload)
            process_text(search_payload)

    if 'action' in query_params:
        action = query_params['action']
        if action == 'show_message':
            st.success("Showing a message because 'action=show_message' was found in the URL.")
        elif action == 'clear':
            clear_query_params()

    # If a 'query=' param is present, show content or image
    if 'query' in query_params:
        paramQ = query_params['query']
        display_content_or_image(paramQ)

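    # Illustrative URL behavior for the handling above:
    #   /?q=K                    -> prints the expanded letter_map entry for "K"
    #   /?q=Quantum%20Computing  -> falls back to the "AI-Search: ..." payload
    #   /?action=show_message    -> shows the success banner
    #   /?action=clear           -> clears the query params via clear_query_params()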
    st.sidebar.write("## Diagram Link Settings")
    model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")

    # Rebuild the click lines for the two-parameter approach
    lines = DEFAULT_MERMAID.strip().split("\n")
    new_lines = []
    for line in lines:
        # e.g. click U "/?q=K" _blank
        if line.strip().startswith("click ") and '"/?' in line:
            pattern = r'(click\s+\S+\s+)"([^"]+)"\s+(\S+)'
            match = re.match(pattern, line.strip())
            if match:
                prefix_part = match.group(1)  # e.g. "click U "
                old_url = match.group(2)      # e.g. /?q=K
                target = match.group(3)       # e.g. _blank

                new_url = inject_base_url(old_url)
                new_url = append_model_param(new_url, model_selected)
                new_line = f'{prefix_part}"{new_url}" {target}'
                new_lines.append(new_line)
            else:
                new_lines.append(line)
        else:
            new_lines.append(line)

    final_mermaid = "\n".join(new_lines)
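    # For example, with the checkbox ticked, the click line
    #   click U "/?q=U" _self
    # is rewritten above to
    #   click U "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor/?q=U&model=1" _self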

    # Render the top-centered Mermaid diagram
    st.sidebar.markdown("**Mermaid Diagram** - 2-Parameter for v11.4.1")
    diagram_html = generate_mermaid_html(final_mermaid)
    components.html(diagram_html, height=400, scrolling=True)

    # Two-column: left => Markdown, right => Mermaid
    left_col, right_col = st.columns(2)

    # -- Left: Markdown
    with left_col:
        st.subheader("Markdown Side 📝")
        if "markdown_text" not in st.session_state:
            st.session_state["markdown_text"] = "## Hello!\nYou can type some *Markdown* here.\n"

        markdown_text = st.text_area("Edit Markdown:",
                                     value=st.session_state["markdown_text"],
                                     height=300)
        st.session_state["markdown_text"] = markdown_text

        colA, colB = st.columns(2)
        with colA:
            if st.button("🔄 Refresh Markdown"):
                st.write("**Markdown** content refreshed! 🍿")
        with colB:
            if st.button("❌ Clear Markdown"):
                st.session_state["markdown_text"] = ""
                st.rerun()

        st.markdown("---")
        st.markdown("**Preview:**")
        st.markdown(markdown_text)

    # -- Right: Mermaid
    with right_col:
        st.subheader("Mermaid Side 🧜‍♂️")
        if "current_mermaid" not in st.session_state:
            st.session_state["current_mermaid"] = final_mermaid

        mermaid_input = st.text_area("Edit Mermaid Code:",
                                     value=st.session_state["current_mermaid"],
                                     height=300)

        colC, colD = st.columns(2)
        with colC:
            if st.button("🎨 Refresh Diagram"):
                st.session_state["current_mermaid"] = mermaid_input
                st.write("**Mermaid** diagram refreshed! 🌈")
                st.rerun()
        with colD:
            if st.button("❌ Clear Mermaid"):
                st.session_state["current_mermaid"] = ""
                st.rerun()

        st.markdown("---")
        st.markdown("**Mermaid Source:**")
        st.code(mermaid_input, language="mermaid", line_numbers=True)

    # Media Galleries
    st.markdown("---")
    st.header("Media Galleries")

    num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
    display_images_and_wikipedia_summaries(num_columns_images)

    num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
    display_videos_and_links(num_columns_video)

    # Extended text interface
    showExtendedTextInterface = False
    if showExtendedTextInterface:
        # e.g. display_buttons_with_scores(...)
        pass

    # File Sidebar
    FileSidebar()

    # Random Title
    titles = [
        "🧠🎭 Semantic Symphonies & Episodic Encores",
        "🌌🎼 AI Rhythms of Memory Lane",
        "🎭🎉 Cognitive Crescendos & Neural Harmonies",
        "🧠🎺 Mnemonic Melodies & Synaptic Grooves",
        "🎼🎸 Straight Outta Cognition",
        "🥁🎻 Jazzy Jambalaya of AI Memories",
        "🏰 Semantic Soul & Episodic Essence",
        "🥁🎻 The Music Of AI's Mind"
    ]
    st.markdown(f"**{random.choice(titles)}**")


if __name__ == "__main__":
    main()
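# Usage note: as a standard Streamlit entry point, this file is launched with
#   streamlit run app.py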