awacke1 commited on
Commit
871a88e
Β·
verified Β·
1 Parent(s): 6a060e0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +509 -0
app.py ADDED
@@ -0,0 +1,509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
3
+ import plotly.graph_objects as go
4
+ import streamlit.components.v1 as components
5
+ from datetime import datetime
6
+ from audio_recorder_streamlit import audio_recorder
7
+ from bs4 import BeautifulSoup
8
+ from collections import defaultdict, deque
9
+ from dotenv import load_dotenv
10
+ from gradio_client import Client
11
+ from huggingface_hub import InferenceClient
12
+ from io import BytesIO
13
+ from PIL import Image
14
+ from PyPDF2 import PdfReader
15
+ from urllib.parse import quote
16
+ from xml.etree import ElementTree as ET
17
+ from openai import OpenAI
18
+ import extra_streamlit_components as stx
19
+ from streamlit.runtime.scriptrunner import get_script_run_ctx
20
+ import asyncio
21
+ import edge_tts
22
+
23
+ # πŸ”§ Config & Setup
24
+ st.set_page_config(
25
+ page_title="🚲BikeAIπŸ† Claude/GPT Research",
26
+ page_icon="πŸš²πŸ†",
27
+ layout="wide",
28
+ initial_sidebar_state="auto",
29
+ menu_items={
30
+ 'Get Help': 'https://huggingface.co/awacke1',
31
+ 'Report a bug': 'https://huggingface.co/spaces/awacke1',
32
+ 'About': "🚲BikeAIπŸ† Claude/GPT Research AI"
33
+ }
34
+ )
35
+ load_dotenv()
36
+
37
+ openai_api_key = os.getenv('OPENAI_API_KEY', "")
38
+ anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
39
+ if 'OPENAI_API_KEY' in st.secrets:
40
+ openai_api_key = st.secrets['OPENAI_API_KEY']
41
+ if 'ANTHROPIC_API_KEY' in st.secrets:
42
+ anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
43
+
44
+ openai.api_key = openai_api_key
45
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)
46
+ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
47
+ HF_KEY = os.getenv('HF_KEY')
48
+ API_URL = os.getenv('API_URL')
49
+
50
+ if 'transcript_history' not in st.session_state:
51
+ st.session_state['transcript_history'] = []
52
+ if 'chat_history' not in st.session_state:
53
+ st.session_state['chat_history'] = []
54
+ if 'openai_model' not in st.session_state:
55
+ st.session_state['openai_model'] = "gpt-4o-2024-05-13"
56
+ if 'messages' not in st.session_state:
57
+ st.session_state['messages'] = []
58
+ if 'last_voice_input' not in st.session_state:
59
+ st.session_state['last_voice_input'] = ""
60
+ if 'editing_file' not in st.session_state:
61
+ st.session_state['editing_file'] = None
62
+ if 'edit_new_name' not in st.session_state:
63
+ st.session_state['edit_new_name'] = ""
64
+ if 'edit_new_content' not in st.session_state:
65
+ st.session_state['edit_new_content'] = ""
66
+ if 'viewing_prefix' not in st.session_state:
67
+ st.session_state['viewing_prefix'] = None
68
+ if 'should_rerun' not in st.session_state:
69
+ st.session_state['should_rerun'] = False
70
+ if 'old_val' not in st.session_state:
71
+ st.session_state['old_val'] = None
72
+
73
+ # 🎨 Minimal Custom CSS
74
+ st.markdown("""
75
+ <style>
76
+ .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
77
+ .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
78
+ .stButton>button {
79
+ margin-right: 0.5rem;
80
+ }
81
+ </style>
82
+ """, unsafe_allow_html=True)
83
+
84
+ FILE_EMOJIS = {
85
+ "md": "πŸ“",
86
+ "mp3": "🎡",
87
+ }
88
def clean_for_speech(text: str) -> str:
    """Normalize markdown-ish text so a TTS engine reads it cleanly.

    Flattens newlines, drops stray ``</s>`` tokens and ``#`` heading marks,
    strips parenthesized http(s) link targets, and collapses whitespace runs.
    """
    for token, replacement in (("\n", " "), ("</s>", " "), ("#", "")):
        text = text.replace(token, replacement)
    # Drop "(https://...)" link targets left behind by markdown links.
    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
    return re.sub(r"\s+", " ", text).strip()
def generate_filename(content, file_type="md"):
    """Build a timestamped filename from the first three words of *content*.

    Format: ``YYMM_HHMM_<word1_word2_word3>.<file_type>``; falls back to
    the stem ``file`` when *content* has no word characters.
    """
    stamp = datetime.now().strftime("%y%m_%H%M")
    tokens = re.findall(r"\w+", content)
    stem = "_".join(tokens[:3]) or "file"
    return f"{stamp}_{stem}.{file_type}"
def create_file(prompt, response, file_type="md"):
    """Persist a prompt/response pair to disk and return the filename.

    The filename is derived from the response text, or from the prompt
    when the response is blank.
    """
    seed = response.strip() or prompt.strip()
    filename = generate_filename(seed, file_type)
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(prompt + "\n\n" + response)
    return filename
def get_download_link(file):
    """Return an HTML anchor that serves *file* inline as base64 data."""
    with open(file, "rb") as fh:
        payload = base64.b64encode(fh.read()).decode()
    name = os.path.basename(file)
    return f'<a href="data:file/zip;base64,{payload}" download="{name}">πŸ“‚ Download {name}</a>'
def speech_synthesis_html(result):
    """Render a hidden HTML snippet that speaks *result* via the browser's
    Web Speech API.

    Bug fix: this was decorated with @st.cache_resource, but the function
    performs a render side effect and returns None — caching it meant the
    same text was only ever spoken once per process. The decorator is
    removed because this renders UI, not a shared resource. Quotes and
    newlines are stripped because either would break the inline JS string
    literal.
    """
    safe_text = result.replace('"', '').replace("\n", " ")
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{safe_text}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Render *text* to an mp3 with Edge TTS.

    Returns the generated filename, or None when the cleaned text is empty.
    *rate* is a percentage delta and *pitch* a Hz delta, both signed ints.
    """
    cleaned = clean_for_speech(text)
    if not cleaned.strip():
        return None
    communicator = edge_tts.Communicate(
        cleaned,
        voice,
        rate=f"{rate:+d}%",
        pitch=f"{pitch:+d}Hz",
    )
    out_fn = generate_filename(cleaned, "mp3")
    await communicator.save(out_fn)
    return out_fn
def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Blocking wrapper: drive the async Edge-TTS coroutine to completion."""
    coroutine = edge_tts_generate_audio(text, voice, rate, pitch)
    return asyncio.run(coroutine)
def play_and_download_audio(file_path):
    """Embed an audio player and a base64 download link for *file_path*.

    Silently does nothing when the path is falsy or the file is missing.
    Bug fixes: the visible link label previously ended with a stray '"'
    before </a>; the file handle is now closed via a context manager
    instead of being leaked by an inline open().read().
    """
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        with open(file_path, "rb") as fh:
            b64 = base64.b64encode(fh.read()).decode()
        name = os.path.basename(file_path)
        dl_link = (
            f'<a href="data:audio/mpeg;base64,{b64}" download="{name}">'
            f'Download {name}</a>'
        )
        st.markdown(dl_link, unsafe_allow_html=True)
def process_image(image_path, user_prompt):
    """Send an image plus *user_prompt* to the configured OpenAI chat model
    and return the assistant's reply text."""
    with open(image_path, "rb") as imgf:
        b64img = base64.b64encode(imgf.read()).decode("utf-8")
    user_content = [
        {"type": "text", "text": user_prompt},
        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}},
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": user_content},
        ],
        temperature=0.0,
    )
    return resp.choices[0].message.content
def process_audio(audio_path):
    """Transcribe *audio_path* with Whisper, log the text as a user chat
    message, and return the transcription text."""
    with open(audio_path, "rb") as audio_fh:
        transcription = openai_client.audio.transcriptions.create(
            model="whisper-1", file=audio_fh
        )
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    return transcription.text
def process_video(video_path, seconds_per_frame=1):
    """Sample frames from *video_path* roughly every *seconds_per_frame*
    seconds and return them as a list of base64-encoded JPEG strings.

    Bug fixes: some containers report FPS as 0, which made
    ``int(fps*seconds_per_frame)`` zero and ``range(..., step=0)`` raise
    ValueError — the step is now clamped to at least one frame. The capture
    is released in a ``finally`` so it is not leaked if decoding fails.
    """
    vid = cv2.VideoCapture(video_path)
    frames_b64 = []
    try:
        total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = vid.get(cv2.CAP_PROP_FPS)
        # Always advance by at least one frame, even for bogus FPS metadata.
        skip = max(1, int(fps * seconds_per_frame))
        for i in range(0, total, skip):
            vid.set(cv2.CAP_PROP_POS_FRAMES, i)
            ret, frame = vid.read()
            if not ret:
                break
            _, buf = cv2.imencode(".jpg", frame)
            frames_b64.append(base64.b64encode(buf).decode("utf-8"))
    finally:
        vid.release()
    return frames_b64
def process_video_with_gpt(video_path, prompt):
    """Describe a video by sending its sampled frames plus *prompt* to the
    configured OpenAI chat model; returns the assistant's reply text."""
    frames = process_video(video_path)
    frame_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}}
        for fr in frames
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze video frames."},
            {"role": "user", "content": [{"type": "text", "text": prompt}, *frame_parts]},
        ],
    )
    return resp.choices[0].message.content
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
    """Run an ArXiv RAG query against the hosted Gradio space and narrate it.

    Renders the combined markdown answer, optionally synthesizes audio for
    the short answer, the reference summaries, and/or the paper titles,
    reports elapsed time, saves the result to a markdown file, and returns
    the combined markdown string.
    """
    start = time.time()
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    rag_out = client.predict(
        q, 20, "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md",
    )
    refs = rag_out[0]
    answer = client.predict(
        q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True,
        api_name="/ask_llm",
    )
    result = f"### πŸ”Ž {q}\n\n{answer}\n\n{refs}"

    st.markdown(result)

    if vocal_summary:
        # Short spoken version of just the LLM answer.
        audio_main = speak_with_edge_tts(clean_for_speech(answer))
        st.write("### πŸŽ™οΈ Vocal Summary (Short Answer)")
        play_and_download_audio(audio_main)

    if extended_refs:
        refs_speech = clean_for_speech(
            "Here are the summaries from the references: " + refs.replace('"', '')
        )
        audio_refs = speak_with_edge_tts(refs_speech)
        st.write("### πŸ“œ Extended References & Summaries")
        play_and_download_audio(audio_refs)

    if titles_summary:
        # Paper titles appear as "[Title]" markdown links in the refs text.
        matches = (re.search(r"\[([^\]]+)\]", line) for line in refs.split('\n'))
        titles = [m.group(1) for m in matches if m]
        if titles:
            titles_speech = clean_for_speech(
                "Here are the titles of the papers: " + ", ".join(titles)
            )
            audio_titles = speak_with_edge_tts(titles_speech)
            st.write("### πŸ”– Paper Titles")
            play_and_download_audio(audio_titles)

    st.write(f"**Total Elapsed:** {time.time() - start:.2f} s")
    create_file(q, result, "md")
    return result
def process_with_gpt(text):
    """Append *text* to the GPT chat, query the model, render and persist
    the answer, and return it. No-op on empty input."""
    if not text:
        return
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        completion = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False,
        )
        ans = completion.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(text, ans, "md")
        st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans
def process_with_claude(text):
    """Send *text* to Claude, render the reply, persist it to a file,
    record it in chat_history, and return it. No-op on empty input."""
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        response = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}],
        )
        ans = response.content[0].text
        st.write("Claude-3.5: " + ans)
        create_file(text, ans, "md")
        st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
def create_zip_of_files(md_files, mp3_files):
    """Zip the given markdown (minus any README.md) and mp3 files.

    The archive name joins the member file stems with underscores,
    truncated to 50 characters plus "_etc". Returns the zip filename, or
    None when there is nothing to archive.
    """
    keep_md = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
    members = keep_md + mp3_files
    if not members:
        return None
    joined = "_".join(os.path.splitext(os.path.basename(f))[0] for f in members)
    if len(joined) > 50:
        joined = joined[:50] + "_etc"
    zip_name = f"{joined}.zip"
    with zipfile.ZipFile(zip_name, 'w') as archive:
        for member in members:
            archive.write(member)
    return zip_name
def load_files_for_sidebar():
    """Scan the working directory for .md/.mp3 files grouped by filename prefix.

    Files are grouped by their first 10 characters (the timestamp prefix
    produced by generate_filename); README.md is excluded. Returns
    ``(groups, sorted_prefixes)`` where each group is sorted newest-first
    and prefixes are ordered by their most recently modified member.
    """
    md_files = [f for f in glob.glob("*.md") if os.path.basename(f).lower() != 'readme.md']
    mp3_files = glob.glob("*.mp3")

    groups = defaultdict(list)
    for path in md_files + mp3_files:
        groups[os.path.basename(path)[:10]].append(path)

    for file_list in groups.values():
        file_list.sort(key=os.path.getmtime, reverse=True)

    def newest_mtime(prefix):
        return max(os.path.getmtime(p) for p in groups[prefix])

    sorted_prefixes = sorted(groups, key=newest_mtime, reverse=True)
    return groups, sorted_prefixes
def extract_keywords_from_md(files):
    """Return up to five distinct lowercase words from the given .md files.

    Non-markdown paths are ignored; words keep first-seen order.
    """
    corpus = ""
    for path in files:
        if path.endswith(".md"):
            corpus += " " + open(path, 'r', encoding='utf-8').read()
    seen = []
    for word in re.findall(r"\w+", corpus.lower()):
        if word not in seen:
            seen.append(word)
            if len(seen) == 5:
                break
    return seen
def display_file_manager_sidebar(groups, sorted_prefixes):
    """Sidebar UI: bulk delete/zip controls plus one expander per file group."""
    st.sidebar.title("🎡 Audio & Document Manager")

    # Flatten the groups into per-extension lists for the bulk actions.
    all_md = [f for prefix in groups for f in groups[prefix] if f.endswith(".md")]
    all_mp3 = [f for prefix in groups for f in groups[prefix] if f.endswith(".mp3")]

    top_bar = st.sidebar.columns(3)
    with top_bar[0]:
        if st.button("πŸ—‘ Del All MD"):
            for path in all_md:
                os.remove(path)
            st.session_state.should_rerun = True
    with top_bar[1]:
        if st.button("πŸ—‘ Del All MP3"):
            for path in all_mp3:
                os.remove(path)
            st.session_state.should_rerun = True
    with top_bar[2]:
        if st.button("⬇️ Zip All"):
            zip_name = create_zip_of_files(all_md, all_mp3)
            if zip_name:
                st.sidebar.markdown(get_download_link(zip_name), unsafe_allow_html=True)

    for prefix in sorted_prefixes:
        files = groups[prefix]
        kw = extract_keywords_from_md(files)
        keywords_str = " ".join(kw) if kw else "No Keywords"
        with st.sidebar.expander(f"{prefix} Files ({len(files)}) - Keywords: {keywords_str}", expanded=True):
            c1, c2 = st.columns(2)
            with c1:
                if st.button("πŸ‘€View Group", key="view_group_" + prefix):
                    st.session_state.viewing_prefix = prefix
            with c2:
                if st.button("πŸ—‘Del Group", key="del_group_" + prefix):
                    for path in files:
                        os.remove(path)
                    st.success(f"Deleted all files in group {prefix} successfully!")
                    st.session_state.should_rerun = True

            for path in files:
                fname = os.path.basename(path)
                # getmtime is the modification time, despite the "ctime" name.
                ctime = datetime.fromtimestamp(os.path.getmtime(path)).strftime("%Y-%m-%d %H:%M:%S")
                st.write(f"**{fname}** - {ctime}")
def run_selected_model(option, user_input):
    """Dispatch *user_input* to the model chosen in the UI dropdown."""
    query = user_input.strip()
    if option == "Arxiv":
        # Arxiv gets a header plus the full RAG lookup with default audio options.
        st.subheader("Arxiv Only Results:")
        perform_ai_lookup(query, vocal_summary=True, extended_refs=False, titles_summary=True)
        return
    handlers = {"GPT-4o": process_with_gpt, "Claude-3.5": process_with_claude}
    handler = handlers.get(option)
    if handler is not None:
        handler(query)
def main():
    """Top-level Streamlit page: mode tabs, custom voice-input component,
    and the sidebar file manager with group viewer."""
    st.sidebar.markdown("### 🚲BikeAIπŸ† Multi-Agent Research AI")
    # Primary navigation between the four app modes.
    tab_main = st.radio("Action:",["🎀 Voice Input","πŸ“Έ Media Gallery","πŸ” Search ArXiv","πŸ“ File Editor"],horizontal=True)

    # Removed the old model_choice radio
    # Instead, we rely on the dropdown in the Process Input section.

    # Custom component served from ./mycomponent; its return value is the
    # detected input string (presumably speech — confirm against component).
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")

    # Show input in a text box for editing if detected
    if val:
        val_stripped = val.replace('\n', ' ')
        edited_input = st.text_area("Edit your detected input:", value=val_stripped, height=100)
        run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
        autorun = st.checkbox("AutoRun on input change", value=False)

        # old_val remembers the last processed component value so autorun
        # only fires when the input actually changed between reruns.
        input_changed = (val != st.session_state.old_val)

        if autorun and input_changed:
            # Automatically run the selected model if input changed
            st.session_state.old_val = val
            run_selected_model(run_option, edited_input)
        else:
            # If not autorun, show a button to run manually
            if st.button("Process Input"):
                st.session_state.old_val = val
                run_selected_model(run_option, edited_input)


    if tab_main == "πŸ” Search ArXiv":
        st.subheader("πŸ” Search ArXiv")
        q=st.text_input("Research query:")

        # Toggles map directly onto perform_ai_lookup's keyword arguments.
        st.markdown("### πŸŽ›οΈ Audio Generation Options")
        vocal_summary = st.checkbox("πŸŽ™οΈ Vocal Summary (Short Answer)", value=True)
        extended_refs = st.checkbox("πŸ“œ Extended References & Summaries (Long)", value=False)
        titles_summary = st.checkbox("πŸ”– Paper Titles Only", value=True)

        if q and st.button("Run ArXiv Query"):
            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)

    elif tab_main == "🎀 Voice Input":
        st.subheader("🎀 Voice Recognition")
        user_text = st.text_area("Message:", height=100)
        user_text = user_text.strip().replace('\n', ' ')
        if st.button("Send πŸ“¨"):
            # Default to GPT-4o here, or you could similarly provide options.
            process_with_gpt(user_text)
        st.subheader("πŸ“œ Chat History")
        t1,t2=st.tabs(["Claude History","GPT-4o History"])
        with t1:
            for c in st.session_state.chat_history:
                st.write("**You:**", c["user"])
                st.write("**Claude:**", c["claude"])
        with t2:
            # GPT history is stored as OpenAI-style role/content messages.
            for m in st.session_state.messages:
                with st.chat_message(m["role"]):
                    st.markdown(m["content"])

    elif tab_main == "πŸ“Έ Media Gallery":
        st.header("🎬 Media Gallery - Images and Videos")
        tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
        with tabs[0]:
            imgs = glob.glob("*.png")+glob.glob("*.jpg")
            if imgs:
                # User-selectable number of gallery columns (1..5, default 3).
                c = st.slider("Cols",1,5,3)
                cols = st.columns(c)
                for i,f in enumerate(imgs):
                    with cols[i%c]:
                        st.image(Image.open(f),use_container_width=True)
                        if st.button(f"πŸ‘€ Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
                            a = process_image(f,"Describe this image.")
                            st.markdown(a)
            else:
                st.write("No images found.")
        with tabs[1]:
            vids = glob.glob("*.mp4")
            if vids:
                for v in vids:
                    with st.expander(f"πŸŽ₯ {os.path.basename(v)}"):
                        st.video(v)
                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
                            a = process_video_with_gpt(v,"Describe video.")
                            st.markdown(a)
            else:
                st.write("No videos found.")

    elif tab_main == "πŸ“ File Editor":
        # NOTE(review): current_file/file_content are read here but never
        # set in this file — presumably set by another component; verify.
        if getattr(st.session_state,'current_file',None):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_text = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save"):
                with open(st.session_state.current_file,'w',encoding='utf-8') as f:
                    f.write(new_text)
                st.success("Updated!")
                st.session_state.should_rerun = True
        else:
            st.write("Select a file from the sidebar to edit.")

    groups, sorted_prefixes = load_files_for_sidebar()
    display_file_manager_sidebar(groups, sorted_prefixes)

    # Inline viewer for the group selected via the sidebar "View Group" button.
    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
        st.write("---")
        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
        for f in groups[st.session_state.viewing_prefix]:
            fname = os.path.basename(f)
            ext = os.path.splitext(fname)[1].lower().strip('.')
            st.write(f"### {fname}")
            if ext == "md":
                content = open(f,'r',encoding='utf-8').read()
                st.markdown(content)
            elif ext == "mp3":
                st.audio(f)
            else:
                # Anything else is offered as a raw download.
                st.markdown(get_download_link(f), unsafe_allow_html=True)
        if st.button("Close Group View"):
            st.session_state.viewing_prefix = None

    # Deferred rerun: handlers set the flag instead of calling st.rerun()
    # mid-render, so the rerun happens once at the end of the script.
    if st.session_state.should_rerun:
        st.session_state.should_rerun = False
        st.rerun()

if __name__=="__main__":
    main()