awacke1 commited on
Commit
a2dcff5
Β·
verified Β·
1 Parent(s): b9432f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +497 -139
app.py CHANGED
@@ -1,105 +1,204 @@
1
  import streamlit as st
2
- import anthropic, openai, base64, cv2, glob, os, re, time, zipfile
 
 
3
  from datetime import datetime
4
- from collections import defaultdict
 
 
5
  from dotenv import load_dotenv
6
  from gradio_client import Client
 
 
7
  from PIL import Image
 
 
 
8
  from openai import OpenAI
9
- import asyncio, edge_tts
 
 
 
10
 
11
  # 🎯 1. Core Configuration & Setup
12
  st.set_page_config(
13
- page_title="🚲BikeAI Research", page_icon="🚲", layout="wide",
14
- menu_items={'About': "🚲BikeAI Research Assistant"}
 
 
 
 
 
 
 
15
  )
16
  load_dotenv()
17
 
18
- # πŸ”‘ 2. API Setup
19
- openai_api_key = st.secrets.get('OPENAI_API_KEY', os.getenv('OPENAI_API_KEY', ""))
20
- anthropic_key = st.secrets.get('ANTHROPIC_API_KEY', os.getenv('ANTHROPIC_API_KEY_3', ""))
21
- openai_client = OpenAI(api_key=openai_api_key, organization=os.getenv('OPENAI_ORG_ID'))
22
- claude_client = anthropic.Anthropic(api_key=anthropic_key)
23
-
24
- # πŸ“ 3. Session State
25
- for key in ['transcript_history', 'chat_history', 'messages', 'viewing_prefix', 'should_rerun', 'old_val']:
26
- if key not in st.session_state:
27
- st.session_state[key] = [] if key in ['transcript_history', 'chat_history', 'messages'] else None
28
 
29
- st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
30
-
31
- # 🧠 4. Content Processing
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  def get_high_info_terms(text: str) -> list:
33
- """Extract high-information terms from text"""
34
  stop_words = set([
35
  'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
36
- 'by', 'from', 'up', 'about', 'into', 'over', 'after', 'be', 'been', 'being', 'have',
37
- 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'should', 'could', 'this', 'that'
 
 
 
 
38
  ])
39
 
40
  key_phrases = [
41
  'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
42
- 'natural language', 'computer vision', 'data science', 'reinforcement learning',
43
- 'large language model', 'transformer model', 'quantum computing', 'arxiv search'
 
 
 
 
44
  ]
45
 
46
- preserved_phrases = [phrase for phrase in key_phrases if phrase in text.lower()]
47
- text = text.lower()
48
- for phrase in preserved_phrases:
49
- text = text.replace(phrase, '')
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- words = [word.lower() for word in re.findall(r'\b\w+(?:-\w+)*\b', text)
52
- if len(word) > 3 and word.lower() not in stop_words
53
- and not word.isdigit() and any(c.isalpha() for c in word)]
 
 
 
 
 
54
 
55
- unique_terms = list(dict.fromkeys(preserved_phrases + words))
56
- return unique_terms[:5]
57
 
58
- # πŸ“ 5. File Operations
59
  def generate_filename(content, file_type="md"):
60
- """Generate filename with high-information terms"""
61
  prefix = datetime.now().strftime("%y%m_%H%M") + "_"
62
  info_terms = get_high_info_terms(content)
63
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms) if info_terms else 'file'
64
- return f"{prefix}{name_text[:100]}.{file_type}"
 
 
 
 
 
 
65
 
66
  def create_file(prompt, response, file_type="md"):
67
- """Create a new file with generated filename"""
68
- filename = generate_filename(response.strip() or prompt.strip(), file_type)
69
  with open(filename, 'w', encoding='utf-8') as f:
70
- f.write(f"{prompt}\n\n{response}")
71
  return filename
72
 
73
- def create_zip_of_files(md_files, mp3_files):
74
- """Create zip with intelligent naming"""
75
- all_files = [f for f in md_files if 'readme.md' not in f.lower()] + mp3_files
76
- if not all_files:
77
- return None
78
-
79
- content = " ".join(open(f, 'r', encoding='utf-8').read() if f.endswith('.md')
80
- else os.path.basename(f) for f in all_files)
81
-
82
- timestamp = datetime.now().strftime("%y%m_%H%M")
83
- info_terms = get_high_info_terms(content)[:3]
84
- zip_name = f"{timestamp}_{'_'.join(t.replace(' ', '-') for t in info_terms)}.zip"
85
-
86
- with zipfile.ZipFile(zip_name, 'w') as z:
87
- for f in all_files:
88
- z.write(f)
89
- return zip_name
90
 
91
- # πŸ”Š 6. Audio Processing
92
  def clean_for_speech(text: str) -> str:
93
- """Prepare text for speech synthesis"""
94
- text = re.sub(r'\n|</s>|#|\(https?:\/\/[^\)]+\)|\s+', ' ', text)
95
- return text.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
98
- """Generate audio file using Edge TTS"""
99
  text = clean_for_speech(text)
100
  if not text.strip():
101
  return None
102
- communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
 
 
103
  out_fn = generate_filename(text, "mp3")
104
  await communicate.save(out_fn)
105
  return out_fn
@@ -108,11 +207,19 @@ def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
108
  """Wrapper for edge TTS generation"""
109
  return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
110
 
111
- # 🎬 7. Media Processing
 
 
 
 
 
 
 
112
  def process_image(image_path, user_prompt):
113
  """Process image with GPT-4V"""
114
  with open(image_path, "rb") as imgf:
115
- b64img = base64.b64encode(imgf.read()).decode("utf-8")
 
116
  resp = openai_client.chat.completions.create(
117
  model=st.session_state["openai_model"],
118
  messages=[
@@ -126,129 +233,380 @@ def process_image(image_path, user_prompt):
126
  )
127
  return resp.choices[0].message.content
128
 
129
- # πŸ€– 8. AI Integration
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
131
  """Perform Arxiv search and generate audio summaries"""
132
  start = time.time()
133
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
134
- refs = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1",
135
- api_name="/update_with_rag_md")[0]
136
- answer = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
137
- result = f"### πŸ”Ž {q}\n\n{answer}\n\n{refs}"
138
 
139
  st.markdown(result)
140
 
 
141
  if full_audio:
142
- complete_text = f"Query: {q}. {clean_for_speech(answer)} {clean_for_speech(refs)}"
 
143
  st.write("### πŸ“š Complete Audio Response")
144
- play_and_download_audio(speak_with_edge_tts(complete_text))
145
 
146
  if vocal_summary:
147
- st.write("### πŸŽ™οΈ Vocal Summary")
148
- play_and_download_audio(speak_with_edge_tts(clean_for_speech(answer)))
 
 
149
 
150
  if extended_refs:
151
- st.write("### πŸ“œ Extended References")
152
- play_and_download_audio(speak_with_edge_tts(
153
- "Reference summaries: " + clean_for_speech(refs)))
 
 
154
 
155
  if titles_summary:
156
- titles = [m.group(1) for m in re.finditer(r"\[([^\]]+)\]", refs)]
 
 
 
 
157
  if titles:
 
 
 
158
  st.write("### πŸ”– Paper Titles")
159
- play_and_download_audio(speak_with_edge_tts(
160
- "Paper titles: " + ", ".join(titles)))
161
 
162
- st.write(f"**Time:** {time.time()-start:.2f} s")
163
- create_file(q, result)
 
164
  return result
165
 
166
  def process_with_gpt(text):
167
  """Process text with GPT-4"""
168
- if not text:
169
- return
170
  st.session_state.messages.append({"role":"user","content":text})
171
- with st.chat_message("user"): st.markdown(text)
 
172
  with st.chat_message("assistant"):
173
- resp = openai_client.chat.completions.create(
174
  model=st.session_state["openai_model"],
175
  messages=st.session_state.messages,
176
  stream=False
177
  )
178
- ans = resp.choices[0].message.content
179
- st.write(f"GPT-4o: {ans}")
180
- create_file(text, ans)
181
  st.session_state.messages.append({"role":"assistant","content":ans})
182
  return ans
183
 
184
  def process_with_claude(text):
185
  """Process text with Claude"""
186
- if not text:
187
- return
188
- with st.chat_message("user"): st.markdown(text)
189
  with st.chat_message("assistant"):
190
- resp = claude_client.messages.create(
191
  model="claude-3-sonnet-20240229",
192
  max_tokens=1000,
193
  messages=[{"role":"user","content":text}]
194
  )
195
- ans = resp.content[0].text
196
- st.write(f"Claude-3.5: {ans}")
197
- create_file(text, ans)
198
  st.session_state.chat_history.append({"user":text,"claude":ans})
199
  return ans
200
 
201
- def play_and_download_audio(file_path):
202
- """Play and provide download link for audio file"""
203
- if file_path and os.path.exists(file_path):
204
- st.audio(file_path)
205
- dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
206
- st.markdown(dl_link, unsafe_allow_html=True)
 
207
 
208
- # 🎯 9. Main Application
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  def main():
210
- st.sidebar.title("🚲BikeAI Research")
211
- tab_main = st.radio("Action:", ["πŸ” Search ArXiv", "🎀 Voice", "πŸ“Έ Media", "πŸ“ Files"], horizontal=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
 
213
  if tab_main == "πŸ” Search ArXiv":
 
214
  q = st.text_input("Research query:")
215
- st.markdown("### πŸŽ›οΈ Audio Options")
216
- cols = st.columns(4)
217
- options = {
218
- 'vocal_summary': cols[0].checkbox("πŸŽ™οΈ Summary", value=True),
219
- 'extended_refs': cols[1].checkbox("πŸ“œ References", value=False),
220
- 'titles_summary': cols[2].checkbox("πŸ”– Titles", value=True),
221
- 'full_audio': cols[3].checkbox("πŸ“š Full Response", value=False,
222
- help="Generate complete audio including papers")
223
- }
224
- if q and st.button("Search"):
225
- perform_ai_lookup(q, **options)
226
-
227
- elif tab_main == "🎀 Voice":
228
- user_text = st.text_area("Message:", height=100).strip()
229
- model = st.selectbox("Model:", ["GPT-4o", "Claude-3.5"])
230
- if st.button("Send πŸ“¨"):
231
- if model == "GPT-4o":
232
- process_with_gpt(user_text)
233
- else:
234
- process_with_claude(user_text)
235
 
236
- elif tab_main == "πŸ“Έ Media":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
238
  with tabs[0]:
239
- for f in glob.glob("*.png") + glob.glob("*.jpg"):
240
- st.image(Image.open(f))
241
- if st.button(f"Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
242
- st.markdown(process_image(f, "Describe this image."))
243
-
244
- elif tab_main == "πŸ“ Files":
245
- md_files = [f for f in glob.glob("*.md") if 'readme.md' not in f.lower()]
246
- mp3_files = glob.glob("*.mp3")
247
- if st.button("Create Archive"):
248
- zip_file = create_zip_of_files(md_files, mp3_files)
249
- if zip_file:
250
- with open(zip_file, "rb") as f:
251
- st.download_button("Download Archive", f, zip_file)
252
-
253
- if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
  main()
 
1
  import streamlit as st
2
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
3
+ import plotly.graph_objects as go
4
+ import streamlit.components.v1 as components
5
  from datetime import datetime
6
+ from audio_recorder_streamlit import audio_recorder
7
+ from bs4 import BeautifulSoup
8
+ from collections import defaultdict, deque
9
  from dotenv import load_dotenv
10
  from gradio_client import Client
11
+ from huggingface_hub import InferenceClient
12
+ from io import BytesIO
13
  from PIL import Image
14
+ from PyPDF2 import PdfReader
15
+ from urllib.parse import quote
16
+ from xml.etree import ElementTree as ET
17
  from openai import OpenAI
18
+ import extra_streamlit_components as stx
19
+ from streamlit.runtime.scriptrunner import get_script_run_ctx
20
+ import asyncio
21
+ import edge_tts
22
 
23
# 🎯 1. Core Configuration & Setup
# st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="🚲BikeAIπŸ† Claude/GPT Research",
    page_icon="πŸš²πŸ†",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲BikeAIπŸ† Claude/GPT Research AI"
    }
)
# Load API keys and related settings from a local .env file into os.environ.
load_dotenv()
36
 
37
# πŸ”‘ 2. API Setup & Clients
# Environment variables are the fallback; Streamlit secrets take precedence.
openai_api_key = os.getenv('OPENAI_API_KEY', "")
anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
if 'OPENAI_API_KEY' in st.secrets:
    openai_api_key = st.secrets['OPENAI_API_KEY']
if 'ANTHROPIC_API_KEY' in st.secrets:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]

openai.api_key = openai_api_key
claude_client = anthropic.Anthropic(api_key=anthropic_key)
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
# NOTE(review): HF_KEY / API_URL are read here but not used anywhere in this
# chunk — confirm they are consumed elsewhere or remove them.
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')
50
+
51
+ # πŸ“ 3. Session State Management
52
+ if 'transcript_history' not in st.session_state:
53
+ st.session_state['transcript_history'] = []
54
+ if 'chat_history' not in st.session_state:
55
+ st.session_state['chat_history'] = []
56
+ if 'openai_model' not in st.session_state:
57
+ st.session_state['openai_model'] = "gpt-4o-2024-05-13"
58
+ if 'messages' not in st.session_state:
59
+ st.session_state['messages'] = []
60
+ if 'last_voice_input' not in st.session_state:
61
+ st.session_state['last_voice_input'] = ""
62
+ if 'editing_file' not in st.session_state:
63
+ st.session_state['editing_file'] = None
64
+ if 'edit_new_name' not in st.session_state:
65
+ st.session_state['edit_new_name'] = ""
66
+ if 'edit_new_content' not in st.session_state:
67
+ st.session_state['edit_new_content'] = ""
68
+ if 'viewing_prefix' not in st.session_state:
69
+ st.session_state['viewing_prefix'] = None
70
+ if 'should_rerun' not in st.session_state:
71
+ st.session_state['should_rerun'] = False
72
+ if 'old_val' not in st.session_state:
73
+ st.session_state['old_val'] = None
74
+
75
# 🎨 4. Custom CSS
# Dark-gradient theme plus a little breathing room between buttons.
st.markdown("""
<style>
.main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
.stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
.stButton>button {
    margin-right: 0.5rem;
}
</style>
""", unsafe_allow_html=True)

# Emoji shown next to each file type in the sidebar file manager.
FILE_EMOJIS = {
    "md": "πŸ“",
    "mp3": "🎡",
}
90
+
91
# 🧠 5. High-Information Content Extraction
def get_high_info_terms(text: str) -> list:
    """Extract up to 5 high-information terms from *text*.

    Known multi-word key phrases are matched first (case-insensitively) and
    preserved verbatim; remaining single words are kept only if they are
    longer than 3 characters, contain at least one letter, are not pure
    digits, and are not stop words.

    Args:
        text: Free-form text (query, document content, transcript, ...).

    Returns:
        list[str]: At most 5 unique lowercase terms, key phrases first,
        in order of first appearance.
    """
    stop_words = set([
        'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
        'by', 'from', 'up', 'about', 'into', 'over', 'after', 'is', 'are', 'was', 'were',
        'be', 'been', 'being', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
        'should', 'could', 'might', 'must', 'shall', 'can', 'may', 'this', 'that', 'these',
        'those', 'i', 'you', 'he', 'she', 'it', 'we', 'they', 'what', 'which', 'who',
        'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most',
        'other', 'some', 'such', 'than', 'too', 'very', 'just', 'there'
    ])

    key_phrases = [
        'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
        'personal assistant', 'natural language', 'computer vision', 'data science',
        'reinforcement learning', 'knowledge graph', 'semantic search', 'time series',
        'large language model', 'transformer model', 'attention mechanism',
        'autonomous system', 'edge computing', 'quantum computing', 'blockchain technology',
        'cognitive science', 'human computer', 'decision making', 'arxiv search',
        'research paper', 'scientific study', 'empirical analysis'
    ]

    # Work on a lowercase copy so phrase detection AND removal agree on case.
    # (Previously phrases were found in the lowercased text but removed from
    # the original-cased text, so e.g. "Machine Learning" leaked its words
    # into the single-word pass.)
    lower_text = text.lower()
    preserved_phrases = []
    for phrase in key_phrases:
        if phrase in lower_text:
            preserved_phrases.append(phrase)
            lower_text = lower_text.replace(phrase, '')

    # Extract individual high-info words from whatever text remains.
    words = re.findall(r'\b\w+(?:-\w+)*\b', lower_text)
    high_info_words = [
        w for w in words
        if len(w) > 3
        and w not in stop_words
        and not w.isdigit()
        and any(c.isalpha() for c in w)
    ]

    # Deduplicate while preserving order; phrases rank ahead of single words.
    unique_terms = list(dict.fromkeys(preserved_phrases + high_info_words))
    return unique_terms[:5]
143
 
144
+ # πŸ“ 6. File Operations
145
  def generate_filename(content, file_type="md"):
146
+ """Generate filename with meaningful terms"""
147
  prefix = datetime.now().strftime("%y%m_%H%M") + "_"
148
  info_terms = get_high_info_terms(content)
149
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms) if info_terms else 'file'
150
+
151
+ max_length = 100
152
+ if len(name_text) > max_length:
153
+ name_text = name_text[:max_length]
154
+
155
+ filename = f"{prefix}{name_text}.{file_type}"
156
+ return filename
157
 
158
def create_file(prompt, response, file_type="md"):
    """Persist a prompt/response pair to disk under an auto-generated name.

    The filename is derived from the response when it is non-empty, otherwise
    from the prompt. Returns the filename written.
    """
    source = response.strip() if response.strip() else prompt.strip()
    filename = generate_filename(source, file_type)
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(prompt + "\n\n" + response)
    return filename
164
 
165
def get_download_link(file):
    """Return an HTML anchor that embeds *file* as a base64 data-URI download."""
    with open(file, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode()
    basename = os.path.basename(file)
    return f'<a href="data:file/zip;base64,{encoded}" download="{basename}">πŸ“‚ Download {basename}</a>'
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
# πŸ”Š 7. Audio Processing
def clean_for_speech(text: str) -> str:
    """Normalize *text* so it reads naturally when synthesized to speech.

    Strips newlines, end-of-sequence tokens and markdown '#' markers, drops
    parenthesized http(s) URLs, and collapses runs of whitespace.
    """
    for token, replacement in (("\n", " "), ("</s>", " "), ("#", "")):
        text = text.replace(token, replacement)
    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
    return re.sub(r"\s+", " ", text).strip()
180
+
181
@st.cache_resource
def speech_synthesis_html(result):
    """Render a hidden HTML snippet that speaks *result* via the browser's
    Web Speech API (SpeechSynthesisUtterance).

    NOTE(review): @st.cache_resource memoizes per distinct *result*, so the
    same text is only spoken once per session — confirm that is intended.
    """
    # Double quotes are stripped so they cannot break out of the JS string
    # literal; other quote characters are NOT escaped.
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    # height=0 keeps the component invisible; only the audio side effect matters.
    components.html(html_code, height=0)
193
 
194
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Synthesize *text* to an MP3 file with Edge TTS.

    Returns the generated filename, or None when the cleaned text is empty.
    Rate is a signed percentage, pitch a signed Hz offset.
    """
    text = clean_for_speech(text)
    if not text.strip():
        return None
    communicate = edge_tts.Communicate(
        text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz"
    )
    out_fn = generate_filename(text, "mp3")
    await communicate.save(out_fn)
    return out_fn
 
207
  """Wrapper for edge TTS generation"""
208
  return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
209
 
210
def play_and_download_audio(file_path):
    """Embed an audio player and a download link for *file_path*.

    Silently does nothing when the path is falsy or the file does not exist.
    """
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        # Read via a context manager so the handle is closed promptly
        # (the original left the open file object to the garbage collector).
        with open(file_path, "rb") as fh:
            b64 = base64.b64encode(fh.read()).decode()
        name = os.path.basename(file_path)
        dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{name}">Download {name}</a>'
        st.markdown(dl_link, unsafe_allow_html=True)
216
+
217
+ # 🎬 8. Media Processing
218
  def process_image(image_path, user_prompt):
219
  """Process image with GPT-4V"""
220
  with open(image_path, "rb") as imgf:
221
+ image_data = imgf.read()
222
+ b64img = base64.b64encode(image_data).decode("utf-8")
223
  resp = openai_client.chat.completions.create(
224
  model=st.session_state["openai_model"],
225
  messages=[
 
233
  )
234
  return resp.choices[0].message.content
235
 
236
def process_audio(audio_path):
    """Transcribe an audio file with OpenAI Whisper.

    Side effect: the transcript is appended to st.session_state.messages as a
    user turn, so it joins the GPT chat history.

    Returns:
        str: The transcription text.
    """
    with open(audio_path, "rb") as f:
        transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    return transcription.text
242
+
243
def process_video(video_path, seconds_per_frame=1):
    """Sample frames from *video_path* roughly every *seconds_per_frame* seconds.

    Returns:
        list[str]: base64-encoded JPEG frames in chronological order.
    """
    vid = cv2.VideoCapture(video_path)
    try:
        total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = vid.get(cv2.CAP_PROP_FPS)
        # Clamp the stride to at least 1: streams that report fps=0 (or
        # sub-second sampling requests) previously produced range(..., 0),
        # which raises ValueError.
        skip = max(1, int(fps * seconds_per_frame))
        frames_b64 = []
        for i in range(0, total, skip):
            vid.set(cv2.CAP_PROP_POS_FRAMES, i)
            ret, frame = vid.read()
            if not ret:
                break
            _, buf = cv2.imencode(".jpg", frame)
            frames_b64.append(base64.b64encode(buf).decode("utf-8"))
    finally:
        # Release the capture even if decoding raises mid-loop.
        vid.release()
    return frames_b64
258
+
259
def process_video_with_gpt(video_path, prompt):
    """Send sampled video frames plus *prompt* to the vision model.

    Returns the model's text response.
    """
    frame_payload = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}}
        for fr in process_video(video_path)
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze video frames."},
            {"role": "user", "content": [{"type": "text", "text": prompt}, *frame_payload]},
        ],
    )
    return resp.choices[0].message.content
273
+
274
# πŸ€– 9. AI Model Integration
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
    """Run an ArXiv RAG query for *q* and render text plus optional audio.

    Args:
        q: Research query string.
        vocal_summary: Speak the short LLM answer.
        extended_refs: Speak the (long) reference summaries.
        titles_summary: Speak only the bracketed paper titles found in refs.
        full_audio: Speak the complete answer and references.

    Returns:
        str: The combined markdown (answer + references); also persisted to a
        .md file via create_file.
    """
    start = time.time()
    # Remote Gradio Space hosting the ArXiv search + RAG pipeline.
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    # First endpoint returns reference markdown; second the direct LLM answer.
    r = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")
    refs = r[0]
    r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
    result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"

    st.markdown(result)

    # Generate full audio version if requested
    if full_audio:
        complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
        audio_file_full = speak_with_edge_tts(complete_text)
        st.write("### πŸ“š Complete Audio Response")
        play_and_download_audio(audio_file_full)

    if vocal_summary:
        main_text = clean_for_speech(r2)
        audio_file_main = speak_with_edge_tts(main_text)
        st.write("### πŸŽ™οΈ Vocal Summary (Short Answer)")
        play_and_download_audio(audio_file_main)

    if extended_refs:
        summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
        summaries_text = clean_for_speech(summaries_text)
        audio_file_refs = speak_with_edge_tts(summaries_text)
        st.write("### πŸ“œ Extended References & Summaries")
        play_and_download_audio(audio_file_refs)

    if titles_summary:
        # Titles are assumed to appear as "[Title]" markdown links in refs —
        # one per line; only the first bracketed span per line is taken.
        titles = []
        for line in refs.split('\n'):
            m = re.search(r"\[([^\]]+)\]", line)
            if m:
                titles.append(m.group(1))
        if titles:
            titles_text = "Here are the titles of the papers: " + ", ".join(titles)
            titles_text = clean_for_speech(titles_text)
            audio_file_titles = speak_with_edge_tts(titles_text)
            st.write("### πŸ”– Paper Titles")
            play_and_download_audio(audio_file_titles)

    elapsed = time.time()-start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    # Persist the markdown result under an auto-generated, keyword-rich name.
    create_file(q, result, "md")
    return result
323
 
324
def process_with_gpt(text):
    """Send *text* to the configured GPT model, render the exchange, and log it.

    Appends both the user turn and the assistant reply to
    st.session_state.messages, saves the pair to a .md file, and returns the
    reply (None for empty input).
    """
    if not text:
        return
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        completion = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False,
        )
        ans = completion.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(text, ans, "md")
    st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans
341
 
342
def process_with_claude(text):
    """Send *text* to Claude, render the exchange, and log it.

    Appends the pair to st.session_state.chat_history, saves it to a .md
    file, and returns the reply (None for empty input).
    """
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        reply = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}],
        )
        ans = reply.content[0].text
        st.write("Claude-3.5: " + ans)
        create_file(text, ans, "md")
    st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
358
 
359
# πŸ“‚ 10. File Management
def create_zip_of_files(md_files, mp3_files):
    """Zip the given files under a timestamped, keyword-derived name.

    README.md is excluded from the markdown set. Returns the zip filename,
    or None when there is nothing to pack.
    """
    keepers = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
    keepers += mp3_files
    if not keepers:
        return None

    # Markdown contributes its full text to keyword mining; audio files
    # contribute just their basename.
    def _text_of(path):
        if path.endswith('.md'):
            with open(path, 'r', encoding='utf-8') as fh:
                return fh.read()
        return os.path.basename(path)

    combined = " ".join(_text_of(f) for f in keepers)
    terms = get_high_info_terms(combined)[:3]

    stamp = datetime.now().strftime("%y%m_%H%M")
    zip_name = f"{stamp}_{'_'.join(t.replace(' ', '-') for t in terms)}.zip"

    with zipfile.ZipFile(zip_name, 'w') as z:
        for f in keepers:
            z.write(f)

    return zip_name
388
+
389
def load_files_for_sidebar():
    """Collect local .md/.mp3 files grouped by their 10-char filename prefix.

    README.md is excluded. Returns:
        (groups, sorted_prefixes): prefix -> file list (newest first within
        each group), and prefixes ordered by most recently modified group.
    """
    candidates = [f for f in glob.glob("*.md")
                  if os.path.basename(f).lower() != 'readme.md']
    candidates += glob.glob("*.mp3")

    groups = defaultdict(list)
    for path in candidates:
        # First 10 characters of the name form the "yymm_HHMM_" stamp prefix.
        groups[os.path.basename(path)[:10]].append(path)

    for file_list in groups.values():
        file_list.sort(key=os.path.getmtime, reverse=True)

    sorted_prefixes = sorted(
        groups,
        key=lambda pre: max(os.path.getmtime(p) for p in groups[pre]),
        reverse=True,
    )
    return groups, sorted_prefixes
410
+
411
def extract_keywords_from_md(files):
    """Concatenate the text of all .md files in *files* and mine keywords.

    Non-markdown paths are ignored. Returns the high-information term list.
    """
    text = ""
    for f in files:
        if f.endswith(".md"):
            # Use a context manager so every handle is closed promptly
            # (the original opened files without ever closing them).
            with open(f, 'r', encoding='utf-8') as fh:
                text += " " + fh.read()
    return get_high_info_terms(text)
419
+
420
def display_file_manager_sidebar(groups, sorted_prefixes):
    """Display the file manager in the sidebar.

    Renders bulk delete/zip controls, then one expander per prefix group with
    per-group view/delete buttons and a file listing.

    Args:
        groups: dict mapping 10-char filename prefix -> list of file paths.
        sorted_prefixes: prefixes in display order (newest group first).
    """
    st.sidebar.title("🎡 Audio & Document Manager")

    # Flatten the groups so bulk actions can operate on every file.
    all_md = []
    all_mp3 = []
    for prefix in groups:
        for f in groups[prefix]:
            if f.endswith(".md"):
                all_md.append(f)
            elif f.endswith(".mp3"):
                all_mp3.append(f)

    top_bar = st.sidebar.columns(3)
    with top_bar[0]:
        if st.button("πŸ—‘ Del All MD"):
            for f in all_md:
                os.remove(f)
            # Defer the rerun; main() checks this flag at the end of the run.
            st.session_state.should_rerun = True
    with top_bar[1]:
        if st.button("πŸ—‘ Del All MP3"):
            for f in all_mp3:
                os.remove(f)
            st.session_state.should_rerun = True
    with top_bar[2]:
        if st.button("⬇️ Zip All"):
            z = create_zip_of_files(all_md, all_mp3)
            if z:
                st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)

    for prefix in sorted_prefixes:
        files = groups[prefix]
        kw = extract_keywords_from_md(files)
        keywords_str = " ".join(kw) if kw else "No Keywords"
        with st.sidebar.expander(f"{prefix} Files ({len(files)}) - Keywords: {keywords_str}", expanded=True):
            c1,c2 = st.columns(2)
            with c1:
                if st.button("πŸ‘€View Group", key="view_group_"+prefix):
                    st.session_state.viewing_prefix = prefix
            with c2:
                if st.button("πŸ—‘Del Group", key="del_group_"+prefix):
                    for f in files:
                        os.remove(f)
                    st.success(f"Deleted all files in group {prefix} successfully!")
                    st.session_state.should_rerun = True

            for f in files:
                fname = os.path.basename(f)
                # NOTE(review): the variable is named ctime but getmtime is
                # the modification time, not creation time.
                ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
                st.write(f"**{fname}** - {ctime}")
470
+
471
# 🎯 11. Main Application
def main():
    """Top-level Streamlit page.

    Routes between the Voice, Media Gallery, ArXiv search and File Editor
    tabs, wires up the custom speech-input component, and finishes with the
    sidebar file manager plus the deferred-rerun check.
    """
    st.sidebar.markdown("### 🚲BikeAIπŸ† Multi-Agent Research AI")
    tab_main = st.radio("Action:",["🎀 Voice Input","πŸ“Έ Media Gallery","πŸ” Search ArXiv","πŸ“ File Editor"],horizontal=True)

    # Custom component (served from ./mycomponent); its return value is the
    # detected input text, or a falsy value when nothing was captured.
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")

    # Show input in a text box for editing if detected
    if val:
        val_stripped = val.replace('\n', ' ')
        edited_input = st.text_area("Edit your detected input:", value=val_stripped, height=100)
        run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
        col1, col2 = st.columns(2)
        with col1:
            autorun = st.checkbox("AutoRun on input change", value=False)
        with col2:
            full_audio = st.checkbox("Generate Complete Audio", value=False,
                                     help="Generate audio for the complete response including all papers and summaries")

        # old_val remembers the last processed component value so autorun
        # only fires when the detected input actually changed.
        input_changed = (val != st.session_state.old_val)

        if autorun and input_changed:
            st.session_state.old_val = val
            if run_option == "Arxiv":
                perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
                                  titles_summary=True, full_audio=full_audio)
            else:
                if run_option == "GPT-4o":
                    process_with_gpt(edited_input)
                elif run_option == "Claude-3.5":
                    process_with_claude(edited_input)
        else:
            if st.button("Process Input"):
                st.session_state.old_val = val
                if run_option == "Arxiv":
                    perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
                                      titles_summary=True, full_audio=full_audio)
                else:
                    if run_option == "GPT-4o":
                        process_with_gpt(edited_input)
                    elif run_option == "Claude-3.5":
                        process_with_claude(edited_input)

    if tab_main == "πŸ” Search ArXiv":
        st.subheader("πŸ” Search ArXiv")
        q = st.text_input("Research query:")

        st.markdown("### πŸŽ›οΈ Audio Generation Options")
        vocal_summary = st.checkbox("πŸŽ™οΈ Vocal Summary (Short Answer)", value=True)
        extended_refs = st.checkbox("πŸ“œ Extended References & Summaries (Long)", value=False)
        titles_summary = st.checkbox("πŸ”– Paper Titles Only", value=True)
        full_audio = st.checkbox("πŸ“š Generate Complete Audio Response", value=False,
                                 help="Generate audio for the complete response including all papers and summaries")

        if q and st.button("Run ArXiv Query"):
            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
                              titles_summary=titles_summary, full_audio=full_audio)

    elif tab_main == "🎀 Voice Input":
        st.subheader("🎀 Voice Recognition")
        user_text = st.text_area("Message:", height=100)
        user_text = user_text.strip().replace('\n', ' ')
        # NOTE(review): this tab always routes to GPT; the Claude history tab
        # below is only populated by the component-driven path above.
        if st.button("Send πŸ“¨"):
            process_with_gpt(user_text)
        st.subheader("πŸ“œ Chat History")
        t1,t2=st.tabs(["Claude History","GPT-4o History"])
        with t1:
            for c in st.session_state.chat_history:
                st.write("**You:**", c["user"])
                st.write("**Claude:**", c["claude"])
        with t2:
            for m in st.session_state.messages:
                with st.chat_message(m["role"]):
                    st.markdown(m["content"])

    elif tab_main == "πŸ“Έ Media Gallery":
        st.header("🎬 Media Gallery - Images and Videos")
        tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
        with tabs[0]:
            imgs = glob.glob("*.png")+glob.glob("*.jpg")
            if imgs:
                c = st.slider("Cols",1,5,3)
                cols = st.columns(c)
                for i,f in enumerate(imgs):
                    with cols[i%c]:
                        st.image(Image.open(f),use_container_width=True)
                        if st.button(f"πŸ‘€ Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
                            a = process_image(f,"Describe this image.")
                            st.markdown(a)
            else:
                st.write("No images found.")
        with tabs[1]:
            vids = glob.glob("*.mp4")
            if vids:
                for v in vids:
                    with st.expander(f"πŸŽ₯ {os.path.basename(v)}"):
                        st.video(v)
                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
                            a = process_video_with_gpt(v,"Describe video.")
                            st.markdown(a)
            else:
                st.write("No videos found.")

    elif tab_main == "πŸ“ File Editor":
        # NOTE(review): current_file/file_content are never assigned in this
        # file — the editor branch only activates if another code path sets
        # them; confirm this is not dead code.
        if getattr(st.session_state,'current_file',None):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_text = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save"):
                with open(st.session_state.current_file,'w',encoding='utf-8') as f:
                    f.write(new_text)
                st.success("Updated!")
                st.session_state.should_rerun = True
        else:
            st.write("Select a file from the sidebar to edit.")

    groups, sorted_prefixes = load_files_for_sidebar()
    display_file_manager_sidebar(groups, sorted_prefixes)

    # Inline viewer for the group chosen via the sidebar's "View Group".
    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
        st.write("---")
        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
        for f in groups[st.session_state.viewing_prefix]:
            fname = os.path.basename(f)
            ext = os.path.splitext(fname)[1].lower().strip('.')
            st.write(f"### {fname}")
            if ext == "md":
                content = open(f,'r',encoding='utf-8').read()
                st.markdown(content)
            elif ext == "mp3":
                st.audio(f)
            else:
                st.markdown(get_download_link(f), unsafe_allow_html=True)
        if st.button("Close Group View"):
            st.session_state.viewing_prefix = None

    # Single deferred rerun point: button handlers set the flag instead of
    # calling st.rerun() mid-render.
    if st.session_state.should_rerun:
        st.session_state.should_rerun = False
        st.rerun()

if __name__=="__main__":
    main()