girishwangikar
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,44 +1,75 @@
|
|
1 |
import streamlit as st
|
2 |
import pandas as pd
|
3 |
-
from smolagents import CodeAgent
|
4 |
from typing import Union, List, Dict
|
5 |
from groq import Groq
|
6 |
import os
|
7 |
from duckduckgo_search import DDGS
|
8 |
|
9 |
class DuckDuckGoSearch:
|
10 |
-
"""
|
|
|
|
|
|
|
11 |
def __init__(self):
|
|
|
12 |
self.ddgs = DDGS()
|
13 |
|
14 |
def __call__(self, query: str, max_results: int = 5) -> str:
|
15 |
try:
|
16 |
# Perform the search and get results
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
# Format the results into a readable string
|
20 |
formatted_results = []
|
21 |
for idx, result in enumerate(search_results, 1):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
formatted_results.append(
|
23 |
-
f"{idx}. Title: {
|
24 |
-
f"
|
25 |
-
f" Source: {
|
|
|
|
|
26 |
)
|
27 |
|
28 |
return "\n".join(formatted_results)
|
|
|
29 |
except Exception as e:
|
30 |
-
|
|
|
|
|
|
|
31 |
|
32 |
class GroqLLM:
|
33 |
-
"""
|
|
|
|
|
|
|
34 |
def __init__(self, model_name="llama-3.1-8B-Instant"):
|
35 |
self.client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
36 |
self.model_name = model_name
|
37 |
|
38 |
def __call__(self, prompt: Union[str, dict, List[Dict]]) -> str:
|
39 |
try:
|
|
|
40 |
prompt_str = str(prompt) if isinstance(prompt, (dict, list)) else prompt
|
41 |
|
|
|
42 |
completion = self.client.chat.completions.create(
|
43 |
model=self.model_name,
|
44 |
messages=[{
|
@@ -49,14 +80,18 @@ class GroqLLM:
|
|
49 |
max_tokens=1024,
|
50 |
stream=False
|
51 |
)
|
|
|
52 |
return completion.choices[0].message.content if completion.choices else "Error: No response generated"
|
53 |
except Exception as e:
|
54 |
error_msg = f"Error generating response: {str(e)}"
|
55 |
-
print(error_msg)
|
56 |
return error_msg
|
57 |
|
58 |
def create_analysis_prompt(topic: str, search_results: str) -> str:
|
59 |
-
"""
|
|
|
|
|
|
|
60 |
return f"""Analyze the following news information about {topic}.
|
61 |
Search Results: {search_results}
|
62 |
|
@@ -86,7 +121,10 @@ def create_analysis_prompt(topic: str, search_results: str) -> str:
|
|
86 |
Please format the analysis in a clear, journalistic style with section headers."""
|
87 |
|
88 |
def log_agent_activity(prompt: str, result: str, agent_name: str):
|
89 |
-
"""
|
|
|
|
|
|
|
90 |
with st.expander("View Agent Activity Log"):
|
91 |
st.write(f"### Agent Activity ({agent_name}):")
|
92 |
st.write("**Input Prompt:**")
|
@@ -137,19 +175,29 @@ try:
|
|
137 |
if news_topic:
|
138 |
with st.spinner("Gathering information and analyzing..."):
|
139 |
try:
|
|
|
|
|
|
|
|
|
140 |
# Perform search
|
141 |
search_results = search_tool(
|
142 |
f"Latest news about {news_topic} last 7 days",
|
143 |
max_results=search_depth
|
144 |
)
|
145 |
|
146 |
-
if not search_results.startswith("Search error"):
|
|
|
|
|
|
|
147 |
# Create analysis prompt
|
148 |
analysis_prompt = create_analysis_prompt(news_topic, search_results)
|
149 |
|
150 |
# Get analysis from LLM
|
151 |
analysis_result = llm(analysis_prompt)
|
152 |
|
|
|
|
|
|
|
153 |
# Display results
|
154 |
st.subheader("π Analysis Results")
|
155 |
st.markdown(analysis_result)
|
@@ -161,7 +209,8 @@ try:
|
|
161 |
"News Analysis Agent"
|
162 |
)
|
163 |
else:
|
164 |
-
|
|
|
165 |
|
166 |
except Exception as e:
|
167 |
st.error(f"An error occurred during analysis: {str(e)}")
|
|
|
1 |
import streamlit as st
|
2 |
import pandas as pd
|
|
|
3 |
from typing import Union, List, Dict
|
4 |
from groq import Groq
|
5 |
import os
|
6 |
from duckduckgo_search import DDGS
|
7 |
|
8 |
class DuckDuckGoSearch:
    """
    Callable wrapper around the duckduckgo_search news API.

    Fetches recent news articles for a query and renders them as one
    human-readable string. Failures are never raised to the caller:
    they come back as an error string, so callers can branch on the
    returned text (e.g. ``startswith("Search error")``).
    """

    def __init__(self):
        # A single search session, reused for every query.
        self.ddgs = DDGS()

    def __call__(self, query: str, max_results: int = 5) -> str:
        try:
            # news() targets recent articles, which suits news analysis
            # better than the generic text search.
            hits = list(self.ddgs.news(
                query,
                max_results=max_results,
                region='wt-wt',  # worldwide results
                safesearch='on',
            ))

            if not hits:
                return "No results found. Try modifying your search query."

            # Render each hit, falling back gracefully for any field the
            # API response may omit.
            rendered = [
                f"{position}. Title: {hit.get('title', 'No title available')}\n"
                f"   Date: {hit.get('date', 'Date not available')}\n"
                f"   Source: {hit.get('source', 'Unknown source')}\n"
                f"   Summary: {hit.get('body', hit.get('snippet', 'No description available'))}\n"
                f"   URL: {hit.get('url', hit.get('link', 'No link available'))}\n"
                for position, hit in enumerate(hits, 1)
            ]
            return "\n".join(rendered)

        except Exception as e:
            # Surface the failure as text so the UI layer can display it.
            error_msg = f"Search error: {str(e)}\nTry again with a different search term or check your internet connection."
            print(f"DuckDuckGo search error: {str(e)}")  # For logging
            return error_msg
57 |
|
58 |
class GroqLLM:
|
59 |
+
"""
|
60 |
+
LLM interface using Groq's LLama model.
|
61 |
+
Handles API communication and response processing.
|
62 |
+
"""
|
63 |
def __init__(self, model_name="llama-3.1-8B-Instant"):
|
64 |
self.client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
|
65 |
self.model_name = model_name
|
66 |
|
67 |
def __call__(self, prompt: Union[str, dict, List[Dict]]) -> str:
|
68 |
try:
|
69 |
+
# Convert prompt to string if it's a complex structure
|
70 |
prompt_str = str(prompt) if isinstance(prompt, (dict, list)) else prompt
|
71 |
|
72 |
+
# Make API call to Groq
|
73 |
completion = self.client.chat.completions.create(
|
74 |
model=self.model_name,
|
75 |
messages=[{
|
|
|
80 |
max_tokens=1024,
|
81 |
stream=False
|
82 |
)
|
83 |
+
|
84 |
return completion.choices[0].message.content if completion.choices else "Error: No response generated"
|
85 |
except Exception as e:
|
86 |
error_msg = f"Error generating response: {str(e)}"
|
87 |
+
print(error_msg) # For logging
|
88 |
return error_msg
|
89 |
|
90 |
def create_analysis_prompt(topic: str, search_results: str) -> str:
|
91 |
+
"""
|
92 |
+
Creates a detailed prompt for news analysis, structuring the request
|
93 |
+
to get comprehensive and well-organized results from the LLM.
|
94 |
+
"""
|
95 |
return f"""Analyze the following news information about {topic}.
|
96 |
Search Results: {search_results}
|
97 |
|
|
|
121 |
Please format the analysis in a clear, journalistic style with section headers."""
|
122 |
|
123 |
def log_agent_activity(prompt: str, result: str, agent_name: str):
|
124 |
+
"""
|
125 |
+
Creates an expandable log of agent activities in the Streamlit interface
|
126 |
+
for transparency and debugging purposes.
|
127 |
+
"""
|
128 |
with st.expander("View Agent Activity Log"):
|
129 |
st.write(f"### Agent Activity ({agent_name}):")
|
130 |
st.write("**Input Prompt:**")
|
|
|
175 |
if news_topic:
|
176 |
with st.spinner("Gathering information and analyzing..."):
|
177 |
try:
|
178 |
+
# Show search progress
|
179 |
+
search_placeholder = st.empty()
|
180 |
+
search_placeholder.info("Searching for recent news...")
|
181 |
+
|
182 |
# Perform search
|
183 |
search_results = search_tool(
|
184 |
f"Latest news about {news_topic} last 7 days",
|
185 |
max_results=search_depth
|
186 |
)
|
187 |
|
188 |
+
if not search_results.startswith(("Search error", "No results")):
|
189 |
+
# Update progress
|
190 |
+
search_placeholder.info("Analyzing search results...")
|
191 |
+
|
192 |
# Create analysis prompt
|
193 |
analysis_prompt = create_analysis_prompt(news_topic, search_results)
|
194 |
|
195 |
# Get analysis from LLM
|
196 |
analysis_result = llm(analysis_prompt)
|
197 |
|
198 |
+
# Clear progress messages
|
199 |
+
search_placeholder.empty()
|
200 |
+
|
201 |
# Display results
|
202 |
st.subheader("π Analysis Results")
|
203 |
st.markdown(analysis_result)
|
|
|
209 |
"News Analysis Agent"
|
210 |
)
|
211 |
else:
|
212 |
+
search_placeholder.empty()
|
213 |
+
st.error(search_results)
|
214 |
|
215 |
except Exception as e:
|
216 |
st.error(f"An error occurred during analysis: {str(e)}")
|