from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# from newsletter_gen.tools.research import SearchAndContents, FindSimilar, GetContents
from research import SearchAndContents, FindSimilar, GetContents  # JB
from langchain_anthropic import ChatAnthropic
from langchain_groq import ChatGroq
from datetime import datetime
import streamlit as st
from typing import Union, List, Tuple, Dict
from langchain_core.agents import AgentFinish
import json
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import HuggingFaceEndpoint  # JB
import os

# JB:
# https://python.langchain.com/v0.2/docs/integrations/chat/ollama/
# LangChain supports many other chat models. Here, we're using Ollama.
from langchain_community.chat_models import ChatOllama

# To get rid of the telemetry error messages, see:
# "Connection Timeout Error with telemetry.crewai.com" #254
# https://github.com/joaomdmoura/crewAI/issues/254
os.environ["OTEL_SDK_DISABLED"] = "true"
# SUCCESS: this indeed seems to prevent the telemetry error messages in the
# VS Code terminal! That terminal still shows short messages such as:
# 2024-06-14 02:20:17,425 - 25632 - __init__.py-__init__:1218 - WARNING: SDK is disabled.


@CrewBase
class NewsletterGenCrew:
    """NewsletterGen crew"""

    # agents_config = "config/agents.yaml"
    # tasks_config = "config/tasks.yaml"
    agents_config = "agents.yaml"
    tasks_config = "tasks.yaml"

    def llm(self):
        # llm = ChatAnthropic(model_name="claude-3-sonnet-20240229", max_tokens=4096)  # ORIGINAL
        # llm = ChatAnthropic(
        #     model_name="claude-3-sonnet-20240229",
        #     # max_tokens=4096,
        #     cache=True,
        #     api_key=os.getenv("ANTHROPIC_API_KEY"),  # JB: raw key redacted; load it from the environment
        # )  # JB
        # https://console.anthropic.com/dashboard
        # https://console.anthropic.com/settings/keys
        # jb_anthropic_key_2_13-06-2024: set ANTHROPIC_API_KEY in the environment
        # https://console.anthropic.com/settings/usage
        # BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error',
        # 'message': 'Your credit balance is too low to access the Claude API. Please go to Plans & Billing
        # to upgrade or purchase credits.'}}

        # llm = ChatGroq(model="llama3-70b-8192")
        # https://console.groq.com/docs/rate-limits
        # llm = ChatGroq(model="mixtral-8x7b-32768")  # JB 13-06-2024 - occasionally hits rate-limit errors
        # In the HF Spaces Streamlit app: BadRequestError: Error code: 400 - {'error': {'message':
        # 'Organization has been restricted. Please reach out to support if you believe this was in error.',
        # 'type': 'invalid_request_error', 'code': 'organization_restricted'}}

        llm = ChatGoogleGenerativeAI(
            model="gemini-pro",  # JB-fix: ChatGoogleGenerativeAI requires a model name; "gemini-pro" is one valid choice
            google_api_key=os.getenv("GOOGLE_API_KEY"),
        )

        # https://python.langchain.com/v0.2/docs/integrations/chat/ollama/
        # ChatOllama supports many more optional parameters; hover over the
        # ChatOllama(...) class to view the latest available supported parameters.
        # llm = ChatOllama(model="llama3")
        # llm = ChatOllama(model="mistral:latest")
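        # A hedged local-Ollama sketch (assumes the Ollama server is running on
        # the default localhost:11434 and that the model has already been pulled;
        # the parameter values are illustrative, not from the original project):
        # llm = ChatOllama(
        #     model="llama3",
        #     temperature=0.2,  # lower temperature for more consistent newsletter copy
        #     num_ctx=8192,     # context window size passed through to Ollama
        # )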
        # To check whether Ollama is running and which LLMs it can serve, run
        # this in an Anaconda cmd (admin) window:
        #   ollama list
        # OUTPUT EXAMPLE:
        # (newsletter-gen-py3.11) (base) C:\Users\jfhmb\EXA_CREWAI\exa-crewai-master\exa-crewai-master>ollama list
        # NAME                        ID              SIZE    MODIFIED
        # llama3:latest               365c0bd3c000    4.7 GB  3 days ago
        # nomic-embed-text:latest     0a109f422b47    274 MB  3 days ago
        # crewai-llama3:latest        d952d07761cd    4.7 GB  10 days ago
        # llama3:8b                   365c0bd3c000    4.7 GB  10 days ago
        # mistral:latest              61e88e884507    4.1 GB  6 weeks ago
        # mxbai-embed-large:latest    468836162de7    669 MB  6 weeks ago
        #
        # OLLAMA LOGS:
        # C:\Users\jfhmb\AppData\Local\Ollama
        #
        # "Running ollama on Hugging Face Spaces" #2833
        # https://github.com/ollama/ollama/issues/2833

        # HUGGING FACE LLMs
        # https://python.langchain.com/v0.2/docs/integrations/chat/huggingface/
        # HUGGINGFACEHUB_API_TOKEN
        # https://huggingface.co/docs/hub/security-tokens
        # https://huggingface.co/settings/tokens
        # %pip install --upgrade --quiet langchain-huggingface text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2
        #
        # 1. Instantiate an LLM: HuggingFaceEndpoint
        # from langchain_huggingface import HuggingFaceEndpoint
        # llm = HuggingFaceEndpoint(
        #     repo_id="meta-llama/Meta-Llama-3-70B-Instruct",
        #     task="text-generation",
        #     max_new_tokens=512,
        #     do_sample=False,
        #     repetition_penalty=1.03,
        # )
        # BadRequestError: (Request ID: ots-pfsrtb04xa7oVcKIc) Bad request: Model requires a Pro
        # subscription; check out hf.co/pricing to learn more. Make sure to include your HF token
        # in your query.
        # API Reference: HuggingFaceEndpoint

        print("JB: in class NewsletterGenCrew - using llm: ", llm)
        return llm

    def step_callback(
        self,
        agent_output: Union[str, List[Tuple[Dict, str]], AgentFinish],
        agent_name,
        *args,
    ):
        with st.chat_message("AI"):
            # Try to parse the output if it is a JSON string
            if isinstance(agent_output, str):
                try:
                    agent_output = json.loads(agent_output)
                except json.JSONDecodeError:
                    pass

            # Case 1: a list of (action, observation) tuples from intermediate steps
            if isinstance(agent_output, list) and all(
                isinstance(item, tuple) for item in agent_output
            ):
                for action, description in agent_output:
                    # Print attributes based on the assumed AgentAction structure
                    st.write(f"Agent Name: {agent_name}")
                    st.write(f"Tool used: {getattr(action, 'tool', 'Unknown')}")
                    st.write(f"Tool input: {getattr(action, 'tool_input', 'Unknown')}")
                    st.write(f"{getattr(action, 'log', 'Unknown')}")
                    with st.expander("Show observation"):
                        st.markdown(f"Observation\n\n{description}")

            # Case 2: an AgentFinish signalling the agent completed its task
            elif isinstance(agent_output, AgentFinish):
                st.write(f"Agent Name: {agent_name}")
                output = agent_output.return_values
                st.write(f"I finished my task:\n{output['output']}")

            # Handle unexpected formats
            else:
                st.write(type(agent_output))
                st.write(agent_output)

    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config["researcher"],
            tools=[SearchAndContents(), FindSimilar(), GetContents()],
            verbose=True,
            llm=self.llm(),
            step_callback=lambda step: self.step_callback(step, "Research Agent"),
        )

    @agent
    def editor(self) -> Agent:
        return Agent(
            config=self.agents_config["editor"],
            verbose=True,
            tools=[SearchAndContents(), FindSimilar(), GetContents()],
            llm=self.llm(),
            step_callback=lambda step: self.step_callback(step, "Chief Editor"),
        )

    @agent
    def designer(self) -> Agent:
        return Agent(
            config=self.agents_config["designer"],
            verbose=True,
            allow_delegation=False,
            llm=self.llm(),
            step_callback=lambda step: self.step_callback(step, "HTML Writer"),
        )
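    # Hedged refactoring sketch (an addition, not part of the original
    # project): the three @task methods below each rebuild the same
    # timestamped path under logs/. A helper like this keeps that in one
    # place; it assumes a logs/ directory exists next to this file. A task
    # could then pass e.g. output_file=self._timestamped_output("research_task", "md").
    def _timestamped_output(self, task_name: str, ext: str) -> str:
        """Return a path of the form logs/<timestamp>_<task_name>.<ext>."""
        return f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{task_name}.{ext}"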
    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config["research_task"],
            agent=self.researcher(),
            output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_research_task.md",
        )

    @task
    def edit_task(self) -> Task:
        return Task(
            config=self.tasks_config["edit_task"],
            agent=self.editor(),
            output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_edit_task.md",
        )

    @task
    def newsletter_task(self) -> Task:
        return Task(
            config=self.tasks_config["newsletter_task"],
            agent=self.designer(),
            output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_newsletter_task.html",
        )

    @crew
    def crew(self) -> Crew:
        """Creates the NewsletterGen crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=2,
            # process=Process.hierarchical,  # In case you want to use that instead: https://docs.crewai.com/how-to/Hierarchical/
        )
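# Minimal usage sketch (hypothetical, not part of the original file). It
# assumes GOOGLE_API_KEY and EXA_API_KEY are set in the environment, that
# agents.yaml / tasks.yaml sit next to this file, and that tasks.yaml
# interpolates a {topic} placeholder; adjust the inputs dict to match your
# own task templates. Outside `streamlit run`, the st.* calls in
# step_callback fall back to Streamlit's bare mode and only log warnings.
if __name__ == "__main__":
    inputs = {"topic": "AI newsletters"}  # hypothetical input key
    result = NewsletterGenCrew().crew().kickoff(inputs=inputs)
    print(result)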