Update FC_tool_main.py
Update code to use the new gpt-4o-mini model instead of gpt-3.5-turbo-0125, because the newer model is both better and cheaper :)
FC_tool_main.py (+5 -4) CHANGED
@@ -41,6 +41,7 @@ from langchain.memory import ConversationBufferWindowMemory
 # _ = load_dotenv(find_dotenv()) # read local .env file
 openai.api_key = os.getenv('OPENAI_API_KEY') #os.environ['OPENAI_API_KEY']
 rapid_api_key = os.getenv('RAPID_API_KEY')
+model_name="gpt-4o-mini"
 
 def get_temperature():
     return 0 #Default value
@@ -156,7 +157,7 @@ class YouTubeTranscriptPointsExtractor:
         """
         main_points_extraction_function = [convert_to_openai_function(info_model)]
 
-        model = ChatOpenAI(temperature=get_temperature())
+        model = ChatOpenAI(temperature=get_temperature(), model=model_name)
 
         extraction_model = model.bind(functions=main_points_extraction_function, function_call={"name": info_model.__name__})
 
@@ -313,7 +314,7 @@ class QuestionAnswerExtractor:
         """
         answer_extraction_function = [convert_to_openai_function(QuestionAnswerExtractor.Info)]
 
-        model = ChatOpenAI(temperature=get_temperature())
+        model = ChatOpenAI(temperature=get_temperature(), model=model_name)
         extraction_model = model.bind(functions=answer_extraction_function, function_call={"name": "Info"})
 
         prompt = ChatPromptTemplate.from_messages([
@@ -349,7 +350,7 @@ class QuestionAnswerExtractor:
             ("human", "Question: {question}\n\nPartial Answers: {partial_answers}\n\nPlease provide a consolidated, comprehensive answer to the question based on these partial answers. Ignore any information from answers with low confidence (0.5 or below).")
         ])
 
-        consolidation_model = ChatOpenAI(temperature=get_temperature())
+        consolidation_model = ChatOpenAI(temperature=get_temperature(), model=model_name)
         consolidation_chain = consolidation_prompt | consolidation_model
 
         final_answer = consolidation_chain.invoke({
@@ -392,7 +393,7 @@ class YouTubeAgent:
 
         self.functions = [convert_to_openai_function(f) for f in self.tools]
 
-        self.model = ChatOpenAI(temperature=get_temperature()).bind(functions=self.functions)
+        self.model = ChatOpenAI(temperature=get_temperature(), model=model_name).bind(functions=self.functions)
 
         self.prompt = ChatPromptTemplate.from_messages([
             ("system", self.sys_message),
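For context on why a one-line constant is enough: LangChain's ChatOpenAI takes the model name as a constructor argument (model, an alias for model_name) and falls back to a library default (historically gpt-3.5-turbo) when it is omitted, which is why every call site above needed the extra argument. Below is a minimal, self-contained sketch of the two patterns the diff touches: the module-level model name, and a model bound to a forced OpenAI function call. The import paths and the Info class are illustrative assumptions, not code from this Space.

# Minimal sketch, assuming langchain-openai and langchain-core are installed
# and OPENAI_API_KEY is set. Only model_name and get_temperature mirror the
# diff; Info and the invoke() text are stand-ins for the Space's own models.
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_openai import ChatOpenAI

model_name = "gpt-4o-mini"  # one constant, so the next model swap is a one-line edit

def get_temperature():
    return 0  # deterministic output suits structured extraction

class Info(BaseModel):
    """Main points extracted from a piece of text."""
    main_points: str = Field(description="The main points of the text")

model = ChatOpenAI(temperature=get_temperature(), model=model_name)
extraction_model = model.bind(
    functions=[convert_to_openai_function(Info)],
    function_call={"name": Info.__name__},  # force the model to call Info
)
result = extraction_model.invoke("LangChain lets you bind OpenAI functions to chat models.")
print(result.additional_kwargs["function_call"]["arguments"])  # JSON matching Info's schema

Centralizing the name at module scope is what keeps this commit to five added lines; every ChatOpenAI call site already funnels through the same constant.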