Vijish committed on
Commit
fa947da
·
verified ·
1 Parent(s): 14f2ceb

Upload 3 files

Browse files
Files changed (3) hide show
  1. aisugg22.py +75 -0
  2. corechat.py +105 -0
  3. coresugg.py +49 -0
aisugg22.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, UploadFile, File, Security, status
2
+ from fastapi.security.api_key import APIKeyHeader
3
+ from environs import Env
4
+ import json
5
+
6
+ # Importing modules from both scripts
7
+ from coresugg import ConversationPayload as ConversationPayloadSugg, create_conversation_starter_prompt, generate_conversation_starters, NUMBER_OF_MESSAGES_FOR_CONTEXT as NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG
8
+ from corechat import ConversationPayload as ConversationPayloadChat, get_conversation_suggestions, NUMBER_OF_MESSAGES_FOR_CONTEXT as NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT
9
+
10
+ # Load environment variables
11
+ env = Env()
12
+ API_KEY = env.str("API_KEY", "koottu123456abcDEF")
13
+ api_key_header = APIKeyHeader(name="X-API-KEY", auto_error=False)
14
+
15
+ app = FastAPI()
16
+
17
async def get_api_key(api_key_header: str = Security(api_key_header)):
    """Validate the X-API-KEY request header against the configured key.

    Returns the key string on success; raises HTTP 403 otherwise,
    including when the header is absent (auto_error=False yields None).
    """
    import secrets  # stdlib; local import to keep the fix self-contained

    # Fix: a plain `==` comparison can leak key contents through timing
    # differences; compare_digest runs in constant time. Also guard None,
    # which compare_digest would reject with a TypeError.
    if api_key_header is not None and secrets.compare_digest(api_key_header, API_KEY):
        return api_key_header
    raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid API Key")
22
+
23
+ # Route from app.py
24
+ @app.post("/integration")
25
+ def integration(payload: ConversationPayloadSugg, api_key: str = Security(get_api_key)):
26
+ from_user_questions = payload.FromUserKavasQuestions[-NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG:]
27
+ to_user_questions = payload.ToUserKavasQuestions[-NUMBER_OF_MESSAGES_FOR_CONTEXT_SUGG:]
28
+ ai_prompt = create_conversation_starter_prompt(from_user_questions + to_user_questions, payload.Chatmood)
29
+ conversation_starters = generate_conversation_starters(ai_prompt)
30
+ return {"conversation_starters": conversation_starters}
31
+
32
+ # Route from chatt.py
33
+ @app.post("/chat_integration")
34
+ def chat_integration(payload: ConversationPayloadChat, api_key: str = Security(get_api_key)):
35
+ last_chat_messages = payload.LastChatMessages[-NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT:]
36
+ suggestions = get_conversation_suggestions(last_chat_messages)
37
+ return {"version": "1.0.0-alpha", "suggested_responses": suggestions}
38
+
39
+ # Combined upload endpoint
40
+ @app.post("/upload")
41
+ async def upload_file(file: UploadFile = File(...), api_key: str = Security(get_api_key)):
42
+ if file.content_type != 'application/json':
43
+ raise HTTPException(status_code=400, detail="Invalid file type. Please upload a JSON file.")
44
+
45
+ data = await file.read()
46
+ try:
47
+ json_data = json.loads(data)
48
+ except json.JSONDecodeError:
49
+ raise HTTPException(status_code=400, detail="Invalid JSON format.")
50
+
51
+ if "FromUserKavasQuestions" in json_data and "Chatmood" in json_data:
52
+ prompt = create_conversation_starter_prompt(
53
+ json_data["FromUserKavasQuestions"],
54
+ json_data["Chatmood"]
55
+ )
56
+ starter_suggestion = generate_conversation_starters(prompt)
57
+ return {"conversation_starter": starter_suggestion}
58
+ elif "LastChatMessages" in json_data:
59
+ last_chat_messages = json_data["LastChatMessages"][-NUMBER_OF_MESSAGES_FOR_CONTEXT_CHAT:]
60
+ response = {
61
+ "version": "1.0.0-alpha",
62
+ "suggested_responses": get_conversation_suggestions(last_chat_messages)
63
+ }
64
+ return response
65
+ else:
66
+ raise HTTPException(status_code=400, detail="Invalid JSON structure.")
67
+
68
+
69
+
70
+
71
+ #uvicorn app:app --reload
72
+
73
+
74
+
75
+ #uvicorn aisugg22:app --reload
corechat.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ import openai
3
+ from environs import Env
4
+ from typing import List
5
+
6
+ # Configuration and API Key Management
7
+ env = Env()
8
+ env.read_env("openai.env")
9
+ openai.api_key = env.str("OPENAI_API_KEY")
10
+ SYSTEM_PROMPT = env.str("SYSTEM_PROMPT", "Suggest a suitable reply for a user in a dating conversation context.")
11
+ MODEL = env.str("MODEL", "gpt-3.5-turbo")
12
+ NUMBER_OF_MESSAGES_FOR_CONTEXT = min(env.int("NUMBER_OF_MESSAGES_FOR_CONTEXT", 4), 10)
13
+ AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)
14
+
15
class LastChatMessage(BaseModel):
    """Schema for a single chat message exchanged between two users."""
    # NOTE(review): field casing is inconsistent ("fromUser" vs "touser");
    # presumably it mirrors the client's JSON keys — confirm before renaming.
    fromUser: str
    touser: str
18
+
19
class ConversationPayload(BaseModel):
    """Request body for /chat_integration: usernames, zodiac sign, history, mood."""
    fromusername: str
    tousername: str
    # NOTE(review): spelled "zodiansign" (sic) — likely matches the client
    # payload key; confirm before correcting.
    zodiansign: str
    # Declared as List[dict] rather than List[LastChatMessage], so per-message
    # keys are not validated; transform_messages in this module reads
    # "fromUser"/"touser" from each dict.
    LastChatMessages: List[dict]
    Chatmood: str
25
+
26
def transform_messages(last_chat_messages):
    """Render raw chat dicts as "speaker: text" strings.

    Each entry contributes one line keyed on whichever of "fromUser" /
    "touser" is present (with the other key supplying the text). When the
    newest raw entry carries a "touser" key, its rendered line is prefixed
    with "Q: " to mark it as the message awaiting a reply.
    """
    rendered = []
    for entry in last_chat_messages:
        if "fromUser" in entry:
            rendered.append(f"{entry['fromUser']}: {entry.get('touser', '')}")
        elif "touser" in entry:
            rendered.append(f"{entry['touser']}: {entry.get('fromUser', '')}")

    # Tag the latest line as a question when the last raw entry has "touser".
    if rendered and "touser" in last_chat_messages[-1]:
        rendered[-1] = f"Q: {rendered[-1]}"

    return rendered
44
+
45
def generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign=None, chatmood=None):
    """Build the system prompt for the reply-suggestion model.

    Uses a "start a conversation" framing when there is no history or the
    newest entry lacks a "touser" key, and a "reply to the last message"
    framing otherwise. Optional zodiac-sign and mood hints are appended.
    """
    awaiting_reply = bool(last_chat_messages) and "touser" in last_chat_messages[-1]

    if awaiting_reply:
        prompt = (
            f"Suggest a warm and friendly reply for {fromusername} to respond to "
            f"the last message from {tousername}, as if responding to a dear friend. "
            f"Strictly avoid replying to messages from {fromusername} or answering their questions."
        )
    else:
        prompt = (
            f"Suggest a casual and friendly message for {fromusername} to start a "
            f"conversation with {tousername} or continue naturally, as if talking to "
            f"a good friend. Strictly avoid replying to messages from {fromusername} "
            f"or answering their questions."
        )

    if zodiansign:
        prompt += f" Keep in mind {tousername}'s {zodiansign} zodiac sign."
    if chatmood:
        prompt += f" Consider the {chatmood} mood."

    return prompt
58
+
59
def get_conversation_suggestions(last_chat_messages):
    """Ask the chat model for up to three suggested replies.

    Reads optional metadata (usernames, zodiac sign, mood) from the newest
    message dict, renders the history via transform_messages, and returns a
    list of {"type", "body", "title", "confidence"} dicts. On an OpenAI
    timeout, a single error entry is returned instead of raising.
    """
    # Fix: the original indexed last_chat_messages[-1] unconditionally and
    # raised IndexError on an empty history; fall back to empty metadata.
    meta = last_chat_messages[-1] if last_chat_messages else {}
    fromusername = meta.get("fromusername", "")
    tousername = meta.get("tousername", "")
    zodiansign = meta.get("zodiansign", "")
    chatmood = meta.get("Chatmood", "")

    messages = transform_messages(last_chat_messages)

    system_prompt = generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign, chatmood)
    messages_final = [{"role": "system", "content": system_prompt}]

    if messages:
        messages_final.extend({"role": "user", "content": m} for m in messages)
    else:
        # With no history, seed a greeting so the model still produces replies.
        messages_final.append({"role": "user", "content": f"{tousername}: Hi there!"})

    try:
        response = openai.ChatCompletion.create(
            model=MODEL,
            messages=messages_final,
            temperature=0.7,
            max_tokens=150,
            n=3,  # request three alternative replies
            request_timeout=AI_RESPONSE_TIMEOUT,
        )
        return [
            {
                "type": "TEXT",
                "body": choice.message['content'],
                "title": f"AI Reply {idx + 1}",
                "confidence": 1,
            }
            for idx, choice in enumerate(response.choices)
        ]
    except openai.error.Timeout:
        # Degrade gracefully: surface the timeout as a single suggestion.
        return [{
            "type": "TEXT",
            "body": "Request to the AI response generator has timed out. Please try again later.",
            "title": "AI Response Error",
            "confidence": 1,
        }]
coresugg.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # core.py
2
+
3
+ from pydantic import BaseModel
4
+ import openai
5
+ from environs import Env
6
+
7
+ # Load environment variables
8
+ env = Env()
9
+ env.read_env("openai.env") # Adjust the file name as needed
10
+
11
+ # Set OpenAI API key
12
+ openai.api_key = env.str("OPENAI_API_KEY")
13
+
14
+ # Define constants from environment variables
15
+ SYSTEM_PROMPT = env.str("SYSTEM_PROMPT", "generate 3 different friendly short conversation starter for a user to another unknown user.")
16
+ MODEL = env.str("MODEL", "gpt-3.5-turbo")
17
+ NUMBER_OF_MESSAGES_FOR_CONTEXT = min(env.int("NUMBER_OF_MESSAGES_FOR_CONTEXT", 4), 10)
18
+ AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)
19
+
20
class ConversationPayload(BaseModel):
    """Request body for /integration: usernames, profile Q&A lists, and mood."""
    fromusername: str
    tousername: str
    # Untyped lists of dicts; create_conversation_starter_prompt expects each
    # entry to carry "Question" and "Answer" keys.
    FromUserKavasQuestions: list
    ToUserKavasQuestions: list
    Chatmood: str
26
+
27
def create_conversation_starter_prompt(user_questions, chatmood):
    """Build the prompt used to generate conversation starters.

    Args:
        user_questions: iterable of dicts with "Question" and "Answer" keys;
            entries with a missing or empty answer are skipped.
        chatmood: free-text mood hint interpolated into the prompt.

    Returns:
        The full prompt string ending with the formatted profile info.
    """
    # Fix: the original used qa['Answer'] / qa['Question'] and raised KeyError
    # on malformed entries; .get() skips them gracefully instead.
    formatted_info = " ".join(
        f"{qa.get('Question', '')} - {qa['Answer']}"
        for qa in user_questions
        if qa.get('Answer')
    )
    return (f"Based on user profile info and a {chatmood} mood, "
            f"generate 3 subtle and very short conversation starters. "
            f"Explore various topics like travel, hobbies, movies, and not just culinary tastes. "
            f"\nProfile Info: {formatted_info}")
34
+
35
def generate_conversation_starters(prompt):
    """Send *prompt* to the chat model and return the generated starter text.

    Raises:
        Exception: wrapping either an OpenAI API failure or any other error,
            with the original exception chained as __cause__.
    """
    try:
        response = openai.ChatCompletion.create(
            model=MODEL,
            messages=[{"role": "system", "content": prompt}],
            temperature=0.7,
            max_tokens=100,
            n=1,
            request_timeout=AI_RESPONSE_TIMEOUT,
        )
        return response.choices[0].message["content"]
    except openai.error.OpenAIError as e:
        # Fix: chain the cause (PEP 3134) so tracebacks keep the original error.
        raise Exception(f"OpenAI API error: {str(e)}") from e
    except Exception as e:
        raise Exception(f"Unexpected error: {str(e)}") from e