Spaces:
Sleeping
Sleeping
Upload 14 files
Browse files- ServChaindesk.py +121 -0
- ServChar.py +77 -0
- ServFire.py +127 -0
- ServFlowise.py +118 -0
- ServForefront.py +133 -0
- ServG4F.py +109 -0
- ServG4F2.py +109 -0
- clientChaindesk.py +108 -0
- clientCharacter.py +62 -0
- clientFireworks.py +101 -0
- clientFlowise.py +101 -0
- clientForefront.py +113 -0
- clientG4F.py +91 -0
- clientG4F2.py +91 -0
ServChaindesk.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import asyncio
|
3 |
+
import websockets
|
4 |
+
import threading
|
5 |
+
import sqlite3
|
6 |
+
import datetime
|
7 |
+
import g4f
|
8 |
+
import requests
|
9 |
+
import streamlit as st
|
10 |
+
|
11 |
+
server_ports = []
|
12 |
+
|
13 |
+
class WebSocketServer6:
    """Websocket server that answers incoming client messages with the
    Chaindesk agent-query REST API and logs every exchange to chat-hub.db.
    """

    def __init__(self, host):
        # Streamlit session state keeps the list of ports with running servers.
        if "server_ports" not in st.session_state:
            st.session_state['server_ports'] = ""

        self.host = host
        self.port = None    # remembered by start_server(); used by run_forever()
        self.server = None  # websockets server handle, required by stop_server()
        self.status = st.sidebar.status(label="Chaindesk", state="complete", expanded=False)

    async def askChaindesk(self, question):
        """POST *question* to the configured Chaindesk agent and return its answer.

        Returns an error string on failure so the caller always receives text
        it can forward to the websocket client (the original returned None).
        """
        if "agentID" not in st.session_state:
            st.session_state.agentID = ""

        agent_id = st.session_state.agentID
        url = f"https://api.chaindesk.ai/agents/{agent_id}/query"
        payload = {"query": question}
        # NOTE(review): hard-coded API credential — move it to st.session_state
        # or an environment variable before publishing/deploying this code.
        headers = {
            "Authorization": "Bearer fe77e704-bc5a-4171-90f2-9d4b0d4ac942",
            "Content-Type": "application/json"
        }
        try:
            response = requests.request("POST", url, json=payload, headers=headers)
            print(response.text)
            responseJson = json.loads(response.text)
            return responseJson["answer"]
        except Exception as e:
            print(e)
            return f"Error: {e}"

    async def start_server(self, serverPort):
        """Start listening on (self.host, serverPort) and register the port."""
        name = f"Chaindesk server port: {serverPort}"
        self.port = serverPort  # fix: was never stored, breaking the log line below
        server_ports.append(serverPort)
        st.session_state['server_ports'] = server_ports
        self.server = await websockets.serve(
            self.handler,
            self.host,
            serverPort
        )
        self.status.update(label=name, state="running", expanded=True)
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then store each incoming
        message, ask Chaindesk for an answer and send it back until the peer
        disconnects."""
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        serverStatus = st.sidebar.status(label="processing", state="complete", expanded=False)

        if "clientPort" not in st.session_state:
            st.session_state.clientPort = ""

        clientPort = st.session_state.clientPort
        name = f"Client at port: {clientPort}"
        print(f"New connection with: {name}")
        await websocket.send(instruction)
        while True:
            serverStatus.update(label=name, state="running", expanded=True)
            try:
                message = await websocket.recv()
            except websockets.exceptions.ConnectionClosed as e:
                # fix: recv() was outside any try block, so a closed socket
                # escaped the handler as an unhandled exception.
                print(f"Connection closed: {e}")
                break
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            db = sqlite3.connect('chat-hub.db')
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       ('client', message, timestamp))
            db.commit()
            db.close()  # fix: connections were opened per message and never closed
            try:
                # fix: the original called self.askQuestion(), which does not
                # exist on this class — the Chaindesk query method is askChaindesk().
                response = await self.askChaindesk(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('server', serverResponse, timestamp))
                db.commit()
                db.close()
                await websocket.send(serverResponse)
                serverStatus.update(label=name, state="complete", expanded=True)
            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")
                break  # fix: continuing here busy-looped on a dead socket
            except Exception as e:
                print(f"Error: {e}")

    def run_forever(self, serverPort=None):
        """Blocking helper: start the server and run the event loop forever.

        Accepts an optional port and falls back to the one recorded by an
        earlier start_server() call — the original passed no port at all and
        crashed with a TypeError.
        """
        port = serverPort if serverPort is not None else self.port
        asyncio.get_event_loop().run_until_complete(self.start_server(port))
        asyncio.get_event_loop().run_forever()

    async def stop_server(self):
        """Close the listening server and wait until shutdown completes."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServChar.py
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import sqlite3
|
4 |
+
import datetime
|
5 |
+
import streamlit as st
|
6 |
+
from PyCharacterAI import Client
|
7 |
+
|
8 |
+
class WebSocketServer2:
    """Websocket server that relays each client message to a Character.AI
    character (via PyCharacterAI) and logs the exchange to chat-hub.db."""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.server = None  # set by start_server(); checked by stop_server()

    async def handler(self, websocket):
        """Per-connection loop: authenticate with Character.AI, greet the
        client, then answer every incoming message with the character."""
        client = Client()
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        if "tokenChar" not in st.session_state:
            st.session_state.tokenChar = ""
        if "character_ID" not in st.session_state:
            st.session_state.character_ID = ""

        await client.authenticate_with_token(st.session_state.tokenChar)
        char_id = st.session_state.character_ID
        chat = await client.create_or_continue_chat(char_id)
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            status.update(label="runs", state="running", expanded=True)
            try:
                message = await websocket.recv()
            except websockets.exceptions.ConnectionClosed as e:
                # fix: the original generic `except` swallowed closed-connection
                # errors and kept calling recv() on a dead socket in a busy loop.
                print(f"Connection closed: {e}")
                break
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            db = sqlite3.connect('chat-hub.db')
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       ('client', message, timestamp))
            db.commit()
            db.close()  # fix: per-message connections were never closed
            try:
                answer = await chat.send_message(message)
                response = f"{answer.src_character_name}: {answer.text}"
                print(response)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(response)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('server', response, timestamp))
                db.commit()
                db.close()
                await websocket.send(response)
                status.update(label="runs", state="complete", expanded=True)
            except websockets.exceptions.ConnectionClosed as e:
                print(f"Connection closed: {e}")
                break
            except Exception as e:
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening on (self.host, self.port)."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking helper: start the server and run the event loop forever."""
        asyncio.get_event_loop().run_until_complete(self.start_server())
        asyncio.get_event_loop().run_forever()

    async def stop_server(self):
        """Close the listening server and wait until shutdown completes."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServFire.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import datetime
|
6 |
+
import g4f
|
7 |
+
import streamlit as st
|
8 |
+
import fireworks.client
|
9 |
+
|
10 |
+
class WebSocketServer:
    """Websocket server that answers clients with the Fireworks.ai
    llama-v2-7b-chat model, using chat-hub.db as shared chat memory."""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.server = None

    async def chatCompletion(self, question):
        """Answer *question* with the Fireworks chat model, feeding it the
        last 10 stored messages as chronological conversation history.

        Returns the model's answer as str, or an error string on failure.
        """
        if "api_key" not in st.session_state:
            st.session_state.api_key = ""

        fireworks.client.api_key = st.session_state.api_key

        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."

        try:
            # Fetch the last 10 messages, oldest first.
            db = sqlite3.connect('chat-hub.db')
            cursor = db.cursor()
            cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 10")
            messages = cursor.fetchall()
            messages.reverse()
            db.close()  # fix: the read connection used to leak

            # fix: the original grouped all user inputs before all assistant
            # responses, destroying the chronological order the system prompt
            # explicitly asks the model to maintain. Rows index sender at
            # column 1 and message text at column 2 (see the INSERT above).
            history = [
                {"role": "user" if row[1] == 'client' else "assistant", "content": row[2]}
                for row in messages
            ]

            response = fireworks.client.ChatCompletion.create(
                model="accounts/fireworks/models/llama-v2-7b-chat",
                messages=[
                    {"role": "system", "content": system_instruction},
                    *history,
                    {"role": "user", "content": question}
                ],
                stream=False,
                n=1,
                max_tokens=2500,
                temperature=0.5,
                top_p=0.7,
            )

            answer = response.choices[0].message.content
            print(answer)
            return str(answer)

        except Exception as error:
            print("Error while fetching or processing the response:", error)
            return "Error: Unable to generate a response."

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then store each message,
        generate a Fireworks completion and send it back."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            status.update(label="runs", state="running", expanded=True)
            try:
                message = await websocket.recv()
            except websockets.exceptions.ConnectionClosed as e:
                # fix: recv() was outside the try block, so a disconnect
                # escaped the handler unhandled.
                print(f"Connection closed: {e}")
                break
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            db = sqlite3.connect('chat-hub.db')
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       ('client', message, timestamp))
            db.commit()
            db.close()  # fix: per-message connections were never closed
            try:
                response = await self.chatCompletion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('server', serverResponse, timestamp))
                db.commit()
                db.close()
                await websocket.send(serverResponse)
                status.update(label="runs", state="complete", expanded=True)
            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")
                break  # fix: continuing here busy-looped on a dead socket
            except Exception as e:
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening on (self.host, self.port)."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking helper: start the server and run the event loop forever."""
        asyncio.get_event_loop().run_until_complete(self.start_server())
        asyncio.get_event_loop().run_forever()

    async def stop_server(self):
        """Close the listening server and wait until shutdown completes."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
print("WebSocket server stopped.")
|
ServFlowise.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import asyncio
|
3 |
+
import websockets
|
4 |
+
import threading
|
5 |
+
import sqlite3
|
6 |
+
import datetime
|
7 |
+
import g4f
|
8 |
+
import requests
|
9 |
+
import streamlit as st
|
10 |
+
|
11 |
+
server_ports = []
|
12 |
+
|
13 |
+
class WebSocketServer5:
    """Websocket server that answers incoming client messages via a local
    Flowise prediction endpoint and logs the exchange to chat-hub.db."""

    def __init__(self, host):
        # Streamlit session state keeps the list of ports with running servers.
        if "server_ports" not in st.session_state:
            st.session_state['server_ports'] = ""

        self.host = host
        self.port = None    # remembered by start_server(); used by run_forever()
        self.server = None  # websockets server handle, required by stop_server()
        self.status = st.sidebar.status(label="Flowise", state="complete", expanded=False)

    async def askQuestion(self, question):
        """POST *question* to the configured Flowise flow and return the answer.

        Returns an error string on failure so the caller always receives text
        it can forward to the websocket client (the original returned None).
        """
        if "flow" not in st.session_state:
            st.session_state.flow = ""

        flow = st.session_state.flow
        API_URL = f"http://localhost:3000/api/v1/prediction/{flow}"

        try:
            response = requests.post(API_URL, json={"question": question})
            result = response.json()
            print(result)
            return result["text"]
        except Exception as e:
            print(e)
            return f"Error: {e}"

    async def start_server(self, serverPort):
        """Start listening on (self.host, serverPort) and register the port."""
        name = f"Flowise server at port: {serverPort}"
        # fix: the original did `serverPort = self.port`, which raised
        # AttributeError (self.port was never set) and clobbered the argument.
        self.port = serverPort
        server_ports.append(serverPort)
        st.session_state['server_ports'] = server_ports
        self.server = await websockets.serve(
            self.handler,
            self.host,
            serverPort
        )
        self.status.update(label=name, state="running", expanded=True)
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then store each message,
        query Flowise and send the answer back until the peer disconnects."""
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        serverStatus = st.sidebar.status(label="processing", state="complete", expanded=False)

        if "clientPort" not in st.session_state:
            st.session_state.clientPort = ""

        clientPort = st.session_state.clientPort
        name = f"Client at port: {clientPort}"
        print(f"New connection with: {name}")
        await websocket.send(instruction)
        while True:
            serverStatus.update(label=name, state="running", expanded=True)
            try:
                message = await websocket.recv()
            except websockets.exceptions.ConnectionClosed as e:
                # fix: recv() was outside any try block, so a closed socket
                # escaped the handler as an unhandled exception.
                print(f"Connection closed: {e}")
                break
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            db = sqlite3.connect('chat-hub.db')
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       ('client', message, timestamp))
            db.commit()
            db.close()  # fix: per-message connections were never closed
            try:
                response = await self.askQuestion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('server', serverResponse, timestamp))
                db.commit()
                db.close()
                await websocket.send(serverResponse)
                serverStatus.update(label=name, state="complete", expanded=True)
            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")
                break  # fix: continuing here busy-looped on a dead socket
            except Exception as e:
                print(f"Error: {e}")

    def run_forever(self, serverPort=None):
        """Blocking helper: start the server and run the event loop forever.

        Accepts an optional port and falls back to the one recorded by an
        earlier start_server() call — the original passed no port at all and
        crashed with a TypeError.
        """
        port = serverPort if serverPort is not None else self.port
        asyncio.get_event_loop().run_until_complete(self.start_server(port))
        asyncio.get_event_loop().run_forever()

    async def stop_server(self):
        """Close the listening server and wait until shutdown completes."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServForefront.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import datetime
|
6 |
+
import g4f
|
7 |
+
import streamlit as st
|
8 |
+
import fireworks.client
|
9 |
+
from forefront import ForefrontClient
|
10 |
+
|
11 |
+
class WebSocketServer4:
    """Websocket server that answers clients with a Forefront-hosted chat
    model, keeping recent context from the chat-hub.db message log."""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.server = None

    async def chatCompletion(self, question):
        """Answer *question* via the Forefront chat-completions API, with the
        last 3 stored messages as chronological context.

        Returns the API's message payload, or an error string on failure.
        """
        if "forefront_api" not in st.session_state:
            st.session_state.forefront_api = ""

        ff = ForefrontClient(api_key=st.session_state.forefront_api)

        system_instruction = "You are now integrated with a local instance of a hierarchical cooperative multi-agent framework called NeuralGPT"

        try:
            # Fetch the last 3 messages, oldest first.
            db = sqlite3.connect('chat-hub.db')
            cursor = db.cursor()
            cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 3")
            messages = cursor.fetchall()
            messages.reverse()
            db.close()  # fix: the read connection used to leak

            # fix: the original indexed past_user_inputs[-1] and
            # generated_responses[-1], which raised IndexError on an empty or
            # one-sided history, and classified sender == 'server' rows as
            # user input (inverted relative to the sibling servers). Build
            # the history chronologically instead; rows index sender at
            # column 1 and message text at column 2.
            history = [
                {"role": "user" if row[1] == 'client' else "assistant", "content": row[2]}
                for row in messages
            ]

            response = ff.chat.completions.create(
                messages=[
                    {"role": "system", "content": system_instruction},
                    *history,
                    {"role": "user", "content": question}
                ],
                stream=False,
                model="forefront/neural-chat-7b-v3-1-chatml",  # Replace with the actual model name
                temperature=0.5,
                max_tokens=500,
            )

            response_text = response.choices[0].message
            # NOTE(review): the sibling servers read .message.content here —
            # confirm which shape the Forefront SDK actually returns.
            print("Extracted message text:", response_text)
            return response_text

        except Exception as e:
            print(e)
            # fix: the implicit None return produced "server: None" downstream.
            return f"Error: {e}"

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then store each message,
        generate a Forefront completion and send it back."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            status.update(label="runs", state="running", expanded=True)
            try:
                message = await websocket.recv()
            except websockets.exceptions.ConnectionClosed as e:
                # fix: recv() was outside the try block, so a disconnect
                # escaped the handler unhandled.
                print(f"Connection closed: {e}")
                break
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            db = sqlite3.connect('chat-hub.db')
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       ('client', message, timestamp))
            db.commit()
            db.close()  # fix: per-message connections were never closed
            try:
                response = await self.chatCompletion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('server', serverResponse, timestamp))
                db.commit()
                db.close()
                await websocket.send(serverResponse)
                status.update(label="runs", state="complete", expanded=True)
            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")
                break  # fix: continuing here busy-looped on a dead socket
            except Exception as e:
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening on (self.host, self.port)."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking helper: start the server and run the event loop forever."""
        asyncio.get_event_loop().run_until_complete(self.start_server())
        asyncio.get_event_loop().run_forever()

    async def stop_server(self):
        """Close the listening server and wait until shutdown completes."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServG4F.py
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import datetime
|
6 |
+
import g4f
|
7 |
+
import streamlit as st
|
8 |
+
|
9 |
+
class WebSocketServer1:
|
10 |
+
def __init__(self, host, port):
|
11 |
+
self.host = host
|
12 |
+
self.port = port
|
13 |
+
self.server = None
|
14 |
+
|
15 |
+
async def askQuestion(self, question):
|
16 |
+
system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
|
17 |
+
try:
|
18 |
+
db = sqlite3.connect('chat-hub.db')
|
19 |
+
cursor = db.cursor()
|
20 |
+
cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
|
21 |
+
messages = cursor.fetchall()
|
22 |
+
messages.reverse()
|
23 |
+
|
24 |
+
past_user_inputs = []
|
25 |
+
generated_responses = []
|
26 |
+
|
27 |
+
for message in messages:
|
28 |
+
if message[1] == 'client':
|
29 |
+
past_user_inputs.append(message[2])
|
30 |
+
else:
|
31 |
+
generated_responses.append(message[2])
|
32 |
+
|
33 |
+
response = await g4f.ChatCompletion.create_async(
|
34 |
+
model=g4f.models.gpt_4,
|
35 |
+
provider=g4f.Provider.Bing,
|
36 |
+
messages=[
|
37 |
+
{"role": "system", "content": system_instruction},
|
38 |
+
*[{"role": "user", "content": message} for message in past_user_inputs],
|
39 |
+
*[{"role": "assistant", "content": message} for message in generated_responses],
|
40 |
+
{"role": "user", "content": question}
|
41 |
+
])
|
42 |
+
|
43 |
+
print(response)
|
44 |
+
return response
|
45 |
+
|
46 |
+
except Exception as e:
|
47 |
+
print(e)
|
48 |
+
|
49 |
+
|
50 |
+
async def handler(self, websocket):
|
51 |
+
status = st.sidebar.status(label="runs", state="complete", expanded=False)
|
52 |
+
instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
|
53 |
+
print('New connection')
|
54 |
+
await websocket.send(instruction)
|
55 |
+
db = sqlite3.connect('chat-hub.db')
|
56 |
+
# Loop forever
|
57 |
+
while True:
|
58 |
+
status.update(label="runs", state="running", expanded=True)
|
59 |
+
# Receive a message from the client
|
60 |
+
message = await websocket.recv()
|
61 |
+
# Print the message
|
62 |
+
print(f"Server received: {message}")
|
63 |
+
input_Msg = st.chat_message("assistant")
|
64 |
+
input_Msg.markdown(message)
|
65 |
+
timestamp = datetime.datetime.now().isoformat()
|
66 |
+
sender = 'client'
|
67 |
+
db = sqlite3.connect('chat-hub.db')
|
68 |
+
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
69 |
+
(sender, message, timestamp))
|
70 |
+
db.commit()
|
71 |
+
try:
|
72 |
+
response = await self.askQuestion(message)
|
73 |
+
serverResponse = f"server: {response}"
|
74 |
+
print(serverResponse)
|
75 |
+
output_Msg = st.chat_message("ai")
|
76 |
+
output_Msg.markdown(serverResponse)
|
77 |
+
timestamp = datetime.datetime.now().isoformat()
|
78 |
+
serverSender = 'server'
|
79 |
+
db = sqlite3.connect('chat-hub.db')
|
80 |
+
db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
|
81 |
+
(serverSender, serverResponse, timestamp))
|
82 |
+
db.commit()
|
83 |
+
# Append the server response to the server_responses list
|
84 |
+
await websocket.send(serverResponse)
|
85 |
+
status.update(label="runs", state="complete", expanded=True)
|
86 |
+
|
87 |
+
except websockets.exceptions.ConnectionClosedError as e:
|
88 |
+
print(f"Connection closed: {e}")
|
89 |
+
|
90 |
+
except Exception as e:
|
91 |
+
print(f"Error: {e}")
|
92 |
+
|
93 |
+
async def start_server(self):
    """Bind the websocket server to the configured host/port and keep the
    handle on ``self.server`` so ``stop_server`` can shut it down later."""
    self.server = await websockets.serve(self.handler, self.host, self.port)
    print(f"WebSocket server started at ws://{self.host}:{self.port}")
|
100 |
+
|
101 |
+
def run_forever(self):
    """Start the server and block forever, driving a dedicated event loop.

    Creates a fresh loop instead of calling ``asyncio.get_event_loop()``:
    that call is deprecated for this use and raises when invoked from a
    non-main thread with no running loop (these servers are started from
    Streamlit worker threads).
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(self.start_server())
    loop.run_forever()
|
104 |
+
|
105 |
+
async def stop_server(self):
    """Close the listening websocket server, if one is running, and wait
    until shutdown has completed."""
    server = self.server
    if not server:
        return
    server.close()
    await server.wait_closed()
    print("WebSocket server stopped.")
|
ServG4F2.py
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import datetime
|
6 |
+
import g4f
|
7 |
+
import streamlit as st
|
8 |
+
|
9 |
+
class WebSocketServer3:
    """Websocket server that answers incoming client messages with the g4f
    "You" provider (gpt-3.5-turbo), mirrors the exchange in the Streamlit
    chat UI and records both sides in the local SQLite history
    (chat-hub.db, table ``messages``)."""

    def __init__(self, host, port):
        self.host = host      # interface to bind the websocket server to
        self.port = port      # TCP port to listen on
        self.server = None    # populated by start_server()

    async def askQuestion(self, question):
        """Ask the g4f backend for an answer to ``question``.

        The prompt is the system instruction plus the last 30 messages
        from chat-hub.db replayed as context. Returns the model's reply,
        or None on failure (errors are printed, not raised, to match the
        caller's expectations).
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Read recent history and close the handle promptly — the
            # original opened a new connection here and never closed it.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # Column 1 is the sender tag, column 2 the message text.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'client':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = await g4f.ChatCompletion.create_async(
                model="gpt-3.5-turbo",
                provider=g4f.Provider.You,
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ])

            print(response)
            return response

        except Exception as e:
            print(e)

    async def start_server(self):
        """Bind the websocket server and keep the handle for stop_server()."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then answer every
        incoming message via askQuestion(), logging both directions to
        the database and the Streamlit UI."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            status.update(label="runs", state="running", expanded=True)
            message = await websocket.recv()
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            timestamp = datetime.datetime.now().isoformat()
            # Open/commit/close per insert — the original leaked a new
            # connection for every message received.
            db = sqlite3.connect('chat-hub.db')
            try:
                db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                           ('client', message, timestamp))
                db.commit()
            finally:
                db.close()
            try:
                response = await self.askQuestion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                timestamp = datetime.datetime.now().isoformat()
                db = sqlite3.connect('chat-hub.db')
                try:
                    db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                               ('server', serverResponse, timestamp))
                    db.commit()
                finally:
                    db.close()
                # Echo the answer back to the client that asked.
                await websocket.send(serverResponse)
                status.update(label="runs", state="complete", expanded=True)

            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")

            except Exception as e:
                print(f"Error: {e}")

    def run_forever(self):
        """Start the server and block forever on a dedicated event loop.

        Uses a freshly created loop instead of the deprecated
        ``asyncio.get_event_loop()``, which fails off the main thread.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.start_server())
        loop.run_forever()

    async def stop_server(self):
        """Close the listening server and wait for shutdown to finish."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
clientChaindesk.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import asyncio
|
3 |
+
import websockets
|
4 |
+
import threading
|
5 |
+
import sqlite3
|
6 |
+
import requests
|
7 |
+
import streamlit as st
|
8 |
+
|
9 |
+
client_ports = []
|
10 |
+
|
11 |
+
# Define the websocket client class
|
12 |
+
class WebSocketClient6:
    """Websocket client that relays messages from a NeuralGPT server to a
    Chaindesk agent (REST API) and sends the agent's answers back."""

    def __init__(self):
        # Ensure the session keys exist BEFORE they are read.  The
        # original read ``self.clientPort`` (never assigned) to build the
        # status label, which raised AttributeError on construction.
        if "client_ports" not in st.session_state:
            st.session_state['client_ports'] = ""
        if "clientPort" not in st.session_state:
            st.session_state.clientPort = ""
        self.clientPort = st.session_state.clientPort
        self.name = f"Chaindesk client port: {self.clientPort}"
        self.status = st.sidebar.status(label=self.name, state="complete", expanded=False)

    async def askChaindesk(self, question):
        """POST ``question`` to the configured Chaindesk agent and return
        the ``answer`` field of the JSON response, or None on failure."""
        if "agentID" not in st.session_state:
            st.session_state.agentID = ""

        id = st.session_state.agentID

        url = f"https://api.chaindesk.ai/agents/{id}/query"

        payload = {
            "query": question
        }

        # NOTE(review): hardcoded API credential — should come from
        # configuration/session state, not source control.
        headers = {
            "Authorization": "Bearer fe77e704-bc5a-4171-90f2-9d4b0d4ac942",
            "Content-Type": "application/json"
        }
        try:
            response = requests.request("POST", url, json=payload, headers=headers)
            print(response.text)
            responseJson = json.loads(response.text)
            answer = responseJson["answer"]
            return answer

        except Exception as e:
            print(e)

    def run(self):
        """Run the client loop in a daemon-style background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the client coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist; the connection coroutine is startClient(clientPort).
        loop.run_until_complete(self.startClient(self.clientPort))

    async def stop_client(self):
        """Close the active connection and unregister the port.

        Fix: the original was defined without ``self``, so calling it on
        an instance raised TypeError.
        NOTE(review): ``ws`` is assumed to be a module-level handle set
        elsewhere — confirm; it is not assigned in this file.
        """
        global ws
        await ws.close()
        client_ports.pop()
        print("Stopping WebSocket client...")

    async def startClient(self, clientPort):
        """Connect to ws://localhost:<clientPort> and answer every server
        message with the Chaindesk agent's reply, mirroring the exchange
        in the Streamlit chat UI."""
        uri = f'ws://localhost:{clientPort}'
        client_ports.append(clientPort)
        st.session_state['client_ports'] = client_ports
        st.session_state.clientPort = clientPort
        status = self.status
        async with websockets.connect(uri) as websocket:
            while True:
                status.update(label=self.name, state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.askChaindesk(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label=self.name, state="complete", expanded=True)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientCharacter.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import streamlit as st
|
6 |
+
from PyCharacterAI import Client
|
7 |
+
|
8 |
+
# Define the websocket client class
|
9 |
+
class WebSocketClient2:
    """Websocket client that forwards server messages to a Character.AI
    chat (via PyCharacterAI) and returns the character's replies."""

    def __init__(self, uri):
        # Address of the NeuralGPT websocket server to connect to.
        self.uri = uri

    def run(self):
        """Run the client loop in a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the connection coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist anywhere in this class; the coroutine is startClient().
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Authenticate with Character.AI, open/continue the configured
        chat, then relay every server message to the character and send
        its answer back, mirroring the exchange in the Streamlit UI.

        Reads ``tokenChar`` and ``character_ID`` from Streamlit session
        state (assumed to be set by the UI before this runs)."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        client = Client()
        await client.authenticate_with_token(st.session_state.tokenChar)
        chat = await client.create_or_continue_chat(st.session_state.character_ID)
        async with websockets.connect(self.uri) as websocket:
            while True:
                status.update(label="runs", state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    answer = await chat.send_message(input_message)
                    response = f"{answer.src_character_name}: {answer.text}"
                    print(response)
                    outputMsg1 = st.chat_message("ai")
                    outputMsg1.markdown(response)
                    await websocket.send(response)
                    status.update(label="runs", state="complete", expanded=True)
                    continue

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientFireworks.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import fireworks.client
|
6 |
+
import streamlit as st
|
7 |
+
|
8 |
+
# Define the websocket client class
|
9 |
+
class WebSocketClient:
    """Websocket client that answers server messages with the Fireworks
    llama-v2-7b-chat model, replaying recent chat-hub.db history as
    context and mirroring the exchange in the Streamlit UI."""

    def __init__(self, uri):
        # Address of the NeuralGPT websocket server to connect to.
        self.uri = uri

    async def chatCompletion(self, question):
        """Query Fireworks with ``question`` plus the last 10 stored
        messages as context.  Returns the answer text, or an error
        string if the request fails.

        NOTE(review): assumes ``fireworks.client.api_key`` has been set
        elsewhere before this is called — confirm.
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Read recent history and close the handle promptly — the
            # original leaked this connection.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 10")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # Column 1 is the sender tag, column 2 the message text.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'server':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = fireworks.client.ChatCompletion.create(
                model="accounts/fireworks/models/llama-v2-7b-chat",
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ],
                stream=False,
                n=1,
                max_tokens=2500,
                temperature=0.5,
                top_p=0.7,
            )

            answer = response.choices[0].message.content
            print(answer)
            return str(answer)

        except Exception as error:
            print("Error while fetching or processing the response:", error)
            return "Error: Unable to generate a response."

    def run(self):
        """Run the client loop in a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the connection coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist; the connection coroutine is startClient().
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every incoming message with
        chatCompletion(), mirroring the exchange in the Streamlit UI."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        async with websockets.connect(self.uri) as websocket:
            while True:
                status.update(label="runs", state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.chatCompletion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label="runs", state="complete", expanded=True)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientFlowise.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import g4f
|
6 |
+
import requests
|
7 |
+
import streamlit as st
|
8 |
+
|
9 |
+
client_ports = []
|
10 |
+
|
11 |
+
# Define the websocket client class
|
12 |
+
class WebSocketClient5:
    """Websocket client that relays server messages to a local Flowise
    prediction endpoint and sends the flow's answers back."""

    def __init__(self, clientPort):
        # Port of the local NeuralGPT server this client connects to.
        self.clientPort = clientPort

        if "client_ports" not in st.session_state:
            st.session_state['client_ports'] = ""

    async def askQuestion(self, question):
        """POST ``question`` to the Flowise flow selected in session
        state and return the ``text`` field of the JSON reply, or None
        on failure (the error is printed)."""
        if "flow" not in st.session_state:
            st.session_state.flow = ""

        flow = st.session_state.flow

        API_URL = f"http://localhost:3000/api/v1/prediction/{flow}"

        try:
            def query(payload):
                response = requests.post(API_URL, json=payload)
                return response.json()

            response = query({
                "question": question,
            })

            print(response)
            answer = response["text"]
            return answer

        except Exception as e:
            print(e)

    def run(self):
        """Run the client loop in a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the connection coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist; the coroutine is startClient(clientPort).
        loop.run_until_complete(self.startClient(self.clientPort))

    async def stop_client(self):
        """Close the active connection and unregister the port.

        Fix: the original was defined without ``self``, so calling it on
        an instance raised TypeError.
        NOTE(review): ``ws`` is assumed to be a module-level handle set
        elsewhere — confirm; it is not assigned in this file.
        """
        global ws
        await ws.close()
        client_ports.pop()
        print("Stopping WebSocket client...")

    async def startClient(self, clientPort):
        """Connect to ws://localhost:<clientPort> and answer every server
        message with the Flowise flow's reply, mirroring the exchange in
        the Streamlit chat UI."""
        uri = f'ws://localhost:{clientPort}'
        client_ports.append(clientPort)
        st.session_state['client_ports'] = client_ports
        name = f"Flowise client port: {clientPort}"
        status = st.sidebar.status(label=name, state="complete", expanded=False)
        async with websockets.connect(uri) as websocket:
            while True:
                status.update(label=name, state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.askQuestion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label=name, state="complete", expanded=True)
                    continue

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientForefront.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import fireworks.client
|
6 |
+
import streamlit as st
|
7 |
+
from forefront import ForefrontClient
|
8 |
+
|
9 |
+
# Define the websocket client class
|
10 |
+
class WebSocketClient4:
    """Websocket client that answers server messages with a Forefront
    chat model, using the most recent exchange from chat-hub.db as
    one-turn context."""

    def __init__(self, uri):
        # Address of the NeuralGPT websocket server to connect to.
        self.uri = uri

    async def chatCompletion(self, question):
        """Ask the Forefront model to answer ``question``.

        Uses the latest stored server message / client response (if any)
        as context.  Returns the model's message object, or None on
        failure (the error is printed).
        """
        if "forefront_api" not in st.session_state:
            st.session_state.forefront_api = ""

        forefrontAPI = st.session_state.forefront_api

        ff = ForefrontClient(api_key=forefrontAPI)

        system_instruction = "You are now integrated with a local instance of a hierarchical cooperative multi-agent framework called NeuralGPT"

        try:
            # Read the most recent history and close the handle promptly —
            # the original leaked this connection.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 3")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # Column 1 is the sender tag, column 2 the message text.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'server':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            # Fix: the original indexed past_user_inputs[-1] /
            # generated_responses[-1] unconditionally, which raised
            # IndexError (and so returned None) whenever the history was
            # empty.  Include each context turn only if it exists.
            history = []
            if past_user_inputs:
                history.append({"role": "user", "content": past_user_inputs[-1]})
            if generated_responses:
                history.append({"role": "assistant", "content": generated_responses[-1]})

            response = ff.chat.completions.create(
                messages=[
                    {"role": "system", "content": system_instruction},
                    *history,
                    {"role": "user", "content": question}
                ],
                stream=False,
                model="forefront/neural-chat-7b-v3-1-chatml",
                temperature=0.5,
                max_tokens=500,
            )

            response_text = response.choices[0].message
            print("Extracted message text:", response_text)
            return response_text

        except Exception as e:
            print(e)

    def run(self):
        """Run the client loop in a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the connection coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist; the connection coroutine is startClient().
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every incoming message with
        chatCompletion(), mirroring the exchange in the Streamlit UI."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        async with websockets.connect(self.uri) as websocket:
            while True:
                status.update(label="runs", state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.chatCompletion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label="runs", state="complete", expanded=True)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientG4F.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import g4f
|
6 |
+
import streamlit as st
|
7 |
+
|
8 |
+
# Define the websocket client class
|
9 |
+
class WebSocketClient1:
    """Websocket client that answers server messages with the g4f Bing
    provider (gpt-4), replaying recent chat-hub.db history as context."""

    def __init__(self, uri):
        # Address of the NeuralGPT websocket server to connect to.
        self.uri = uri

    async def askQuestion(self, question):
        """Ask the g4f backend for an answer to ``question``, with the
        last 30 stored messages replayed as context.  Returns the reply,
        or None on failure (the error is printed)."""
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Read recent history and close the handle promptly — the
            # original leaked this connection.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # Column 1 is the sender tag, column 2 the message text.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'server':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = await g4f.ChatCompletion.create_async(
                model=g4f.models.gpt_4,
                provider=g4f.Provider.Bing,
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ])

            print(response)
            return response

        except Exception as e:
            print(e)

    def run(self):
        """Run the client loop in a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Drive the connection coroutine on a thread-local event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Fix: the original called self.client(), a method that does not
        # exist; the connection coroutine is startClient().
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every incoming message with
        askQuestion(), mirroring the exchange in the Streamlit UI."""
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        async with websockets.connect(self.uri) as websocket:
            while True:
                status.update(label="runs", state="running", expanded=True)
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.askQuestion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label="runs", state="complete", expanded=True)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientG4F2.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import websockets
|
3 |
+
import threading
|
4 |
+
import sqlite3
|
5 |
+
import g4f
|
6 |
+
import streamlit as st
|
7 |
+
|
8 |
+
# Define the websocket client class
class WebSocketClient3:
    """Websocket client that bridges a NeuralGPT hub server and the g4f
    (gpt4free) chat-completion backend, seeding each reply with recent
    chat history stored in a local SQLite database (chat-hub.db).
    """

    def __init__(self, uri):
        # Initialize the uri attribute: address of the websocket server,
        # e.g. "ws://localhost:5000".
        self.uri = uri

    async def askQuestion(self, question):
        """Answer *question* with g4f, rebuilding context from chat history.

        Reads the 30 most recent rows from chat-hub.db and maps them to
        user/assistant turns, then calls g4f asynchronously.
        Returns the model's reply, or None if the DB read or the API
        call fails (errors are printed, not raised).
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        db = None
        try:
            db = sqlite3.connect('chat-hub.db')
            cursor = db.cursor()
            # Fetch the last 30 messages, then reverse so they are in
            # chronological (oldest-first) order for the prompt.
            cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
            messages = cursor.fetchall()
            messages.reverse()

            past_user_inputs = []
            generated_responses = []

            for message in messages:
                # message[1] is the sender column, message[2] the text.
                # NOTE(review): rows are split into two flat lists, so
                # strict user/assistant interleaving is not preserved in
                # the prompt — kept as-is to match existing behavior.
                if message[1] == 'client':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = await g4f.ChatCompletion.create_async(
                model="gpt-3.5-turbo",
                provider=g4f.Provider.You,
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ])

            print(response)
            return response

        except Exception as e:
            print(e)
        finally:
            # BUG FIX: the original never closed the SQLite connection,
            # leaking one handle per question (including on errors).
            if db is not None:
                db.close()

    # Define a function that will run the client in a separate thread
    def run(self):
        """Start the websocket client on a background daemonless thread."""
        # Create a thread object
        self.thread = threading.Thread(target=self.run_client)
        # Start the thread
        self.thread.start()

    # Define a function that will run the client using asyncio
    def run_client(self):
        """Thread target: run the client coroutine on a fresh event loop."""
        # Get the asyncio event loop
        loop = asyncio.new_event_loop()
        # Set the event loop as the current one
        asyncio.set_event_loop(loop)
        # BUG FIX: the original called self.client(), which does not exist
        # on this class — the coroutine is named startClient, so run()
        # crashed the worker thread with AttributeError immediately.
        loop.run_until_complete(self.startClient())

    # Define a coroutine that will connect to the server and exchange messages
    async def startClient(self):
        """Connect to self.uri and relay messages until the server closes."""
        # Connect to the server
        status = st.sidebar.status(label="runs", state="complete", expanded=False)
        async with websockets.connect(self.uri) as websocket:
            # Loop forever
            while True:
                status.update(label="runs", state="running", expanded=True)
                try:
                    # BUG FIX: recv() was outside the try block, so a
                    # closed connection raised ConnectionClosed uncaught
                    # and killed the coroutine.
                    input_message = await websocket.recv()
                    print(f"Server: {input_message}")
                    input_Msg = st.chat_message("assistant")
                    input_Msg.markdown(input_message)
                    response = await self.askQuestion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)
                    status.update(label="runs", state="complete", expanded=True)

                except websockets.ConnectionClosed:
                    # BUG FIX: `continue` on a dead connection caused the
                    # next recv() to raise again in a busy loop; break out.
                    print("client disconnected")
                    break

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|