Spaces: Running on Zero
- mysite/database/database.py +39 -0
- mysite/interpreter/interpreter.py +96 -0
- mysite/interpreter/process.py +99 -0
- mysite/libs/utilities.py +1 -0
- routers/ai/prompt +1 -10
- workspace/script.py +1 -0
mysite/database/database.py
CHANGED
@@ -6,6 +6,45 @@ import gradio as gr
 con = duckdb.connect(database="./workspace/mydatabase.duckdb")
 con.execute("CREATE TABLE IF NOT EXISTS items (id INTEGER, name VARCHAR);")
 
+# Extract the 'content' field from all elements in the result
+def insert(full_response,message):
+    age = 28
+    # Path to the database file
+    db_path = "./workspace/sample.duckdb"
+
+    # Connect to DuckDB (the database file is created if it does not exist)
+    con = duckdb.connect(database=db_path)
+    con.execute(
+        """
+        CREATE SEQUENCE IF NOT EXISTS sample_id_seq START 1;
+        CREATE TABLE IF NOT EXISTS samples (
+            id INTEGER DEFAULT nextval('sample_id_seq'),
+            name VARCHAR,
+            age INTEGER,
+            PRIMARY KEY(id)
+        );
+        """
+    )
+    cur = con.cursor()
+    con.execute("INSERT INTO samples (name, age) VALUES (?, ?)", (full_response, age))
+    con.execute("INSERT INTO samples (name, age) VALUES (?, ?)", (message, age))
+    # Export the data to a CSV file
+    con.execute("COPY samples TO 'sample.csv' (FORMAT CSV, HEADER)")
+    # Commit the data
+    con.commit()
+    # Select the data
+    cur = con.execute("SELECT * FROM samples")
+    # Fetch the results
+    res = cur.fetchall()
+    rows = ""
+    # Display the results
+    # Format the results into a string
+    rows = "\n".join([f"name: {row[0]}, age: {row[1]}" for row in res])
+    # Close the connection
+    con.close()
+    # print(cur.fetchall())
+insert(full_response,message)
+
 def setup_database_routes(app: FastAPI):
     def create_item(name):
         con.execute("INSERT INTO items (name) VALUES (?);", (name,))
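Note: a minimal usage sketch for the new insert() helper, assuming the module is importable as mysite.database.database and that the ./workspace directory exists. The module-level call insert(full_response,message) at the end of the diff references names that are not defined at import time, so a caller would normally invoke the function directly:

    from mysite.database.database import insert

    # Writes both strings into ./workspace/sample.duckdb and exports sample.csv
    insert("assistant reply text", "user message text")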
mysite/interpreter/interpreter.py
ADDED
@@ -0,0 +1,96 @@
+import os
+import shutil
+import hmac
+import hashlib
+import base64
+import subprocess
+import time
+from mysite.libs.logger import logger
+import async_timeout
+import asyncio
+import mysite.interpreter.interpreter_config
+
+GENERATION_TIMEOUT_SEC=60
+
+def set_environment_variables():
+    os.environ["OPENAI_API_BASE"] = "https://api.groq.com/openai/v1"
+    os.environ["OPENAI_API_KEY"] = "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"
+    os.environ["MODEL_NAME"] = "llama3-8b-8192"
+    os.environ["LOCAL_MODEL"] = "true"
+
+# Set the environment variable.
+def chat_with_interpreter(
+    message, history, a=None, b=None, c=None, d=None
+): # , openai_api_key):
+    # Set the API key for the interpreter
+    # interpreter.llm.api_key = openai_api_key
+    if message == "reset":
+        interpreter.reset()
+        return "Interpreter reset", history
+    full_response = ""
+    # add_conversation(history,20)
+    user_entry = {"role": "user", "type": "message", "content": message}
+    #messages.append(user_entry)
+    # Call interpreter.chat and capture the result
+    messages = []
+    recent_messages = history[-20:]
+    for conversation in recent_messages:
+        user_message = conversation[0]
+        user_entry = {"role": "user", "content": user_message}
+        messages.append(user_entry)
+        assistant_message = conversation[1]
+        assistant_entry = {"role": "assistant", "content": assistant_message}
+        messages.append(assistant_entry)
+
+    user_entry = {"role": "user", "content": message}
+    messages.append(user_entry)
+    #system_prompt = {"role": "system", "content": "あなたは日本語の優秀なアシスタントです。"}
+    #messages.insert(0, system_prompt)
+
+    for chunk in interpreter.chat(messages, display=False, stream=True):
+        # print(chunk)
+        # output = '\n'.join(item['content'] for item in result if 'content' in item)
+        full_response = format_response(chunk, full_response)
+        yield full_response # chunk.get("content", "")
+
+    yield full_response + rows # , history
+    return full_response, history
+
+async def completion(message: str, history, c=None, d=None):
+    from groq import Groq
+    client = Groq(api_key=os.getenv("api_key"))
+    messages = []
+    recent_messages = history[-20:]
+    for conversation in recent_messages:
+        user_message = conversation[0]
+        user_entry = {"role": "user", "content": user_message}
+        messages.append(user_entry)
+        assistant_message = conversation[1]
+        assistant_entry = {"role": "assistant", "content": assistant_message}
+        messages.append(assistant_entry)
+
+    user_entry = {"role": "user", "content": message}
+    messages.append(user_entry)
+    system_prompt = {"role": "system", "content": "あなたは日本語の優秀なアシスタントです。"}
+    messages.insert(0, system_prompt)
+    async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
+        try:
+            stream = client.chat.completions.create(
+                model="llama3-8b-8192",
+                messages=messages,
+                temperature=1,
+                max_tokens=1024,
+                top_p=1,
+                stream=True,
+                stop=None,
+            )
+            all_result = ""
+            for chunk in stream:
+                current_content = chunk.choices[0].delta.content or ""
+                all_result += current_content
+                yield current_content
+            yield all_result
+            #return all_result
+        except asyncio.TimeoutError:
+            raise HTTPException(status_code=504, detail="Stream timed out")
+
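Note: as committed, interpreter.py references several names it never imports or defines (interpreter, format_response, rows, HTTPException), so it relies on them being provided elsewhere. A minimal sketch of how the two generators might be consumed, assuming a Gradio-style (message, history) convention where history is a list of [user, assistant] pairs:

    import asyncio
    from mysite.interpreter.interpreter import chat_with_interpreter, completion

    history = [["hello", "hi, how can I help?"]]

    # chat_with_interpreter is a sync generator yielding progressively longer responses
    for partial in chat_with_interpreter("list the files in the workspace", history):
        print(partial)

    # completion is an async generator streaming Groq chat chunks
    async def main():
        async for chunk in completion("summarize the project", history):
            print(chunk, end="")

    asyncio.run(main())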
mysite/interpreter/process.py
ADDED
@@ -0,0 +1,99 @@
+import os
+import shutil
+import hmac
+import hashlib
+import base64
+import subprocess
+import time
+from mysite.libs.logger import logger
+import async_timeout
+import asyncio
+import mysite.interpreter.interpreter_config
+
+GENERATION_TIMEOUT_SEC=60
+
+def set_environment_variables():
+    os.environ["OPENAI_API_BASE"] = "https://api.groq.com/openai/v1"
+    os.environ["OPENAI_API_KEY"] = "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"
+    os.environ["MODEL_NAME"] = "llama3-8b-8192"
+    os.environ["LOCAL_MODEL"] = "true"
+
+def validate_signature(body: str, signature: str, secret: str) -> bool:
+    if secret is None:
+        logger.error("Secret is None")
+        return False
+
+    hash = hmac.new(
+        secret.encode("utf-8"), body.encode("utf-8"), hashlib.sha256
+    ).digest()
+    expected_signature = base64.b64encode(hash).decode("utf-8")
+    return hmac.compare_digest(expected_signature, signature)
+
+def no_process_file(prompt, foldername):
+    set_environment_variables()
+    try:
+        proc = subprocess.Popen(["mkdir", f"/home/user/app/routers/{foldername}"])
+    except subprocess.CalledProcessError as e:
+        return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+
+    no_extension_path = f"/home/user/app/routers/{foldername}/prompt"
+    time.sleep(1)
+    with open(no_extension_path, "a") as f:
+        f.write(prompt)
+    time.sleep(1)
+    try:
+        prompt_file_path = no_extension_path
+        with open(prompt_file_path, "w") as prompt_file:
+            prompt_file.write(prompt)
+    except Exception as e:
+        return f"Error writing prompt to file: {str(e)}"
+    time.sleep(1)
+    try:
+        proc = subprocess.Popen(
+            ["make", "run", foldername],
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+        )
+        stdout, stderr = proc.communicate(input="n\ny\ny\n")
+        return f"Processed Content:\n{stdout}\n\nMake Command Output:\n{stdout}\n\nMake Command Error:\n{stderr}"
+    except subprocess.CalledProcessError as e:
+        return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+
+
+
+def process_file(fileobj, prompt, foldername):
+    set_environment_variables()
+    try:
+        proc = subprocess.Popen(["mkdir", f"/home/user/app/routers/{foldername}"])
+    except subprocess.CalledProcessError as e:
+        return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+    time.sleep(2)
+    path = f"/home/user/app/routers/{foldername}/" + os.path.basename(fileobj)
+    shutil.copyfile(fileobj.name, path)
+    base_name = os.path.splitext(os.path.basename(fileobj))[0]
+    no_extension_path = f"/home/user/app/routers/{foldername}/{base_name}"
+    shutil.copyfile(fileobj, no_extension_path)
+    with open(no_extension_path, "a") as f:
+        f.write(prompt)
+    try:
+        prompt_file_path = no_extension_path
+        with open(prompt_file_path, "w") as prompt_file:
+            prompt_file.write(prompt)
+    except Exception as e:
+        return f"Error writing prompt to file: {str(e)}"
+    time.sleep(1)
+    try:
+        proc = subprocess.Popen(
+            ["make", "run", foldername],
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True,
+        )
+        stdout, stderr = proc.communicate(input="n\ny\ny\n")
+        return f"Processed Content:\n{stdout}\n\nMake Command Output:\n{stdout}\n\nMake Command Error:\n{stderr}"
+    except subprocess.CalledProcessError as e:
+        return f"Processed Content:\n{stdout}\n\nMake Command Error:\n{e.stderr}"
+
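Note: validate_signature() is a standard HMAC-SHA256 + Base64 webhook signature check (the scheme LINE uses for its X-Line-Signature header). A minimal sketch, assuming the raw request body and the signature are available as strings; the secret and body values below are hypothetical:

    import base64, hashlib, hmac
    from mysite.interpreter.process import validate_signature

    secret = "channel-secret"      # hypothetical channel secret
    body = '{"events": []}'        # raw webhook body as received

    # A sender computes the signature the same way the validator does
    digest = hmac.new(secret.encode("utf-8"), body.encode("utf-8"), hashlib.sha256).digest()
    signature = base64.b64encode(digest).decode("utf-8")

    assert validate_signature(body, signature, secret) is True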
mysite/libs/utilities.py
CHANGED
@@ -8,6 +8,7 @@ import time
 from mysite.libs.logger import logger
 import async_timeout
 import asyncio
+import mysite.interpreter.interpreter_config
 
 
 
routers/ai/prompt
CHANGED
@@ -1,10 +1 @@
-
-LINE image search system
-
-1. Data comes in from LINE
-2. Received via doPost
-3. If the type is an image, save it to Drive
-4. Upload the saved data to S3
-5. The data is retrieved from Secrets
-6. Create the flow with PlantUML
-7. Create the system documentation
+Create an HTML sample
workspace/script.py
ADDED
@@ -0,0 +1 @@
+print('Hello, World!')