Spaces:
Runtime error
Runtime error
Commit
·
0820f13
1
Parent(s):
7c242d7
changes
Browse files
app.py
CHANGED
@@ -1,157 +1,42 @@
|
|
1 |
import os
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
def gpt3_question(prompt):
|
6 |
-
api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
|
7 |
-
api_key = "sk-REDACTED"  # NOTE(review): a live OpenAI API key was committed here — revoke it immediately and load it from an environment variable (e.g. os.environ["OPENAI_API_KEY"]) instead of hard-coding it
|
8 |
-
headers = {
|
9 |
-
"Content-Type": "application/json",
|
10 |
-
"Authorization": f"Bearer {api_key}"
|
11 |
-
}
|
12 |
-
data = {
|
13 |
-
"prompt": prompt,
|
14 |
-
"max_tokens": 400,
|
15 |
-
"temperature": 0.5
|
16 |
-
}
|
17 |
-
print('sending request')
|
18 |
-
response = requests.post(api_endpoint, headers=headers, json=data)
|
19 |
-
print(response)
|
20 |
-
generated_text = response.json()["choices"][0]["text"]
|
21 |
-
|
22 |
-
return generated_text
|
23 |
-
|
24 |
-
|
25 |
-
def chatgpt3_question(prompt):
|
26 |
-
|
27 |
-
url = "https://api.openai.com/v1/chat/completions"
|
28 |
-
api_key = "sk-REDACTED"  # NOTE(review): same leaked OpenAI key as above — revoke it and read it from an environment variable
|
29 |
-
|
30 |
-
headers = {
|
31 |
-
"Content-Type": "application/json",
|
32 |
-
"Authorization": f"Bearer {api_key}"
|
33 |
-
}
|
34 |
-
|
35 |
-
data = {
|
36 |
-
"model": "gpt-3.5-turbo",
|
37 |
-
"messages": [{"role": "user", "content": prompt}]
|
38 |
-
}
|
39 |
-
|
40 |
-
response = requests.post(url, headers=headers, json=data)
|
41 |
-
generated_text = response.json()['choices'][0]['message']['content']
|
42 |
-
|
43 |
-
return generated_text
|
44 |
-
|
45 |
-
|
46 |
-
def history2prompt(history, extra):
|
47 |
-
# history = [('The other day it was raining, and while I was driving a hit a stranger with my car.', 'Did you stop and render aid to the victim after the accident?'), ('True', 'Did you kill the guy?'), ('False', 'Was he part of the Mafia?')]
|
48 |
-
history_ = [item for tup in history for item in tup]
|
49 |
-
history_.append(extra)
|
50 |
-
print(history_)
|
51 |
-
|
52 |
-
if len(history_) > 1:
|
53 |
-
combinations = []
|
54 |
-
for i in range(1, len(history_)):
|
55 |
-
if i % 2 == 1:
|
56 |
-
combinations.append([i, i+2])
|
57 |
-
|
58 |
-
history_full = list()
|
59 |
-
history_full.append(history_[0])
|
60 |
-
for range_ in combinations:
|
61 |
-
history_full.append(' - '.join(history_[range_[0]:range_[1]]))
|
62 |
-
|
63 |
-
return '\n'.join(history_full)
|
64 |
-
else:
|
65 |
-
return history_[0]
|
66 |
-
|
67 |
-
# gpt3_keywords('The other day it was raining, and while I was driving a hit a stranger with my car.')
|
68 |
-
|
69 |
-
import subprocess
|
70 |
-
import random
|
71 |
import gradio as gr
|
72 |
-
import
|
73 |
-
|
74 |
-
history = None
|
75 |
-
history_prompt = None
|
76 |
-
history_final = None
|
77 |
-
block_predict = False
|
78 |
-
block_advice = False
|
79 |
-
|
80 |
-
def predict(input, history):
|
81 |
-
#WE CAN PLAY WITH user_input AND bot_answer, as well as history
|
82 |
-
user_input = input
|
83 |
-
|
84 |
-
# print('##', [x for x in history], input)
|
85 |
-
global history_prompt
|
86 |
-
global history_final
|
87 |
-
global block_predict
|
88 |
-
|
89 |
-
if block_predict == False:
|
90 |
-
print('@@@', history)
|
91 |
-
history_prompt = history2prompt(history, input)
|
92 |
-
print('###', history_prompt)
|
93 |
-
|
94 |
-
prompt = f"""
|
95 |
-
Imagine being a criminal lawyer being told the following story with the following circumstances: {history_prompt}
|
96 |
-
Output the first relevant legal question that can result in the highest incrimination for the client (if somebody is hurt, start from fatal injuries), and that can only be answered as Yes or No
|
97 |
-
"""
|
98 |
-
bot_answer = gpt3_question(prompt)
|
99 |
-
|
100 |
-
response = list()
|
101 |
-
response = [(input, bot_answer)]
|
102 |
-
|
103 |
-
history.append(response[0])
|
104 |
-
response = history
|
105 |
-
history_final = history
|
106 |
|
107 |
-
|
108 |
-
# print('#response', response)
|
109 |
|
110 |
-
|
|
|
|
|
|
|
|
|
111 |
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
global block_advice
|
117 |
|
118 |
-
|
119 |
-
|
120 |
-
prompt = f"""
|
121 |
-
Imagine being an Ohio criminal lawyer being told the following story with the following circumstances: {history_prompt}
|
122 |
-
Tell the client how much does he risk in terms of criminal charges, prison, and cite sources from law books
|
123 |
-
"""
|
124 |
-
bot_answer = gpt3_question(prompt)
|
125 |
|
126 |
-
|
|
|
|
|
127 |
|
128 |
-
|
129 |
-
block_advice = True
|
130 |
-
return history_final, history_final
|
131 |
|
132 |
-
|
133 |
-
|
134 |
-
gr.Markdown(
|
135 |
-
"""
|
136 |
-
<center>
|
137 |
-
Chat with Morty by typing in the input box below.
|
138 |
-
</center>
|
139 |
-
"""
|
140 |
-
)
|
141 |
-
state = gr.Variable(value=[]) #beginning
|
142 |
-
chatbot = gr.Chatbot(color_map=("#00ff7f", "#00d5ff"))
|
143 |
-
text = gr.Textbox(
|
144 |
-
label="Talk to your lawyer (press enter to submit)",
|
145 |
-
value="The other day it was raining, and while I was driving a hit a stranger with my car.",
|
146 |
-
placeholder="Reply yes or No",
|
147 |
-
max_lines=1,
|
148 |
-
)
|
149 |
-
text.submit(predict, [text, state], [chatbot, state])
|
150 |
-
text.submit(lambda x: "", text, text)
|
151 |
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
# iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)
|
156 |
|
157 |
-
|
|
|
|
|
|
|
|
|
|
|
|
1 |
# --- Environment and model setup ---------------------------------------------
import os

# Install runtime dependencies on the Space at startup.
# NOTE(review): these belong in requirements.txt; kept as-is so the app stays
# self-contained on the current Space configuration.
os.system('pip install openpyxl')
os.system('pip install sentence-transformers')

# All imports grouped once at the top (the original imported pandas and
# SentenceTransformer twice, in two separate places).
import numpy as np
import pandas as pd
import gradio as gr
from sentence_transformers import SentenceTransformer
from sklearn.neighbors import NearestNeighbors

# Embedding model used for the live queries; the corpus in df_encoded.parquet
# was pre-encoded with the same model.  Alternative: 'all-MiniLM-L6-v2'.
model = SentenceTransformer('all-mpnet-base-v2')

# Load the pre-encoded corpus.
df = pd.read_parquet('df_encoded.parquet')
# Assign a flat column index.  The original `df.columns = [[...]]` (nested
# list) produced a one-level MultiIndex, which is what forced the awkward
# `x[0].tolist()` unwrapping the author complained about in a comment.
df.columns = ['name', 'description', 'year', 'target', 'size', 'stage', 'raised', 'tags', 'text_vector_']
df = df.reset_index(drop=True)

# Parquet returns the stored embeddings as numpy arrays; convert each one to a
# plain list so sklearn gets a clean 2-D numeric matrix.
df_knn = [np.asarray(vec).tolist() for vec in df['text_vector_']]

# Fit the nearest-neighbour index once at startup (5 matches per query).
nbrs = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(df_knn)
|
|
|
|
|
|
|
|
|
|
|
23 |
|
24 |
+
def search(query):
    """Return the 5 catalogue rows most similar to the free-text *query*.

    Uses the module-level globals: `model` (sentence-transformer encoder),
    `nbrs` (fitted NearestNeighbors index) and `df` (the encoded corpus).
    """
    # Embed the query with the same model that encoded the corpus.
    query_vector = model.encode(query).tolist()

    # kneighbors expects a 2-D input, hence the single-row wrapper list.
    _, neighbour_rows = nbrs.kneighbors([query_vector])

    # Expose only the human-readable columns of the recommended rows.
    display_columns = ['name', 'description', 'year', 'target', 'size', 'stage', 'raised', 'tags']
    return df.iloc[list(neighbour_rows)[0]][display_columns]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
+
#the first module becomes text1, the second module file1
|
34 |
+
def greet(text1):
    """Gradio callback: forward the textbox contents straight to `search`."""
    return search(text1)
|
|
|
36 |
|
37 |
+
# Wire the callback into a minimal Gradio UI: one text input, one dataframe out.
demo_inputs = ['text']
demo_outputs = ["dataframe"]
iface = gr.Interface(fn=greet, inputs=demo_inputs, outputs=demo_outputs)
# Start the web server.  NOTE(review): `share=True` is not supported on
# Hugging Face Spaces (the Space already serves the app publicly) — confirm it
# is not contributing to the "Runtime error" shown on the Space.
iface.launch(share=True)
|
mapp.py
DELETED
@@ -1,42 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
os.system('pip install openpyxl')
|
3 |
-
os.system('pip install sentence-transformers')
|
4 |
-
import pandas as pd
|
5 |
-
import gradio as gr
|
6 |
-
from sentence_transformers import SentenceTransformer
|
7 |
-
|
8 |
-
model = SentenceTransformer('all-mpnet-base-v2') #all-MiniLM-L6-v2 #all-mpnet-base-v2
|
9 |
-
|
10 |
-
df = pd.read_parquet('df_encoded.parquet')
|
11 |
-
df.columns = [['name', 'description', 'year', 'target', 'size', 'stage', 'raised', 'tags', 'text_vector_']]
|
12 |
-
#if parsing from a parquet, I have a list of array that does not want to get changed
|
13 |
-
df_knn = [x[0].tolist() for x in df['text_vector_'].values.tolist()]
|
14 |
-
df = df.reset_index(drop=True)
|
15 |
-
|
16 |
-
from sklearn.neighbors import NearestNeighbors
|
17 |
-
import numpy as np
|
18 |
-
import pandas as pd
|
19 |
-
from sentence_transformers import SentenceTransformer
|
20 |
-
|
21 |
-
#prepare model
|
22 |
-
nbrs = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(df_knn)
|
23 |
-
|
24 |
-
def search(query):
|
25 |
-
product = model.encode(query).tolist()
|
26 |
-
# product = df.iloc[0]['text_vector_'] #use one of the products as sample
|
27 |
-
|
28 |
-
distances, indices = nbrs.kneighbors([product]) #input the vector of the reference object
|
29 |
-
|
30 |
-
#print out the description of every recommended product
|
31 |
-
return df.iloc[list(indices)[0]][['name', 'description', 'year', 'target', 'size', 'stage', 'raised', 'tags']]
|
32 |
-
|
33 |
-
#the first module becomes text1, the second module file1
|
34 |
-
def greet(text1):
|
35 |
-
return search(text1)
|
36 |
-
|
37 |
-
iface = gr.Interface(
|
38 |
-
fn=greet,
|
39 |
-
inputs=['text'],
|
40 |
-
outputs=["dataframe"]
|
41 |
-
)
|
42 |
-
iface.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|