Michelangiolo committed
Commit f33a891
1 Parent(s): b9c0847

first push

Files changed (3)
  1. app.py +133 -0
  2. gpt3_function.py +84 -0
  3. gradio_.ipynb +236 -0
app.py ADDED
@@ -0,0 +1,133 @@
+ import os
+ # os.system('pip install requests')
+ import requests
+ # gpt3_key = os.environ['GPT3_API_KEY']
+ from gpt3_function import *
+
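+ # Flattens the (user, bot) tuples in `history`, appends the newest user input,
+ # and joins everything into a single newline-separated prompt string.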
+ def history2prompt(history, extra):
+     # history = [('The other day it was raining, and while I was driving a hit a stranger with my car.', 'Did you stop and render aid to the victim after the accident?'), ('True', 'Did you kill the guy?'), ('False', 'Was he part of the Mafia?')]
+     history_ = [item for tup in history for item in tup]
+     history_.append(extra)
+     print(history_)
+
+     if len(history_) > 1:
+         combinations = []
+         for i in range(1, len(history_)):
+             if i % 2 == 1:
+                 combinations.append([i, i+2])
+
+         history_full = list()
+         history_full.append(history_[0])
+         for range_ in combinations:
+             history_full.append(' - '.join(history_[range_[0]:range_[1]]))
+
+         return '\n'.join(history_full)
+     else:
+         return history_[0]
+
+ # gpt3_keywords('The other day it was raining, and while I was driving a hit a stranger with my car.')
+
+ import subprocess
+ import random
+ import gradio as gr
+ import requests
+
+ history_ = None
+ history = None
+ history_prompt = None
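+ # Gradio callback: builds a prompt from the selected persona, the running chat
+ # history and the new user input, calls gpt3(), and returns the updated
+ # (chatbot, state) pair.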
+ def predict(bot_type_radio, input, history, start_var):
+     print('@@@@@@@@@@@@@@@@@@@@@', bot_type_radio)
+     bot_type = {
+         "English Teacher" : """
+         Impersonate an English teacher, help the student practice by using questions or replies. Avoid introducing yourself.
+         Reply with max. one line
+         If last_input is in a wrong english, reply by correcting it
+         """,
+         "Sales Consultant" : """
+         Impersonate a Sales Consultant, giving technical advice on how to sell. Avoid introducing yourself.
+         additional information can be found in the history, don't mention it if not necessary
+         Answer the given query
+         """,
+         "Meditation Consultant" : """
+         Impersonate a Meditation Consultant, giving technical advice on techniques of breathing, meditation and relaxing. Avoid introducing yourself.
+         additional information can be found in the history, don't mention it if not necessary
+         Answer the given query
+         """,
+         "SEO Consultant" : """
+         Impersonate a Sales Consultant, giving technical advice on how to use SEO and which tools to use. Avoid introducing yourself.
+         additional information can be found in the history, don't mention it if not necessary
+         Answer the given query
+         """,
+         "Reskilling Consultant" : """
+         Impersonate a Reskilling Consultant, giving technical advice on how to help a person decide its own career. Avoid introducing yourself.
+         additional information can be found in the history, don't mention it if not necessary
+         Answer the given query
+         """,
+     }
+     #WE CAN PLAY WITH user_input AND bot_answer, as well as history
+     user_input = input
+
+     # print('##', [x for x in history], input)
+     global history_prompt
+     global history_
+
+     if start_var == True:
+         history_prompt = None
+         start_var = False
+
+     # print('@@@', history)
+     history_prompt = history2prompt(history, input)
+     # print('###', history_prompt)
+
+     user_history = [x[0] for x in history[-2:]]
+
+     print('###', user_history)
+
+     # history: {history[:-2]}
+     prompt = f"""
+     history: {user_history}
+     query: {history_prompt}
+     {bot_type[bot_type_radio]}
+     """
+     print(prompt)
+     bot_answer = gpt3(prompt=prompt, model='gpt-3.5-turbo', service='azure')
+
+     response = list()
+     response = [(input, bot_answer)]
+
+     history.append(response[0])
+     response = history
+
+     history_ = history
+     # print('#history', history)
+     # print('#response', response)
+
+     return response, history
+
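+ # Gradio Blocks UI: persona radio selector, chatbot window and a single-line
+ # textbox; submitting the textbox calls predict() and then clears the input.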
+ demo = gr.Blocks()
+ with demo:
+     gr.Markdown(
+         """
+         <center>
+         Chat with your Lawyer
+         </center>
+         """
+     )
+     state = gr.Variable(value=[]) #beginning
+     start_var = gr.Variable(value=True) #beginning
+     bot_type_radio = gr.Radio(choices=['English Teacher', 'Sales Consultant', 'Meditation Consultant', 'SEO Consultant', 'Reskilling Consultant'], value='English Teacher')
+     chatbot = gr.Chatbot(color_map=("#00ff7f", "#00d5ff"))
+     text = gr.Textbox(
+         label="Talk to your AI consultant (press enter to submit)",
+         placeholder="I have a question about...",
+         max_lines=1,
+     )
+     text.submit(predict, [bot_type_radio, text, state, start_var], [chatbot, state])
+     text.submit(lambda x: "", text, text)
+
+     # grading = gr.Radio([x for x in range(0, 5)])
+     # btn2 = gr.Button(value="grade this response")
+     # true_false_radio = gr.Radio(choices=["True", "False"], label="Select True or False")
+     # iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)
+
+ demo.launch(share=False)
gpt3_function.py ADDED
@@ -0,0 +1,84 @@
+ import requests
+ import os
+
+ #openai
+ # openai_api_key = os.environ['GPT3_API_KEY_OPENAI']
+
+ #azure
+ azure_api_key = os.environ['GPT3_API_KEY_AZURE']
+ azure_api_base = "https://openai-619.openai.azure.com/" # your endpoint should look like the following https://YOUR_RESOURCE_NAME.openai.azure.com/
+ azure_api_type = 'azure'
+ azure_api_version = '2022-12-01' # this may change in the future
+
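+ # Thin wrapper around the OpenAI and Azure OpenAI REST endpoints: routes the
+ # request based on `service` and `model` and returns the generated text.
+ # (The OpenAI branches expect `openai_api_key`, which is commented out above.)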
+ def gpt3(prompt, model, service, max_tokens=400):
+
+     if service == 'openai':
+         if model == 'gpt-3.5-turbo':
+             api_endpoint = "https://api.openai.com/v1/chat/completions"
+             data = {
+                 "model": "gpt-3.5-turbo",
+                 "messages": [{"role": "user", "content": prompt}]
+             }
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {openai_api_key}"
+             }
+             response = requests.post(api_endpoint, headers=headers, json=data)
+             print(response)
+             return response.json()['choices'][0]['message']['content']
+
+         elif model == 'gpt-3':
+             api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
+             data = {
+                 "prompt": prompt,
+                 "max_tokens": max_tokens,
+                 "temperature": 0.5
+             }
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {openai_api_key}"
+             }
+             response = requests.post(api_endpoint, headers=headers, json=data)
+             return response.json()["choices"][0]["text"]
+
+     elif service == 'azure':
+
+         if model == 'gpt-3':
+             azure_deployment_name='gpt3'
+
+             api_endpoint = f"""{azure_api_base}openai/deployments/{azure_deployment_name}/completions?api-version={azure_api_version}"""
+
+             headers = {
+                 "Content-Type": "application/json",
+                 "api-key": azure_api_key
+             }
+
+             data = {
+                 "prompt": prompt,
+                 "max_tokens": max_tokens
+             }
+             response = requests.post(api_endpoint, headers=headers, json=data)
+             generated_text = response.json()["choices"][0]["text"]
+
+             return generated_text
+
+         elif model == 'gpt-3.5-turbo':
+             azure_deployment_name='gpt-35-turbo' #cannot be creative with the name
+             headers = {
+                 "Content-Type": "application/json",
+                 "api-key": azure_api_key
+             }
+             json_data = {
+                 'messages': [
+                     {
+                         'role': 'user',
+                         'content': prompt,
+                     },
+                 ],
+             }
+             api_endpoint = f"""{azure_api_base}openai/deployments/{azure_deployment_name}/chat/completions?api-version=2023-03-15-preview"""
+             response = requests.post(api_endpoint, headers=headers, json=json_data)
+             return response.json()['choices'][0]['message']['content']
+
+ #azure is much more sensitive to max_tokens
+ gpt3('how are you?', model='gpt-3.5-turbo', service='azure')
gradio_.ipynb ADDED
@@ -0,0 +1,236 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "c:\\Users\\ardit\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\gradio\\components.py:4503: UserWarning: The 'color_map' parameter has been deprecated.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Running on local URL: http://127.0.0.1:7897\n",
+ "\n",
+ "To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "<div><iframe src=\"http://127.0.0.1:7897/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 48,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "@@@@@@@@@@@@@@@@@@@@@ Meditation Consultant\n",
+ "['tell me wuo is god']\n",
+ "### []\n",
+ "\n",
+ " history: []\n",
+ " query: tell me wuo is god\n",
+ " \n",
+ " Impersonate a Meditation Consultant, giving technical advice on techniques of breathing, meditation and relaxing. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \n",
+ " \n",
+ "@@@@@@@@@@@@@@@@@@@@@ Meditation Consultant\n",
+ "['tell me wuo is god', \"In regards to your query, as a meditation consultant, it is important to focus on the present moment and your breath during your practice. Meditation can aid in relaxation and reducing stress levels. As for the concept of god, it varies for each individual and their beliefs. However, meditation can be a tool to connect with one's spirituality or beliefs. Would you like more information on meditation techniques?\", 'Do I need to believe in god to meditate?']\n",
+ "### ['tell me wuo is god']\n",
+ "\n",
+ " history: ['tell me wuo is god']\n",
+ " query: tell me wuo is god\n",
+ "In regards to your query, as a meditation consultant, it is important to focus on the present moment and your breath during your practice. Meditation can aid in relaxation and reducing stress levels. As for the concept of god, it varies for each individual and their beliefs. However, meditation can be a tool to connect with one's spirituality or beliefs. Would you like more information on meditation techniques? - Do I need to believe in god to meditate?\n",
+ " \n",
+ " Impersonate a Meditation Consultant, giving technical advice on techniques of breathing, meditation and relaxing. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \n",
+ " \n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "# os.system('pip install requests')\n",
+ "import requests\n",
+ "# gpt3_key = os.environ['GPT3_API_KEY']\n",
+ "from gpt3_function import *\n",
+ "\n",
+ "def history2prompt(history, extra):\n",
+ " # history = [('The other day it was raining, and while I was driving a hit a stranger with my car.', 'Did you stop and render aid to the victim after the accident?'), ('True', 'Did you kill the guy?'), ('False', 'Was he part of the Mafia?')]\n",
+ " history_ = [item for tup in history for item in tup]\n",
+ " history_.append(extra)\n",
+ " print(history_)\n",
+ "\n",
+ " if len(history_) > 1:\n",
+ " combinations = []\n",
+ " for i in range(1, len(history_)):\n",
+ " if i % 2 == 1:\n",
+ " combinations.append([i, i+2])\n",
+ "\n",
+ " history_full = list()\n",
+ " history_full.append(history_[0])\n",
+ " for range_ in combinations:\n",
+ " history_full.append(' - '.join(history_[range_[0]:range_[1]]))\n",
+ "\n",
+ " return '\\n'.join(history_full)\n",
+ " else:\n",
+ " return history_[0]\n",
+ "\n",
+ "# gpt3_keywords('The other day it was raining, and while I was driving a hit a stranger with my car.')\n",
+ "\n",
+ "import subprocess\n",
+ "import random\n",
+ "import gradio as gr\n",
+ "import requests\n",
+ "\n",
+ "history_ = None\n",
+ "history = None\n",
+ "history_prompt = None\n",
+ "def predict(bot_type_radio, input, history, start_var):\n",
+ " print('@@@@@@@@@@@@@@@@@@@@@', bot_type_radio)\n",
+ " bot_type = {\n",
+ " \"English Teacher\" : \"\"\"\n",
+ " Impersonate an English teacher, help the student practice by using questions or replies. Avoid introducing yourself.\n",
+ " Reply with max. one line\n",
+ " If last_input is in a wrong english, reply by correcting it\n",
+ " \"\"\",\n",
+ " \"Sales Consultant\" : \"\"\"\n",
+ " Impersonate a Sales Consultant, giving technical advice on how to sell. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \"\"\",\n",
+ " \"Meditation Consultant\" : \"\"\"\n",
+ " Impersonate a Meditation Consultant, giving technical advice on techniques of breathing, meditation and relaxing. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \"\"\",\n",
+ " \"SEO Consultant\" : \"\"\"\n",
+ " Impersonate a Sales Consultant, giving technical advice on how to use SEO and which tools to use. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \"\"\",\n",
+ " \"Reskilling Consultant\" : \"\"\"\n",
+ " Impersonate a Reskilling Consultant, giving technical advice on how to help a person decide its own career. Avoid introducing yourself.\n",
+ " additional information can be found in the history, don't mention it if not necessary\n",
+ " Answer the given query\n",
+ " \"\"\",\n",
+ " }\n",
+ " #WE CAN PLAY WITH user_input AND bot_answer, as well as history\n",
+ " user_input = input\n",
+ "\n",
+ " # print('##', [x for x in history], input)\n",
+ " global history_prompt\n",
+ " global history_\n",
+ "\n",
+ " if start_var == True:\n",
+ " history_prompt = None\n",
+ " start_var = False\n",
+ "\n",
+ " # print('@@@', history)\n",
+ " history_prompt = history2prompt(history, input)\n",
+ " # print('###', history_prompt)\n",
+ "\n",
+ " user_history = [x[0] for x in history[-2:]]\n",
+ "\n",
+ " print('###', user_history)\n",
+ " \n",
+ " # history: {history[:-2]}\n",
+ " prompt = f\"\"\"\n",
+ " history: {user_history}\n",
+ " query: {history_prompt}\n",
+ " {bot_type[bot_type_radio]}\n",
+ " \"\"\"\n",
+ " print(prompt)\n",
+ " bot_answer = gpt3(prompt=prompt, model='gpt-3.5-turbo', service='azure')\n",
+ "\n",
+ " response = list()\n",
+ " response = [(input, bot_answer)]\n",
+ " \n",
+ " history.append(response[0])\n",
+ " response = history\n",
+ "\n",
+ " history_ = history\n",
+ " # print('#history', history)\n",
+ " # print('#response', response)\n",
+ "\n",
+ " return response, history\n",
+ "\n",
+ "demo = gr.Blocks()\n",
+ "with demo:\n",
+ " gr.Markdown(\n",
+ " \"\"\"\n",
+ " <center> \n",
+ " Chat with your Lawyer\n",
+ " </center>\n",
+ " \"\"\"\n",
+ " )\n",
+ " state = gr.Variable(value=[]) #beginning\n",
+ " start_var = gr.Variable(value=True) #beginning\n",
+ " bot_type_radio = gr.Radio(choices=['English Teacher', 'Sales Consultant', 'Meditation Consultant', 'SEO Consultant', 'Reskilling Consultant'], value='English Teacher')\n",
+ " chatbot = gr.Chatbot(color_map=(\"#00ff7f\", \"#00d5ff\"))\n",
+ " text = gr.Textbox(\n",
+ " label=\"Talk to your AI consultant (press enter to submit)\",\n",
+ " placeholder=\"I have a question about...\",\n",
+ " max_lines=1,\n",
+ " )\n",
+ " text.submit(predict, [bot_type_radio, text, state, start_var], [chatbot, state])\n",
+ " text.submit(lambda x: \"\", text, text)\n",
+ "\n",
+ " # grading = gr.Radio([x for x in range(0, 5)])\n",
+ " # btn2 = gr.Button(value=\"grade this response\")\n",
+ " # true_false_radio = gr.Radio(choices=[\"True\", \"False\"], label=\"Select True or False\")\n",
+ " # iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)\n",
+ "\n",
+ "demo.launch(share=False)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.9"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }