Update app.py
app.py
CHANGED
@@ -128,55 +128,31 @@ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     conversation.append({'role': 'user', 'content': prompt})
     if len(document_section)>0:
         conversation.append({'role': 'assistant', 'content': document_section})
-    #response = openai.ChatCompletion.create(model=model, messages=conversation)
-
-    # streaming response
-    #result_textarea = st.empty()
-    #results=[]
-    #for responses in openai.Completion.create(model=model, prompt=conversation, stream=True):
-    # for responses in openai.ChatCompletion.create(model=model, messages=conversation, stream=True):
-    #results.append(str(responses.choices[0]))
-    # results.append(responses.choices[0].text)
-    #st.markdown(f'*{results}*')
-    # result = "".join(results).strip()
-    # result = result.replace('\n','')
-    # result_textarea.markdown(f'*{result}*')
 
 
-    #
+    # iterate through the stream of events
     start_time = time.time()
-
     response = openai.ChatCompletion.create(
         model='gpt-3.5-turbo',
         messages=conversation,
         temperature=0.5,
         stream=True  # again, we set stream=True
     )
-
-    # create variables to collect the stream of chunks
     collected_chunks = []
     collected_messages = []
-    # iterate through the stream of events
     for chunk in response:
         chunk_time = time.time() - start_time  # calculate the time delay of the chunk
         collected_chunks.append(chunk)  # save the event response
         chunk_message = chunk['choices'][0]['delta']  # extract the message
         collected_messages.append(chunk_message)  # save the message
-
-
-    #st.markdown(f'*{results}*')
-    #result = "".join(collected_messages).strip()
-    #result = result.replace('\n','')
-    st.markdown(f'*{collected_messages}*')
-
+    st.markdown(f'*{collected_messages[1]}*')
     st.markdown(f"Full response received {chunk_time:.2f} seconds after request")
     full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
     st.markdown(f"Full conversation received: {full_reply_content}")
-    #return response
-    #return response['choices'][0]['message']['content']
     return full_reply_content
-
 
+
+
 def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
     conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
     conversation.append({'role': 'user', 'content': prompt})
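Note that the commit replaces st.markdown(f'*{collected_messages}*') with st.markdown(f'*{collected_messages[1]}*'), which still renders a single raw delta dict rather than streaming text, and raises IndexError whenever the stream yields fewer than two chunks. Below is a minimal sketch of the incremental rendering that the commented-out result_textarea = st.empty() line was aiming at. It keeps the legacy (pre-1.0 openai-python) openai.ChatCompletion streaming call shown in the diff; the helper name stream_chat, and routing model_choice into the create call instead of hardcoding 'gpt-3.5-turbo', are assumptions, not part of the commit.

import time

import openai
import streamlit as st

# Hypothetical helper, not part of the commit: same conversation setup and
# legacy streaming call as chat_with_model, but the accumulated text is
# redrawn into a single st.empty() placeholder as each chunk arrives.
def stream_chat(prompt, document_section, model_choice='gpt-3.5-turbo'):
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section) > 0:
        conversation.append({'role': 'assistant', 'content': document_section})

    start_time = time.time()
    response = openai.ChatCompletion.create(
        model=model_choice,  # assumption: honor the argument instead of hardcoding
        messages=conversation,
        temperature=0.5,
        stream=True,
    )

    placeholder = st.empty()  # single slot that each redraw overwrites
    collected_messages = []
    for chunk in response:
        delta = chunk['choices'][0]['delta']  # {} or {'content': '...'}
        collected_messages.append(delta)
        partial = ''.join(m.get('content', '') for m in collected_messages)
        placeholder.markdown(f'*{partial}*')  # grows as chunks arrive

    st.markdown(f"Full response received {time.time() - start_time:.2f} seconds after request")
    return ''.join(m.get('content', '') for m in collected_messages)

Joining the deltas before rendering sidesteps the IndexError entirely: the placeholder simply shows whatever has arrived so far, and the final join is the same full_reply_content the function already returns.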