Zekun Wu committed on
Commit d079e5c · 1 Parent(s): 8115e1a
update
app.py CHANGED
@@ -5,7 +5,7 @@ from openai import AzureOpenAI
 from model import invoke, create_models, configure_settings, load_documents_and_create_index, \
     create_chat_prompt_template, execute_query
 
-client = AzureOpenAI(azure_endpoint =
+client = AzureOpenAI(azure_endpoint = "https://personalityanalysisfinetuning.openai.azure.com/",api_key=os.environ.get("AZURE_OPENAI_KEY"), api_version="2024-02-01")
 
 
 example_profile = {
@@ -52,18 +52,18 @@ example_profile = {
     'Years_in_comparable_role': 8}}
 
 # Function to generate a completion using OpenAI API
-def generate_one_completion(message, temperature):
-    response = client.chat.completions.create(
-        model="personality_gpt4o",
-        temperature=temperature,
-        max_tokens=1000, # Adjust based on desired response length
-        frequency_penalty=0.2, # To avoid repetition
-        presence_penalty=0.2, # To introduce new topics
-        messages= message,
-        stream=False
-    )
-
-    return response
+# def generate_one_completion(message, temperature):
+#     response = client.chat.completions.create(
+#         model="personality_gpt4o",
+#         temperature=temperature,
+#         max_tokens=1000, # Adjust based on desired response length
+#         frequency_penalty=0.2, # To avoid repetition
+#         presence_penalty=0.2, # To introduce new topics
+#         messages= message,
+#         stream=False
+#     )
+#
+#     return response
 
 import json
 
@@ -161,7 +161,7 @@ def main_app():
 
     with st.chat_message("assistant"):
         stream = client.chat.completions.create(
-            model="
+            model="personality_gpt4o",
             temperature=st.session_state['temperature'],
             max_tokens=1000, # Adjust based on desired response length
             frequency_penalty=0.2, # To avoid repetition
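
For context, this commit drops the generate_one_completion() helper in favor of a direct, streaming call to the Azure OpenAI deployment. Below is a minimal standalone sketch of that pattern. The endpoint URL, the AZURE_OPENAI_KEY environment variable, and the personality_gpt4o deployment name are taken from the diff; the sample messages list, the temperature value, and stream=True are assumptions, since the visible hunk shows only part of the call.

import os

from openai import AzureOpenAI

# Client setup mirroring the new line 8 of app.py; the key is read from the environment.
client = AzureOpenAI(
    azure_endpoint="https://personalityanalysisfinetuning.openai.azure.com/",
    api_key=os.environ.get("AZURE_OPENAI_KEY"),
    api_version="2024-02-01",
)

# Streaming chat completion with the same sampling parameters as the diff.
stream = client.chat.completions.create(
    model="personality_gpt4o",  # Azure deployment name taken from the diff
    messages=[{"role": "user", "content": "Say hello."}],  # placeholder message
    temperature=0.2,            # app.py uses st.session_state['temperature'] here
    max_tokens=1000,            # same limit as in the diff
    frequency_penalty=0.2,
    presence_penalty=0.2,
    stream=True,                # assumed; only the variable name `stream` is visible in the hunk
)

for chunk in stream:
    # Azure streams may begin with a chunk that has no choices, so guard before indexing.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)

Note that with Azure OpenAI the model argument names a deployment, not a base model, which is why the string "personality_gpt4o" appears both in the commented-out helper and in the new inline call.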