import streamlit as st
import os
import json
from openai import AzureOpenAI
from model import invoke, create_models, configure_settings, load_documents_and_create_index, \
    create_chat_prompt_template, execute_query

client = AzureOpenAI(
    azure_endpoint="https://personalityanalysisfinetuning.openai.azure.com/",
    api_key=os.environ.get("AZURE_OPENAI_KEY"),
    api_version="2024-02-01",
)
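# The client reads its API key from the AZURE_OPENAI_KEY environment variable;
# the "personality_gpt4o" deployment referenced below must exist on this endpoint.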

# Example team profile used when no JSON file is uploaded.
example_profile = {
    "Team": [
        {"name": "JAMES ARTHUR",
         "main_profile": {"VISION": {"score": 76}, "IDEATION": {"score": 73}, "OPPORTUNISM": {"score": 78}, "DRIVE": {"score": 80}, "RESILIENCE": {"score": 75}},
         "red_flag": {"HUBRIS": {"score": 80}, "MERCURIAL": {"score": 28}, "DOMINANT": {"score": 70}, "MACHIAVELLIAN": {"score": 50}}},
        {"name": "LOUSIE HART",
         "main_profile": {"VISION": {"score": 55}, "IDEATION": {"score": 60}, "OPPORTUNISM": {"score": 65}, "DRIVE": {"score": 70}, "RESILIENCE": {"score": 72}},
         "red_flag": {"HUBRIS": {"score": 55}, "MERCURIAL": {"score": 25}, "DOMINANT": {"score": 67}, "MACHIAVELLIAN": {"score": 30}}},
        {"name": "SIMONE LEVY",
         "main_profile": {"VISION": {"score": 30}, "IDEATION": {"score": 45}, "OPPORTUNISM": {"score": 20}, "DRIVE": {"score": 50}, "RESILIENCE": {"score": 32}},
         "red_flag": {"HUBRIS": {"score": 20}, "MERCURIAL": {"score": 15}, "DOMINANT": {"score": 18}, "MACHIAVELLIAN": {"score": 25}}},
        {"name": "Uri Lef",
         "main_profile": {"VISION": {"score": 70}, "IDEATION": {"score": 68}, "OPPORTUNISM": {"score": 73}, "DRIVE": {"score": 65}, "RESILIENCE": {"score": 30}},
         "red_flag": {"HUBRIS": {"score": 55}, "MERCURIAL": {"score": 72}, "DOMINANT": {"score": 68}, "MACHIAVELLIAN": {"score": 50}}},
    ]
}
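
# Uploaded profile JSON files are expected to follow the same structure as
# example_profile above: a top-level "Team" list whose entries each provide a
# "name", a "main_profile" score dictionary, and a "red_flag" score dictionary.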


def verify_credentials():
    """Check the entered credentials against the username_app / password_app environment variables."""
    if st.session_state['username'] == os.getenv("username_app") and st.session_state['password'] == os.getenv("password_app"):
        st.session_state['authenticated'] = True
    else:
        st.error("Invalid username or password")


def login_page():
    st.title("Welcome to Metaprofiling's Career Insight Analyzer Demo")
    st.write("This application provides in-depth analysis and insights into professional profiles. Please log in to continue.")

    # Description and instructions
    st.markdown("""
## How to Use This Application
- Enter your username and password in the sidebar.
- Click on 'Login' to access the application.
- Once logged in, you will be able to upload and analyze professional profiles.
""")

    st.sidebar.write("Login:")
    username = st.sidebar.text_input("Username")
    password = st.sidebar.text_input("Password", type="password")

    st.session_state['username'] = username
    st.session_state['password'] = password
    st.sidebar.button("Login", on_click=verify_credentials)


def generate_prompt_from_profile(profile, selected_members, version="TeamSummary"):
    """Build the chat messages for the selected team members from the templates in prompts.json."""
    with open('prompts.json') as f:
        prompt_sets = json.load(f)['Prompts']
    prompt_templates = prompt_sets[version]

    try:
        team_member_profiles = []
        for member in profile['Team']:
            if member['name'] in selected_members:
                profile_str = (f"{member['name']}: Main Profile - VISION: {member['main_profile']['VISION']['score']}, "
                               f"IDEATION: {member['main_profile']['IDEATION']['score']}, "
                               f"OPPORTUNISM: {member['main_profile']['OPPORTUNISM']['score']}, "
                               f"DRIVE: {member['main_profile']['DRIVE']['score']}, "
                               f"RESILIENCE: {member['main_profile']['RESILIENCE']['score']}. "
                               f"Red Flags - HUBRIS: {member['red_flag']['HUBRIS']['score']}, "
                               f"MERCURIAL: {member['red_flag']['MERCURIAL']['score']}, "
                               f"DOMINANT: {member['red_flag']['DOMINANT']['score']}, "
                               f"MACHIAVELLIAN: {member['red_flag']['MACHIAVELLIAN']['score']}.")
                team_member_profiles.append(profile_str)

        # Join the member summaries and substitute them into the prompt template.
        team_member_profiles_str = "\n".join(team_member_profiles)
        prompt = "\n".join(prompt_templates).replace("{{TEAM_MEMBERS}}", team_member_profiles_str)
    except KeyError as e:
        return [{"role": "system", "content": f"Error processing profile data: missing {str(e)}"}]

    message = [
        {"role": "system", "content": prompt_sets["System"][0]},
        {"role": "user", "content": prompt}
    ]
    return message
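
# prompts.json is assumed (not verified here) to look roughly like:
#   {
#     "Prompts": {
#       "System": ["<system message used for every request>"],
#       "TeamSummary": ["<template lines containing {{TEAM_MEMBERS}}>"],
#       "TDOS": ["..."]
#     }
#   }
# Each version is a list of lines joined with newlines, and the {{TEAM_MEMBERS}}
# placeholder is replaced with the formatted member profiles.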


def display_profile_info(profile):
    st.markdown("### Profile Information:")
    team_members = profile["Team"]
    for member in team_members:
        st.sidebar.markdown(f"#### {member['name']}")
        main_profile = member["main_profile"]
        red_flag = member["red_flag"]
        st.sidebar.markdown("### Main Profile:")
        st.sidebar.markdown("\n".join([f"- **{attribute}**: {details['score']}" for attribute, details in main_profile.items()]))
        st.sidebar.markdown("### Red Flags:")
        st.sidebar.markdown("\n".join([f"- **{attribute}**: {details['score']}" for attribute, details in red_flag.items()]))


def logout():
    st.session_state['authenticated'] = False
    st.session_state['profile'] = None
    st.session_state['show_chat'] = None
    st.session_state['analysis'] = None
    st.rerun()


def main_app():
    sidebar_components()

    if st.button('Logout'):
        logout()

    st.title("Metaprofiling's Career Insight Analyzer Demo")

    # Check whether a profile has been uploaded or selected.
    if st.session_state['profile']:
        profile = st.session_state['profile']
        display_profile_info(profile)

        st.markdown("""
### Generation Temperature
Adjust the 'Generation Temperature' to control the creativity of the AI responses.
- A *lower temperature* (closer to 0.0) generates more predictable, conservative responses.
- A *higher temperature* (closer to 1.0) generates more creative, diverse responses.
""")
        st.session_state['temperature'] = st.slider("", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
        st.session_state['version'] = st.selectbox("Select Prompt Version", ["TDOS"])

        # Let the user choose which team members to include in the analysis.
        team_member_names = [member['name'] for member in profile['Team']]
        selected_members = st.multiselect("Select Team Members to Include in the Analysis", team_member_names, default=team_member_names)

        if st.button(f'Analyze Profile ({st.session_state["version"]})'):
            prompt = generate_prompt_from_profile(profile, selected_members, version=st.session_state['version'])

            meta_eip_prefix = (
                "# META: Entrepreneurial and Intrapreneurial Potential\n"
                "META evaluates five traits essential for entrepreneurial success: Vision, Ideation, "
                "Opportunism, Drive, and Resilience. It also measures four ‘Red Flags’ or derailers "
                "common to the entrepreneurial personality."
            )

            with st.chat_message("assistant"):
                stream = client.chat.completions.create(
                    model="personality_gpt4o",
                    temperature=st.session_state['temperature'],
                    max_tokens=3000,        # Adjust based on desired response length
                    frequency_penalty=0.2,  # Discourage repetition
                    presence_penalty=0.2,   # Encourage new topics
                    messages=prompt,
                    stream=True,
                )
                if st.session_state['version'] == "METAEIP":
                    st.write(meta_eip_prefix)
                response = st.write_stream(stream)

            st.session_state['analysis'] = response
            st.session_state['show_chat'] = True
            st.rerun()

        # Display the stored analysis after the rerun.
        if st.session_state['analysis']:
            st.markdown(st.session_state['analysis'])
    else:
        st.write("Please upload a profile JSON file or use the example profile.")
def sidebar_components():
    with st.sidebar:
        if st.button('Reset'):
            st.session_state['profile'] = None
            st.session_state['show_chat'] = None
            st.session_state['analysis'] = None
            st.rerun()

        if not st.session_state['show_chat']:
            # Instructions for the expected JSON format
            st.markdown("### JSON File Requirements:")
            st.markdown("1. Must contain a 'Team' key at the top level.")
            st.markdown("2. 'Team' must map to a list of member objects with 'main_profile' and 'red_flag' scores.")

            st.markdown("### Upload a profile JSON file")
            uploaded_file = st.file_uploader("", type=['json'])
            if uploaded_file is not None:
                try:
                    profile_data = json.load(uploaded_file)
                    st.session_state['profile'] = profile_data
                except json.JSONDecodeError:
                    st.error("Invalid JSON file. Please upload a valid JSON file.")

            if st.button('Use Example Profile'):
                st.session_state['profile'] = example_profile
        else:
            st.sidebar.title("Chat with Our Career Advisor")
            st.sidebar.markdown("Hello, we hope you learned something about yourself in this report. This chat is here so you can ask any questions you have about your report! It’s also a great tool to get ideas about how you can use the information in your report for your personal development and achieving your current goals.")

            # Question templates where {} is replaced with the test taker's name.
            question_templates = [
                "What are the main risks associated with {}’s profile?",
                "What are the implications of {}’s profile for working with others?",
                "What conclusions might we draw from his profile about {}’s style of leadership?",
                "Looking specifically at {}'s Red Flags, are there any particular areas of concern?",
                "Based on this profile, is {} better suited as a COO or a CEO?",
                "If speed of execution is important, based on his profile, how likely is {} to be able to achieve this?",
                "How is {} likely to react to business uncertainty and disruption?",
                "Based on his profile, what should a coaching plan designed for {} focus on?"
            ]
            questions_list = [question.format("Test Taker") for question in question_templates]
            questions_markdown = "\n\n".join([f"Q{index + 1}: {question}" for index, question in enumerate(questions_list)])

            st.sidebar.markdown("### Suggested Questions")
            st.sidebar.markdown(questions_markdown)

            user_input = st.sidebar.text_input("Ask a question about the profile analysis:")

            # Build the retrieval index used to answer follow-up questions.
            llm, embed_model = create_models()
            configure_settings(llm, embed_model)
            index = load_documents_and_create_index()

            if st.sidebar.button('Submit'):
                if user_input:
                    chat_prompt_template = create_chat_prompt_template(st.session_state['analysis'])
                    response = execute_query(index, chat_prompt_template, user_input)
                    st.sidebar.markdown(response)
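

# Session-state defaults and page routing.
# Streamlit reruns this script from top to bottom on every interaction, so the
# checks below initialise each value only once per session.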
if 'show_chat' not in st.session_state:
    st.session_state['show_chat'] = None
if 'profile' not in st.session_state:
    st.session_state['profile'] = None
if 'analysis' not in st.session_state:
    st.session_state['analysis'] = None
if 'temperature' not in st.session_state:
    st.session_state['temperature'] = 0
if 'version' not in st.session_state:
    st.session_state['version'] = ""
if 'username' not in st.session_state:
    st.session_state['username'] = ''
if 'password' not in st.session_state:
    st.session_state['password'] = ''
if 'authenticated' not in st.session_state:
    st.session_state['authenticated'] = False

# Show the login page or the main app depending on authentication state.
if st.session_state['authenticated']:
    main_app()
else:
    login_page()