|
import streamlit as st |
|
from streamlit_option_menu import option_menu |
|
from app_utils import switch_page |
|
from PIL import Image |
|
from streamlit_lottie import st_lottie |
|
from typing import Literal |
|
from dataclasses import dataclass |
|
import json |
|
import base64 |
|
from langchain.memory import ConversationBufferMemory |
|
from langchain.chains import ConversationChain, RetrievalQA |
|
from langchain.prompts.prompt import PromptTemplate |
|
from langchain.text_splitter import NLTKTextSplitter |
|
from langchain.vectorstores import FAISS |
|
import nltk |
|
from prompts.prompts import templates |
|
from langchain_google_genai import ChatGoogleGenerativeAI |
|
import getpass |
|
import os |
|
from langchain_google_genai import GoogleGenerativeAIEmbeddings |
|
|
|
|
|
|
|
# --- Google API key setup ------------------------------------------------
# SECURITY FIX: a previous revision committed a literal Google API key on
# this line.  Any credential that has appeared in source control must be
# treated as compromised and revoked.  Read the key from the environment;
# only if it is absent, prompt for it on the console (standard
# langchain-google-genai setup pattern — this is what the `getpass` import
# exists for).
if "GOOGLE_API_KEY" not in os.environ:
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter your Google API key: ")

# App icon rendered at the top of the landing page; `icon.png` must live
# next to this script.
im = Image.open("icon.png")
|
|
|
def app(): |
|
home_title = "AI Interviewer" |
|
home_introduction = "Welcome to AI Interviewer, empowering your interview preparation with generative AI." |
|
|
|
st.markdown( |
|
"<style>#MainMenu{visibility:hidden;}</style>", |
|
unsafe_allow_html=True |
|
) |
|
st.image(im, width=100) |
|
st.markdown(f"""# {home_title}""", unsafe_allow_html=True) |
|
st.markdown("""\n""") |
|
|
|
st.markdown("Welcome to AI Interviewer! 👏 AI Interviewer is your personal interviewer powered by generative AI that conducts mock interviews." |
|
"You can upload your resume and enter job descriptions, and AI Interviewer will ask you customized questions. Additionally, you can configure your own Interviewer!") |
|
st.markdown("""\n""") |
|
role = st.text_input("Enter your role") |
|
if role: |
|
st.markdown(f"Your role is {role}") |
|
|
|
llm = ChatGoogleGenerativeAI( |
|
model="gemini-pro") |
|
llm = ChatGoogleGenerativeAI(model="gemini-pro") |
|
prompt = f"Provide the tech stack and responsibilities for the top 3 job recommendations based on the role: {role}. " + """ |
|
For each job recommendation, list the required tech stack and associated responsibilities without giving any title or role name. |
|
Ensure the information is detailed and precise. |
|
|
|
Give above tech stack and responsibilities in the following format : |
|
|
|
[ |
|
{ |
|
"tech_stack": ["tech1", "tech2", ...], |
|
"responsibilities": ["resp1", "resp2", ...] |
|
}, |
|
{ |
|
"tech_stack": ["tech1", "tech2", ...], |
|
"responsibilities": ["resp1", "resp2", ...] |
|
}, |
|
... |
|
] |
|
|
|
|
|
|
|
|
|
""" |
|
try: |
|
analysis = llm.invoke(prompt) |
|
st.write(analysis.content) |
|
job_recommendations = json.loads(analysis.content) |
|
except json.JSONDecodeError: |
|
st.error("Failed to parse the LLM response. Please ensure the LLM is returning a structured JSON-like response.") |
|
return |
|
except Exception as e: |
|
st.error(f"An error occurred: {e}") |
|
return |
|
|
|
if job_recommendations: |
|
|
|
options = [f"Tech Stack: {rec['tech_stack']}, Responsibilities: {rec['responsibilities']}" for rec in job_recommendations] |
|
selected_option = st.selectbox("Select your preferred tech stack and responsibilities", options) |
|
|
|
|
|
submit_button = st.button(label='Submit') |
|
|
|
if submit_button: |
|
selected_index = options.index(selected_option) |
|
selected_rec = job_recommendations[selected_index] |
|
tech_stack = ", ".join(selected_rec['tech_stack']) |
|
responsibilities = ", ".join(selected_rec['responsibilities']) |
|
|
|
jd = { |
|
"tech_stack": tech_stack, |
|
"responsibilities": responsibilities |
|
} |
|
|
|
|
|
if jd: |
|
|
|
with open("job_description.json", "w") as f: |
|
json.dump(jd, f) |
|
st.success("Job description saved successfully!") |
|
|
|
# Entry point: render the landing page when this file is executed directly.
if __name__ == "__main__":

    app()
|
|