import gradio as gr
import PyPDF2
from huggingface_hub import InferenceClient

# Client for the Hugging Face serverless Inference API, pointed at the Zephyr chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
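# Note (assumption): if this model requires authentication on the serverless Inference API,
# pass a token when constructing the client, e.g.
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))
# (this needs `import os`; on Spaces, HF_TOKEN is usually provided as a secret).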

def pdf_to_text(pdf_file):
    """Extract plain text from an uploaded PDF file."""
    if pdf_file is None:
        return ""
    text = ""
    try:
        # gr.File may pass a file path (str) or a file-like object with a .name
        # attribute, depending on the Gradio version; handle both.
        file_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
        pdf_reader = PyPDF2.PdfReader(file_path)
        for page in pdf_reader.pages:
            text += (page.extract_text() or "") + "\n"
    except Exception as e:
        text = f"Error reading PDF: {str(e)}"
    return text
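
# Quick local check of the extractor (hypothetical path; any PDF on disk will do):
#   print(pdf_to_text("sample.pdf"))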

def update_resume(pdf_file):
    return pdf_to_text(pdf_file)

def get_system_prompt(resume, job_description):
    return f"""
You are an AI job interviewer. You have the candidate's resume and the job description:

Resume:
{resume}

Job Description:
{job_description}

Your task is to conduct a job interview by asking relevant behavioral and technical questions based on the candidate's resume and the job requirements. Follow these guidelines:
1. Ask one question at a time.
2. Start with a question directly related to the candidate's experience or skills mentioned in their resume.
3. In subsequent questions, alternate between resume-based questions and job description-based questions.
4. Make your questions specific and varied. Do not repeat questions.
5. After each candidate response, briefly acknowledge their answer before asking the next question.
6. Do not provide feedback on their answers or make hiring decisions.
7. Phrase your questions in a professional and courteous manner.
Begin the interview with a question based on the candidate's resume.
"""

def generate_question(history, latest_answer, resume, job_description):
    # Rebuild the full conversation so the model keeps the interview context.
    messages = [
        {"role": "system", "content": get_system_prompt(resume, job_description)},
    ]
    for human, ai in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": ai})
    if not history:
        messages.append({"role": "user", "content": "Please start the interview with the first question based on my resume."})
    else:
        # Forward the candidate's latest answer so the model can acknowledge it
        # before asking the follow-up question.
        messages.append({
            "role": "user",
            "content": f"My answer: {latest_answer}\n"
                       "Please ask the next interview question, considering my resume and the job requirements.",
        })
    response = client.chat_completion(messages, max_tokens=150, temperature=0.7)
    return response.choices[0].message.content

def respond(message, history, resume, job_description):
    # Pass the candidate's latest message along so the model sees it this turn.
    bot_message = generate_question(history, message, resume, job_description)
    history.append((message, bot_message))
    return history, ""  # Return the updated history and clear the message box

with gr.Blocks() as demo:
    gr.Markdown("# AI Job Interview Simulator")
    gr.Markdown("Upload your resume and provide the job description to start a personalized interview.")

    with gr.Row():
        with gr.Column(scale=1):
            pdf_input = gr.File(label="Upload Resume (PDF only)", file_types=[".pdf"])
            resume_text = gr.Textbox(lines=10, label="Extracted Resume Text", interactive=True)
            job_description = gr.Textbox(lines=10, label="Job Description")
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(label="Interview Session")
            message = gr.Textbox(label="Your response")
            pdf_input.upload(fn=update_resume, inputs=[pdf_input], outputs=[resume_text])

            submit = gr.Button("Submit Response")
            submit.click(
                fn=respond,
                inputs=[message, chatbot, resume_text, job_description],
                outputs=[chatbot, message]
            )

    gr.Markdown("## Instructions:")
    gr.Markdown("1. Upload your resume as a PDF file.")
    gr.Markdown("2. Review and edit the extracted text if necessary.")
    gr.Markdown("3. Paste the job description.")
    gr.Markdown("4. Click 'Submit Response' to start the interview or answer questions.")
    gr.Markdown("5. Respond to each question in the 'Your response' box.")

demo.launch()
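
# A minimal requirements.txt sketch for this Space, inferred from the imports above
# (left unpinned; the exact versions to use are an assumption to be tested):
#
#   gradio
#   huggingface_hub
#   PyPDF2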