import os

import gradio as gr
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

# Mistral client (legacy v0.x SDK); expects the API key in the
# `mistral_api_key` environment variable.
client = MistralClient(api_key=os.getenv('mistral_api_key'))
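# Note: this Space uses the legacy mistralai v0.x client. A minimal sketch of
# the equivalent setup and call under mistralai >= 1.0, assuming the same
# `mistral_api_key` environment variable (not part of the original app):
#
#   from mistralai import Mistral
#   client = Mistral(api_key=os.getenv('mistral_api_key'))
#   answer = client.chat.complete(
#       model='open-mistral-7b',
#       messages=[{'role': 'user', 'content': 'CONTEXT: ... QUESTION: ...'}],
#   ).choices[0].message.content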
# Build the chat response: send the same context-grounded prompt to the base
# model and the fine-tuned model, then render both answers side by side.
def get_mistral_response(user_input: str, history: list[tuple[str, str]], context: str):
    # Priming messages kept for reference only; they are not sent below
    # because the system prompt has been baked into the fine-tuned model.
    messages = [
        ChatMessage(role='user', content='You are a helpful assistant answering questions based on the given context. Provide a highly concise and precise answer, along with a citation from the original context, in JSON format.'),
        ChatMessage(role='assistant', content='I understand.'),
    ]
    # Placeholder: the chat history is not yet folded into the context.
    context_with_history = context
    prompt = f'CONTEXT: {context_with_history} QUESTION: {user_input}'
    # Answer from the fine-tuned model.
    response_after = client.chat(
        model='ft:open-mistral-7b:1c04df3c:20240629:7010a3c8',
        messages=[ChatMessage(role='user', content=prompt)],
    ).choices[0].message.content
    # Answer from the base model, for comparison.
    response_before = client.chat(
        model='open-mistral-7b',
        messages=[ChatMessage(role='user', content=prompt)],
    ).choices[0].message.content
    response = f'**Before fine-tune**: <br> {response_before} <br><br> **After fine-tune**:<br><span style="color:green"> {response_after} </span><br>'
    return response
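# Quick local check outside the Gradio UI (hypothetical question and context;
# assumes `mistral_api_key` is set in the environment):
#
#   print(get_mistral_response('When was the company founded?', [],
#                              'Acme Corp was founded in 1999 in Austin.'))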
# Gradio chat UI; the extra textbox supplies the grounding context and sits
# in a collapsed accordion below the chat window.
demo = gr.ChatInterface(
    get_mistral_response,
    title='no-nonsense QA bot',
    description="After fine-tuning, the bot answers your question with a grounded citation. Paste your contextual information in the box below.",
    additional_inputs=[
        gr.Textbox(value="", label="Answer will be based on the context input here", lines=5),
    ],
    additional_inputs_accordion=gr.Accordion(label="", open=False),
)
if __name__ == "__main__":
    demo.launch()