# Hugging Face Spaces page header (scrape artifact) — Space status: Sleeping.
# Standard library
import asyncio

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient

# Model served through the Hugging Face Inference API.
model_id = 'Aksh1t/mistral-7b-oig-unsloth-merged'

# Synchronous inference client; the target model is selected per call.
client = InferenceClient()
async def generate_text(prompt):
    """Generate a completion for *prompt* using the configured model.

    Args:
        prompt: The user-supplied text to complete.

    Returns:
        The generated text as a string.
    """
    # Bug fix: ``InferenceClient`` has no ``chat`` async context manager —
    # the original ``async with client.chat(...)`` raised AttributeError.
    # ``text_generation`` is the real API; it is synchronous and returns the
    # generated string directly, so run it in a worker thread to avoid
    # blocking the event loop.
    return await asyncio.to_thread(
        client.text_generation, prompt, model=model_id
    )
# --- Gradio UI components ---
inputs = gr.Textbox(label="Enter a prompt", lines=2)
# Bug fix: ``readonly`` is not a ``gr.Textbox`` parameter (TypeError on
# current Gradio); ``interactive=False`` is the supported way to make the
# output box non-editable.
outputs = gr.Textbox(label="Generated Text", interactive=False)
def predict(prompt):
    """Synchronous Gradio entry point wrapping the async ``generate_text``.

    Args:
        prompt: Text entered by the user in the input box.

    Returns:
        The generated text to display in the output box.
    """
    # Gradio invokes this synchronously; drive the coroutine to completion.
    return asyncio.run(generate_text(prompt))
# Wire the prediction function into a simple text-in / text-out interface.
iface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title="Hugging Face Text Generation Model",
    description=f"Generate text based on a prompt using model '{model_id}'",
)
if __name__ == "__main__":
    # Launch the Gradio app only when executed as a script, not on import.
    iface.launch()