Aksh1t's picture
Update app.py
dac1f25 verified
raw
history blame contribute delete
No virus
931 Bytes
import gradio as gr
from huggingface_hub import InferenceClient
import asyncio
# Instruction-tuned Llama 3 8B checkpoint served through the HF Inference API.
model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'

# Shared client instance; `model` is the first positional parameter.
client = InferenceClient(model_id)
async def generate_text(prompt):
    """Generate a completion for *prompt* with the configured model.

    Bug fix: ``InferenceClient.text_generation`` is a *synchronous* call that
    returns the generated text as a plain ``str`` — the original code both
    ``await``-ed it (TypeError: str is not awaitable) and then indexed it
    with ``['generated_text']`` (TypeError on a str). The coroutine wrapper
    is kept so existing ``asyncio.run(generate_text(...))`` callers still work.

    Parameters
    ----------
    prompt : str
        The text prompt to complete.

    Returns
    -------
    str
        The generated continuation.
    """
    # NOTE(review): for true async I/O, AsyncInferenceClient would be the
    # right tool; this keeps the sync client and simply returns its result.
    return client.text_generation(prompt)
# Widgets for the Gradio UI: a two-line prompt box in, a text box out.
inputs = gr.Textbox(lines=2, label="Enter a prompt")
outputs = gr.Textbox(
    placeholder="Generated text will appear here",
    label="Generated Text",
)
def predict(prompt):
    """Synchronous Gradio handler: drive the async generator to completion."""
    return asyncio.run(generate_text(prompt))
# Assemble the single-page app from the handler and the widgets above.
iface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title="Hugging Face Text Generation Model",
    description=f"Generate text based on a prompt using model '{model_id}'",
)
if __name__ == "__main__":
iface.launch()