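# Gradio demo app: generate text from a user prompt with a Hugging Face hosted
# model through the Inference API client.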
import gradio as gr
from huggingface_hub import AsyncInferenceClient
import asyncio

# Define the model ID
model_id = 'Aksh1t/llama3-8b-oig-unsloth-merged'

# Initialize the asynchronous Hugging Face inference client
client = AsyncInferenceClient(model=model_id)

async def generate_text(prompt):
    # text_generation returns the generated text as a plain string by default
    return await client.text_generation(prompt)

# Create Gradio interface
inputs = gr.Textbox(label="Enter a prompt", lines=2)
outputs = gr.Textbox(label="Generated Text", placeholder="Generated text will appear here")

def predict(prompt):
    # Drive the async generation call to completion from Gradio's synchronous handler
    output_text = asyncio.run(generate_text(prompt))
    return output_text

iface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title="Hugging Face Text Generation Model",
    description=f"Generate text based on a prompt using model '{model_id}'",
)

if __name__ == "__main__":
    iface.launch()