Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,28 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
2 |
|
3 |
-
|
4 |
-
|
5 |
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import asyncio

import gradio as gr
from huggingface_hub import AsyncInferenceClient

# Hugging Face model queried through the (remote) Inference API.
model_id = 'Aksh1t/mistral-7b-oig-unsloth-merged'

# Async client for the Hugging Face Inference API.
# Bug fix: the original `from huggingface_hub.inference import Client` does not
# exist in huggingface_hub; the async client class is `AsyncInferenceClient`.
client = AsyncInferenceClient()


async def generate_text(prompt):
    """Return the model's completion for *prompt* as a plain string.

    Bug fix: the original used `async with client.chat(model_id=...) as chat`
    followed by `await chat(prompt)` — no huggingface_hub client exposes that
    API. `text_generation` is the supported coroutine; it returns the
    generated text directly, so there is no `['generated_text']` dict to
    unpack.
    """
    return await client.text_generation(prompt, model=model_id)


def predict(prompt):
    """Synchronous wrapper so Gradio can invoke the async generation helper."""
    return asyncio.run(generate_text(prompt))


# Gradio interface wiring.
inputs = gr.Textbox(label="Enter a prompt", lines=2)
# Bug fix: gr.Textbox has no `readonly` kwarg; `interactive=False` is the
# supported way to make an output box non-editable.
outputs = gr.Textbox(label="Generated Text", interactive=False)

iface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title="Hugging Face Text Generation Model",
    description=f"Generate text based on a prompt using model '{model_id}'",
)

if __name__ == "__main__":
    iface.launch()