File size: 1,335 Bytes
0047098
 
 
18f6aaf
b48da00
0047098
f668f28
418756d
baed359
418756d
 
 
 
8976123
 
 
 
7ff78a5
8976123
7ff78a5
8976123
 
77bfe0b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bc3374a
77bfe0b
 
 
418756d
 
77bfe0b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import gradio as gr
import random
import time
import os
from huggingface_hub import InferenceClient

# Inference endpoint configuration, supplied via environment variables.
endpoint_url = os.getenv('url')
hf_token = os.getenv('hf_token')

# Client used for streaming token-by-token generation from the endpoint.
client = InferenceClient(endpoint_url, token=hf_token)

# Sampling parameters shared by every generation request.
gen_kwargs = {
    "max_new_tokens": 1024,
    "top_k": 30,
    "top_p": 0.9,
    "temperature": 0.8,
    "repetition_penalty": 1.05,  # previously tried: 1.02
    "stop": ["\n"],
}

def generate_text(prompt):
    """Stream an Urdu chatbot reply for *prompt*.

    Wraps the user message in the instruction template expected by the
    model, streams tokens from the Hugging Face Inference endpoint, and
    yields the accumulated reply after each token so the Gradio UI can
    render it incrementally.

    Args:
        prompt: Raw user message to embed in the template.

    Yields:
        str: The response text generated so far (possibly empty if the
        stream produced no printable tokens).
    """
    # NOTE(review): the bare `""` line after "### Response:" is sent to the
    # model verbatim — confirm it is intentional priming, not a leftover.
    chat_prompt = f"""

### Instruction:
You are a chatbot. Chat in Urdu

### Input:
{prompt}

### Response:
""
"""
    stream = client.text_generation(chat_prompt, stream=True, details=True, **gen_kwargs)
    stop_sequences = gen_kwargs["stop"]  # hoisted: one dict lookup, not one per token
    generated_text = ""
    for r in stream:
        # Skip control tokens (BOS/EOS etc.) instead of appending them.
        if r.token.special:
            continue
        # Halt as soon as the model emits a configured stop sequence.
        if r.token.text in stop_sequences:
            break
        generated_text += r.token.text
        yield generated_text
    # Fix: guarantee at least one yield even when the stream stops before any
    # printable token, so the UI always receives a (possibly empty) result.
    yield generated_text

# Build and serve the Gradio UI for the Urdu chatbot.
prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")

iface = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs="text",
    title="Urdu Chatbot",
    description="Ask me anything in Urdu!",
    examples=[
        'میں کراچی جانا چاہتا ہوں، وہاں کے کچھ بہترین مقامات کون سے ہیں؟',
        'recipe for making Pakoras',
        'best things to do in London',
    ],
)

iface.launch()