# NOTE: removed non-code scrape artifacts (HuggingFace file-viewer residue:
# commit hashes and a line-number gutter) that preceded the script and made
# the file invalid Python.
import gradio as gr
import random
import time
import os
from huggingface_hub import InferenceClient

# Inference endpoint URL, supplied via environment (e.g. a HF Spaces secret).
endpoint_url = os.getenv('url')

# Hugging Face access token for the endpoint; None if the env var is unset.
hf_token = os.getenv('hf_token')

# Streaming Client
client = InferenceClient(endpoint_url, token=hf_token)

# Generation parameters passed through to `client.text_generation`.
gen_kwargs = dict(
    max_new_tokens=1024,
    top_k=50,
    top_p=0.9,
    temperature=0.5,
    repetition_penalty=1.2, #1.02
    # Server-side stop sequences; also checked client-side in generate_text.
    stop= ["\nUser:", "<|endoftext|>", "</s>"],
)

def generate_text(prompt):
    """Stream a response for *prompt* from the Hugging Face Inference endpoint.

    Wraps the user's message in an Alpaca-style instruction template and
    yields the cumulative generated text after each streamed token, so the
    Gradio UI can render output incrementally.

    Args:
        prompt: Free-text user message (Urdu or English).

    Yields:
        str: The generated text accumulated so far after each token.
    """
    # FIX: the original template had a stray `""` line after "### Response:",
    # which injected literal double quotes into the prompt the model
    # completes; the template now ends cleanly at the response marker.
    chat_prompt = f"""

### Instruction:
You are a chatbot. Chat in Urdu. Provide answers with your best knowledge. Don't say you don't know unless you really don't

### Input:
{prompt}

### Response:
"""
    stream = client.text_generation(chat_prompt, stream=True, details=True, **gen_kwargs)
    generated_text = ""
    stop_sequences = gen_kwargs["stop"]  # hoisted loop-invariant lookup
    for r in stream:
        # Special tokens (e.g. EOS markers) are control signals, not text.
        if r.token.special:
            continue
        # Defensive client-side stop: break if a stop sequence arrives as
        # one verbatim token (the server should normally stop first).
        if r.token.text in stop_sequences:
            break
        generated_text += r.token.text
        yield generated_text

# Gradio UI: a single textbox feeding generate_text; because generate_text is
# a generator, Gradio streams partial output into the text component.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    # Example prompts shown below the input (first one is Urdu).
    examples = ['میں کراچی جانا چاہتا ہوں، وہاں کے کچھ بہترین مقامات کون سے ہیں؟','amazing food locations in Singapore','best activities in London'],
    outputs="text",
    title="Urdu Chatbot- Powered by traversaal-urdu-llama-3.1-8b",
    description="Ask me anything in Urdu!",
)

# Start the Gradio server (blocking call).
iface.launch()