import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the Airavata tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("ai4bharat/Airavata")
model = AutoModelForCausalLM.from_pretrained("ai4bharat/Airavata")

def chatbot(input_text):
    # Tokenize the user input and append the end-of-sequence token
    input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")

    # Generate a response (max_length counts the prompt tokens as well)
    output_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)

    # Decode only the newly generated tokens, skipping the prompt
    response_text = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    return response_text

iface = gr.Interface(fn=chatbot, inputs="text", outputs="text")
iface.launch()
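
# Optional sketch: a direct call for quick local testing without the web UI.
# Note that launch() blocks, so to run this you would comment out the launch()
# call above; the Hindi prompt below is only an illustrative example.
# print(chatbot("भारत की राजधानी क्या है?"))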