import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load model and tokenizer (half precision on GPU when available)
MODEL_NAME = "vilm/vinallama-7b-chat"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

# Generate a response for a single user message
def chatbot(input_text):
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,  # avoids a warning when no pad token is set
    )
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response
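
# Note: vinallama-7b-chat is a chat-tuned model, so wrapping the raw input in the
# tokenizer's chat template (if the checkpoint ships one) typically yields better
# replies than feeding plain text. A minimal sketch, assuming a template is defined:
#
#   messages = [{"role": "user", "content": input_text}]
#   input_ids = tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_tensors="pt"
#   ).to(device)
#   outputs = model.generate(input_ids, max_new_tokens=200, do_sample=True, temperature=0.7)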

# Create Gradio interface
interface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="VinaLLaMA 7B Chatbot")

# Launch app
interface.launch()