# app.py — Gradio chat demo for the vilm/vinallama-7b-chat model (Hugging Face Space).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Load model and tokenizer once at startup (a 7B model: expect a long first load).
MODEL_NAME = "vilm/vinallama-7b-chat"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Pick the GPU when one is present; otherwise stay on CPU instead of
# crashing later — the original left the model on CPU while the chatbot
# moved its inputs to "cuda", guaranteeing a device mismatch.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
# Inference only — disable dropout etc.
model.eval()
# Define chatbot function
def chatbot(input_text: str) -> str:
    """Generate one chat response for *input_text* with the loaded model.

    Args:
        input_text: The user's message, passed verbatim to the tokenizer
            (no chat template is applied — TODO confirm the model tolerates
            raw prompts).

    Returns:
        The decoded generation, which includes the prompt text since the
        full output sequence is decoded.
    """
    # Follow the model's actual device instead of hard-coding "cuda":
    # the model may live on CPU, and .to("cuda") would either crash on a
    # CPU-only host or raise a device-mismatch error in generate().
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    # Pass the attention mask explicitly; omitting it makes generate()
    # warn and guess the mask from pad tokens. max_length=200 caps the
    # TOTAL sequence (prompt + reply), preserved from the original.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=200,
        do_sample=True,
        temperature=0.7,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Wire the chatbot into a minimal Gradio UI: one text box in, one out.
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Vinamallama 7B Chatbot",
)
# Launch app (blocks and serves the web UI)
interface.launch()