# Hugging Face Space: Gradio demo for the LingoIITGN/ganga-1b causal language model.
import spaces
import gradio as gr

from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the ganga-1b model and its tokenizer once at startup so every
# request reuses the same in-memory weights.
tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")
def greet(input_text: str) -> str:
    """Generate a sampled continuation of *input_text* with the ganga-1b model.

    Args:
        input_text: The prompt to continue.

    Returns:
        The decoded generation (prompt plus up to 100 new tokens),
        without tokenizer special tokens.
    """
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Nucleus + top-k sampling: non-deterministic by design for a demo UI.
    output = model.generate(
        input_ids,
        max_new_tokens=100,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
    # skip_special_tokens=True keeps markers like <s>/</s> out of the
    # user-visible text (the original leaked them into the UI).
    return tokenizer.batch_decode(output, skip_special_tokens=True)[0]
# Wire the generation function into a simple text-in / text-out UI
# and start the Gradio server.
demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"])
demo.launch()