from transformers import AutoTokenizer, pipeline
import gradio as gr

# Load the tokenizer and the text-generation pipeline
tokenizer = AutoTokenizer.from_pretrained('google/mt5-base')
generator3 = pipeline(model='Suchinthana/MT5-Sinhala-Wikigen-Experimental', tokenizer=tokenizer)

# Function Gradio calls: prepend the "writeWiki:" task prefix and return the generated Sinhala text
def generate_sinhala_text(prompt):
    generated_text = generator3("writeWiki:" + prompt, do_sample=True, max_length=50)[0]['generated_text']
    return generated_text

# Gradio shows its own "processing" indicator while the function runs, so a custom
# loading callback is not needed. Note that `live` expects a boolean (not a function)
# and `capture_session` has been removed from recent Gradio releases.
iface = gr.Interface(
    fn=generate_sinhala_text,
    inputs=gr.Textbox(label="Topic Here"),
    outputs=gr.Textbox(lines=3, placeholder="Result Comes Here...", label="Output"),
)

iface.launch()