import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftConfig, PeftModel

# Load the adapter config, the Flan-T5-Large base model, and the PEFT adapter on top of it
config = PeftConfig.from_pretrained("zeyadusf/FlanT5Summarization-samsum")
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
model = PeftModel.from_pretrained(base_model, "zeyadusf/FlanT5Summarization-samsum")
tokenizer = AutoTokenizer.from_pretrained("zeyadusf/FlanT5Summarization-samsum")

# Define the summarization function
def summarize(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    # PeftModel forwards generate() to the underlying model, so it can be called directly;
    # pass the tokenizer outputs as keyword arguments
    summary_ids = model.generate(
        **inputs,
        max_length=512,
        min_length=64,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

# Define the Gradio interface
iface = gr.Interface(
    fn=summarize,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
    outputs="text",
    title="Summarization by Flan-T5-Large with PEFT",
)

iface.launch()
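
# Optional sanity check (a hypothetical SAMSum-style dialogue, assuming the model and
# tokenizer above loaded successfully). Since iface.launch() blocks, run this snippet
# before the launch call, or in a separate session, to confirm generation works:
#
# example_dialogue = (
#     "Anna: Are we still meeting for lunch tomorrow?\n"
#     "Ben: Yes, 12:30 at the usual place.\n"
#     "Anna: Great, see you then!"
# )
# print(summarize(example_dialogue))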