import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Load the MarianCG NL->Code model and its tokenizer once at startup.
tokenizer = AutoTokenizer.from_pretrained("AhmedSSoliman/MarianCG_NL2Code")
model = AutoModelForSeq2SeqLM.from_pretrained("AhmedSSoliman/MarianCG_NL2Code")

# Use CUDA when available; fall back to CPU so the demo also runs on
# machines without a GPU (the original crashed with a device mismatch:
# inputs were sent to "cuda" while the model stayed on CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def generate_code(NL):
    """Translate a natural-language description into code.

    Args:
        NL: Natural-language prompt describing the desired code (str).

    Returns:
        The generated code as a single string (first beam of the batch).
    """
    inputs = tokenizer(
        [NL],
        padding="max_length",
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    # BUG FIX: original called `new_model.generate(...)` but no `new_model`
    # was ever defined -> NameError; the loaded model is named `model`.
    outputs = model.generate(input_ids, attention_mask=attention_mask)
    # skip_special_tokens drops <pad>/<s>/</s> etc. from the decoded text.
    output_code = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # batch_decode returns a list; return the single string so Gradio's
    # text output shows plain code instead of "['...']".
    return output_code[0]


iface = gr.Interface(fn=generate_code, inputs="text", outputs="text")
iface.launch()