import gradio as gr
import torch
from transformers import MarianMTModel, MarianTokenizer

# Load the fine-tuned MarianMT model and tokenizer
# Replace with the path to your model directory
model_dir = '/content/drive/MyDrive/fine_tuned_marian'  # Replace with the correct path
model = MarianMTModel.from_pretrained(model_dir)
tokenizer = MarianTokenizer.from_pretrained(model_dir)

# Move the model to the GPU if one is available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# Function to translate Arabic text to English
def translate_arabic_to_english(arabic_text):
    # Tokenize the input text
    inputs = tokenizer(arabic_text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    # Move inputs to the same device as the model
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # Generate translation
    with torch.no_grad():
        translated_ids = model.generate(**inputs)
    # Decode the translated text
    translated_text = tokenizer.decode(translated_ids[0], skip_special_tokens=True)
    return translated_text
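
# Optional sanity check before wiring up the UI: call the function directly.
# This is only a sketch; the sample sentence and its output are illustrative
# and depend on the fine-tuned model's actual behavior.
# print(translate_arabic_to_english("مرحبا بالعالم"))  # e.g. "Hello world"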

# Create the Gradio interface
iface = gr.Interface(
    fn=translate_arabic_to_english,
    inputs=gr.Textbox(lines=5, placeholder="Enter Arabic text here..."),
    outputs="text",
    title="Arabic to English Machine Translation",
    description="Translate Arabic text to English.",
)

# Launch the interface
iface.launch()