import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
model_path = "inceptionai/jais-13b"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True)

# Generate a completion for the given prompt
def get_response(text):
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    inputs = input_ids.to("cuda" if torch.cuda.is_available() else "cpu")
    input_len = inputs.shape[-1]
    generate_ids = model.generate(
        inputs,
        top_p=0.9,
        temperature=0.3,
        max_new_tokens=200,  # cap newly generated tokens; the original max_length=200 - input_len shrinks (and can go negative) as the prompt grows
        min_length=input_len + 4,
        repetition_penalty=1.2,
        do_sample=True,
    )
    response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
    return response
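# Quick local sanity check before wiring up the UI (the prompt below is an
# illustrative example, not part of the original Space):
# print(get_response("ما هي عاصمة الإمارات؟"))  # "What is the capital of the UAE?"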
# Gradio interface
iface = gr.Interface(
    fn=get_response,
    inputs="text",
    outputs="text",
    title="Jais-13b Demo",
    description="Try the Jais-13b Arabic language model.",
)

iface.launch()
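For the Space to build, a requirements.txt next to app.py would need at least the packages below. This is a minimal sketch, assuming a standard Hugging Face Spaces setup; accelerate is needed because the model is loaded with device_map="auto":

# requirements.txt (minimal sketch; versions left unpinned)
torch
transformers
accelerate
gradio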