4673640bb1 commited on
Commit
9aa977a
·
verified ·
1 Parent(s): a0a6551

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the Jais-13b model and its tokenizer from the HuggingFace Hub.
# NOTE: this downloads ~13B parameters on first run; device_map="auto"
# lets accelerate place weights on GPU(s)/CPU automatically, and
# trust_remote_code=True is required because Jais ships custom model code.
model_path = "inceptionai/jais-13b"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True)
# Generate a completion from the model for a given prompt.
def get_response(text):
    """Generate a Jais-13b completion for *text*.

    Parameters
    ----------
    text : str
        The user prompt, in Arabic or English.

    Returns
    -------
    str
        The decoded model output. The full generated sequence is decoded,
        so the prompt itself is included at the start of the response.
    """
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    # Move the inputs to GPU when available; the model itself was placed
    # by device_map="auto" at load time.
    inputs = input_ids.to("cuda" if torch.cuda.is_available() else "cpu")
    input_len = inputs.shape[-1]
    generate_ids = model.generate(
        inputs,
        top_p=0.9,
        temperature=0.3,
        # BUG FIX: the original passed max_length=200 - input_len.
        # max_length bounds the TOTAL sequence (prompt + generation), so for
        # prompts longer than ~100 tokens it fell below both the prompt
        # length and min_length, breaking generation. Bound the number of
        # NEW tokens instead.
        max_new_tokens=200,
        min_length=input_len + 4,
        repetition_penalty=1.2,
        do_sample=True,
    )
    response = tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )[0]
    return response
# Gradio UI: a single text-in / text-out demo wired to get_response.
iface = gr.Interface(
    fn=get_response,
    inputs="text",
    outputs="text",
    title="Jais-13b Demo",
    description="تجربة نموذج Jais-13b للغة العربية."
)

# Start the web server (blocks until the app is stopped).
iface.launch()