Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -1,11 +1,15 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Load the pretrained model and tokenizer
|
5 |
MODEL_NAME = "atlasia/Al-Atlas-LLM"
|
6 |
|
7 |
-
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
8 |
-
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")
|
9 |
|
10 |
def generate_text(prompt, max_length=256, temperature=0.7, top_p=0.9, top_k=150, repetition_penalty=1.5):
|
11 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
|
|
1 |
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

# Hugging Face access token, read from the Space's secrets.
# Fail fast with an actionable message instead of a bare KeyError
# so a missing secret is obvious in the Space logs.
try:
    token = os.environ["TOKEN"]
except KeyError as err:
    raise RuntimeError(
        "Environment variable 'TOKEN' is not set; add it as a Space secret "
        "so the model can be downloaded from the Hugging Face Hub."
    ) from err

# Load the pretrained model and tokenizer.
# NOTE(review): device_map="auto" presumably places the model on GPU when
# available (requires `accelerate`) — confirm in the Space hardware config.
MODEL_NAME = "atlasia/Al-Atlas-LLM"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=token)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto", token=token)
|
13 |
|
14 |
def generate_text(prompt, max_length=256, temperature=0.7, top_p=0.9, top_k=150, repetition_penalty=1.5):
|
15 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|