Spaces: Running on Zero
Zenithwang committed
Commit • ec07b24 • Parent(s): 89c5f05
Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 from threading import Thread
 
-model_path = '
+model_path = 'infly/OpenCoder-8B-Instruct'
 
 # Loading the tokenizer and model from Hugging Face's model hub.
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
@@ -17,7 +17,7 @@ model = model.to(device)
 # Defining a custom stopping criteria class for the model's text generation.
 class StopOnTokens(StoppingCriteria):
     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
-        stop_ids = [
+        stop_ids = [96539]  # IDs of tokens where the generation should stop.
         for stop_id in stop_ids:
             if input_ids[0][-1] == stop_id:  # Checking if the last generated token is a stop token.
                 return True
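For reference, the hardcoded stop ID can be inspected, or derived from a token string instead of being hardcoded, using the same tokenizer the app loads. A minimal sketch; the '<|im_end|>' name below is only an assumed end-of-turn marker, not something confirmed by this commit:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('infly/OpenCoder-8B-Instruct', trust_remote_code=True)
print(tok.decode([96539]))                      # show which token ID 96539 maps to
print(tok.convert_tokens_to_ids('<|im_end|>'))  # assumed end-of-turn token; replace with the model's actual stop token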
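And a minimal sketch of how a criterion like StopOnTokens is typically wired into streaming generation together with the TextIteratorStreamer and Thread imports shown in the diff; the example prompt, dtype, and max_new_tokens value are illustrative assumptions, not taken from this commit:

import torch
from threading import Thread
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer)

model_path = 'infly/OpenCoder-8B-Instruct'
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,
                                             torch_dtype=torch.bfloat16)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)

class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [96539]  # IDs of tokens where the generation should stop.
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:  # stop once the last generated token matches
                return True
        return False

# Assumed example prompt; the Space builds this from the chat history instead.
messages = [{"role": "user", "content": "Write a quicksort function in Python."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True,
                                          return_tensors="pt").to(device)

# Run generation in a background thread and print tokens as the streamer yields them.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(input_ids=input_ids,
                       streamer=streamer,
                       max_new_tokens=512,
                       stopping_criteria=StoppingCriteriaList([StopOnTokens()]))
Thread(target=model.generate, kwargs=generate_kwargs).start()
for new_text in streamer:
    print(new_text, end="", flush=True)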