Update run_inference.py
run_inference.py  (CHANGED, +5 -4)
@@ -1,11 +1,13 @@
 from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer
-
+
+from huggingface_hub import login
+login("YOUR TOKEN HERE")
 
 
 PROMPT = "[INST]YOUR PROMPT HERE[/INST]"
 MAX_LENGTH = 32768  # Do not change
-DEVICE = "
+DEVICE = "cpu"
 
 
 model_id = "agarkovv/CryptoTrader-LM"
@@ -27,5 +29,4 @@ res = model.generate(
     max_new_tokens=MAX_LENGTH,
 )
 output = tokenizer.decode(res[0], skip_special_tokens=True)
-
-print(answer)
+print(output)
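For context, a minimal sketch of what run_inference.py plausibly looks like after this commit. Only the lines visible in the diff above are taken from the file; the model/tokenizer loading and prompt-encoding steps in between are assumptions based on typical peft/transformers usage, not the repository's exact code.

# Minimal sketch of the updated script. Lines not shown in the diff
# (model/tokenizer loading, prompt encoding) are assumptions based on
# standard peft/transformers usage, not the repository's exact code.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

from huggingface_hub import login
login("YOUR TOKEN HERE")  # authenticate in case the checkpoint or its base model is gated


PROMPT = "[INST]YOUR PROMPT HERE[/INST]"
MAX_LENGTH = 32768  # Do not change
DEVICE = "cpu"


model_id = "agarkovv/CryptoTrader-LM"

# Assumed loading step: fetch the LoRA adapter together with its base model.
model = AutoPeftModelForCausalLM.from_pretrained(model_id).to(DEVICE)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model.eval()

# Assumed encoding step: tokenize the [INST]-formatted prompt for generation.
inputs = tokenizer(PROMPT, return_tensors="pt", truncation=True, max_length=MAX_LENGTH).to(DEVICE)

res = model.generate(
    **inputs,
    max_new_tokens=MAX_LENGTH,
)
output = tokenizer.decode(res[0], skip_special_tokens=True)
print(output)

The substantive fixes in this commit are printing output (the variable actually produced by tokenizer.decode) instead of the undefined answer, defaulting DEVICE to "cpu", and logging in to the Hugging Face Hub before downloading the model.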