Commit 948c5c1 · yusufcakmak committed · Parent(s): cc03808

Update README.md

README.md CHANGED
@@ -35,11 +35,13 @@ alt="drawing" width="600"/>
 
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import torch
 
 model_id = "Trendyol/Trendyol-LLM-7b-chat-v1.8"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id,
-                                             device_map='auto',
+                                             device_map='auto',
+                                             torch_dtype=torch.bfloat16,
                                              load_in_8bit=True)
 
 sampling_params = dict(do_sample=True, temperature=0.3, top_k=50, top_p=0.9)

@@ -48,6 +50,7 @@ pipe = pipeline("text-generation",
                 model=model,
                 tokenizer=tokenizer,
                 device_map="auto",
+                torch_dtype=torch.bfloat16,
                 max_new_tokens=1024,
                 return_full_text=True,
                 repetition_penalty=1.1

@@ -81,6 +84,7 @@ pipe = pipeline("conversational",
                 model=model,
                 tokenizer=tokenizer,
                 device_map="auto",
+                torch_dtype=torch.bfloat16,
                 max_new_tokens=1024,
                 repetition_penalty=1.1
 )
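After this commit, the quickstart snippet imports torch and passes torch_dtype=torch.bfloat16 to AutoModelForCausalLM.from_pretrained and to both pipelines. Below is a minimal sketch of how the updated text-generation example reads end to end; the apply_chat_template call and the sample prompt at the bottom are illustrative assumptions, not part of the diff above.

```python
# Sketch of the quickstart as it reads after this commit. Assumes
# bitsandbytes is installed for load_in_8bit; the prompt construction at
# the end is an illustrative assumption, not taken from the README diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "Trendyol/Trendyol-LLM-7b-chat-v1.8"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,   # added by this commit
    load_in_8bit=True,
)

sampling_params = dict(do_sample=True, temperature=0.3, top_k=50, top_p=0.9)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device_map="auto",
    torch_dtype=torch.bfloat16,   # added by this commit
    max_new_tokens=1024,
    return_full_text=True,
    repetition_penalty=1.1,
)

# Build a prompt with the tokenizer's chat template (assumed to be defined
# for this model) and generate a completion.
messages = [{"role": "user", "content": "Türkiye'nin başkenti neresidir?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(pipe(prompt, **sampling_params)[0]["generated_text"])
```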