shun1taniguchi committed
Commit 374e20f
Parent(s): bbac978
Update README.md

README.md CHANGED
@@ -27,8 +27,8 @@ language:
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
-model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus-unleashed")
+model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat-plus-unleashed", torch_dtype=torch.bfloat16, device_map="auto")
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
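For context, a minimal sketch of how the updated Transformers snippet would run end to end. The `pipeline` import, the chat-template prompt construction, and the final `pipe(...)` call are assumptions not shown in this hunk (the second hunk's context line suggests a `pipe(prompt, max_new_tokens=100, ...)` call elsewhere in the README):

```python
# Sketch only: `pipeline` import and prompt construction are assumed, not part of the diff.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus-unleashed")
model = AutoModelForCausalLM.from_pretrained(
    "lightblue/karasu-7B-chat-plus-unleashed",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Build a chat prompt with the model's chat template (assumed step).
messages = [
    {"role": "system", "content": "あなたはAIアシスタントです。"},   # "You are an AI assistant."
    {"role": "user", "content": "イギリスの首相は誰ですか?"},        # "Who is the Prime Minister of the UK?"
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Greedy generation, mirroring the pipe(...) call referenced in the second hunk header.
print(pipe(prompt, max_new_tokens=100, do_sample=False, return_full_text=False))
```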
@@ -46,7 +46,7 @@ pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_t
 from vllm import LLM, SamplingParams
 
 sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
-llm = LLM(model="lightblue/karasu-7B-chat-plus")
+llm = LLM(model="lightblue/karasu-7B-chat-plus-unleashed")
 
 messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
 messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})
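Likewise, a minimal sketch of the updated vLLM usage. Only the model name change is part of the diff; the chat-template formatting and the `llm.generate(...)` call below are assumptions about how the snippet continues:

```python
# Sketch only: the prompt formatting and generate call are assumed, not shown in the hunk.
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
llm = LLM(model="lightblue/karasu-7B-chat-plus-unleashed")

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]   # "You are an AI assistant."
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})    # "Who is the Prime Minister of the UK?"

# Assumed step: render the conversation with the tokenizer's chat template.
tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus-unleashed")
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```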