Update README.md
README.md
CHANGED
@@ -7,3 +7,34 @@ The size is smaller and the execution speed is faster, but the inference performance
Benchmark results are in progress.
I will upload it at a later date.

Sample code:

```
pip install auto-gptq
```
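The sample below also relies on the transformers package and a CUDA-enabled torch build, since the model is loaded on "cuda:0". Whether the auto-gptq install already pulls these in can vary by version; if they are missing, they can be added the same way (this note is an addition, not part of the original README):

```
pip install transformers
```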
```
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

# Hugging Face repository and quantized weight file name (without extension)
quantized_model_dir = "dahara1/weblab-10b-instruction-sft-GPTQ"
model_basename = "gptq_model-4bit-128g"

tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)

# Load the 4-bit GPTQ weights from safetensors onto the first GPU
model = AutoGPTQForCausalLM.from_quantized(
    quantized_model_dir,
    model_basename=model_basename,
    use_safetensors=True,
    device="cuda:0")

# "Please tell me five Studio Ghibli works"
prompt = "スタジオジブリの作品を5つ教えてください"
prompt_template = f"### Instruction: {prompt}\n### Response:"

tokens = tokenizer(prompt_template, return_tensors="pt").to("cuda:0").input_ids
output = model.generate(input_ids=tokens, max_new_tokens=100, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0]))
```
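Because generate returns the input ids followed by the continuation, the decoded output above echoes the prompt template before the response. A minimal follow-on sketch, reusing the tokens, output, and tokenizer variables from the sample (this snippet is an addition, not part of the original README), that prints only the newly generated text:

```
# Slice off the prompt tokens and decode only the generated continuation.
new_token_ids = output[0][tokens.shape[1]:]
print(tokenizer.decode(new_token_ids, skip_special_tokens=True))
```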
See Also:
https://github.com/PanQiWei/AutoGPTQ/blob/main/docs/tutorial/01-Quick-Start.md