Create test-compile-tok-sec.py (#4)
- Create test-compile-tok-sec.py (9b08ed9e741a1f54a2244b7df9b9575724aae8f9)
- test-compile-tok-sec.py +59 -0
test-compile-tok-sec.py
ADDED
@@ -0,0 +1,59 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+from tqdm import tqdm
+import os
+os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)
+
+torch.set_float32_matmul_precision('high')
+
+# Other configuration options
+DEVICE = "cuda:1"
+NUM_RUNS = 10
+MAX_NEW_TOKENS = 256
+TEXT_INPUT = "def sieve_of_eratosthenes():"
+
+# Load the model and prepare generate args
+repo_id = "gg-hf/gemma-2-2b-it"
+model = AutoModelForCausalLM.from_pretrained(repo_id).to(DEVICE)
+
+model.generation_config.cache_implementation = "static"
+
+model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
+
+assistant_model = None
+tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=True)
+model_inputs = tokenizer(TEXT_INPUT, return_tensors="pt").to(DEVICE)
+
+generate_kwargs = {
+    "max_new_tokens": MAX_NEW_TOKENS,
+    "do_sample": True,
+    "temperature": 0.2,
+    "eos_token_id": -1  # forces the generation of `max_new_tokens`
+}
+
+# Warmup
+print("Warming up...")
+for _ in range(2):
+    gen_out = model.generate(**model_inputs, **generate_kwargs)
+print("Done!")
+
+
+# Measure OR Stream
+def measure_generate(model, model_inputs, generate_kwargs):
+    start_event = torch.cuda.Event(enable_timing=True)
+    end_event = torch.cuda.Event(enable_timing=True)
+    torch.cuda.reset_peak_memory_stats(DEVICE)
+    torch.cuda.empty_cache()
+    torch.cuda.synchronize()
+
+    start_event.record()
+    for _ in tqdm(range(NUM_RUNS)):
+        gen_out = model.generate(**model_inputs, **generate_kwargs)
+    end_event.record()
+
+    torch.cuda.synchronize()
+    max_memory = torch.cuda.max_memory_allocated(DEVICE)
+    print("Max memory (MB): ", max_memory * 1e-6)
+    print("Throughput (tokens/sec): ", (NUM_RUNS * MAX_NEW_TOKENS) / (start_event.elapsed_time(end_event) * 1.0e-3))
+
+measure_generate(model, model_inputs, generate_kwargs)
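For reference, the timing done in measure_generate is plain CUDA-event bookkeeping and can be sanity-checked in isolation. Below is a minimal, self-contained sketch of that pattern (not part of the commit): the matmul loop is a stand-in for model.generate, and the device, size, and iteration values are arbitrary assumptions rather than values taken from the script above.

import torch

device = "cuda:0"  # assumed; any available CUDA device works
n_iters = 10

start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)

x = torch.randn(2048, 2048, device=device)
torch.cuda.synchronize()  # make sure setup work is done before timing starts

start_event.record()
for _ in range(n_iters):
    y = x @ x  # stand-in workload for model.generate(...)
end_event.record()

torch.cuda.synchronize()  # wait for both recorded events to complete
elapsed_s = start_event.elapsed_time(end_event) * 1e-3  # elapsed_time() returns milliseconds
print("Iterations/sec:", n_iters / elapsed_s)

Dividing the number of timed units (here iterations, in the script NUM_RUNS * MAX_NEW_TOKENS generated tokens) by the elapsed seconds gives the throughput figure the script reports.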