Upload folder using huggingface_hub

#2
Files changed (5)
  1. README.md +3 -2
  2. config.json +1 -1
  3. plots.png +0 -0
  4. results.json +24 -20
  5. smash_config.json +5 -5
README.md CHANGED
@@ -1,5 +1,6 @@
 ---
 thumbnail: "https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"
+base_model: MaziyarPanahi/Llama-3-8B-Instruct-64k
 metrics:
 - memory_disk
 - memory_inference
@@ -59,9 +60,9 @@ You can run the smashed model with these steps:
 2. Load & run the model.
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
-
+from awq import AutoAWQForCausalLM
 
-model = AutoModelForCausalLM.from_pretrained("PrunaAI/MaziyarPanahi-Llama-3-8B-Instruct-64k-AWQ-4bit-smashed", trust_remote_code=True, device_map='auto')
+model = AutoAWQForCausalLM.from_quantized("PrunaAI/MaziyarPanahi-Llama-3-8B-Instruct-64k-AWQ-4bit-smashed", trust_remote_code=True, device_map='auto')
 tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/Llama-3-8B-Instruct-64k")
 
 input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
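For context, the edited README snippet stops at tokenization. A minimal end-to-end version of it would look like the sketch below; it assumes `autoawq` and `transformers` are installed, and the `max_new_tokens` value and final decode step are illustrative additions, not taken from the card.

```python
# End-to-end version of the README snippet this hunk edits.
# Requires: pip install autoawq transformers
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

# Load the 4-bit AWQ ("smashed") weights and the original tokenizer.
model = AutoAWQForCausalLM.from_quantized(
    "PrunaAI/MaziyarPanahi-Llama-3-8B-Instruct-64k-AWQ-4bit-smashed",
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/Llama-3-8B-Instruct-64k")

input_ids = tokenizer(
    "What is the color of prunes?,", return_tensors="pt"
).to(model.device)["input_ids"]

# Generation length is illustrative, not from the model card.
outputs = model.generate(input_ids, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```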
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/tmp/tmptiupqo16",
+  "_name_or_path": "/tmp/tmpe413c_m0",
   "architectures": [
     "LlamaForCausalLM"
   ],
plots.png CHANGED (binary file; diff not shown)
results.json CHANGED
@@ -1,26 +1,30 @@
 {
     "base_current_gpu_type": "NVIDIA A100-PCIE-40GB",
     "base_current_gpu_total_memory": 40339.3125,
-    "base_token_generation_latency_sync": 101.87378540039063,
-    "base_token_generation_latency_async": 101.67951695621014,
-    "base_token_generation_throughput_sync": 0.009816067951826256,
-    "base_token_generation_throughput_async": 0.009834822488689296,
-    "base_token_generation_CO2_emissions": 2.6819935676011093e-05,
-    "base_token_generation_energy_consumption": 0.008565413871084828,
-    "base_inference_latency_sync": 90.84702758789062,
-    "base_inference_latency_async": 88.71569633483887,
-    "base_inference_throughput_sync": 0.011007514792187808,
-    "base_inference_throughput_async": 0.01127196247466411,
+    "base_token_generation_latency_sync": 54.22916030883789,
+    "base_token_generation_latency_async": 54.13356442004442,
+    "base_token_generation_throughput_sync": 0.018440263398971105,
+    "base_token_generation_throughput_async": 0.018472827546336907,
+    "base_token_generation_CO2_emissions": null,
+    "base_token_generation_energy_consumption": null,
+    "base_inference_latency_sync": 52.29578285217285,
+    "base_inference_latency_async": 51.306843757629395,
+    "base_inference_throughput_sync": 0.019122000770630986,
+    "base_inference_throughput_async": 0.019490577216636887,
+    "base_inference_CO2_emissions": null,
+    "base_inference_energy_consumption": null,
     "smashed_current_gpu_type": "NVIDIA A100-PCIE-40GB",
     "smashed_current_gpu_total_memory": 40339.3125,
-    "smashed_token_generation_latency_sync": 63.452283477783205,
-    "smashed_token_generation_latency_async": 64.33862000703812,
-    "smashed_token_generation_throughput_sync": 0.015759874116274063,
-    "smashed_token_generation_throughput_async": 0.01554276420430852,
-    "smashed_token_generation_CO2_emissions": 1.9680748845555618e-05,
-    "smashed_token_generation_energy_consumption": 0.0053972414185424426,
-    "smashed_inference_latency_sync": 76.47395782470703,
-    "smashed_inference_latency_async": 73.17061424255371,
-    "smashed_inference_throughput_sync": 0.013076346882584418,
-    "smashed_inference_throughput_async": 0.013666688606509356
+    "smashed_token_generation_latency_sync": 41.130767822265625,
+    "smashed_token_generation_latency_async": 40.58046396821737,
+    "smashed_token_generation_throughput_sync": 0.024312699542133578,
+    "smashed_token_generation_throughput_async": 0.024642399376783867,
+    "smashed_token_generation_CO2_emissions": null,
+    "smashed_token_generation_energy_consumption": null,
+    "smashed_inference_latency_sync": 51.22703399658203,
+    "smashed_inference_latency_async": 39.65771198272705,
+    "smashed_inference_throughput_sync": 0.01952094279100215,
+    "smashed_inference_throughput_async": 0.025215776453153697,
+    "smashed_inference_CO2_emissions": null,
+    "smashed_inference_energy_consumption": null
 }
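The new run replaces all benchmark numbers and drops CO2/energy tracking (those fields are now `null`). To turn the raw figures into a base-vs-smashed speedup, a quick sketch, assuming you have downloaded the repo's `results.json` locally; the field names come from the diff above, where latencies are per-token milliseconds and throughputs their reciprocals:

```python
# Compute base-vs-smashed speedups from the repo's results.json.
import json

with open("results.json") as f:
    r = json.load(f)

for phase in ("token_generation", "inference"):
    for mode in ("sync", "async"):
        base = r[f"base_{phase}_latency_{mode}"]        # ms per token
        smashed = r[f"smashed_{phase}_latency_{mode}"]  # ms per token
        print(f"{phase} ({mode}): {base / smashed:.2f}x faster")
```

On the new numbers this reports, e.g., a ~1.32x sync token-generation speedup (54.23 ms vs 41.13 ms).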
smash_config.json CHANGED
@@ -2,19 +2,19 @@
     "api_key": null,
     "verify_url": "http://johnrachwan.pythonanywhere.com",
     "smash_config": {
-        "pruners": "[]",
+        "pruners": "None",
         "pruning_ratio": 0.0,
-        "factorizers": "[]",
+        "factorizers": "None",
         "quantizers": "['awq']",
         "weight_quantization_bits": 4,
-        "output_deviation": 0.01,
-        "compilers": "[]",
+        "output_deviation": 0.005,
+        "compilers": "None",
         "static_batch": true,
         "static_shape": true,
         "controlnet": "None",
        "unet_dim": 4,
         "device": "cuda",
-        "cache_dir": "/ceph/hdd/staff/charpent/.cache/models78wd5963",
+        "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsvigq773l",
         "batch_size": 1,
         "model_name": "MaziyarPanahi/Llama-3-8B-Instruct-64k",
         "task": "text_text_generation",