thamesdrawers committed on
Commit
4746012
·
verified ·
1 Parent(s): f7167fc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. config.toml +49 -0
  2. epoch10/adapter_config.json +43 -0
  3. epoch10/config.toml +49 -0
  4. epoch5/adapter_config.json +43 -0
  5. epoch5/config.toml +49 -0
  6. events.out.tfevents.1736781782.a3f2ec4cbd97.3820.0 +3 -0
  7. global_step208/layer_00-model_states.pt +3 -0
  8. global_step208/layer_01-model_states.pt +3 -0
  9. global_step208/layer_02-model_states.pt +3 -0
  10. global_step208/layer_03-model_states.pt +3 -0
  11. global_step208/layer_04-model_states.pt +3 -0
  12. global_step208/layer_05-model_states.pt +3 -0
  13. global_step208/layer_06-model_states.pt +3 -0
  14. global_step208/layer_07-model_states.pt +3 -0
  15. global_step208/layer_08-model_states.pt +3 -0
  16. global_step208/layer_09-model_states.pt +3 -0
  17. global_step208/layer_10-model_states.pt +3 -0
  18. global_step208/layer_11-model_states.pt +3 -0
  19. global_step208/layer_12-model_states.pt +3 -0
  20. global_step208/layer_13-model_states.pt +3 -0
  21. global_step208/layer_14-model_states.pt +3 -0
  22. global_step208/layer_15-model_states.pt +3 -0
  23. global_step208/layer_16-model_states.pt +3 -0
  24. global_step208/layer_17-model_states.pt +3 -0
  25. global_step208/layer_18-model_states.pt +3 -0
  26. global_step208/layer_19-model_states.pt +3 -0
  27. global_step208/layer_20-model_states.pt +3 -0
  28. global_step208/layer_22-model_states.pt +3 -0
  29. global_step208/layer_23-model_states.pt +3 -0
  30. global_step208/layer_24-model_states.pt +3 -0
  31. global_step208/layer_25-model_states.pt +3 -0
  32. global_step208/layer_26-model_states.pt +3 -0
  33. global_step208/layer_27-model_states.pt +3 -0
  34. global_step208/layer_28-model_states.pt +3 -0
  35. global_step208/layer_29-model_states.pt +3 -0
  36. global_step208/layer_30-model_states.pt +3 -0
  37. global_step208/layer_31-model_states.pt +3 -0
  38. global_step208/layer_32-model_states.pt +3 -0
  39. global_step208/layer_33-model_states.pt +3 -0
  40. global_step208/layer_34-model_states.pt +3 -0
  41. global_step208/layer_35-model_states.pt +3 -0
  42. global_step208/layer_36-model_states.pt +3 -0
  43. global_step208/layer_37-model_states.pt +3 -0
  44. global_step208/layer_38-model_states.pt +3 -0
  45. global_step208/layer_39-model_states.pt +3 -0
  46. global_step208/layer_40-model_states.pt +3 -0
  47. global_step208/layer_41-model_states.pt +3 -0
  48. global_step208/layer_42-model_states.pt +3 -0
  49. global_step208/layer_43-model_states.pt +3 -0
  50. global_step208/layer_44-model_states.pt +3 -0
config.toml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset config file.
2
+ output_dir = '/workspace/training_output'
3
+ dataset = 'dataset.toml'
4
+
5
+ # Training settings
6
+ epochs = 50
7
+ micro_batch_size_per_gpu = 1
8
+ pipeline_stages = 1
9
+ gradient_accumulation_steps = 4
10
+ gradient_clipping = 1.0
11
+ warmup_steps = 100
12
+
13
+ # eval settings
14
+ eval_every_n_epochs = 5
15
+ eval_before_first_step = true
16
+ eval_micro_batch_size_per_gpu = 1
17
+ eval_gradient_accumulation_steps = 1
18
+
19
+ # misc settings
20
+ save_every_n_epochs = 5
21
+ checkpoint_every_n_minutes = 30
22
+ activation_checkpointing = true
23
+ partition_method = 'parameters'
24
+ save_dtype = 'bfloat16'
25
+ caching_batch_size = 1
26
+ steps_per_print = 1
27
+ video_clip_mode = 'single_middle'
28
+
29
+ [model]
30
+ type = 'hunyuan-video'
31
+ transformer_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
32
+ vae_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
33
+ llm_path = '/workspace/diffusion-pipe/models/llm'
34
+ clip_path = '/workspace/diffusion-pipe/models/clip'
35
+ dtype = 'bfloat16'
36
+ transformer_dtype = 'float8'
37
+ timestep_sample_method = 'logit_normal'
38
+
39
+ [adapter]
40
+ type = 'lora'
41
+ rank = 64
42
+ dtype = 'bfloat16'
43
+
44
+ [optimizer]
45
+ type = 'adamw_optimi'
46
+ lr = 5e-5
47
+ betas = [0.9, 0.99]
48
+ weight_decay = 0.02
49
+ eps = 1e-8
epoch10/adapter_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": null,
5
+ "bias": "none",
6
+ "eva_config": null,
7
+ "exclude_modules": null,
8
+ "fan_in_fan_out": false,
9
+ "inference_mode": false,
10
+ "init_lora_weights": true,
11
+ "layer_replication": null,
12
+ "layers_pattern": null,
13
+ "layers_to_transform": null,
14
+ "loftq_config": {},
15
+ "lora_alpha": 64,
16
+ "lora_bias": false,
17
+ "lora_dropout": 0.0,
18
+ "megatron_config": null,
19
+ "megatron_core": "megatron.core",
20
+ "modules_to_save": null,
21
+ "peft_type": "LORA",
22
+ "r": 64,
23
+ "rank_pattern": {},
24
+ "revision": null,
25
+ "target_modules": [
26
+ "modulation.linear",
27
+ "txt_attn_proj",
28
+ "img_mod.linear",
29
+ "txt_mlp.fc2",
30
+ "img_mlp.fc1",
31
+ "img_attn_proj",
32
+ "txt_mod.linear",
33
+ "txt_mlp.fc1",
34
+ "linear2",
35
+ "img_mlp.fc2",
36
+ "img_attn_qkv",
37
+ "txt_attn_qkv",
38
+ "linear1"
39
+ ],
40
+ "task_type": null,
41
+ "use_dora": false,
42
+ "use_rslora": false
43
+ }
epoch10/config.toml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset config file.
2
+ output_dir = '/workspace/training_output'
3
+ dataset = 'dataset.toml'
4
+
5
+ # Training settings
6
+ epochs = 50
7
+ micro_batch_size_per_gpu = 1
8
+ pipeline_stages = 1
9
+ gradient_accumulation_steps = 4
10
+ gradient_clipping = 1.0
11
+ warmup_steps = 100
12
+
13
+ # eval settings
14
+ eval_every_n_epochs = 5
15
+ eval_before_first_step = true
16
+ eval_micro_batch_size_per_gpu = 1
17
+ eval_gradient_accumulation_steps = 1
18
+
19
+ # misc settings
20
+ save_every_n_epochs = 5
21
+ checkpoint_every_n_minutes = 30
22
+ activation_checkpointing = true
23
+ partition_method = 'parameters'
24
+ save_dtype = 'bfloat16'
25
+ caching_batch_size = 1
26
+ steps_per_print = 1
27
+ video_clip_mode = 'single_middle'
28
+
29
+ [model]
30
+ type = 'hunyuan-video'
31
+ transformer_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
32
+ vae_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
33
+ llm_path = '/workspace/diffusion-pipe/models/llm'
34
+ clip_path = '/workspace/diffusion-pipe/models/clip'
35
+ dtype = 'bfloat16'
36
+ transformer_dtype = 'float8'
37
+ timestep_sample_method = 'logit_normal'
38
+
39
+ [adapter]
40
+ type = 'lora'
41
+ rank = 64
42
+ dtype = 'bfloat16'
43
+
44
+ [optimizer]
45
+ type = 'adamw_optimi'
46
+ lr = 5e-5
47
+ betas = [0.9, 0.99]
48
+ weight_decay = 0.02
49
+ eps = 1e-8
epoch5/adapter_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": null,
5
+ "bias": "none",
6
+ "eva_config": null,
7
+ "exclude_modules": null,
8
+ "fan_in_fan_out": false,
9
+ "inference_mode": false,
10
+ "init_lora_weights": true,
11
+ "layer_replication": null,
12
+ "layers_pattern": null,
13
+ "layers_to_transform": null,
14
+ "loftq_config": {},
15
+ "lora_alpha": 64,
16
+ "lora_bias": false,
17
+ "lora_dropout": 0.0,
18
+ "megatron_config": null,
19
+ "megatron_core": "megatron.core",
20
+ "modules_to_save": null,
21
+ "peft_type": "LORA",
22
+ "r": 64,
23
+ "rank_pattern": {},
24
+ "revision": null,
25
+ "target_modules": [
26
+ "modulation.linear",
27
+ "txt_attn_proj",
28
+ "img_mod.linear",
29
+ "txt_mlp.fc2",
30
+ "img_mlp.fc1",
31
+ "img_attn_proj",
32
+ "txt_mod.linear",
33
+ "txt_mlp.fc1",
34
+ "linear2",
35
+ "img_mlp.fc2",
36
+ "img_attn_qkv",
37
+ "txt_attn_qkv",
38
+ "linear1"
39
+ ],
40
+ "task_type": null,
41
+ "use_dora": false,
42
+ "use_rslora": false
43
+ }
epoch5/config.toml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset config file.
2
+ output_dir = '/workspace/training_output'
3
+ dataset = 'dataset.toml'
4
+
5
+ # Training settings
6
+ epochs = 50
7
+ micro_batch_size_per_gpu = 1
8
+ pipeline_stages = 1
9
+ gradient_accumulation_steps = 4
10
+ gradient_clipping = 1.0
11
+ warmup_steps = 100
12
+
13
+ # eval settings
14
+ eval_every_n_epochs = 5
15
+ eval_before_first_step = true
16
+ eval_micro_batch_size_per_gpu = 1
17
+ eval_gradient_accumulation_steps = 1
18
+
19
+ # misc settings
20
+ save_every_n_epochs = 5
21
+ checkpoint_every_n_minutes = 30
22
+ activation_checkpointing = true
23
+ partition_method = 'parameters'
24
+ save_dtype = 'bfloat16'
25
+ caching_batch_size = 1
26
+ steps_per_print = 1
27
+ video_clip_mode = 'single_middle'
28
+
29
+ [model]
30
+ type = 'hunyuan-video'
31
+ transformer_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
32
+ vae_path = '/workspace/diffusion-pipe/models/hunyuan/hunyuan_video_vae_bf16.safetensors'
33
+ llm_path = '/workspace/diffusion-pipe/models/llm'
34
+ clip_path = '/workspace/diffusion-pipe/models/clip'
35
+ dtype = 'bfloat16'
36
+ transformer_dtype = 'float8'
37
+ timestep_sample_method = 'logit_normal'
38
+
39
+ [adapter]
40
+ type = 'lora'
41
+ rank = 64
42
+ dtype = 'bfloat16'
43
+
44
+ [optimizer]
45
+ type = 'adamw_optimi'
46
+ lr = 5e-5
47
+ betas = [0.9, 0.99]
48
+ weight_decay = 0.02
49
+ eps = 1e-8
events.out.tfevents.1736781782.a3f2ec4cbd97.3820.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97a65b4df45385f9044f77f1be3645436363af779273ed9b5355eff52b020812
3
+ size 61461
global_step208/layer_00-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c682c9cc8e731c37498845f3a980635b04094468e55e77553c7a12ccff998f5
3
+ size 920
global_step208/layer_01-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8eb07645ffdc0836008d1f7b175f9cc29eac95f4097df9f5349a085d81dc94cd
3
+ size 18095082
global_step208/layer_02-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:501512bbb798b1f358aa68660d99ff7c4532ee9db5220cf693ea7b481846b7e5
3
+ size 18095082
global_step208/layer_03-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a00a61b0e8d2ceb88a3db60838c386b9a0bc7ca0dad1890b6d09cd6e1332fd3e
3
+ size 18095082
global_step208/layer_04-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0addf5649922b9f0fdbc3f3fea10f49386500f26c3f3554efa56a4329bdd524
3
+ size 18095082
global_step208/layer_05-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4dd7e38df443ea77faaa6f5ade16bf3ec4a9687f100635ffee46e9d04b9abaeb
3
+ size 18095082
global_step208/layer_06-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67269f7c4b31d7b716c12c628c58fdd5bb04a4c6d20eac06e4720b784d95e7e0
3
+ size 18095082
global_step208/layer_07-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca3bf3d0893c0fcd207e3961c88fbeceea50372e5e6681273b6b6d30df820d21
3
+ size 18095082
global_step208/layer_08-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d9ee4ad0b724a2522bb8dae61a981172a47ba6e9b12230ae7e22ac972d92c21
3
+ size 18095082
global_step208/layer_09-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e24f20a857cbaac09c4628c3a78a56b4ebfcb89ec4ec51bf111fdc77da6f8ef8
3
+ size 18095082
global_step208/layer_10-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17995bd49316f9533e7d406d3f0c69ee0e03afd389b39ffda030011e5706d7d0
3
+ size 18095082
global_step208/layer_11-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dbb3c7922d4bd526fa950573590e584d7a506ed5e119e1881012d82ac14a65e
3
+ size 18095082
global_step208/layer_12-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1610f3827587628ead9568fe792c12ebb4082cbcf103a465b1297dd264713a06
3
+ size 18095082
global_step208/layer_13-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7aff23d9b8d33f90e21138297cd6555b9dc706445b91b0259cea98ab26b4e59
3
+ size 18095082
global_step208/layer_14-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98243b8c3a3290be1d0a50166191a25dc98cef81fbd68e5dd263affa65eb3f62
3
+ size 18095082
global_step208/layer_15-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc6822f56a2796e1609d38a04c51d8bde108578db710f4040bed62ba877dadaf
3
+ size 18095082
global_step208/layer_16-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a086f236ced113b4b0c1dd131af23ccb5ace4548a86e971ff2888d7c7d628699
3
+ size 18095082
global_step208/layer_17-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32b4c8b49777ad6e3289f027f849a0f89d0ab68d07b0592c5e8778b20a6a5cc5
3
+ size 18095082
global_step208/layer_18-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc7c672b713f568270fe238ec1ee41551a3788e83cc30d92323689bac445ec05
3
+ size 18095082
global_step208/layer_19-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11a231470790adc390df45acd950df6ddb8ae587116de74275bdd688ed4e285d
3
+ size 18095082
global_step208/layer_20-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:260730b9f9f2bebac80802ce465b5c1e73d1ba966b86e4c1c0ab010881a10847
3
+ size 18095082
global_step208/layer_22-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fbb8d897dcfb1c12cd62c6c8c1b4006b0463132c052d8277a52537dd9dc9abe
3
+ size 7080724
global_step208/layer_23-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcfa2ccc09e4c2b9832755a89e84b0fccec27f7f19786fff26b2301bc674a77b
3
+ size 7080724
global_step208/layer_24-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dff85b37e6ea7724b00fc31763729a556d640e91c621bfd3d6557106cf9c1544
3
+ size 7080724
global_step208/layer_25-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3d03205797e80474a15f18ff5bf008f132d2dc14d2d9d2f9ebb40fcbdb2508c
3
+ size 7080724
global_step208/layer_26-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6dc32fcd3b16a8ef1a6b8408fb65c3614a505af1358d46502b37deadde011d5
3
+ size 7080724
global_step208/layer_27-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94d1d15be72c7c26a541b9f73856e701e4da3329a0e0e722650aa4dd1daaa7f8
3
+ size 7080724
global_step208/layer_28-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc4537296deac2959bb1719585751f3bc367055fead3c6f748cf2c5e2b4efaae
3
+ size 7080724
global_step208/layer_29-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ff91e14f17b1aab72950dce16796a075268d1b870ddab3a9b38808424d07465
3
+ size 7080724
global_step208/layer_30-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b048985c62b53bbf1323259d7ae8c0292082955b26e079758dc6c07234f037cc
3
+ size 7080724
global_step208/layer_31-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec0e1fbc3e30b5da0d0bc84fc98e59deb3aa1c1ca05802d876116896dad8a89d
3
+ size 7080724
global_step208/layer_32-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5305786462ddf79cc31c9652fdc1167422871415ba35030a5ad17beb3f3f2eed
3
+ size 7080724
global_step208/layer_33-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84fe66dfed4875d457e4cbe1f1192881ff1629d3eab7443d7e7cb53e3c678a6c
3
+ size 7080724
global_step208/layer_34-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fb387840cdc95c96d8ee7ff9a4cc1fbc33725989c3c95980f0fcee4b26a7f45
3
+ size 7080724
global_step208/layer_35-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57063178589dc3bfa2892e6585c466310823d6554a8f846f101400e6ccc3550b
3
+ size 7080724
global_step208/layer_36-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad5f3894f0f10a32300b3a6c5f121e5fe86a99e8b16f320c0d051dc1bddfeb63
3
+ size 7080724
global_step208/layer_37-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f06e770d25ac5fa2ecc9806743e4de57b2b7363905b211f8c6d2cf8df6b8fd0c
3
+ size 7080724
global_step208/layer_38-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f61e4cf6fb12741c30c45cf4d7482fa7f97d0b0f50de451511b3c4911ead0dc
3
+ size 7080724
global_step208/layer_39-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfe35391d0b073b37d49ec00ad212d0b126eaaf74f39146d244d903d0d10eb80
3
+ size 7080724
global_step208/layer_40-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c170f5697d1399cad00ede709615ac6289dbc8ac155154856c9201fefd77334
3
+ size 7080724
global_step208/layer_41-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1384a56ddf5711aefb5367d2185582d1b81bdce0941383d4fd12a6d49fc5137a
3
+ size 7080724
global_step208/layer_42-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93a4e115ce637f5d35cc134c2842468848fb231e5b77f81e20db6fd2e1228091
3
+ size 7080724
global_step208/layer_43-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86627501ae9ffcb73d70eeda80e63aaea4c19ce504a3efeef24894bbc55ea8d2
3
+ size 7080724
global_step208/layer_44-model_states.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e5db2c50fcb81a2d2d971dda00d35493f90935f831df33171cec3f0ee057286
3
+ size 7080724