cwchang committed on
Commit 817073a
1 Parent(s): cc46023

End of training

README.md CHANGED
@@ -3,9 +3,23 @@ license: apache-2.0
 base_model: google/mt5-small
 tags:
 - generated_from_trainer
+datasets:
+- cwchang/tw_address_medium
+metrics:
+- rouge
 model-index:
 - name: formatted_addr
-  results: []
+  results:
+  - task:
+      name: Summarization
+      type: summarization
+    dataset:
+      name: cwchang/tw_address_medium
+      type: cwchang/tw_address_medium
+    metrics:
+    - name: Rouge1
+      type: rouge
+      value: 99.0667
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -13,7 +27,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 # formatted_addr
 
-This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
+This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the cwchang/tw_address_medium dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.1763
+- Rouge1: 99.0667
+- Rouge2: 39.6667
+- Rougel: 99.1333
+- Rougelsum: 99.1333
+- Gen Len: 13.375
 
 ## Model description
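The updated card describes a seq2seq address-formatting model evaluated with ROUGE. A minimal usage sketch follows, assuming the checkpoint is published on the Hub as cwchang/formatted_addr (the repo id is inferred from the model-index name and is not confirmed by this diff); the input address is illustrative only:

```python
# Hedged usage sketch for the fine-tuned mt5-small checkpoint described above.
# Assumption: the model is hosted as "cwchang/formatted_addr" (inferred from the
# model-index name, not confirmed by the diff) and takes a raw address string.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "cwchang/formatted_addr"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

raw_address = "台北市中正區重慶南路一段122號"  # illustrative input
inputs = tokenizer(raw_address, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```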
 
all_results.json CHANGED
@@ -1,8 +1,18 @@
 {
     "epoch": 3.0,
-    "train_loss": 6.107138163248698,
-    "train_runtime": 143.5946,
-    "train_samples": 800,
-    "train_samples_per_second": 16.714,
-    "train_steps_per_second": 4.178
+    "eval_gen_len": 13.375,
+    "eval_loss": 0.17634712159633636,
+    "eval_rouge1": 99.0667,
+    "eval_rouge2": 39.6667,
+    "eval_rougeL": 99.1333,
+    "eval_rougeLsum": 99.1333,
+    "eval_runtime": 16.2949,
+    "eval_samples": 200,
+    "eval_samples_per_second": 12.274,
+    "eval_steps_per_second": 3.068,
+    "train_loss": 2.2646766764322916,
+    "train_runtime": 282.894,
+    "train_samples": 2000,
+    "train_samples_per_second": 21.209,
+    "train_steps_per_second": 5.302
 }
eval_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 3.0,
+    "eval_gen_len": 13.375,
+    "eval_loss": 0.17634712159633636,
+    "eval_rouge1": 99.0667,
+    "eval_rouge2": 39.6667,
+    "eval_rougeL": 99.1333,
+    "eval_rougeLsum": 99.1333,
+    "eval_runtime": 16.2949,
+    "eval_samples": 200,
+    "eval_samples_per_second": 12.274,
+    "eval_steps_per_second": 3.068
+}
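The throughput figures in the new eval_results.json are internally consistent. A quick check, assuming a per-device eval batch size of 4 (matching "train_batch_size" in trainer_state.json; the eval batch size itself is not recorded in this diff):

```python
# Sanity check on the reported eval throughput (values copied from eval_results.json).
# Assumption: per-device eval batch size is 4.
eval_runtime = 16.2949
eval_samples = 200
eval_batch_size = 4  # assumed

samples_per_second = eval_samples / eval_runtime          # ~12.274, matches the log
steps_per_second = samples_per_second / eval_batch_size   # ~3.068, matches the log
print(round(samples_per_second, 3), round(steps_per_second, 3))
```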
generation_config.json CHANGED
@@ -1,5 +1,4 @@
 {
-    "_from_model_config": true,
     "decoder_start_token_id": 0,
     "eos_token_id": 1,
     "pad_token_id": 0,
runs/Dec12_06-16-38_e168a4df8b39/events.out.tfevents.1702362181.e168a4df8b39.5272.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3142bbb691760bd6cc67b1a818bc6ee0ac726de8387d4486ccd2ab8f9dd1f2e
+size 613
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 3.0,
-    "train_loss": 6.107138163248698,
-    "train_runtime": 143.5946,
-    "train_samples": 800,
-    "train_samples_per_second": 16.714,
-    "train_steps_per_second": 4.178
+    "train_loss": 2.2646766764322916,
+    "train_runtime": 282.894,
+    "train_samples": 2000,
+    "train_samples_per_second": 21.209,
+    "train_steps_per_second": 5.302
 }
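The updated training figures line up with the larger dataset: 2000 samples over 3 epochs at batch size 4. A small cross-check, assuming a single device and no gradient accumulation (neither is recorded in this diff):

```python
# Cross-check of the updated train_results.json figures.
# Assumptions: single device, no gradient accumulation, batch size 4
# (from "train_batch_size" in trainer_state.json below).
train_samples = 2000
num_epochs = 3
batch_size = 4
train_runtime = 282.894  # seconds

samples_per_second = train_samples * num_epochs / train_runtime  # ~21.209, matches
steps_per_second = samples_per_second / batch_size               # ~5.302, matches
total_steps = train_samples // batch_size * num_epochs           # 1500, matches global_step
print(round(samples_per_second, 3), round(steps_per_second, 3), total_steps)
```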
trainer_state.json CHANGED
@@ -3,33 +3,45 @@
   "best_model_checkpoint": null,
   "epoch": 3.0,
   "eval_steps": 500,
-  "global_step": 600,
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 2.5,
-      "learning_rate": 8.333333333333334e-06,
-      "loss": 6.9234,
+      "epoch": 1.0,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 5.4127,
       "step": 500
     },
+    {
+      "epoch": 2.0,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 0.8232,
+      "step": 1000
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 0.0,
+      "loss": 0.5581,
+      "step": 1500
+    },
     {
       "epoch": 3.0,
-      "step": 600,
-      "total_flos": 52647771955200.0,
-      "train_loss": 6.107138163248698,
-      "train_runtime": 143.5946,
-      "train_samples_per_second": 16.714,
-      "train_steps_per_second": 4.178
+      "step": 1500,
+      "total_flos": 143865355345920.0,
+      "train_loss": 2.2646766764322916,
+      "train_runtime": 282.894,
+      "train_samples_per_second": 21.209,
+      "train_steps_per_second": 5.302
     }
   ],
   "logging_steps": 500,
-  "max_steps": 600,
+  "max_steps": 1500,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
-  "save_steps": 500,
-  "total_flos": 52647771955200.0,
+  "save_steps": 5000,
+  "total_flos": 143865355345920.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null