finetuned-bart-large-samsum
Files changed:
- README.md +20 -19
- config.json +1 -1
- generation_config.json +1 -1
- model.safetensors +1 -1
- training_args.bin +2 -2
README.md
CHANGED
@@ -5,12 +5,10 @@ tags:
 - generated_from_trainer
 metrics:
 - rouge
+- bleu
 model-index:
 - name: bart_samsum
   results: []
-datasets:
-- samsum
-pipeline_tag: summarization
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -18,14 +16,16 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bart_samsum
 
-This model is a fine-tuned version of [facebook/bart-large-xsum](https://huggingface.co/facebook/bart-large-xsum) on the
+This model is a fine-tuned version of [facebook/bart-large-xsum](https://huggingface.co/facebook/bart-large-xsum) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
-- Rouge1: 53.
-- Rouge2: 28.
-- Rougel:
-- Rougelsum: 49.
--
+- Loss: 1.4947
+- Rouge1: 53.3294
+- Rouge2: 28.6009
+- Rougel: 44.2008
+- Rougelsum: 49.2031
+- Bleu: 0.0
+- Meteor: 0.4887
+- Gen Len: 30.1209
 
 ## Model description
 
@@ -52,22 +52,23 @@ The following hyperparameters were used during training:
 - total_train_batch_size: 8
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 5
 - mixed_precision_training: Native AMP
 
 ### Training results
 
-| Training Loss | Epoch
-|
-| 1.
-| 1.
-| 0.
-| 0.
+| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | Meteor | Gen Len |
+|:-------------:|:------:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:----:|:------:|:-------:|
+| 1.3838 | 0.9997 | 1841 | 1.5631 | 52.3252 | 27.2646 | 42.5893 | 48.2397 | 0.0 | 0.4825 | 32.0415 |
+| 1.0835 | 2.0 | 3683 | 1.4947 | 53.3294 | 28.6009 | 44.2008 | 49.2031 | 0.0 | 0.4887 | 30.1209 |
+| 0.8345 | 2.9997 | 5524 | 1.5956 | 52.1812 | 27.1239 | 42.9864 | 47.6384 | 0.0 | 0.4774 | 30.5446 |
+| 0.672 | 4.0 | 7366 | 1.6695 | 52.8148 | 27.4815 | 43.3732 | 48.4633 | 0.0 | 0.4836 | 31.0342 |
+| 0.538 | 4.9986 | 9205 | 1.8055 | 52.0988 | 26.762 | 42.5505 | 47.3721 | 0.0 | 0.4738 | 29.8901 |
 
 
 ### Framework versions
 
-- Transformers 4.
+- Transformers 4.40.0
 - Pytorch 2.2.1+cu121
 - Datasets 2.19.0
-- Tokenizers 0.
+- Tokenizers 0.19.1
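The updated card describes a standard BART summarization checkpoint, so a quick smoke test is straightforward. A minimal inference sketch, assuming the weights are published under a hypothetical repo id (`user/finetuned-bart-large-samsum` is a placeholder; substitute the real one):

```python
# Minimal inference sketch. The repo id is a placeholder, and the dialogue is
# a SAMSum-style example chosen for illustration.
from transformers import pipeline

summarizer = pipeline("summarization", model="user/finetuned-bart-large-samsum")

dialogue = (
    "Amanda: I baked cookies. Do you want some?\n"
    "Jerry: Sure!\n"
    "Amanda: I'll bring you tomorrow :-)"
)

# The repo's generation_config.json pins num_beams=6 and no_repeat_ngram_size=3,
# so the pipeline picks those decoding defaults up automatically.
print(summarizer(dialogue)[0]["summary_text"])
```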
config.json
CHANGED
@@ -61,7 +61,7 @@
   "student_encoder_layers": null,
   "task_specific_params": {},
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.40.0",
   "use_cache": true,
   "vocab_size": 50264
 }
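Not part of the commit, but once the files are live the bumped version metadata can be confirmed from the loaded config; the repo id remains the placeholder from above:

```python
# Sanity-check sketch: the serialized config records the library version that
# wrote it. Placeholder repo id, as before.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("user/finetuned-bart-large-samsum")
print(config.transformers_version)  # expected "4.40.0" after this commit
```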
generation_config.json
CHANGED
@@ -9,5 +9,5 @@
   "no_repeat_ngram_size": 3,
   "num_beams": 6,
   "pad_token_id": 1,
-  "transformers_version": "4.
+  "transformers_version": "4.40.0"
 }
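These decoding defaults travel with the checkpoint: `generate()` reads them from `generation_config.json` automatically. A sketch that restates them explicitly, with the same placeholder repo id:

```python
# Beam-search decoding sketch. The explicit kwargs mirror what this repo's
# generation_config.json already pins (num_beams=6, no_repeat_ngram_size=3);
# max_new_tokens is an illustrative choice, not from the repo.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo = "user/finetuned-bart-large-samsum"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

inputs = tokenizer(
    "Amanda: I baked cookies. Do you want some?\nJerry: Sure!",
    return_tensors="pt",
)
summary_ids = model.generate(
    **inputs,
    num_beams=6,
    no_repeat_ngram_size=3,
    max_new_tokens=60,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```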
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ef85303f6a6796b49a98d6790f6ced0ee4d000b537eba4f2ca02da9a942b9f6b
 size 1625422896
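What changed here is only a Git LFS pointer: `oid` is the SHA-256 of the actual weights file and `size` its byte count. A small sketch for verifying a downloaded copy against the pointer:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so large weight files fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "ef85303f6a6796b49a98d6790f6ced0ee4d000b537eba4f2ca02da9a942b9f6b"
assert sha256_of("model.safetensors") == expected, "hash mismatch"
```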
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bf944548b0ed121927f4be44ca6cb14647f006338805e42c2a8a0b04f34f879f
+size 5112
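`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves alongside the weights, hence the tiny 5112-byte size. A sketch for inspecting it locally; since it is a pickle, only load files you trust:

```python
# Inspect the pickled training arguments. weights_only=False is needed on
# recent PyTorch because this is an arbitrary Python object, not a tensor file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)             # 5, matching the updated card
print(args.per_device_train_batch_size)  # combines with gradient accumulation
print(args.lr_scheduler_type)            # linear, per the card
```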