gyr66 committed
Commit · 3f3c3fa
Parent(s): c424f9f

Update model

Browse files:
- README.md +16 -22
- model.safetensors +1 -1
- tokenizer_config.json +4 -0
- training_args.bin +1 -1
README.md CHANGED

@@ -9,12 +9,6 @@ metrics:
 model-index:
 - name: Ernie-3.0-base-chinese-finetuned-ner
   results: []
-datasets:
-- gyr66/privacy_detection
-language:
-- zh
-library_name: transformers
-pipeline_tag: token-classification
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -24,11 +18,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Precision: 0.
-- Recall: 0.
-- F1: 0.
-- Accuracy: 0.
+- Loss: 0.4856
+- Precision: 0.6511
+- Recall: 0.7535
+- F1: 0.6986
+- Accuracy: 0.9053
 
 ## Model description
 
@@ -59,16 +53,16 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| 0.0667        | 1.0   | 126  | 0.4589          | 0.6387    | 0.7553 | 0.6921 | 0.9012   |
+| 0.0594        | 2.0   | 252  | 0.4656          | 0.6444    | 0.7515 | 0.6939 | 0.9057   |
+| 0.053         | 3.0   | 378  | 0.4524          | 0.6444    | 0.7477 | 0.6922 | 0.9064   |
+| 0.0473        | 4.0   | 504  | 0.4955          | 0.6298    | 0.7568 | 0.6875 | 0.9012   |
+| 0.0461        | 5.0   | 630  | 0.4892          | 0.6512    | 0.7505 | 0.6973 | 0.9077   |
+| 0.0438        | 6.0   | 756  | 0.5021          | 0.6450    | 0.7528 | 0.6947 | 0.9054   |
+| 0.0428        | 7.0   | 882  | 0.5048          | 0.6471    | 0.7576 | 0.6980 | 0.9050   |
+| 0.0583        | 8.0   | 1008 | 0.4990          | 0.6401    | 0.7533 | 0.6921 | 0.9038   |
+| 0.0582        | 9.0   | 1134 | 0.4833          | 0.6457    | 0.7513 | 0.6945 | 0.9064   |
+| 0.0635        | 10.0  | 1260 | 0.4856          | 0.6511    | 0.7535 | 0.6986 | 0.9053   |
 
 
 ### Framework versions
@@ -76,4 +70,4 @@ The following hyperparameters were used during training:
 - Transformers 4.35.2
 - Pytorch 2.1.0+cu121
 - Datasets 2.16.1
-- Tokenizers 0.15.0
+- Tokenizers 0.15.0
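The updated card reports a token-classification fine-tune (the gyr66/privacy_detection dataset tag and zh language tag were dropped from the metadata, but the model-index name remains). A minimal inference sketch, assuming the Hub repo id gyr66/Ernie-3.0-base-chinese-finetuned-ner — inferred from the committer and the model-index name, not confirmed by this page — and the Transformers version pinned above:

```python
from transformers import pipeline

# Repo id is an assumption inferred from the committer (gyr66) and the
# model-index name in README.md; adjust if the actual Hub id differs.
ner = pipeline(
    "token-classification",
    model="gyr66/Ernie-3.0-base-chinese-finetuned-ner",
    aggregation_strategy="simple",  # merge sub-word pieces into entity spans
)

print(ner("张三的手机号是13812345678，住在北京市海淀区。"))
```

As a sanity check, the final-epoch F1 in the table is consistent with its precision and recall: 2 · 0.6511 · 0.7535 / (0.6511 + 0.7535) ≈ 0.6986.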
model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ff1eb75663877384e996e1939a7bcd93a6c1a2a855f977af53386c266dae1326
 size 469529812
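Both binary files in this commit are Git LFS pointers: the repository stores only the spec version, the sha256 oid, and the byte size, while the blob itself lives in LFS storage. Since the size is unchanged, the update is consistent with new weight values for the same architecture. A minimal sketch for checking a downloaded file against the new pointer's oid (the local path is an assumption for illustration):

```python
import hashlib

# Expected hash comes from the LFS pointer shown above.
EXPECTED = "ff1eb75663877384e996e1939a7bcd93a6c1a2a855f977af53386c266dae1326"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch: incomplete or corrupted download"
print("model.safetensors matches the LFS pointer")
```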
tokenizer_config.json CHANGED

@@ -47,12 +47,16 @@
   "do_lower_case": true,
   "ignore_mismatched_sizes": true,
   "mask_token": "[MASK]",
+  "max_length": 512,
   "model_max_length": 1000000000000000019884624838656,
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
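The four added keys persist truncation settings alongside the tokenizer: inputs are cut to 512 tokens from the right, with no overlapping stride. A minimal sketch of the resulting behaviour, assuming the same Hub repo id as above; because whether transformers applies such config keys as call-time defaults varies by version, truncation is passed explicitly here:

```python
from transformers import AutoTokenizer

# Repo id assumed as before; the truncation keys were added in this commit.
tokenizer = AutoTokenizer.from_pretrained("gyr66/Ernie-3.0-base-chinese-finetuned-ner")

# max_length=512, truncation_side="right", truncation_strategy="longest_first",
# stride=0: long inputs lose tokens from the right, and any requested overflow
# windows would not overlap the kept tokens.
enc = tokenizer("这是一个很长的句子。" * 200, truncation=True, max_length=512)
print(len(enc["input_ids"]))  # at most 512
```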
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b836eaee33d8db30d4101a292d5b9aba4ee92fb6237699f866b88cc32a109a7a
 size 4600