further training
- model.safetensors +1 -1
- tokenizer.json +8 -2
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a4c8172a49f85019fa2400c8cc0393fe109af4673541156a8ed0f588ce8c9c75
 size 414943200
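
The updated LFS pointer records the expected checksum and byte size of the retrained weights. Below is a minimal sketch (standard library only; the local path model.safetensors is an assumption about where the file was downloaded) that verifies a local copy against those two values:

```python
import hashlib
import os

# Values taken from the updated LFS pointer in this commit.
EXPECTED_OID = "a4c8172a49f85019fa2400c8cc0393fe109af4673541156a8ed0f588ce8c9c75"
EXPECTED_SIZE = 414943200

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so a large checkpoint is never fully loaded into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "model.safetensors"  # assumed local download location
assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert sha256_of(path) == EXPECTED_OID, "sha256 does not match the LFS pointer"
print("model.safetensors matches the pointer in this commit")
```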
tokenizer.json CHANGED
@@ -61,8 +61,14 @@
   },
   "post_processor": {
     "type": "BertProcessing",
-    "sep": [
-
+    "sep": [
+      "[SEP]",
+      3
+    ],
+    "cls": [
+      "[CLS]",
+      2
+    ]
   },
   "decoder": {
     "type": "WordPiece",
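
The tokenizer.json change fills in the BertProcessing post-processor's special tokens, mapping "[SEP]" to id 3 and "[CLS]" to id 2, so encoded sequences are wrapped with them. A minimal check (assuming the tokenizers package is installed and the updated tokenizer.json from this commit is in the working directory; the sample sentence is arbitrary):

```python
from tokenizers import Tokenizer

# Load the updated tokenizer file from this commit.
tok = Tokenizer.from_file("tokenizer.json")

enc = tok.encode("further training")
print(enc.tokens)  # should start with "[CLS]" and end with "[SEP]"
print(enc.ids)     # should start with 2 and end with 3, per the diff above

assert enc.tokens[0] == "[CLS]" and enc.ids[0] == 2
assert enc.tokens[-1] == "[SEP]" and enc.ids[-1] == 3
```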