Commit · c8310f2
add tokenizer
Parent(s): 430203e

Files changed:
- tokenizer.json +135 -84
- tokenizer_config.json +5 -0
tokenizer.json
CHANGED
@@ -2,7 +2,53 @@
   "version": "1.0",
   "truncation": null,
   "padding": null,
-  "added_tokens": [
+  "added_tokens": [
+    {
+      "id": 10,
+      "content": "[bos]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 32,
+      "content": "[eos]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 57,
+      "content": "[unk]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 65,
+      "content": "[nop]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 88,
+      "content": "[mask]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    }
+  ],
   "normalizer": null,
   "pre_tokenizer": {
     "type": "WhitespaceSplit"

@@ -15,90 +61,95 @@
     "continuing_subword_prefix": "##",
     "max_input_chars_per_word": 100,
     "vocab": {
-      … 67 removed entries of the old vocab (truncated in the diff view) …
+      "[Branch1]": 0,
+      "[=NH1+1]": 1,
+      "[Se]": 2,
+      "[#Branch2]": 3,
+      "[O-1]": 4,
+      "[SiH1]": 5,
+      "[SeH1]": 6,
+      "[CH2-1]": 7,
+      "[SH0]": 8,
+      "[PH1]": 9,
+      "[bos]": 10,
+      "[Si]": 11,
+      "[OH1+1]": 12,
+      "[Fe]": 13,
+      "[NH1]": 14,
+      "[Ring2]": 15,
+      "[=N]": 16,
+      "[=NH2+1]": 17,
+      "[B]": 18,
+      "[=SH1]": 19,
+      "[C]": 20,
+      "[=C]": 21,
+      "[NH1-1]": 22,
+      "[=O+1]": 23,
+      "[As]": 24,
+      "[#Branch1]": 25,
+      "[I]": 26,
+      "[=O]": 27,
+      "[B-1]": 28,
+      "[Fe-4]": 29,
+      "[=Ring1]": 30,
+      "[=S]": 31,
+      "[eos]": 32,
+      "[Cl]": 33,
+      "[=P]": 34,
+      "[=Fe]": 35,
+      "[NH1+1]": 36,
+      "[CH1]": 37,
+      "[#Ring1]": 38,
+      "[As+1]": 39,
+      "[Branch3]": 40,
+      "[O]": 41,
+      "[=OH1+1]": 42,
+      "[Branch2]": 43,
+      "[=As]": 44,
+      "[F]": 45,
+      "[P+1]": 46,
+      "[S]": 47,
+      "[#Ring2]": 48,
+      "[#N]": 49,
+      "[CH1+1]": 50,
+      "[OH0]": 51,
+      "[N]": 52,
+      "[I+1]": 53,
+      "[=Ring2]": 54,
+      "[C+1]": 55,
+      "[=B]": 56,
+      "[unk]": 57,
+      "[SiH2]": 58,
+      "[C-1]": 59,
+      "[=PH1]": 60,
+      "[#C]": 61,
+      "[SH1]": 62,
+      "[Fe-3]": 63,
+      "[Br]": 64,
+      "[nop]": 65,
+      "[CH1-1]": 66,
       "[NH3+1]": 67,
-      … 16 removed entries of the old vocab (truncated; legible fragments: "[Se]": 74, "[N+1]": 81, "[Se+1]": 83) …
+      "[=Branch1]": 68,
+      "[NH2+1]": 69,
+      "[P]": 70,
+      "[K]": 71,
+      "[N+1]": 72,
+      "[CH0]": 73,
+      "[=Se]": 74,
+      "[Fe+1]": 75,
+      "[Ring1]": 76,
+      "[S+1]": 77,
+      "[=Branch3]": 78,
+      "[Fe+2]": 79,
+      "[=S+1]": 80,
+      "[=N+1]": 81,
+      "[Na]": 82,
+      "[Se+1]": 83,
+      "[N-1]": 84,
+      "[NH0]": 85,
+      "[#S]": 86,
+      "[=Branch2]": 87,
+      "[mask]": 88
     }
   }
 }
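The added_tokens block registers the five special tokens at ids that also appear in the rebuilt vocab ([bos] 10, [eos] 32, [unk] 57, [nop] 65, [mask] 88), and the pre-tokenizer stays WhitespaceSplit, so input is expected as space-separated SELFIES symbols. Below is a minimal sketch of exercising the committed tokenizer.json with the tokenizers library; the local file path and the example SELFIES fragment are illustrative assumptions, not part of the commit.

```python
# Sketch only: load the committed tokenizer.json and encode a SELFIES string.
# The path and the example fragment are assumptions for illustration.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# The pre_tokenizer is WhitespaceSplit, so SELFIES symbols must be
# separated by spaces before encoding.
selfies = "[C] [=O] [Branch1] [C] [O]"   # hypothetical fragment
enc = tok.encode(selfies)

print(enc.tokens)  # ['[C]', '[=O]', '[Branch1]', '[C]', '[O]']
print(enc.ids)     # [20, 27, 0, 20, 41], per the new vocab block
```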
tokenizer_config.json
CHANGED
@@ -1,8 +1,13 @@
 {
   "cls_token": "[bos]",
   "mask_token": "[mask]",
+  "model_max_length": 427,
+  "name_or_path": "tokenizer",
   "pad_token": "[nop]",
+  "padding_side": "right",
   "sep_token": "[eos]",
+  "special_tokens_map_file": "tokenizer/special_tokens_map.json",
   "tokenizer_class": "PreTrainedTokenizerFast",
+  "truncation_side": "right",
   "unk_token": "[unk]"
 }
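The new tokenizer_config.json fields take effect when the two committed files are loaded together, for example through transformers. A minimal sketch follows, assuming a local directory named tokenizer/ that holds both files; the directory name and the example inputs are assumptions for illustration.

```python
# Sketch only: load via transformers and check the committed config defaults.
# The local "tokenizer" directory is assumed to hold tokenizer.json and
# tokenizer_config.json from this commit.
from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast.from_pretrained("tokenizer")

print(tok.cls_token, tok.sep_token, tok.pad_token, tok.mask_token)
# [bos] [eos] [nop] [mask]
print(tok.model_max_length, tok.padding_side, tok.truncation_side)
# 427 right right

# Right-side padding uses the configured [nop] pad token (id 65).
batch = tok(["[C] [=O]", "[C] [C] [Branch1] [C] [O]"], padding=True)
print(batch["input_ids"])
```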