poteminr committed (verified)
Commit b5cade1 · 1 Parent(s): 5d9b7f8

Training in progress, epoch 1

config.json CHANGED
@@ -1,26 +1,21 @@
 {
-  "_name_or_path": "Tochka-AI/ruRoPEBert-classic-base-2k",
+  "_name_or_path": "cointegrated/rubert-tiny2",
   "architectures": [
-    "RoPEBertForSequenceClassification"
+    "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "auto_map": {
-    "AutoConfig": "Tochka-AI/ruRoPEBert-classic-base-2k--modeling_rope_bert.RoPEBertConfig",
-    "AutoModel": "Tochka-AI/ruRoPEBert-classic-base-2k--modeling_rope_bert.RoPEBertModel",
-    "AutoModelForMaskedLM": "Tochka-AI/ruRoPEBert-classic-base-2k--modeling_rope_bert.RoPEBertForMaskedLM",
-    "AutoModelForSequenceClassification": "Tochka-AI/ruRoPEBert-classic-base-2k--modeling_rope_bert.RoPEBertForSequenceClassification"
-  },
   "classifier_dropout": null,
-  "directionality": "bidi",
+  "emb_size": 312,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 312,
   "id2label": {
     "0": "common",
     "1": "toxic"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 600,
   "label2id": {
     "common": 0,
     "toxic": 1
@@ -29,14 +24,13 @@
   "max_position_embeddings": 2048,
   "model_type": "bert",
   "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "num_hidden_layers": 3,
   "pad_token_id": 0,
-  "pooler_type": "mean",
+  "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
-  "rope_scaling": null,
-  "rope_theta": 10000.0,
   "torch_dtype": "float32",
   "transformers_version": "4.44.2",
   "type_vocab_size": 2,
-  "vocab_size": 120138
+  "use_cache": true,
+  "vocab_size": 83828
 }
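
After this change the checkpoint is a plain 3-layer BERT sequence classifier (rubert-tiny2 backbone) with the common/toxic label mapping from id2label, so it loads through the standard transformers auto classes. A minimal usage sketch; "poteminr/toxic-classifier" is a hypothetical placeholder for this repository's id:

# Minimal sketch: load the classifier described by the updated config.json.
# "poteminr/toxic-classifier" is a hypothetical placeholder repo id.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "poteminr/toxic-classifier"  # placeholder, substitute the actual repo
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("пример текста", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

# id2label from config.json: {"0": "common", "1": "toxic"}
print(model.config.id2label[logits.argmax(dim=-1).item()])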
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:766532e99133d0437568e193f326d390e516d49f3f6795f53e3746dd8a2d8d9f
-size 709323472
+oid sha256:3bd1fbd909d119bfe31a7d1fa088732357d3ecbc2a970e4f3f396839e8840db9
+size 116784136
runs/Sep12_22-59-14_legal-tech-0/events.out.tfevents.1726171155.legal-tech-0.1432.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:339618d5590c611c9494e854475cb05cd21780b1a4f71a1dcfb23ec54f8caaa9
+size 5343
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": true
     },
-    "100": {
+    "1": {
       "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "101": {
+    "2": {
       "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "102": {
+    "3": {
       "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
@@ -32,7 +32,7 @@
       "single_word": false,
       "special": true
     },
-    "103": {
+    "4": {
       "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
@@ -44,14 +44,21 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
   "mask_token": "[MASK]",
+  "max_length": 512,
   "model_max_length": 512,
   "never_split": null,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
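
The new tokenizer_config follows the rubert-tiny2 vocabulary: casing is preserved (do_lower_case: false) and the special tokens move to ids 1–4. A quick sanity check, assuming the same hypothetical repo id as above:

# Sketch: verify the settings implied by the new tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("poteminr/toxic-classifier")  # hypothetical placeholder
print(tok.convert_tokens_to_ids(["[UNK]", "[CLS]", "[SEP]", "[MASK]"]))  # expected [1, 2, 3, 4]
print(tok.do_lower_case, tok.model_max_length)                           # expected False 512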
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d94cb99dab90a9a1fb3d6cad477cbcc624059b0b838eed719677c70f1cf6d95
+oid sha256:ad9764698f8858c1c7bbf086145d3e65b900084b81090e9e507e1931f03cf71c
 size 5240
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff