Gleb Vinarskis committed
Commit 4c31efb
1 Parent(s): 0020a06
Files changed (2):
  1. config.json +5 -1
  2. configuration_stacked.py +1 -92
config.json CHANGED
@@ -13,5 +13,9 @@
     }
   },
   "repo_id": "Maslionok/pipeline1",
-  "flename": "LID-40-3-2000000-1-4.bin"
+  "flename": "LID-40-3-2000000-1-4.bin",
+  "auto_map": {
+    "AutoConfig": "configuration_stacked.ImpressoConfig",
+    "AutoModelForTokenClassification": "modeling_stacked.ExtendedMultitaskModelForTokenClassification"
+  }
   }
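
The added "auto_map" block is what lets transformers resolve the repo's custom classes when it is loaded with trust_remote_code=True. A minimal load sketch under that assumption (repo id taken from the config above; the token-classification entry assumes modeling_stacked.py ships in the same repo):

    from transformers import AutoConfig, AutoModelForTokenClassification

    # "AutoConfig" entry -> configuration_stacked.ImpressoConfig
    config = AutoConfig.from_pretrained("Maslionok/pipeline1", trust_remote_code=True)

    # "AutoModelForTokenClassification" entry ->
    # modeling_stacked.ExtendedMultitaskModelForTokenClassification
    model = AutoModelForTokenClassification.from_pretrained(
        "Maslionok/pipeline1", trust_remote_code=True
    )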
configuration_stacked.py CHANGED
@@ -2,98 +2,7 @@ from transformers import PretrainedConfig
 import torch

 class ImpressoConfig(PretrainedConfig):
-    model_type = "stacked_bert"
-
-    def __init__(
-        self,
-        vocab_size=30522,
-        hidden_size=768,
-        num_hidden_layers=12,
-        num_attention_heads=12,
-        intermediate_size=3072,
-        hidden_act="gelu",
-        hidden_dropout_prob=0.1,
-        attention_probs_dropout_prob=0.1,
-        max_position_embeddings=512,
-        type_vocab_size=2,
-        initializer_range=0.02,
-        layer_norm_eps=1e-12,
-        pad_token_id=0,
-        position_embedding_type="absolute",
-        use_cache=True,
-        classifier_dropout=None,
-        pretrained_config=None,
-        values_override=None,
-        label_map=None,
-        **kwargs,
-    ):
-        super().__init__(pad_token_id=pad_token_id, **kwargs)
-
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.hidden_act = hidden_act
-        self.intermediate_size = intermediate_size
-        self.hidden_dropout_prob = hidden_dropout_prob
-        self.attention_probs_dropout_prob = attention_probs_dropout_prob
-        self.max_position_embeddings = max_position_embeddings
-        self.type_vocab_size = type_vocab_size
-        self.initializer_range = initializer_range
-        self.layer_norm_eps = layer_norm_eps
-        self.position_embedding_type = position_embedding_type
-        self.use_cache = use_cache
-        self.classifier_dropout = classifier_dropout
-        self.pretrained_config = pretrained_config
-        self.label_map = label_map
-
-        self.values_override = values_override or {}
-        self.outputs = {
-            "logits": {"shape": [None, None, self.hidden_size], "dtype": "float32"}
-        }
-
-    @classmethod
-    def is_torch_support_available(cls):
-        """
-        Indicate whether Torch support is available for this configuration.
-        Required for compatibility with certain parts of the Transformers library.
-        """
-        return True
-
-    @classmethod
-    def patch_ops(self):
-        """
-        A method required by some Hugging Face utilities to modify operator mappings.
-        Currently, it performs no operation and is included for compatibility.
-        Args:
-            ops: A dictionary of operations to potentially patch.
-        Returns:
-            The (unmodified) ops dictionary.
-        """
-        return None
-
-    def generate_dummy_inputs(self, tokenizer, batch_size=1, seq_length=8, framework="pt"):
-        """
-        Generate dummy inputs for testing or export.
-        Args:
-            tokenizer: The tokenizer used to tokenize inputs.
-            batch_size: Number of input samples in the batch.
-            seq_length: Length of each sequence.
-            framework: Framework ("pt" for PyTorch, "tf" for TensorFlow).
-        Returns:
-            Dummy inputs as a dictionary.
-        """
-        if framework == "pt":
-            input_ids = torch.randint(
-                low=0,
-                high=self.vocab_size,
-                size=(batch_size, seq_length),
-                dtype=torch.long
-            )
-            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
-            return {"input_ids": input_ids, "attention_mask": attention_mask}
-        else:
-            raise ValueError("Framework '{}' not supported.".format(framework))
+    model_type = "floret"

 # Register the configuration with the transformers library
 ImpressoConfig.register_for_auto_class()
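
After the trim, configuration_stacked.py only pins model_type = "floret" and calls register_for_auto_class(). In stock transformers, a config class registered this way gets its defining module copied alongside the saved files, which is what produces the auto_map entry added to config.json above. A hedged sketch of that round trip (the local path is illustrative):

    from configuration_stacked import ImpressoConfig

    cfg = ImpressoConfig()  # inherits PretrainedConfig defaults after the trim
    # Because register_for_auto_class() was called at import time, save_pretrained()
    # also copies configuration_stacked.py into the output directory and records
    # "AutoConfig": "configuration_stacked.ImpressoConfig" under auto_map.
    cfg.save_pretrained("./pipeline1-local")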