law-ai committed
Commit 20452bb · verified · 1 parent: a6e25b2

Upload 9 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model.TGT filter=lfs diff=lfs merge=lfs -text
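The new rule stores model.TGT as a Git LFS pointer rather than a regular blob. A minimal Python sketch of which paths the tracked patterns route through LFS, using fnmatch as a rough stand-in for gitignore-style matching (pattern list copied from the hunk above):

```python
import fnmatch

# Patterns from the .gitattributes hunk above that route files through LFS.
# NOTE: real gitattributes matching follows gitignore-style rules; fnmatch
# is only an approximation for these simple patterns.
lfs_patterns = ["*.zip", "*.zst", "*tfevents*", "model.TGT"]

def stored_via_lfs(path: str) -> bool:
    name = path.rsplit("/", 1)[-1]  # these patterns match on the basename
    return any(fnmatch.fnmatch(name, pat) for pat in lfs_patterns)

print(stored_via_lfs("model.TGT"))      # True  -> committed as an LFS pointer
print(stored_via_lfs("dict.SRC.json"))  # False -> committed as a normal blob
```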
config.json ADDED
@@ -0,0 +1,46 @@
+{
+  "_name_or_path": "ai4bharat/indictrans2-en-indic-1B",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "architectures": [
+    "IndicTransForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "attn_implementation": "eager",
+  "auto_map": {
+    "AutoConfig": "ai4bharat/indictrans2-en-indic-1B--configuration_indictrans.IndicTransConfig",
+    "AutoModelForSeq2SeqLM": "ai4bharat/indictrans2-en-indic-1B--modeling_indictrans.IndicTransForConditionalGeneration"
+  },
+  "bos_token_id": 0,
+  "decoder_attention_heads": 16,
+  "decoder_embed_dim": 1024,
+  "decoder_ffn_dim": 8192,
+  "decoder_layerdrop": 0,
+  "decoder_layers": 18,
+  "decoder_normalize_before": true,
+  "decoder_start_token_id": 2,
+  "decoder_vocab_size": 122672,
+  "dropout": 0.2,
+  "encoder_attention_heads": 16,
+  "encoder_embed_dim": 1024,
+  "encoder_ffn_dim": 8192,
+  "encoder_layerdrop": 0,
+  "encoder_layers": 18,
+  "encoder_normalize_before": true,
+  "encoder_vocab_size": 32322,
+  "eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "layernorm_embedding": false,
+  "max_source_positions": 256,
+  "max_target_positions": 256,
+  "model_type": "IndicTrans",
+  "num_hidden_layers": 18,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "share_decoder_input_output_embed": false,
+  "tokenizer_class": "IndicTransTokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.1",
+  "use_cache": false
+}
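Because auto_map points at configuration and modeling code hosted in ai4bharat/indictrans2-en-indic-1B, loading this checkpoint requires trust_remote_code=True. A minimal loading sketch; "REPO_ID" is a placeholder for wherever these files end up, not the actual repo name:

```python
from transformers import AutoModelForSeq2SeqLM

# trust_remote_code=True lets transformers fetch IndicTransConfig and
# IndicTransForConditionalGeneration from ai4bharat/indictrans2-en-indic-1B,
# as declared in the auto_map above. "REPO_ID" is a placeholder.
model = AutoModelForSeq2SeqLM.from_pretrained("REPO_ID", trust_remote_code=True)
print(model.config.decoder_layers)  # 18, per config.json
```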
dict.SRC.json ADDED
The diff for this file is too large to render. See raw diff
 
dict.TGT.json ADDED
The diff for this file is too large to render. See raw diff
 
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "eos_token_id": 2,
+  "pad_token_id": 1,
+  "transformers_version": "4.47.1"
+}
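These defaults are merged into model.generate() calls when the model is loaded from the Hub; they can also be inspected on their own. A short sketch, again with "REPO_ID" as a placeholder:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("REPO_ID")  # placeholder repo id
print(gen_cfg.decoder_start_token_id)  # 2 -> </s> doubles as decoder start
print(gen_cfg.pad_token_id)            # 1
```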
model.SRC ADDED
Binary file (759 kB).
 
model.TGT ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac9257c8e76b8b607705b959cc3d075656ea33032f7a974e467b8941df6e98d4
+size 3256903
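What Git stores here is only the LFS pointer; the actual ~3.3 MB blob lives in LFS storage. A sketch of checking a downloaded copy against the pointer's oid and size (assuming model.TGT sits in the current directory):

```python
import hashlib
import os

path = "model.TGT"  # assumed local download path
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

# Values copied from the LFS pointer above.
assert h.hexdigest() == "ac9257c8e76b8b607705b959cc3d075656ea33032f7a974e467b8941df6e98d4"
assert os.path.getsize(path) == 3256903
```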
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56d41ab36b9a86816afab32582dc2f4e904e2bc60084e7ebad7d936797c5e7f5
+size 4462265272
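The weights themselves are ~4.46 GB (float32, per config.json). With the safetensors library, the tensor index can be inspected without materializing the weights, e.g.:

```python
from safetensors import safe_open

# Reads only the header (tensor names and shapes), not the ~4.5 GB of data.
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in list(f.keys())[:5]:
        print(name, f.get_slice(name).get_shape())
```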
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "auto_map": {
+    "AutoTokenizer": [
+      "ai4bharat/indictrans2-en-indic-1B--tokenization_indictrans.IndicTransTokenizer",
+      null
+    ]
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "model_max_length": 256,
+  "pad_token": "<pad>",
+  "src_vocab_file": "/home/debtanu/.cache/huggingface/hub/models--ai4bharat--indictrans2-en-indic-1B/snapshots/fba89d6196ad6c0bbe0031bf3e44fe17313e94f1/dict.SRC.json",
+  "tgt_vocab_file": "/home/debtanu/.cache/huggingface/hub/models--ai4bharat--indictrans2-en-indic-1B/snapshots/fba89d6196ad6c0bbe0031bf3e44fe17313e94f1/dict.SRC.json",
+  "tokenizer_class": "IndicTransTokenizer",
+  "unk_token": "<unk>"
+}
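Two details of this config worth flagging: src_vocab_file and tgt_vocab_file are absolute paths into the committer's local Hugging Face cache, and both point at dict.SRC.json (even the target-side entry), so whether they resolve on another machine depends on how IndicTransTokenizer handles them. A hedged loading sketch, with "REPO_ID" again as a placeholder:

```python
from transformers import AutoTokenizer

# trust_remote_code=True resolves IndicTransTokenizer through the
# auto_map entry above. "REPO_ID" is a placeholder repo name.
tok = AutoTokenizer.from_pretrained("REPO_ID", trust_remote_code=True)
print(tok.pad_token, tok.pad_token_id)  # <pad> 1, per the maps above
```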