ghofrani committed
Commit 7c3d84b
1 Parent(s): 88c0773

Training in progress, step 17000

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 124, "</s>": 125}
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "checkpoint-1500",
+ "_name_or_path": "wghts/checkpoint-16500",
  "activation_dropout": 0.1,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e431da6630c3823751eb5ee982b5d696f232b581a6a4b8d7f311c71b8cfff4f4
+ oid sha256:8729d4c218abf7d9ef7d7b07ddeea2c93da2c1967499b2f38cb6779812feae75
  size 1262440241
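pytorch_model.bin is tracked with Git LFS, so the file in the commit is only a pointer; the actual weights are content-addressed by the oid above. A minimal sketch (not part of the repo) that checks a downloaded copy against the new pointer's SHA-256 and size:

import hashlib
import os

expected_oid = "8729d4c218abf7d9ef7d7b07ddeea2c93da2c1967499b2f38cb6779812feae75"
expected_size = 1262440241
path = "pytorch_model.bin"  # the resolved LFS object, not the pointer file

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"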
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "wghts/", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b4bc05f95097b1f49508d44e17407361cbf95b3448d4e2f5407e2ac5f4a6e52
+ oid sha256:035cdfd69375b85645cb3dc23a6ed676a19bc90d8fc9ed10bb3a4fd5a11d2d5a
  size 2991
vocab.json ADDED
@@ -0,0 +1 @@
+ {"=": 1, "_": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "k": 12, "l": 13, "m": 14, "n": 15, "o": 16, "p": 17, "q": 18, "r": 19, "s": 20, "t": 21, "u": 22, "v": 23, "w": 24, "x": 25, "y": 26, "z": 27, "«": 28, "»": 29, "ā": 30, "š": 31, "،": 32, "؛": 33, "؟": 34, "ء": 35, "آ": 36, "أ": 37, "ؤ": 38, "ئ": 39, "ا": 40, "ب": 41, "ة": 42, "ت": 43, "ث": 44, "ج": 45, "ح": 46, "خ": 47, "د": 48, "ذ": 49, "ر": 50, "ز": 51, "س": 52, "ش": 53, "ص": 54, "ض": 55, "ط": 56, "ظ": 57, "ع": 58, "غ": 59, "ـ": 60, "ف": 61, "ق": 62, "ك": 63, "ل": 64, "م": 65, "ن": 66, "ه": 67, "و": 68, "ى": 69, "ي": 70, "ً": 71, "ٌ": 72, "َ": 73, "ُ": 74, "ِ": 75, "ّ": 76, "ْ": 77, "ٔ": 78, "٬": 79, "پ": 80, "چ": 81, "ژ": 82, "ک": 83, "گ": 84, "ۀ": 85, "ی": 86, "ے": 87, "ە": 88, "ﭘ": 89, "ﮐ": 90, "ﮔ": 91, "ﯽ": 92, "ﯾ": 93, "ﯿ": 94, "ﺍ": 95, "ﺎ": 96, "ﺑ": 97, "ﺒ": 98, "ﺖ": 99, "ﺘ": 100, "ﺧ": 101, "ﺩ": 102, "ﺪ": 103, "ﺭ": 104, "ﺮ": 105, "ﺱ": 106, "ﺴ": 107, "ﺷ": 108, "ﺸ": 109, "ﻀ": 110, "ﻋ": 111, "ﻌ": 112, "ﻟ": 113, "ﻡ": 114, "ﻢ": 115, "ﻤ": 116, "ﻥ": 117, "ﻧ": 118, "ﻪ": 119, "ﻭ": 120, "ﻮ": 121, "|": 0, "[UNK]": 122, "[PAD]": 123}