farzadab committed (verified)
Commit 2ced303 · 1 Parent(s): dd5d735

End of training
README.md ADDED
@@ -0,0 +1,51 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: fixie-ai-dev/internal-testing-untrained-ultravox
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # fixie-ai-dev/internal-testing-untrained-ultravox
+
+ This model is a fine-tuned version of an unspecified base model on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.002
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine_with_min_lr
+ - lr_scheduler_warmup_steps: 1000
+ - training_steps: 2
+
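+ For reference, a rough `TrainingArguments` equivalent of the values above (a sketch, not taken from the original training code; `output_dir` is a placeholder, and the `cosine_with_min_lr` schedule also expects a `min_lr` value via `lr_scheduler_kwargs`, which is not recorded here):
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Approximate reconstruction of the hyperparameters listed above; Adam betas/epsilon are the defaults.
+ args = TrainingArguments(
+     output_dir="out",
+     learning_rate=2e-3,
+     per_device_train_batch_size=2,
+     per_device_eval_batch_size=8,
+     seed=42,
+     lr_scheduler_type="cosine_with_min_lr",
+     warmup_steps=1000,
+     max_steps=2,
+ )
+ ```
+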
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.44.0
+ - Pytorch 2.4.1
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
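+
+ ## How to use
+
+ A minimal loading sketch (not part of the original card): the repository's `config.json` maps `AutoConfig`/`AutoModel` to the bundled `ultravox_config.py` and `ultravox_model.py`, so `trust_remote_code=True` is required.
+
+ ```python
+ import transformers
+
+ repo = "fixie-ai-dev/internal-testing-untrained-ultravox"
+
+ # Loads UltravoxModel via the auto_map entries in config.json.
+ model = transformers.AutoModel.from_pretrained(repo, trust_remote_code=True)
+ tokenizer = transformers.AutoTokenizer.from_pretrained(repo)
+ ```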
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "architectures": [
+     "UltravoxModel"
+   ],
+   "audio_latency_block_size": null,
+   "audio_model_id": "hf-internal-testing/tiny-random-WhisperForCausalLM",
+   "audio_model_lora_config": {
+     "lora_alpha": 8,
+     "r": 0,
+     "target_modules": [
+       "k_proj",
+       "q_proj",
+       "linear_k",
+       "linear_q"
+     ]
+   },
+   "auto_map": {
+     "AutoConfig": "ultravox_config.UltravoxConfig",
+     "AutoModel": "ultravox_model.UltravoxModel"
+   },
+   "custom_pipelines": {
+     "ultravox-pipeline": {
+       "impl": "ultravox_pipeline.UltravoxPipeline",
+       "pt": [
+         "AutoModel"
+       ],
+       "tf": [],
+       "type": "multimodal"
+     }
+   },
+   "hidden_size": 4096,
+   "ignore_index": -100,
+   "initializer_range": 0.02,
+   "last_layer_norm": false,
+   "model_type": "ultravox",
+   "norm_init": 0.4,
+   "pad_token_id": 2,
+   "projector_act": "swiglu",
+   "stack_factor": 8,
+   "text_model_id": "hf-internal-testing/tiny-random-LlamaForCausalLM",
+   "text_model_lora_config": {
+     "lora_alpha": 8,
+     "r": 0,
+     "target_modules": [
+       "k_proj",
+       "q_proj",
+       "linear_k",
+       "linear_q"
+     ]
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.0",
+   "vocab_size": 32000
+ }
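The config above wires a tiny random Whisper encoder to a tiny random Llama decoder through an 8x stacking projector. A minimal sketch of rebuilding the same configuration with the bundled `UltravoxConfig` class (assumes `ultravox_config.py` from this repo is on the import path):

```python
from ultravox_config import UltravoxConfig

# Mirrors the key fields of config.json; both LoRA configs default to r=0, i.e. frozen towers.
config = UltravoxConfig(
    audio_model_id="hf-internal-testing/tiny-random-WhisperForCausalLM",
    text_model_id="hf-internal-testing/tiny-random-LlamaForCausalLM",
    hidden_size=4096,
    stack_factor=8,
    norm_init=0.4,
    projector_act="swiglu",
    last_layer_norm=False,
)
```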
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 2,
+   "transformers_version": "4.44.0"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96f4118522deba8f6cb2844b76d406411fc8dd10fc784876fc5f2e1b0b02492c
+ size 2229088
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\n' + content.strip() + '\n<</SYS>>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c3cd8944d3ca0724b7232be3c25ca17108b7f62338c552d7aef13c097dd6984
+ size 5688
ultravox_config.py ADDED
@@ -0,0 +1,174 @@
+ import dataclasses
+ from enum import Enum
+ from typing import Any, Dict, List, Optional
+
+ import transformers
+
+
+ @dataclasses.dataclass
+ class LoraConfigSimplified:
+     """
+     Low-Rank Adaptation (LoRA) configuration.
+
+     Used for language and audio models separately.
+     """
+
+     # The rank of the approximation
+     r: int = 0
+     lora_alpha: float = 8
+     target_modules: Optional[List[str]] = dataclasses.field(
+         default_factory=lambda: ["k_proj", "q_proj", "linear_k", "linear_q"]
+     )
+     # A list of module-name regex patterns to unfreeze. Only used if r == 0.
+     unfreeze_layers: Optional[List[str]] = None
+
+
+ class LossFunction(str, Enum):
+     CrossEntropy = "ce"
+     KL_Divergence = "kl"
+
+
+ @dataclasses.dataclass
+ class LossConfig:
+     loss_function: LossFunction = LossFunction.CrossEntropy
+     kl_temperature: float = 2.0
+
+     @property
+     def requires_alt_fields(self):
+         return self.loss_function == LossFunction.KL_Divergence
+
+
+ class UltravoxConfig(transformers.PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`UltravoxModel`]. It is used to instantiate an
+     Ultravox model according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         audio_config (`Wav2Vec2Config`, *optional*):
+             Custom audio config or dict
+         text_config (`Union[AutoConfig, dict]`, *optional*):
+             The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
+         ignore_index (`int`, *optional*, defaults to -100):
+             The ignore index for the loss function.
+         audio_token_index (`int`, *optional*, defaults to 32000):
+             The audio token index to encode the audio prompt.
+         stack_factor (`int`, *optional*, defaults to 8):
+             Audio downsampling factor for the multimodal projector.
+         norm_init (`float`, *optional*, defaults to 0.4):
+             The initialization value for the layer normalization.
+         projector_act (`str`, *optional*, defaults to `"swiglu"`):
+             The activation function used by the multimodal projector.
+         text_model_lora_config (`LoraConfigSimplified`, *optional*):
+             The LoRA configuration for finetuning the text model.
+         audio_model_lora_config (`LoraConfigSimplified`, *optional*):
+             The LoRA configuration for finetuning the audio model.
+         audio_latency_block_size (`int`, *optional*, defaults to `None`):
+             The latency block size for simulating audio streaming.
+
+
+     Example:
+
+     ```python
+     >>> from transformers import UltravoxModel, Wav2Vec2Config, UltravoxConfig, LlamaConfig
+
+     >>> # Initializing an audio encoder config
+     >>> audio_config = Wav2Vec2Config()
+
+     >>> # Initializing a Llama config
+     >>> text_config = LlamaConfig()
+
+     >>> # Initializing a default configuration
+     >>> configuration = UltravoxConfig(audio_config, text_config)
+
+     >>> # Initializing a completely untrained model from the configuration
+     >>> model = UltravoxModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+
+     >>> # Initialize a model from pretrained checkpoints and random projector weights
+     >>> config = UltravoxConfig(audio_model_id="facebook/wav2vec2-base-960h", text_model_id="meta-llama/Llama-2-7b-chat-hf")
+     ```"""
+
+     model_type = "ultravox"
+     is_composition = False
+
+     def __init__(
+         self,
+         audio_config: Optional[Dict[str, Any]] = None,
+         text_config: Optional[Dict[str, Any]] = None,
+         audio_model_id: Optional[str] = None,
+         text_model_id: Optional[str] = None,
+         ignore_index: int = -100,
+         hidden_size: int = 4096,
+         stack_factor: int = 8,
+         norm_init: float = 0.4,
+         projector_act: str = "swiglu",
+         last_layer_norm: bool = True,
+         text_model_lora_config: Optional[LoraConfigSimplified] = None,
+         audio_model_lora_config: Optional[LoraConfigSimplified] = None,
+         audio_latency_block_size: Optional[int] = None,
+         **kwargs,
+     ):
+         self.ignore_index = ignore_index
+
+         self.audio_model_id = audio_model_id
+         self.text_model_id = text_model_id
+
+         self.hidden_size = hidden_size
+         self.stack_factor = stack_factor
+         self.norm_init = norm_init
+         self.projector_act = projector_act
+         self.last_layer_norm = last_layer_norm
+
+         if text_model_id is not None:
+             self.text_config: transformers.LlamaConfig = (
+                 transformers.AutoConfig.from_pretrained(text_model_id)
+             )
+         else:
+             text_config = text_config or {}
+             self.text_config = transformers.CONFIG_MAPPING[
+                 text_config.get("model_type", "llama")
+             ](**text_config)
+
+         if audio_model_id is not None:
+             self.audio_config: transformers.PretrainedConfig = (
+                 transformers.AutoConfig.from_pretrained(audio_model_id)
+             )
+         else:
+             audio_config = audio_config or {}
+             self.audio_config = transformers.CONFIG_MAPPING[
+                 audio_config.get("model_type", "wav2vec2")
+             ](**audio_config)
+
+         self.text_model_lora_config = (
+             text_model_lora_config
+             if isinstance(text_model_lora_config, dict)
+             else dataclasses.asdict(text_model_lora_config or LoraConfigSimplified())
+         )
+         self.audio_model_lora_config = (
+             audio_model_lora_config
+             if isinstance(audio_model_lora_config, dict)
+             else dataclasses.asdict(audio_model_lora_config or LoraConfigSimplified())
+         )
+         self.audio_latency_block_size = audio_latency_block_size
+
+         self.vocab_size = self.text_config.vocab_size
+
+         self.initializer_range = self.text_config.initializer_range
+
+         super().__init__(**kwargs)
+
+     def to_diff_dict(self) -> Dict[str, Any]:
+         diff_dict = super().to_diff_dict()
+
+         # remove text_config and audio_config if text_model_id and audio_model_id are present
+         if self.text_model_id is not None:
+             diff_dict.pop("text_config", None)
+         if self.audio_model_id is not None:
+             diff_dict.pop("audio_config", None)
+
+         return diff_dict
ultravox_model.py ADDED
@@ -0,0 +1,739 @@
+ import logging
+ import re
+ from typing import Any, Dict, Optional, Set, Tuple, Union
+
+ import peft
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import transformers
+ import transformers.activations
+ import transformers.modeling_outputs
+ import transformers.models
+ from transformers.models.whisper import modeling_whisper as whisper
+
+ # We must use relative import in this directory to allow uploading to HF Hub
+ # Even "from . import X" pattern doesn't work (undocumented and unclear why)
+ from .ultravox_config import LossConfig
+ from .ultravox_config import LossFunction
+ from .ultravox_config import UltravoxConfig
+
+
+ class UltravoxModel(transformers.LlamaPreTrainedModel):
+     """
+     The Ultravox model which consists of an audio encoder and a language model.
+
+     Audio input is processed by the audio encoder, then every `stack_factor` frames are stacked together and
+     projected to the language model's embedding space using a few linear layers.
+     The text is embedded by the language model as usual and then the audio and text embeddings are merged together.
+
+     A special token `<|audio|>` is used to indicate the start of the audio embeddings in the merged embeddings.
+
+     Parameters:
+         config: Model configuration class with all the parameters of the model.
+     """
+
+     config_class = UltravoxConfig
+     config: UltravoxConfig  # for type hinting
+     # Usually we load encoder and LLM weights from a pretrained model separately, so they are allowed to be missing
+     _keys_to_ignore_on_load_missing = ["audio_tower.*", "language_model.*"]
+
+     def __init__(self, config: UltravoxConfig):
+         super().__init__(config)
+         self._register_load_state_dict_pre_hook(self._pre_load_state_dict_hook)
+
+         self.keep_params: Set[str] = set()
+         self.vocab_size = config.vocab_size
+
+         self.audio_tower = self._create_audio_tower(config)
+         self.multi_modal_projector = self._create_multi_modal_projector(config)
+         self.language_model = self._create_language_model(config)
+
+         # Determine no_split_modules dynamically to use with FSDP auto_wrap policy.
+         # FSDP throws an error if some of the layer types are not found in the model.
+         # This would be something like ["LlamaDecoderLayer", "WhisperEncoderLayer"]
+         self._no_split_modules = (self.language_model._no_split_modules or []) + (
+             self.audio_tower._no_split_modules or []
+         )
+
+         self.loss_config = LossConfig()
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.language_model.get_input_embeddings()
+
+     def set_input_embeddings(self, value):
+         self.language_model.set_input_embeddings(value)
+
+     def get_output_embeddings(self):
+         return self.language_model.get_output_embeddings()
+
+     def set_output_embeddings(self, new_embeddings):
+         self.language_model.set_output_embeddings(new_embeddings)
+
+     def set_decoder(self, decoder):
+         self.language_model.set_decoder(decoder)
+
+     def get_decoder(self):
+         return self.language_model.get_decoder()
+
+     def tie_weights(self):
+         return self.language_model.tie_weights()
+
+     def set_loss_config(self, loss_config: LossConfig):
+         self.loss_config = loss_config
+
+     def _setup_cache(
+         self, cache_cls, max_batch_size: int, max_cache_len: Optional[int] = None
+     ):
+         self.language_model._setup_cache(cache_cls, max_batch_size, max_cache_len)
+
+     def _reorder_cache(self, past_key_values, beam_idx):
+         return self.language_model._reorder_cache(past_key_values, beam_idx)
+
+     def resize_token_embeddings(
+         self,
+         new_num_tokens: Optional[int] = None,
+         pad_to_multiple_of: Optional[int] = None,
+     ) -> nn.Embedding:
+         model_embeds = self.language_model.resize_token_embeddings(
+             new_num_tokens, pad_to_multiple_of
+         )
+         # update vocab size
+         self.config.text_config.vocab_size = model_embeds.num_embeddings
+         self.config.vocab_size = model_embeds.num_embeddings
+         self.vocab_size = model_embeds.num_embeddings
+         return model_embeds
+
+     def _compute_kl_loss(
+         self,
+         lm_output: transformers.modeling_outputs.CausalLMOutputWithPast,
+         labels: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Tuple, transformers.cache_utils.Cache]] = None,
+         alt_input_ids: Optional[torch.Tensor] = None,
+         alt_attention_mask: Optional[torch.Tensor] = None,
+         alt_labels: Optional[torch.Tensor] = None,
+         **kwargs,
+     ):
+         # disable gradient computation for the teacher model
+         with torch.no_grad():
+             # compute the teacher (text-only) model's distribution
+             alt_inputs_embeds = self.get_input_embeddings().forward(alt_input_ids)
+             alt_lm_output = self.language_model.forward(
+                 inputs_embeds=alt_inputs_embeds,
+                 labels=alt_labels,
+                 attention_mask=alt_attention_mask,
+                 past_key_values=past_key_values,
+                 **kwargs,
+             )
+         # compute the KL divergence loss between the two models
+         kl_loss = F.kl_div(
+             F.log_softmax(
+                 lm_output.logits[labels != -100] / self.loss_config.kl_temperature,
+                 dim=-1,
+             ),
+             F.softmax(
+                 alt_lm_output.logits[alt_labels != -100]
+                 / self.loss_config.kl_temperature,
+                 dim=-1,
+             ),
+             reduction="batchmean",
+         )
+         return {"loss": kl_loss}
+
+     def forward(
+         self,
+         input_ids: torch.Tensor,
+         audio_values: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         audio_token_start_idx: Optional[torch.Tensor] = None,
+         audio_len: Optional[torch.Tensor] = None,
+         audio_token_len: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Tuple, transformers.cache_utils.Cache]] = None,
+         # the alt_* fields are needed for KL divergence loss
+         alt_input_ids: Optional[torch.Tensor] = None,
+         alt_attention_mask: Optional[torch.Tensor] = None,
+         alt_labels: Optional[torch.Tensor] = None,
+         **kwargs,
+     ) -> Union[Tuple, transformers.modeling_outputs.CausalLMOutputWithPast]:
+         """
+         Forward pass for the Ultravox model.
+
+         `input_ids` are the tokenized text input. They are embedded by the language model as usual.
+         `audio_values` are processed by the audio encoder and then every `stack_factor` frames are stacked together and
+         projected to the language model's embedding space using a few linear layers.
+         The audio and text embeddings are merged together. A special token `<|audio|>` is used to indicate the start
+         of the audio embeddings in the merged embeddings.
+
+         Args:
+             input_ids: The tokenized text input.
+             audio_values: The processed audio values.
+             inputs_embeds: The embeddings for the input tokens.
+             labels: The tokenized text labels.
+             attention_mask: The attention mask for the input.
+             position_ids: The position ids for the input.
+             past_key_values: The past key value cache for the language model attention layers.
+             **kwargs: Additional keyword arguments. Passed directly to the language model.
+         """
+         if inputs_embeds is None:
+             # B x T -> B x T x D
+             inputs_embeds = self.get_input_embeddings().forward(input_ids)
+
+         if audio_values is not None:
+             assert (
+                 audio_token_start_idx is not None and audio_token_len is not None
+             ), "audio_token_start_idx and audio_token_len must be provided if audio_values are provided."
+             assert (
+                 len(audio_token_start_idx) == len(audio_token_len) == len(audio_values)
+             ), "audio_token_start_idx, audio_token_len, and audio_values must have the same batch size."
+
+             # B x A/3200 x D
+             audio_tower_output = self.audio_tower.forward(
+                 audio_values.to(self.audio_tower.dtype),
+                 audio_len=audio_len,
+             ).last_hidden_state
+             audio_tower_output = audio_tower_output.to(inputs_embeds.dtype)
+
+             audio_embeds = self.multi_modal_projector.forward(audio_tower_output)
+
+             # combine audio and text embeddings
+             for i, (audio, start, length) in enumerate(
+                 zip(audio_embeds, audio_token_start_idx, audio_token_len)
+             ):
+                 length = min(length, audio.shape[0])
+                 inputs_embeds[i, start : start + length] = audio[:length]
+
+         lm_output = self.language_model.forward(
+             inputs_embeds=inputs_embeds,
+             labels=labels,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             **kwargs,
+         )
+         if self.training:
+             if self.loss_config.loss_function == LossFunction.CrossEntropy:
+                 return lm_output
+             elif self.loss_config.loss_function == LossFunction.KL_Divergence:
+                 return self._compute_kl_loss(
+                     lm_output=lm_output,
+                     labels=labels,
+                     past_key_values=past_key_values,
+                     alt_input_ids=alt_input_ids,
+                     alt_attention_mask=alt_attention_mask,
+                     alt_labels=alt_labels,
+                     **kwargs,
+                 )
+             else:
+                 raise ValueError(
+                     f"Unsupported loss function: {self.loss_config.loss_function}"
+                 )
+         else:
+             return lm_output
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.Tensor,
+         audio_values: Optional[torch.FloatTensor] = None,
+         audio_token_start_idx: Optional[torch.Tensor] = None,
+         audio_token_len: Optional[torch.Tensor] = None,
+         audio_len: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Tuple, transformers.cache_utils.Cache]] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         cache_position: Optional[torch.Tensor] = None,
+         **kwargs,
+     ) -> Dict[str, Any]:
+         model_input = self.language_model.prepare_inputs_for_generation(
+             input_ids=input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         # include audio information in model_input only when it is needed during prefilling
+         # audio_token_start_idx should always be relative to the current cache position
+         prefill_start_idx = 0 if cache_position is None else cache_position[0]
+         if (
+             audio_values is not None
+             and audio_token_start_idx is not None
+             and prefill_start_idx <= torch.max(audio_token_start_idx)
+         ):
+             model_input["audio_values"] = audio_values
+             model_input["audio_token_start_idx"] = (
+                 audio_token_start_idx - prefill_start_idx
+             )
+             model_input["audio_token_len"] = audio_token_len
+             model_input["audio_len"] = audio_len
+
+         return model_input
+
+     @classmethod
+     def _create_multi_modal_projector(
+         cls, config: UltravoxConfig
+     ) -> "UltravoxProjector":
+         projector = UltravoxProjector(config)
+         projector.to(config.torch_dtype)
+         return projector
+
+     @classmethod
+     def _create_audio_tower(
+         cls, config: UltravoxConfig
+     ) -> Union[transformers.Wav2Vec2Model, "ModifiedWhisperEncoder"]:
+         if config.audio_model_id is not None:
+             if "whisper" in config.audio_model_id.lower():
+                 audio_tower = ModifiedWhisperEncoder.from_pretrained(
+                     config.audio_model_id, torch_dtype=config.torch_dtype
+                 )
+                 audio_tower.init_latency_mask(
+                     config.audio_latency_block_size, dtype=config.torch_dtype
+                 )
+             else:
+                 assert config.audio_latency_block_size in (
+                     None,
+                     0,
+                 ), "only whisper audio tower supports audio latency masking, got non-zero value for 'audio_latency_block_size'"
+                 audio_tower = transformers.AutoModel.from_pretrained(
+                     config.audio_model_id, torch_dtype=config.torch_dtype
+                 )
+         else:
+             if "whisper" in config.audio_config._name_or_path.lower():
+                 audio_tower = ModifiedWhisperEncoder(config.audio_config)
+                 audio_tower.init_latency_mask(
+                     config.audio_latency_block_size, dtype=config.torch_dtype
+                 )
+             else:
+                 assert config.audio_latency_block_size in (
+                     None,
+                     0,
+                 ), "only whisper audio tower supports audio latency masking, got non-zero value for 'audio_latency_block_size'"
+                 with transformers.modeling_utils.no_init_weights():
+                     # we only ever use from_config if the weights are to be retrained, so initialization is not
+                     # required. This makes model creation much faster, since init on CPU is quite slow.
+                     audio_tower = transformers.AutoModel.from_config(
+                         config.audio_config
+                     )
+
+         if isinstance(
+             audio_tower,
+             (transformers.Wav2Vec2BertModel, transformers.WhisperModel),
+         ):
+             # For these models we only need the encoder part
+             # Wav2Vec2BertModel -> Wav2Vec2BertEncoder
+             # WhisperModel -> WhisperEncoder
+             audio_tower = audio_tower.encoder
+
+         audio_tower = apply_lora(audio_tower, config.audio_model_lora_config)
+         return audio_tower
+
+     @classmethod
+     def _create_language_model(
+         cls, config: UltravoxConfig
+     ) -> transformers.LlamaForCausalLM:
+         if config.text_model_id is not None:
+             language_model = transformers.AutoModelForCausalLM.from_pretrained(
+                 config.text_model_id,
+                 attn_implementation=config._attn_implementation,
+                 torch_dtype=config.torch_dtype,
+             )
+         else:
+             with transformers.modeling_utils.no_init_weights():
+                 # we only ever use from_config if the weights are to be retrained, so initialization is not
+                 # required. This makes model creation much faster, since init on CPU is quite slow.
+                 language_model = transformers.AutoModelForCausalLM.from_config(
+                     config.text_config,
+                     attn_implementation=config._attn_implementation,
+                     torch_dtype=config.torch_dtype,
+                 )
+
+         language_model = apply_lora(language_model, config.text_model_lora_config)
+         return language_model
+
+     def merge_and_unload(self):
+         if isinstance(self.language_model, peft.PeftModel):
+             self.language_model = self.language_model.merge_and_unload()
+             # no need to download base language model weights anymore, so we can remove the id
+             self.config.text_model_id = None
+             self.keep_params.update(
+                 set(
+                     [
+                         f"language_model.{name}"
+                         for name, _ in self.language_model.named_parameters()
+                     ]
+                 )
+             )
+
+         if isinstance(self.audio_tower, peft.PeftModel):
+             self.audio_tower = self.audio_tower.merge_and_unload()
+             # no need to download base audio model weights anymore, so we can remove the id
+             self.config.audio_model_id = None
+             self.keep_params.update(
+                 set(
+                     [
+                         f"audio_tower.{name}"
+                         for name, _ in self.audio_tower.named_parameters()
+                     ]
+                 )
+             )
+
+         for param in ["text_model_lora_config", "audio_model_lora_config"]:
+             if hasattr(self.config, param):
+                 delattr(self.config, param)
+
+     def push_to_hub(self, *args, **kwargs):
+         self.merge_and_unload()
+         return super().push_to_hub(*args, **kwargs)
+
+     def save_pretrained(
+         self, *args, state_dict: Optional[Dict[str, Any]] = None, **kwargs
+     ):
+         if state_dict is None:
+             state_dict = super().state_dict()
+
+         named_params = dict(self.named_parameters())
+
+         state_dict = {
+             k: v
+             for k, v in state_dict.items()
+             if k in self.keep_params
+             or (k in named_params and named_params[k].requires_grad)
+         }
+
+         super().save_pretrained(*args, state_dict=state_dict, **kwargs)
+
+     def _pre_load_state_dict_hook(self, state_dict: Dict[str, Any], *args, **kwargs):
+         self.keep_params.update(set(state_dict.keys()))
+
+     def print_trainable_parameters(self):
+         """
+         Prints the number of trainable parameters in the model (reuses Peft model's method)
+         """
+         count_params = peft.peft_model.PeftModel.get_nb_trainable_parameters
+
+         trainable_params, all_param = count_params(self)
+
+         logging.info(
+             f"trainable params: {trainable_params:,d} || all params: {all_param:,d}"
+             f" || trainable%: {100 * trainable_params / all_param:.1f}%"
+         )
+
+         lm_trainable_params, lm_all_params = count_params(self.language_model)
+         audio_trainable_params, audio_all_params = count_params(self.audio_tower)
+
+         projector_trainable_params = (
+             trainable_params - lm_trainable_params - audio_trainable_params
+         )
+         projector_all_params = all_param - lm_all_params - audio_all_params
+
+         logging.info(
+             f"Trainable%: "
+             f" LLM: {100 * lm_trainable_params / lm_all_params:.1f}%"
+             f" || Audio Encoder: {100 * audio_trainable_params / audio_all_params:.1f}%"
+             f" || Projector: {100 * projector_trainable_params / projector_all_params:.1f}%"
+         )
+
+
+ # TODO: refactor common parts to a shared module
+ def is_cache_empty(
+     past_key_values: Optional[Union[Tuple, transformers.cache_utils.Cache]]
+ ) -> bool:
+     """
+     Check if the cache is empty.
+     """
+     if past_key_values is None:
+         return True
+     if isinstance(past_key_values, tuple):
+         return all(len(c) == 0 for c in past_key_values)
+     return past_key_values.get_seq_length() == 0
+
+
+ def apply_lora(model: torch.nn.Module, lora_config: dict) -> torch.nn.Module:
+     """
+     Applies LoRA finetuning to the model. If the `r` parameter is set to 0, the model is frozen instead.
+     """
+     unfreeze_layers = lora_config.pop("unfreeze_layers", None)
+     lora_config = peft.LoraConfig(**lora_config or {})
+
+     if lora_config.r == 0:
+         # freeze the model entirely, except for the specified layers
+         for name, param in model.named_parameters():
+             if not unfreeze_layers or not any(
+                 re.match(layer, name) for layer in unfreeze_layers
+             ):
+                 param.requires_grad = False
+             else:
+                 logging.info(f"Unfreezing layer: {name} with #{param.numel()} params")
+     else:
+         model = peft.get_peft_model(model, lora_config)
+
+     return model
+
+
+ class StackAudioFrames(nn.Module):
+     """
+     Stack the audio embedding frames to reduce the sequence length by a factor of `stack_factor`.
+
+     The number of output frames will be `ceil(T / stack_factor) + 1` where `T` is the number of input frames.
+     NOTE: the extra +1 is intentional: in case the number of audio tokens is over-estimated by the processor,
+     we want to make sure `processor.audio_token_replacement` (i.e. EOS) doesn't get leaked into the middle of embeddings.
+     In most cases this extra padding will get removed in the model's forward function so it has no effect.
+     """
+
+     def __init__(self, stack_factor: int = 8):
+         super().__init__()
+         self.stack_factor = stack_factor
+
+     def forward(self, audio_embeds: torch.Tensor) -> torch.Tensor:
+         B, T, C = audio_embeds.shape
+         T_pad = (T + self.stack_factor - 1) // self.stack_factor * self.stack_factor
+         audio_embeds = F.pad(audio_embeds, (0, 0, 0, T_pad - T + self.stack_factor))
+         B, T, C = audio_embeds.shape
+         audio_embeds = audio_embeds.view(
+             B, T // self.stack_factor, C * self.stack_factor
+         )
+         return audio_embeds
+
+
+ class RMSNorm(transformers.models.llama.modeling_llama.LlamaRMSNorm):
+     def __init__(self, hidden_size: int, init: float = 1, eps: float = 1e-6):
+         super().__init__(hidden_size=hidden_size, eps=eps)
+         self.weight.data.fill_(init)
+
+
+ class SwiGLU(nn.Module):
+     def forward(self, x):
+         x, gate = x.chunk(2, dim=-1)
+         return F.silu(gate) * x
+
+
+ class UltravoxProjector(nn.Module):
+     def __init__(self, config: UltravoxConfig):
+         super().__init__()
+         self.hidden_dim = config.hidden_size
+         self._pad_and_stack = StackAudioFrames(config.stack_factor)
+         dim = config.audio_config.hidden_size * config.stack_factor
+         self.ln_pre = RMSNorm(dim, init=config.norm_init)
+         self.linear_1 = nn.Linear(dim, self.hidden_dim, bias=False)
+         dim = self.hidden_dim
+         self.act = transformers.activations.get_activation(config.projector_act)
+         dim = dim // 2 if config.projector_act == "swiglu" else dim
+         dim_out = config.text_config.hidden_size
+         self.linear_2 = nn.Linear(dim, dim_out, bias=False)
+         self.ln_post: Union[RMSNorm, nn.Identity] = (
+             RMSNorm(dim_out, init=config.norm_init)
+             if config.last_layer_norm
+             else nn.Identity()
+         )
+
+     def forward(self, audio_features: torch.Tensor) -> torch.Tensor:
+         audio_features = self._pad_and_stack(audio_features)
+         audio_features = self.ln_pre(audio_features)
+         hidden_states = self.linear_1(audio_features)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.linear_2(hidden_states)
+         hidden_states = self.ln_post(hidden_states)
+         return hidden_states
+
+
+ class ModifiedWhisperEncoder(
+     whisper.WhisperEncoder, transformers.modeling_utils.ModuleUtilsMixin
+ ):
+     """
+     Encoder portion of OpenAI's Whisper model.
+
+     This implementation is a slightly modified version of HF Transformers' Whisper Encoder, with only a few fixes:
+     1. base_model_prefix updated to allow for doing `.from_pretrained` directly on the encoder
+     2. allow less than 30 seconds of audio padding to be passed in:
+         - relaxed ValueError check for `input_features` length to be less than or equal to `expected_seq_length` instead of strictly equal
+         - embed_pos is now sliced to match the length of `inputs_embeds`
+
+     Original: https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py
+     """
+
+     base_model_prefix = "model.encoder"
+     _no_split_modules = ["WhisperEncoderLayer"]
+
+     def __init__(self, config: transformers.WhisperConfig):
+         super().__init__(config)
+         self.config.is_decoder = False
+
+     def init_latency_mask(self, audio_latency_block_size: int, dtype: torch.dtype):
+         if audio_latency_block_size is None:
+             self.audio_streaming_mask = None
+             return
+
+         # maximum sequence length
+         max_seqlen = (
+             self.config.max_source_positions
+             * self.conv1.stride[0]
+             * self.conv2.stride[0]
+         )
+         assert (
+             max_seqlen > 0
+         ), f"maximum sequence length must be positive, got {max_seqlen}"
+         assert (
+             max_seqlen % audio_latency_block_size == 0
+         ), f"audio_latency_block_size {audio_latency_block_size} must divide {max_seqlen} evenly."
+         # Given the block size, we calculate number of blocks.
+         audio_latency_nblocks = max_seqlen // audio_latency_block_size
+         audio_streaming_mask = (
+             torch.tril(
+                 torch.ones(audio_latency_nblocks, audio_latency_nblocks),
+                 diagonal=0,
+             )
+             .repeat_interleave(audio_latency_block_size, dim=0)
+             .repeat_interleave(audio_latency_block_size, dim=1)
+         )
+         audio_streaming_mask = (1.0 - audio_streaming_mask) * torch.finfo(dtype).min
+         audio_streaming_mask = audio_streaming_mask[None, None, :, :]
+         self.register_buffer(
+             "audio_streaming_mask", audio_streaming_mask, persistent=False
+         )
+
+     def forward(
+         self,
+         input_features,
+         audio_len=None,
+         head_mask=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         expected_seq_length = (
+             self.config.max_source_positions
+             * self.conv1.stride[0]
+             * self.conv2.stride[0]
+         )
+         if input_features.shape[-1] > expected_seq_length:
+             raise ValueError(
+                 f"Whisper expects the mel input features to be of length {expected_seq_length} or less, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}."
+             )
+
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+         return_dict = (
+             return_dict if return_dict is not None else self.config.use_return_dict
+         )
+         inputs_embeds = nn.functional.gelu(self.conv1(input_features))
+         inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
+
+         inputs_embeds = inputs_embeds.permute(0, 2, 1)
+         embed_pos = self.embed_positions.weight[: inputs_embeds.size(-2)]
+
+         hidden_states = inputs_embeds + embed_pos
+         hidden_states = nn.functional.dropout(
+             hidden_states, p=self.dropout, training=self.training
+         )
+
+         encoder_states = () if output_hidden_states else None
+         all_attentions = () if output_attentions else None
+
+         # Create attention mask based on audio lengths to mask out padding tokens
+         # For each sample in batch:
+         # - Convert raw audio length to feature length after convolutions
+         # - Create boolean mask that is True for valid positions and False for padding
+         # - Convert to extended attention mask format expected by transformer layers
+         # (1.0 for positions to attend to, large negative for positions to ignore)
+         # This masking ensures consistent behavior between training and inference
+         # by preventing the model from attending to padding tokens in both cases
+         attention_mask = None
+         if audio_len is not None:
+             audio_feature_len = self._get_feat_extract_output_lengths(audio_len)
+             max_seq_len = hidden_states.shape[1]
+             attention_mask = torch.arange(max_seq_len, device=hidden_states.device)[
+                 None, :
+             ].lt(audio_feature_len.view(-1, 1))
+             attention_mask = self.get_extended_attention_mask(
+                 attention_mask,
+                 None,
+                 device=hidden_states.device,
+                 dtype=hidden_states.dtype,
+             )
+
+         if self.audio_streaming_mask is not None:
+             seqlen = hidden_states.size(-2)
+             if attention_mask is not None:
+                 attention_mask = torch.minimum(
+                     self.audio_streaming_mask[:, :, :seqlen, :seqlen], attention_mask
+                 )  # merge
+             else:
+                 attention_mask = self.audio_streaming_mask[:, :, :seqlen, :seqlen]
+             attention_mask = attention_mask.to(hidden_states.dtype)
+
+         # check if head_mask has a correct number of layers specified if desired
+         if head_mask is not None:
+             assert head_mask.size()[0] == (
+                 len(self.layers)
+             ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+
+         for idx, encoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 encoder_states = encoder_states + (hidden_states,)
+             # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+             to_drop = False
+             if self.training:
+                 dropout_probability = torch.rand([])
+                 if dropout_probability < self.layerdrop:  # skip the layer
+                     to_drop = True
+
+             if to_drop:
+                 layer_outputs = (None, None)
+             else:
+                 if self.gradient_checkpointing and self.training:
+                     layer_outputs = self._gradient_checkpointing_func(
+                         encoder_layer.__call__,
+                         hidden_states,
+                         attention_mask,
+                         (head_mask[idx] if head_mask is not None else None),
+                         output_attentions,
+                     )
+                 else:
+                     layer_outputs = encoder_layer(
+                         hidden_states,
+                         attention_mask,
+                         layer_head_mask=(
+                             head_mask[idx] if head_mask is not None else None
+                         ),
+                         output_attentions=output_attentions,
+                     )
+
+                 hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_attentions = all_attentions + (layer_outputs[1],)
+
+         hidden_states = self.layer_norm(hidden_states)
+         if output_hidden_states:
+             encoder_states = encoder_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(
+                 v
+                 for v in [hidden_states, encoder_states, all_attentions]
+                 if v is not None
+             )
+         return transformers.modeling_outputs.BaseModelOutput(
+             last_hidden_state=hidden_states,
+             hidden_states=encoder_states,
+             attentions=all_attentions,
+         )
+
+
+ UltravoxConfig.register_for_auto_class()
+ UltravoxModel.register_for_auto_class()
+
+ transformers.AutoConfig.register("ultravox", UltravoxConfig)
+ transformers.AutoModel.register(UltravoxConfig, UltravoxModel)
+
+ transformers.activations.ACT2FN["swiglu"] = SwiGLU