Columns and observed string-length ranges:
method_name: 3 to 45 characters
method_body: 9 to 6.25k characters
full_code: 35 to 7.02k characters
docstring: 18 to 4.7k characters (null where the method has no docstring)
get_cumulative_logprob
return self.data.cumulative_logprob
def get_cumulative_logprob(self) ->float: return self.data.cumulative_logprob
null
forward
intermediate_parallel, _ = self.dense_h_to_4h(hidden_states) intermediate_parallel = self.activation_func(intermediate_parallel) output, _ = self.dense_4h_to_h(intermediate_parallel) return output
def forward(self, hidden_states): intermediate_parallel, _ = self.dense_h_to_4h(hidden_states) intermediate_parallel = self.activation_func(intermediate_parallel) output, _ = self.dense_4h_to_h(intermediate_parallel) return output
null
get_supported_act_dtypes
return [torch.half]
def get_supported_act_dtypes(self) ->List[torch.dtype]: return [torch.half]
null
from_sampling_metadata
prompt_tokens: List[List[int]] = [] output_tokens: List[List[int]] = [] top_ks: List[int] = [] temperatures: List[float] = [] top_ps: List[float] = [] min_ps: List[float] = [] presence_penalties: List[float] = [] frequency_penalties: List[float] = [] repetition_penalties: List[float] = [] do_penalties = False do_top_p_top_k = False do_min_p = False for i, seq_group in enumerate(sampling_metadata.seq_groups): seq_ids, sampling_params = seq_group temperature = sampling_params.temperature p = sampling_params.presence_penalty f = sampling_params.frequency_penalty r = sampling_params.repetition_penalty top_p = sampling_params.top_p min_p = sampling_params.min_p top_k = min(sampling_params.top_k, vocab_size) top_k = vocab_size if top_k == -1 else top_k if temperature < _SAMPLING_EPS: temperature = 1.0 if not do_top_p_top_k and (top_p < 1.0 - _SAMPLING_EPS or top_k != vocab_size): do_top_p_top_k = True if not do_min_p and min_p > _SAMPLING_EPS: do_min_p = True if not do_penalties and (abs(p) >= _SAMPLING_EPS or abs(f) >= _SAMPLING_EPS or abs(r - 1.0) >= _SAMPLING_EPS): do_penalties = True if (i < sampling_metadata.num_prompts and sampling_params. prompt_logprobs is not None): prompt_len = sampling_metadata.prompt_lens[i] temperatures += [temperature] * (prompt_len - 1) top_ps += [top_p] * (prompt_len - 1) top_ks += [top_k] * (prompt_len - 1) min_ps += [min_p] * (prompt_len - 1) presence_penalties += [0] * (prompt_len - 1) frequency_penalties += [0] * (prompt_len - 1) repetition_penalties += [1] * (prompt_len - 1) prompt_tokens.extend([] for _ in range(prompt_len - 1)) output_tokens.extend([] for _ in range(prompt_len - 1)) for seq_id in seq_ids: seq_data = sampling_metadata.seq_data[seq_id] prompt_tokens.append(seq_data.prompt_token_ids) output_tokens.append(seq_data.output_token_ids) temperatures += [temperature] * len(seq_ids) top_ps += [top_p] * len(seq_ids) top_ks += [top_k] * len(seq_ids) min_ps += [min_p] * len(seq_ids) presence_penalties += [p] * len(seq_ids) frequency_penalties += [f] * len(seq_ids) repetition_penalties += [r] * len(seq_ids) sampling_tensors = SamplingTensors.from_lists(temperatures, top_ps, top_ks, min_ps, presence_penalties, frequency_penalties, repetition_penalties, prompt_tokens, output_tokens, vocab_size, device, dtype) return sampling_tensors, do_penalties, do_top_p_top_k, do_min_p
@classmethod def from_sampling_metadata(cls, sampling_metadata: 'SamplingMetadata', vocab_size: int, device: torch.device, dtype: torch.dtype) ->Tuple[ 'SamplingTensors', bool, bool, bool]: prompt_tokens: List[List[int]] = [] output_tokens: List[List[int]] = [] top_ks: List[int] = [] temperatures: List[float] = [] top_ps: List[float] = [] min_ps: List[float] = [] presence_penalties: List[float] = [] frequency_penalties: List[float] = [] repetition_penalties: List[float] = [] do_penalties = False do_top_p_top_k = False do_min_p = False for i, seq_group in enumerate(sampling_metadata.seq_groups): seq_ids, sampling_params = seq_group temperature = sampling_params.temperature p = sampling_params.presence_penalty f = sampling_params.frequency_penalty r = sampling_params.repetition_penalty top_p = sampling_params.top_p min_p = sampling_params.min_p top_k = min(sampling_params.top_k, vocab_size) top_k = vocab_size if top_k == -1 else top_k if temperature < _SAMPLING_EPS: temperature = 1.0 if not do_top_p_top_k and (top_p < 1.0 - _SAMPLING_EPS or top_k != vocab_size): do_top_p_top_k = True if not do_min_p and min_p > _SAMPLING_EPS: do_min_p = True if not do_penalties and (abs(p) >= _SAMPLING_EPS or abs(f) >= _SAMPLING_EPS or abs(r - 1.0) >= _SAMPLING_EPS): do_penalties = True if (i < sampling_metadata.num_prompts and sampling_params. prompt_logprobs is not None): prompt_len = sampling_metadata.prompt_lens[i] temperatures += [temperature] * (prompt_len - 1) top_ps += [top_p] * (prompt_len - 1) top_ks += [top_k] * (prompt_len - 1) min_ps += [min_p] * (prompt_len - 1) presence_penalties += [0] * (prompt_len - 1) frequency_penalties += [0] * (prompt_len - 1) repetition_penalties += [1] * (prompt_len - 1) prompt_tokens.extend([] for _ in range(prompt_len - 1)) output_tokens.extend([] for _ in range(prompt_len - 1)) for seq_id in seq_ids: seq_data = sampling_metadata.seq_data[seq_id] prompt_tokens.append(seq_data.prompt_token_ids) output_tokens.append(seq_data.output_token_ids) temperatures += [temperature] * len(seq_ids) top_ps += [top_p] * len(seq_ids) top_ks += [top_k] * len(seq_ids) min_ps += [min_p] * len(seq_ids) presence_penalties += [p] * len(seq_ids) frequency_penalties += [f] * len(seq_ids) repetition_penalties += [r] * len(seq_ids) sampling_tensors = SamplingTensors.from_lists(temperatures, top_ps, top_ks, min_ps, presence_penalties, frequency_penalties, repetition_penalties, prompt_tokens, output_tokens, vocab_size, device, dtype) return sampling_tensors, do_penalties, do_top_p_top_k, do_min_p
null
__init__
super().__init__() self.config = config assert config.tie_word_embeddings self.linear_method = linear_method self.transformer = MPTModel(config, linear_method) self.lm_head_weight = self.transformer.wte.weight self.sampler = Sampler(config.vocab_size)
def __init__(self, config: MPTConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config assert config.tie_word_embeddings self.linear_method = linear_method self.transformer = MPTModel(config, linear_method) self.lm_head_weight = self.transformer.wte.weight self.sampler = Sampler(config.vocab_size)
null
generate
outputs: List[Tuple[List[int], str]] = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output_ids = self.model.generate(input_ids.cuda(), use_cache=True, **kwargs ) output_str = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) output_ids = output_ids.cpu().tolist() outputs.append((output_ids, output_str)) return outputs
def generate(self, prompts: List[str], **kwargs) ->List[Tuple[List[int], str]]: outputs: List[Tuple[List[int], str]] = [] for prompt in prompts: input_ids = self.tokenizer(prompt, return_tensors='pt').input_ids output_ids = self.model.generate(input_ids.cuda(), use_cache=True, **kwargs) output_str = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) output_ids = output_ids.cpu().tolist() outputs.append((output_ids, output_str)) return outputs
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
generate_beam_search
beam_search_params = SamplingParams(n=beam_width, use_beam_search=True, temperature=0.0, max_tokens=max_tokens) outputs = self.generate(prompts, beam_search_params) return outputs
def generate_beam_search(self, prompts: List[str], beam_width: int, max_tokens: int) ->List[Tuple[List[int], str]]: beam_search_params = SamplingParams(n=beam_width, use_beam_search=True, temperature=0.0, max_tokens=max_tokens) outputs = self.generate(prompts, beam_search_params) return outputs
null
get_scaled_act_names
return ['gelu', 'gelu_fast', 'gelu_new', 'gelu_pytorch_tanh']
def get_scaled_act_names(self) ->List[str]: return ['gelu', 'gelu_fast', 'gelu_new', 'gelu_pytorch_tanh']
null
get_act_fn
"""Get an activation function by name.""" act_fn_name = act_fn_name.lower() if act_fn_name not in _ACTIVATION_REGISTRY: raise ValueError(f'Activation function {act_fn_name!r} is not supported.') act_fn = _ACTIVATION_REGISTRY[act_fn_name] if quant_config is not None and act_fn_name in quant_config.get_scaled_act_names( ): if intermediate_size is None: raise ValueError( 'intermediate_size must be specified for scaled activation functions.' ) return ScaledActivation(act_fn, intermediate_size, input_is_parallel, params_dtype) return act_fn
def get_act_fn(act_fn_name: str, quant_config: Optional[QuantizationConfig] =None, intermediate_size: Optional[int]=None, input_is_parallel: bool= True, params_dtype: Optional[torch.dtype]=None) ->nn.Module: """Get an activation function by name.""" act_fn_name = act_fn_name.lower() if act_fn_name not in _ACTIVATION_REGISTRY: raise ValueError( f'Activation function {act_fn_name!r} is not supported.') act_fn = _ACTIVATION_REGISTRY[act_fn_name] if (quant_config is not None and act_fn_name in quant_config. get_scaled_act_names()): if intermediate_size is None: raise ValueError( 'intermediate_size must be specified for scaled activation functions.' ) return ScaledActivation(act_fn, intermediate_size, input_is_parallel, params_dtype) return act_fn
Get an activation function by name.
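A minimal usage sketch of the get_act_fn helper defined above. The quantization config object and the intermediate size below are illustrative placeholders, not values taken from this dataset:
# Plain activation: looked up in _ACTIVATION_REGISTRY and returned as-is.
act_fn = get_act_fn('gelu')
# Scaled activation: only taken when the quant config lists the activation in
# get_scaled_act_names(); intermediate_size must then be provided.
act_fn = get_act_fn('gelu_new',
                    quant_config=my_quant_config,  # hypothetical QuantizationConfig instance
                    intermediate_size=11008)       # illustrative MLP intermediate size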
weight_loader
param_data = param.data if self.input_is_parallel: tp_rank = get_tensor_model_parallel_rank() shard_size = param_data.shape[0] start_idx = tp_rank * shard_size loaded_weight = loaded_weight.narrow(0, start_idx, shard_size) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor): param_data = param.data if self.input_is_parallel: tp_rank = get_tensor_model_parallel_rank() shard_size = param_data.shape[0] start_idx = tp_rank * shard_size loaded_weight = loaded_weight.narrow(0, start_idx, shard_size) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
null
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias= False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias =False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
null
get_linear_method
"""Get the linear method to use for the quantized linear layer.""" raise NotImplementedError
@abstractmethod def get_linear_method(self) ->LinearMethodBase: """Get the linear method to use for the quantized linear layer.""" raise NotImplementedError
Get the linear method to use for the quantized linear layer.
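Since get_linear_method is abstract, a concrete config must override it. A minimal sketch, assuming the AWQ linear method class is named AWQLinearMethod and takes the config as its only constructor argument (consistent with the __init__(self, quant_config: AWQConfig) row later in this section):
class AWQConfig(QuantizationConfig):
    ...

    def get_linear_method(self) -> 'AWQLinearMethod':
        # Hand the quantized-matmul implementation its own config.
        return AWQLinearMethod(self)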
_read_prompts
prompts = [] with open(filename, 'r') as f: prompt = f.readline() prompts.append(prompt) return prompts
def _read_prompts(filename: str) ->List[str]: prompts = [] with open(filename, 'r') as f: prompt = f.readline() prompts.append(prompt) return prompts
null
forward
return self.decoder(input_ids, positions, kv_caches, input_metadata)
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: return self.decoder(input_ids, positions, kv_caches, input_metadata)
null
__init__
self.scaling_factor = scaling_factor super().__init__(head_size, rotary_dim, max_position_embeddings, base, is_neox_style)
def __init__(self, head_size: int, rotary_dim: int, max_position_embeddings: int, base: int, is_neox_style: bool, scaling_factor: float) ->None: self.scaling_factor = scaling_factor super().__init__(head_size, rotary_dim, max_position_embeddings, base, is_neox_style)
null
__init__
self.scheduler_config = scheduler_config self.cache_config = cache_config self.prompt_limit = min(self.scheduler_config.max_model_len, self. scheduler_config.max_num_batched_tokens) self.policy = PolicyFactory.get_policy(policy_name='fcfs') self.block_manager = BlockSpaceManager(block_size=self.cache_config. block_size, num_gpu_blocks=self.cache_config.num_gpu_blocks, num_cpu_blocks=self.cache_config.num_cpu_blocks, sliding_window=self. cache_config.sliding_window) self.waiting: List[SequenceGroup] = [] self.running: List[SequenceGroup] = [] self.swapped: List[SequenceGroup] = []
def __init__(self, scheduler_config: SchedulerConfig, cache_config: CacheConfig ) ->None: self.scheduler_config = scheduler_config self.cache_config = cache_config self.prompt_limit = min(self.scheduler_config.max_model_len, self. scheduler_config.max_num_batched_tokens) self.policy = PolicyFactory.get_policy(policy_name='fcfs') self.block_manager = BlockSpaceManager(block_size=self.cache_config. block_size, num_gpu_blocks=self.cache_config.num_gpu_blocks, num_cpu_blocks=self.cache_config.num_cpu_blocks, sliding_window= self.cache_config.sliding_window) self.waiting: List[SequenceGroup] = [] self.running: List[SequenceGroup] = [] self.swapped: List[SequenceGroup] = []
null
__init__
self.quant_config = quant_config
def __init__(self, quant_config: AWQConfig): self.quant_config = quant_config
null
test_rotary_embedding
if rotary_dim is None: rotary_dim = head_size torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style) rope = rope.to(dtype=dtype, device=gpu_id) positions = torch.randint(0, max_position, (batch_size, seq_len), device=gpu_id) query = torch.randn(batch_size, seq_len, num_heads * head_size, dtype=dtype, device=gpu_id) key = torch.randn_like(query) ref_query, ref_key = rope._forward(positions, query, key) out_query, out_key = rope.forward(positions, query, key) assert torch.allclose(out_query, ref_query, atol=1e-05, rtol=1e-05) assert torch.allclose(out_key, ref_key, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('is_neox_style', IS_NEOX_STYLE) @pytest.mark.parametrize('batch_size', BATCH_SIZES) @pytest.mark.parametrize('seq_len', SEQ_LENS) @pytest.mark.parametrize('num_heads', NUM_HEADS) @pytest.mark.parametrize('head_size', HEAD_SIZES) @pytest.mark.parametrize('rotary_dim', ROTARY_DIMS) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_rotary_embedding(is_neox_style: bool, batch_size: int, seq_len: int, num_heads: int, head_size: int, rotary_dim: Optional[int], dtype: torch.dtype, seed: int, device: int, max_position: int=8192, base: int=10000) ->None: if rotary_dim is None: rotary_dim = head_size torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style) rope = rope.to(dtype=dtype, device=gpu_id) positions = torch.randint(0, max_position, (batch_size, seq_len), device=gpu_id) query = torch.randn(batch_size, seq_len, num_heads * head_size, dtype=dtype, device=gpu_id) key = torch.randn_like(query) ref_query, ref_key = rope._forward(positions, query, key) out_query, out_key = rope.forward(positions, query, key) assert torch.allclose(out_query, ref_query, atol=1e-05, rtol=1e-05) assert torch.allclose(out_key, ref_key, atol=1e-05, rtol=1e-05)
null
__init__
self.vocab_size = vocab_size n_embed = kwargs.pop('n_embed', None) self.hidden_size = hidden_size if n_embed is None else n_embed self.n_layer = n_layer self.n_head = n_head self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.multi_query = multi_query self.n_head_kv = 1 if n_head_kv is None else n_head_kv self.alibi = alibi self.bias = bias self.parallel_attn = parallel_attn self.new_decoder_architecture = new_decoder_architecture if self.hidden_size == 8192: self.new_decoder_architecture = True super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, hidden_dropout=0.0, attention_dropout= 0.0, multi_query=True, n_head_kv=None, alibi=False, bias=False, parallel_attn=False, new_decoder_architecture=False, **kwargs) ->None: self.vocab_size = vocab_size n_embed = kwargs.pop('n_embed', None) self.hidden_size = hidden_size if n_embed is None else n_embed self.n_layer = n_layer self.n_head = n_head self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.multi_query = multi_query self.n_head_kv = 1 if n_head_kv is None else n_head_kv self.alibi = alibi self.bias = bias self.parallel_attn = parallel_attn self.new_decoder_architecture = new_decoder_architecture if self.hidden_size == 8192: self.new_decoder_architecture = True super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
null
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
null
is_empty
return not self.scheduled_seq_groups and not self.blocks_to_swap_in and not self.blocks_to_swap_out and not self.blocks_to_copy
def is_empty(self) ->bool: return (not self.scheduled_seq_groups and not self.blocks_to_swap_in and not self.blocks_to_swap_out and not self.blocks_to_copy)
null
generate_beam_search
outputs = self.generate(prompts, do_sample=False, max_new_tokens=max_tokens, num_beams=beam_width, num_return_sequences=beam_width) for i in range(len(outputs)): output_ids, output_str = outputs[i] for j in range(len(output_ids)): output_ids[j] = [x for x in output_ids[j] if x != self.tokenizer. pad_token_id] outputs[i] = output_ids, output_str return outputs
def generate_beam_search(self, prompts: List[str], beam_width: int, max_tokens: int) ->List[Tuple[List[int], str]]: outputs = self.generate(prompts, do_sample=False, max_new_tokens= max_tokens, num_beams=beam_width, num_return_sequences=beam_width) for i in range(len(outputs)): output_ids, output_str = outputs[i] for j in range(len(output_ids)): output_ids[j] = [x for x in output_ids[j] if x != self. tokenizer.pad_token_id] outputs[i] = output_ids, output_str return outputs
null
append_token_id
assert token_id in logprobs self._append_tokens_to_blocks([token_id]) self.output_logprobs.append(logprobs) self.data.append_token_id(token_id, logprobs[token_id])
def append_token_id(self, token_id: int, logprobs: Dict[int, float]) ->None: assert token_id in logprobs self._append_tokens_to_blocks([token_id]) self.output_logprobs.append(logprobs) self.data.append_token_id(token_id, logprobs[token_id])
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
weight_loader
param_data = param.data output_dim = getattr(param, 'output_dim', None) if loaded_shard_id is None: if output_dim is None: assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight) return shard_offsets = [('q', 0, self.total_num_heads * self.head_size), ('k', self.total_num_heads * self.head_size, self.total_num_kv_heads * self.head_size), ('v', (self.total_num_heads + self. total_num_kv_heads) * self.head_size, self.total_num_kv_heads * self.head_size)] packed_dim = getattr(param, 'packed_dim', None) for shard_id, shard_offset, shard_size in shard_offsets: if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor loaded_weight_shard = loaded_weight.narrow(output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) return tp_rank = get_tensor_model_parallel_rank() assert loaded_shard_id in ['q', 'k', 'v'] if output_dim is not None: if loaded_shard_id == 'q': shard_offset = 0 shard_size = self.num_heads * self.head_size elif loaded_shard_id == 'k': shard_offset = self.num_heads * self.head_size shard_size = self.num_kv_heads * self.head_size elif loaded_shard_id == 'v': shard_offset = (self.num_heads + self.num_kv_heads) * self.head_size shard_size = self.num_kv_heads * self.head_size packed_dim = getattr(param, 'packed_dim', None) if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor param_data = param_data.narrow(output_dim, shard_offset, shard_size) shard_id = tp_rank // self.num_kv_head_replicas start_idx = shard_id * shard_size loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size) else: ignore_warning = getattr(param, 'ignore_warning', False) if not ignore_warning: logger.warning( 'Loading a weight without `output_dim` attribute in QKVParallelLinear, assume the weight is the same for all partitions.' ) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor, loaded_shard_id: Optional[str]=None): param_data = param.data output_dim = getattr(param, 'output_dim', None) if loaded_shard_id is None: if output_dim is None: assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight) return shard_offsets = [('q', 0, self.total_num_heads * self.head_size), ( 'k', self.total_num_heads * self.head_size, self. total_num_kv_heads * self.head_size), ('v', (self. total_num_heads + self.total_num_kv_heads) * self.head_size, self.total_num_kv_heads * self.head_size)] packed_dim = getattr(param, 'packed_dim', None) for shard_id, shard_offset, shard_size in shard_offsets: if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor loaded_weight_shard = loaded_weight.narrow(output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) return tp_rank = get_tensor_model_parallel_rank() assert loaded_shard_id in ['q', 'k', 'v'] if output_dim is not None: if loaded_shard_id == 'q': shard_offset = 0 shard_size = self.num_heads * self.head_size elif loaded_shard_id == 'k': shard_offset = self.num_heads * self.head_size shard_size = self.num_kv_heads * self.head_size elif loaded_shard_id == 'v': shard_offset = (self.num_heads + self.num_kv_heads ) * self.head_size shard_size = self.num_kv_heads * self.head_size packed_dim = getattr(param, 'packed_dim', None) if packed_dim == output_dim: shard_size = shard_size // param.pack_factor shard_offset = shard_offset // param.pack_factor param_data = param_data.narrow(output_dim, shard_offset, shard_size) shard_id = tp_rank // self.num_kv_head_replicas start_idx = shard_id * shard_size loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size) else: ignore_warning = getattr(param, 'ignore_warning', False) if not ignore_warning: logger.warning( 'Loading a weight without `output_dim` attribute in QKVParallelLinear, assume the weight is the same for all partitions.' ) assert param_data.shape == loaded_weight.shape param_data.copy_(loaded_weight)
null
_init_cache
"""Profiles the memory usage and initializes the KV cache.""" num_blocks = self._run_workers('profile_num_available_blocks', block_size= self.cache_config.block_size, gpu_memory_utilization=self.cache_config. gpu_memory_utilization, cpu_swap_space=self.cache_config.swap_space_bytes) num_gpu_blocks = min(b[0] for b in num_blocks) num_cpu_blocks = min(b[1] for b in num_blocks) logger.info(f'# GPU blocks: {num_gpu_blocks}, # CPU blocks: {num_cpu_blocks}') if num_gpu_blocks <= 0: raise ValueError( 'No available memory for the cache blocks. Try increasing `gpu_memory_utilization` when initializing the engine.' ) max_seq_len = self.cache_config.block_size * num_gpu_blocks if self.model_config.max_model_len > max_seq_len: raise ValueError( f"The model's max seq len ({self.model_config.max_model_len}) is larger than the maximum number of tokens that can be stored in KV cache ({max_seq_len}). Try increasing `gpu_memory_utilization` or decreasing `max_model_len` when initializing the engine." ) self.cache_config.num_gpu_blocks = num_gpu_blocks self.cache_config.num_cpu_blocks = num_cpu_blocks self._run_workers('init_cache_engine', cache_config=self.cache_config) self._run_workers('warm_up_model')
def _init_cache(self) ->None: """Profiles the memory usage and initializes the KV cache.""" num_blocks = self._run_workers('profile_num_available_blocks', block_size=self.cache_config.block_size, gpu_memory_utilization= self.cache_config.gpu_memory_utilization, cpu_swap_space=self. cache_config.swap_space_bytes) num_gpu_blocks = min(b[0] for b in num_blocks) num_cpu_blocks = min(b[1] for b in num_blocks) logger.info( f'# GPU blocks: {num_gpu_blocks}, # CPU blocks: {num_cpu_blocks}') if num_gpu_blocks <= 0: raise ValueError( 'No available memory for the cache blocks. Try increasing `gpu_memory_utilization` when initializing the engine.' ) max_seq_len = self.cache_config.block_size * num_gpu_blocks if self.model_config.max_model_len > max_seq_len: raise ValueError( f"The model's max seq len ({self.model_config.max_model_len}) is larger than the maximum number of tokens that can be stored in KV cache ({max_seq_len}). Try increasing `gpu_memory_utilization` or decreasing `max_model_len` when initializing the engine." ) self.cache_config.num_gpu_blocks = num_gpu_blocks self.cache_config.num_cpu_blocks = num_cpu_blocks self._run_workers('init_cache_engine', cache_config=self.cache_config) self._run_workers('warm_up_model')
Profiles the memory usage and initializes the KV cache.
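A worked example of the capacity check inside _init_cache, with purely illustrative numbers:
block_size = 16        # tokens stored per KV-cache block
num_gpu_blocks = 512   # smallest value reported across workers by profiling
max_seq_len = block_size * num_gpu_blocks   # 8192 tokens of KV-cache capacity
# A model configured with max_model_len > 8192 would hit the ValueError above,
# so either gpu_memory_utilization must increase or max_model_len must decrease.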
sample
next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
null
_compute_cos_sin_cache
inv_freq = self._compute_inv_freq(self.base) max_len = self.max_position_embeddings * self.scaling_factor t = torch.arange(max_len, dtype=torch.float, device='cuda') t = t / self.scaling_factor freqs = torch.einsum('i,j -> ij', t, inv_freq) cos = freqs.cos() sin = freqs.sin() cache = torch.cat((cos, sin), dim=-1) return cache
def _compute_cos_sin_cache(self) ->torch.Tensor: inv_freq = self._compute_inv_freq(self.base) max_len = self.max_position_embeddings * self.scaling_factor t = torch.arange(max_len, dtype=torch.float, device='cuda') t = t / self.scaling_factor freqs = torch.einsum('i,j -> ij', t, inv_freq) cos = freqs.cos() sin = freqs.sin() cache = torch.cat((cos, sin), dim=-1) return cache
null
test_api_server
""" Run the API server and test it. We run both the server and requests in separate processes. We test that the server can handle incoming requests, including multiple requests at the same time, and that it can handle requests being cancelled without crashing. """ with Pool(32) as pool: prompts = ['warm up'] * 1 result = None while not result: try: for r in pool.map(_query_server, prompts): result = r break except requests.exceptions.ConnectionError: time.sleep(1) for result in pool.map(_query_server, prompts): assert result num_aborted_requests = requests.get('http://localhost:8000/stats').json()[ 'num_aborted_requests'] assert num_aborted_requests == 0 prompts = ['test prompt'] * 100 for result in pool.map(_query_server, prompts): assert result with Pool(32) as pool: prompts = ['canceled requests'] * 100 pool.map_async(_query_server_long, prompts) time.sleep(0.01) pool.terminate() pool.join() num_aborted_requests = requests.get('http://localhost:8000/stats').json()[ 'num_aborted_requests'] assert num_aborted_requests > 0 with Pool(32) as pool: prompts = ['test prompt after canceled'] * 100 for result in pool.map(_query_server, prompts): assert result
def test_api_server(api_server): """ Run the API server and test it. We run both the server and requests in separate processes. We test that the server can handle incoming requests, including multiple requests at the same time, and that it can handle requests being cancelled without crashing. """ with Pool(32) as pool: prompts = ['warm up'] * 1 result = None while not result: try: for r in pool.map(_query_server, prompts): result = r break except requests.exceptions.ConnectionError: time.sleep(1) for result in pool.map(_query_server, prompts): assert result num_aborted_requests = requests.get('http://localhost:8000/stats' ).json()['num_aborted_requests'] assert num_aborted_requests == 0 prompts = ['test prompt'] * 100 for result in pool.map(_query_server, prompts): assert result with Pool(32) as pool: prompts = ['canceled requests'] * 100 pool.map_async(_query_server_long, prompts) time.sleep(0.01) pool.terminate() pool.join() num_aborted_requests = requests.get('http://localhost:8000/stats' ).json()['num_aborted_requests'] assert num_aborted_requests > 0 with Pool(32) as pool: prompts = ['test prompt after canceled'] * 100 for result in pool.map(_query_server, prompts): assert result
Run the API server and test it. We run both the server and requests in separate processes. We test that the server can handle incoming requests, including multiple requests at the same time, and that it can handle requests being cancelled without crashing.
generate
self.request_id = request_id
def generate(self, request_id): self.request_id = request_id
null
get_hipcc_rocm_version
result = subprocess.run(['hipcc', '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) if result.returncode != 0: print("Error running 'hipcc --version'") return None match = re.search('HIP version: (\\S+)', result.stdout) if match: return match.group(1) else: print('Could not find HIP version in the output') return None
def get_hipcc_rocm_version(): result = subprocess.run(['hipcc', '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) if result.returncode != 0: print("Error running 'hipcc --version'") return None match = re.search('HIP version: (\\S+)', result.stdout) if match: return match.group(1) else: print('Could not find HIP version in the output') return None
null
__init__
super().__init__() self.config = config self.embed_in = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([GPTNeoXLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps)
def __init__(self, config: GPTNeoXConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.embed_in = VocabParallelEmbedding(config.vocab_size, config. hidden_size) self.layers = nn.ModuleList([GPTNeoXLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps)
null
__init__
assert dtype in _STR_DTYPE_TO_TORCH_DTYPE torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype] self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype= torch_dtype, trust_remote_code=True).cuda() if tokenizer_name is None: tokenizer_name = model_name self.tokenizer = get_tokenizer(tokenizer_name, trust_remote_code=True)
def __init__(self, model_name: str, tokenizer_name: Optional[str]=None, dtype: str='half') ->None: assert dtype in _STR_DTYPE_TO_TORCH_DTYPE torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype] self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch_dtype, trust_remote_code=True).cuda() if tokenizer_name is None: tokenizer_name = model_name self.tokenizer = get_tokenizer(tokenizer_name, trust_remote_code=True)
null
main
print(args) llm = LLM(model=args.model, tokenizer=args.tokenizer, quantization=args.quantization, tensor_parallel_size=args.tensor_parallel_size, trust_remote_code=args.trust_remote_code, dtype=args.dtype, enforce_eager=args.enforce_eager) sampling_params = SamplingParams(n=args.n, temperature=0.0 if args.use_beam_search else 1.0, top_p=1.0, use_beam_search=args.use_beam_search, ignore_eos=True, max_tokens=args.output_len) print(sampling_params) dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size def run_to_completion(profile_dir: Optional[str]=None): if profile_dir: with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], on_trace_ready=torch.profiler.tensorboard_trace_handler(str(profile_dir))) as p: llm.generate(prompt_token_ids=dummy_prompt_token_ids, sampling_params=sampling_params, use_tqdm=False) print(p.key_averages()) else: start_time = time.perf_counter() llm.generate(prompt_token_ids=dummy_prompt_token_ids, sampling_params=sampling_params, use_tqdm=False) end_time = time.perf_counter() latency = end_time - start_time return latency print('Warming up...') run_to_completion(profile_dir=None) if args.profile: profile_dir = args.profile_result_dir if not profile_dir: profile_dir = Path('.') / 'vllm_benchmark_result' / f'latency_result_{time.time()}' print(f"Profiling (results will be saved to '{profile_dir}')...") run_to_completion(profile_dir=profile_dir) return latencies = [] for _ in tqdm(range(args.num_iters), desc='Profiling iterations'): latencies.append(run_to_completion(profile_dir=None)) print(f'Avg latency: {np.mean(latencies)} seconds')
def main(args: argparse.Namespace): print(args) llm = LLM(model=args.model, tokenizer=args.tokenizer, quantization=args.quantization, tensor_parallel_size=args.tensor_parallel_size, trust_remote_code=args.trust_remote_code, dtype=args.dtype, enforce_eager=args.enforce_eager) sampling_params = SamplingParams(n=args.n, temperature=0.0 if args.use_beam_search else 1.0, top_p=1.0, use_beam_search=args.use_beam_search, ignore_eos=True, max_tokens=args.output_len) print(sampling_params) dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size def run_to_completion(profile_dir: Optional[str]=None): if profile_dir: with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], on_trace_ready=torch.profiler.tensorboard_trace_handler(str(profile_dir))) as p: llm.generate(prompt_token_ids=dummy_prompt_token_ids, sampling_params=sampling_params, use_tqdm=False) print(p.key_averages()) else: start_time = time.perf_counter() llm.generate(prompt_token_ids=dummy_prompt_token_ids, sampling_params=sampling_params, use_tqdm=False) end_time = time.perf_counter() latency = end_time - start_time return latency print('Warming up...') run_to_completion(profile_dir=None) if args.profile: profile_dir = args.profile_result_dir if not profile_dir: profile_dir = Path('.') / 'vllm_benchmark_result' / f'latency_result_{time.time()}' print(f"Profiling (results will be saved to '{profile_dir}')...") run_to_completion(profile_dir=profile_dir) return latencies = [] for _ in tqdm(range(args.num_iters), desc='Profiling iterations'): latencies.append(run_to_completion(profile_dir=None)) print(f'Avg latency: {np.mean(latencies)} seconds')
null
_swap
with torch.cuda.stream(self.cache_stream): for i in range(self.num_layers): src_key_cache, src_value_cache = src[i] dst_key_cache, dst_value_cache = dst[i] cache_ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst) cache_ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst) event = self.events[i] event.record(stream=self.cache_stream)
def _swap(self, src: List[KVCache], dst: List[KVCache], src_to_dst: Dict[ int, int]) ->None: with torch.cuda.stream(self.cache_stream): for i in range(self.num_layers): src_key_cache, src_value_cache = src[i] dst_key_cache, dst_value_cache = dst[i] cache_ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst) cache_ops.swap_blocks(src_value_cache, dst_value_cache, src_to_dst) event = self.events[i] event.record(stream=self.cache_stream)
null
get_streaming_response
for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\x00'): if chunk: data = json.loads(chunk.decode('utf-8')) output = data['text'] yield output
def get_streaming_response(response: requests.Response) ->Iterable[List[str]]: for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\x00'): if chunk: data = json.loads(chunk.decode('utf-8')) output = data['text'] yield output
null
_get_graph_batch_size
if batch_size <= 2: return batch_size elif batch_size <= 4: return 4 else: return (batch_size + 7) // 8 * 8
def _get_graph_batch_size(batch_size: int) ->int: if batch_size <= 2: return batch_size elif batch_size <= 4: return 4 else: return (batch_size + 7) // 8 * 8
null
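The rounding above pads a runtime batch size up to 1, 2, 4, or the next multiple of 8, which appear to be the batch sizes the CUDA-graph path captures. A quick illustration:
for bs in (1, 2, 3, 4, 5, 8, 9, 17):
    print(bs, '->', _get_graph_batch_size(bs))
# 1 -> 1, 2 -> 2, 3 -> 4, 4 -> 4, 5 -> 8, 8 -> 8, 9 -> 16, 17 -> 24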
get_tensor_model_parallel_group
"""Get the tensor model parallel group the caller rank belongs to.""" assert _TENSOR_MODEL_PARALLEL_GROUP is not None, 'tenosr model parallel group is not initialized' return _TENSOR_MODEL_PARALLEL_GROUP
def get_tensor_model_parallel_group(): """Get the tensor model parallel group the caller rank belongs to.""" assert _TENSOR_MODEL_PARALLEL_GROUP is not None, 'tenosr model parallel group is not initialized' return _TENSOR_MODEL_PARALLEL_GROUP
Get the tensor model parallel group the caller rank belongs to.
forward
hidden_states = self.wte(input_ids) for i in range(len(self.h)): layer = self.h[i] hidden_states = layer(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.ln_f(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.wte(input_ids) for i in range(len(self.h)): layer = self.h[i] hidden_states = layer(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.ln_f(hidden_states) return hidden_states
null
get_min_capability
"""Minimum GPU capability to support the quantization method. E.g., 70 for Volta, 75 for Turing, 80 for Ampere. This requirement is due to the custom CUDA kernels used by the quantization method. """ raise NotImplementedError
@abstractmethod def get_min_capability(self) ->int: """Minimum GPU capability to support the quantization method. E.g., 70 for Volta, 75 for Turing, 80 for Ampere. This requirement is due to the custom CUDA kernels used by the quantization method. """ raise NotImplementedError
Minimum GPU capability to support the quantization method. E.g., 70 for Volta, 75 for Turing, 80 for Ampere. This requirement is due to the custom CUDA kernels used by the quantization method.
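A sketch of how the capability number can be compared against the running GPU: torch.cuda.get_device_capability() returns a (major, minor) pair, and major * 10 + minor matches the 70/75/80 convention in the docstring. The quant_config object is an assumed instance of a concrete QuantizationConfig:
import torch

major, minor = torch.cuda.get_device_capability()
if major * 10 + minor < quant_config.get_min_capability():
    raise ValueError('GPU compute capability is too low for this quantization method.')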
capture
assert self.graph is None self.model(input_ids, positions, kv_caches, input_metadata) torch.cuda.synchronize() self.graph = torch.cuda.CUDAGraph() with torch.cuda.graph(self.graph, pool=memory_pool): hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) torch.cuda.synchronize() self.input_buffers = {'input_ids': input_ids, 'positions': positions, 'kv_caches': kv_caches, 'slot_mapping': input_metadata.slot_mapping, 'context_lens': input_metadata.context_lens, 'block_tables': input_metadata.block_tables} self.output_buffers = {'hidden_states': hidden_states} return
def capture(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata, memory_pool ) ->None: assert self.graph is None self.model(input_ids, positions, kv_caches, input_metadata) torch.cuda.synchronize() self.graph = torch.cuda.CUDAGraph() with torch.cuda.graph(self.graph, pool=memory_pool): hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) torch.cuda.synchronize() self.input_buffers = {'input_ids': input_ids, 'positions': positions, 'kv_caches': kv_caches, 'slot_mapping': input_metadata.slot_mapping, 'context_lens': input_metadata.context_lens, 'block_tables': input_metadata.block_tables} self.output_buffers = {'hidden_states': hidden_states} return
null
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias= False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias =False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
null
__init__
self.device = device self.block_size = block_size self.num_blocks = num_blocks self.free_blocks: BlockTable = [] for i in range(num_blocks): block = PhysicalTokenBlock(device=device, block_number=i, block_size= block_size) self.free_blocks.append(block)
def __init__(self, device: Device, block_size: int, num_blocks: int) ->None: self.device = device self.block_size = block_size self.num_blocks = num_blocks self.free_blocks: BlockTable = [] for i in range(num_blocks): block = PhysicalTokenBlock(device=device, block_number=i, block_size=block_size) self.free_blocks.append(block)
null
get_name
return 'squeezellm'
def get_name(self) ->str: return 'squeezellm'
null
_make_tensor_with_pad
padded_x = [_pad_to_max(x_i, max_len, pad) for x_i in x] return torch.tensor(padded_x, dtype=dtype, device=device, pin_memory= pin_memory and str(device) == 'cpu')
def _make_tensor_with_pad(x: List[List[int]], max_len: int, pad: int, dtype: torch.dtype, device: Union[str, torch.device]='cuda', pin_memory: bool= False) ->torch.Tensor: padded_x = [_pad_to_max(x_i, max_len, pad) for x_i in x] return torch.tensor(padded_x, dtype=dtype, device=device, pin_memory= pin_memory and str(device) == 'cpu')
null
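An illustrative call of _make_tensor_with_pad, assuming the _pad_to_max helper it relies on right-pads each inner list to max_len with the given pad value:
import torch

x = [[1, 2, 3], [4]]
t = _make_tensor_with_pad(x, max_len=4, pad=0, dtype=torch.long, device='cpu')
# tensor([[1, 2, 3, 0],
#         [4, 0, 0, 0]])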
sample_requests
if fixed_output_len is not None and fixed_output_len < 4: raise ValueError('output_len too small') with open(dataset_path) as f: dataset = json.load(f) dataset = [data for data in dataset if len(data['conversations']) >= 2] dataset = [(data['conversations'][0]['value'], data['conversations'][1][ 'value']) for data in dataset] prompts = [prompt for prompt, _ in dataset] prompt_token_ids = tokenizer(prompts).input_ids completions = [completion for _, completion in dataset] completion_token_ids = tokenizer(completions).input_ids tokenized_dataset = [] for i in range(len(dataset)): output_len = len(completion_token_ids[i]) if fixed_output_len is not None: output_len = fixed_output_len tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len)) filtered_dataset: List[Tuple[str, int, int]] = [] for prompt, prompt_token_ids, output_len in tokenized_dataset: prompt_len = len(prompt_token_ids) if prompt_len < 4 or output_len < 4: continue if prompt_len > 1024 or prompt_len + output_len > 2048: continue filtered_dataset.append((prompt, prompt_len, output_len)) sampled_requests = random.sample(filtered_dataset, num_requests) return sampled_requests
def sample_requests(dataset_path: str, num_requests: int, tokenizer: PreTrainedTokenizerBase, fixed_output_len: Optional[int]) ->List[Tuple[ str, int, int]]: if fixed_output_len is not None and fixed_output_len < 4: raise ValueError('output_len too small') with open(dataset_path) as f: dataset = json.load(f) dataset = [data for data in dataset if len(data['conversations']) >= 2] dataset = [(data['conversations'][0]['value'], data['conversations'][1] ['value']) for data in dataset] prompts = [prompt for prompt, _ in dataset] prompt_token_ids = tokenizer(prompts).input_ids completions = [completion for _, completion in dataset] completion_token_ids = tokenizer(completions).input_ids tokenized_dataset = [] for i in range(len(dataset)): output_len = len(completion_token_ids[i]) if fixed_output_len is not None: output_len = fixed_output_len tokenized_dataset.append((prompts[i], prompt_token_ids[i], output_len)) filtered_dataset: List[Tuple[str, int, int]] = [] for prompt, prompt_token_ids, output_len in tokenized_dataset: prompt_len = len(prompt_token_ids) if prompt_len < 4 or output_len < 4: continue if prompt_len > 1024 or prompt_len + output_len > 2048: continue filtered_dataset.append((prompt, prompt_len, output_len)) sampled_requests = random.sample(filtered_dataset, num_requests) return sampled_requests
null
set
self.flag = True
def set(self): self.flag = True
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
forward
layernorm_output = self.input_layernorm(hidden_states) attention_output = self.self_attention(hidden_states=layernorm_output, position_ids=position_ids, kv_cache=kv_cache, input_metadata=input_metadata ) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = residual + attention_output layernorm_output = self.post_attention_layernorm(layernorm_input) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = self.mlp(layernorm_output) + residual return output
def forward(self, hidden_states: torch.Tensor, position_ids: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: layernorm_output = self.input_layernorm(hidden_states) attention_output = self.self_attention(hidden_states=layernorm_output, position_ids=position_ids, kv_cache=kv_cache, input_metadata= input_metadata) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = residual + attention_output layernorm_output = self.post_attention_layernorm(layernorm_input) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = self.mlp(layernorm_output) + residual return output
null
_verify_cuda_graph
if self.max_context_len_to_capture is None: self.max_context_len_to_capture = self.max_model_len self.max_context_len_to_capture = min(self.max_context_len_to_capture, self .max_model_len)
def _verify_cuda_graph(self) ->None: if self.max_context_len_to_capture is None: self.max_context_len_to_capture = self.max_model_len self.max_context_len_to_capture = min(self.max_context_len_to_capture, self.max_model_len)
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
__init__
self.request_id = request_id self.prompt = prompt self.prompt_token_ids = prompt_token_ids self.prompt_logprobs = prompt_logprobs self.outputs = outputs self.finished = finished
def __init__(self, request_id: str, prompt: str, prompt_token_ids: List[int ], prompt_logprobs: Optional[PromptLogprobs], outputs: List[ CompletionOutput], finished: bool) ->None: self.request_id = request_id self.prompt = prompt self.prompt_token_ids = prompt_token_ids self.prompt_logprobs = prompt_logprobs self.outputs = outputs self.finished = finished
null
forward
if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual ) hidden_states = self.mlp(hidden_states) return hidden_states, residual
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[ torch.Tensor]) ->Tuple[torch.Tensor, torch.Tensor]: if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
null
__init__
super().__init__() assert config.embedding_fraction == 1.0 assert config.norm_type == 'low_precision_layernorm' self.wte = VocabParallelEmbedding(config.vocab_size, config.d_model) self.blocks = nn.ModuleList([MPTBlock(config, linear_method) for _ in range (config.n_layers)]) self.norm_f = nn.LayerNorm(config.d_model) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter): module.register_parameter('bias', None)
def __init__(self, config: MPTConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() assert config.embedding_fraction == 1.0 assert config.norm_type == 'low_precision_layernorm' self.wte = VocabParallelEmbedding(config.vocab_size, config.d_model) self.blocks = nn.ModuleList([MPTBlock(config, linear_method) for _ in range(config.n_layers)]) self.norm_f = nn.LayerNorm(config.d_model) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter ): module.register_parameter('bias', None)
null
test_models
hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_greedy(example_long_prompts, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_greedy(example_long_prompts, max_tokens) del vllm_model for i in range(len(example_long_prompts)): hf_output_ids, hf_output_str = hf_outputs[i] vllm_output_ids, vllm_output_str = vllm_outputs[i] assert hf_output_str == vllm_output_str, f"""Test{i}: HF: {hf_output_str!r} vLLM: {vllm_output_str!r}""" assert hf_output_ids == vllm_output_ids, f"""Test{i}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
@pytest.mark.parametrize('model', MODELS) @pytest.mark.parametrize('dtype', ['bfloat16']) @pytest.mark.parametrize('max_tokens', [128]) def test_models(hf_runner, vllm_runner, example_long_prompts, model: str, dtype: str, max_tokens: int) ->None: hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_greedy(example_long_prompts, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_greedy(example_long_prompts, max_tokens) del vllm_model for i in range(len(example_long_prompts)): hf_output_ids, hf_output_str = hf_outputs[i] vllm_output_ids, vllm_output_str = vllm_outputs[i] assert hf_output_str == vllm_output_str, f"""Test{i}: HF: {hf_output_str!r} vLLM: {vllm_output_str!r}""" assert hf_output_ids == vllm_output_ids, f"""Test{i}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
null
create_logprobs
"""Create OpenAI-style logprobs.""" logprobs = LogProbs() last_token_len = 0 if num_output_top_logprobs: logprobs.top_logprobs = [] for i, token_id in enumerate(token_ids): step_top_logprobs = top_logprobs[i] if step_top_logprobs is not None: token_logprob = step_top_logprobs[token_id] else: token_logprob = None token = tokenizer.convert_ids_to_tokens(token_id) logprobs.tokens.append(token) logprobs.token_logprobs.append(token_logprob) if len(logprobs.text_offset) == 0: logprobs.text_offset.append(initial_text_offset) else: logprobs.text_offset.append(logprobs.text_offset[-1] + last_token_len) last_token_len = len(token) if num_output_top_logprobs: logprobs.top_logprobs.append({tokenizer.convert_ids_to_tokens(i): p for i, p in step_top_logprobs.items()} if step_top_logprobs else None) return logprobs
def create_logprobs(token_ids: List[int], top_logprobs: Optional[List[ Optional[Dict[int, float]]]]=None, num_output_top_logprobs: Optional[ int]=None, initial_text_offset: int=0) ->LogProbs: """Create OpenAI-style logprobs.""" logprobs = LogProbs() last_token_len = 0 if num_output_top_logprobs: logprobs.top_logprobs = [] for i, token_id in enumerate(token_ids): step_top_logprobs = top_logprobs[i] if step_top_logprobs is not None: token_logprob = step_top_logprobs[token_id] else: token_logprob = None token = tokenizer.convert_ids_to_tokens(token_id) logprobs.tokens.append(token) logprobs.token_logprobs.append(token_logprob) if len(logprobs.text_offset) == 0: logprobs.text_offset.append(initial_text_offset) else: logprobs.text_offset.append(logprobs.text_offset[-1] + last_token_len) last_token_len = len(token) if num_output_top_logprobs: logprobs.top_logprobs.append({tokenizer.convert_ids_to_tokens(i ): p for i, p in step_top_logprobs.items()} if step_top_logprobs else None) return logprobs
Create OpenAI-style logprobs.
get_pipeline_model_parallel_next_rank
"""Return the global rank that follows the caller in the pipeline""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' rank_in_pipeline = get_pipeline_model_parallel_rank() world_size = get_pipeline_model_parallel_world_size() return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]
def get_pipeline_model_parallel_next_rank(): """Return the global rank that follows the caller in the pipeline""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' rank_in_pipeline = get_pipeline_model_parallel_rank() world_size = get_pipeline_model_parallel_world_size() return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]
Return the global rank that follows the caller in the pipeline
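A small sketch of the wrap-around lookup performed by get_pipeline_model_parallel_next_rank above, using a hypothetical pipeline group of global ranks [4, 5, 6, 7] in place of _PIPELINE_GLOBAL_RANKS.
pipeline_global_ranks = [4, 5, 6, 7]  # hypothetical global ranks of one pipeline group

def next_rank(rank_in_pipeline: int) -> int:
    world_size = len(pipeline_global_ranks)
    # the stage after the last one wraps back to the first
    return pipeline_global_ranks[(rank_in_pipeline + 1) % world_size]

assert next_rank(0) == 5
assert next_rank(3) == 4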
__init__
super().__init__() self.config = config self.linear_method = linear_method self.embd = PhiEmbedding(config) self.h = nn.ModuleList([PhiLayer(config, linear_method) for _ in range( config.num_hidden_layers)])
def __init__(self, config: PretrainedConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.embd = PhiEmbedding(config) self.h = nn.ModuleList([PhiLayer(config, linear_method) for _ in range( config.num_hidden_layers)])
null
hf_model_weights_iterator
hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights( model_name_or_path, cache_dir=cache_dir, load_format=load_format, fall_back_to_pt=fall_back_to_pt, revision=revision) if load_format == 'npcache': assert use_safetensors is False np_folder = os.path.join(hf_folder, 'np') os.makedirs(np_folder, exist_ok=True) weight_names_file = os.path.join(np_folder, 'weight_names.json') with get_lock(model_name_or_path, cache_dir): if not os.path.exists(weight_names_file): weight_names = [] for bin_file in hf_weights_files: state = torch.load(bin_file, map_location='cpu') for name, param in state.items(): param_path = os.path.join(np_folder, name) with open(param_path, 'wb') as f: np.save(f, param.cpu().detach().numpy()) weight_names.append(name) with open(weight_names_file, 'w') as f: json.dump(weight_names, f) with open(weight_names_file, 'r') as f: weight_names = json.load(f) for name in weight_names: param_path = os.path.join(np_folder, name) with open(param_path, 'rb') as f: param = np.load(f) yield name, torch.from_numpy(param) elif use_safetensors: for st_file in hf_weights_files: with safe_open(st_file, framework='pt') as f: for name in f.keys(): param = f.get_tensor(name) yield name, param else: for bin_file in hf_weights_files: state = torch.load(bin_file, map_location='cpu') for name, param in state.items(): yield name, param del state torch.cuda.empty_cache()
def hf_model_weights_iterator(model_name_or_path: str, cache_dir: Optional[ str]=None, load_format: str='auto', revision: Optional[str]=None, fall_back_to_pt: Optional[bool]=True) ->Iterator[Tuple[str, torch.Tensor]]: hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights( model_name_or_path, cache_dir=cache_dir, load_format=load_format, fall_back_to_pt=fall_back_to_pt, revision=revision) if load_format == 'npcache': assert use_safetensors is False np_folder = os.path.join(hf_folder, 'np') os.makedirs(np_folder, exist_ok=True) weight_names_file = os.path.join(np_folder, 'weight_names.json') with get_lock(model_name_or_path, cache_dir): if not os.path.exists(weight_names_file): weight_names = [] for bin_file in hf_weights_files: state = torch.load(bin_file, map_location='cpu') for name, param in state.items(): param_path = os.path.join(np_folder, name) with open(param_path, 'wb') as f: np.save(f, param.cpu().detach().numpy()) weight_names.append(name) with open(weight_names_file, 'w') as f: json.dump(weight_names, f) with open(weight_names_file, 'r') as f: weight_names = json.load(f) for name in weight_names: param_path = os.path.join(np_folder, name) with open(param_path, 'rb') as f: param = np.load(f) yield name, torch.from_numpy(param) elif use_safetensors: for st_file in hf_weights_files: with safe_open(st_file, framework='pt') as f: for name in f.keys(): param = f.get_tensor(name) yield name, param else: for bin_file in hf_weights_files: state = torch.load(bin_file, map_location='cpu') for name, param in state.items(): yield name, param del state torch.cuda.empty_cache()
null
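A hedged usage sketch for hf_model_weights_iterator above: collecting the yielded (name, tensor) pairs into a plain state dict. The import path and the example model name are assumptions for illustration, not taken from this record.
from vllm.model_executor.weight_utils import hf_model_weights_iterator  # assumed import path

def collect_state_dict(model_name_or_path: str) -> dict:
    state_dict = {}
    for name, param in hf_model_weights_iterator(model_name_or_path, load_format="auto"):
        # clone so the tensor outlives any memory-mapped checkpoint file
        state_dict[name] = param.clone()
    return state_dict

# state_dict = collect_state_dict("facebook/opt-125m")  # illustrative model name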
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
create_error_response
return JSONResponse(ErrorResponse(message=message, type= 'invalid_request_error').dict(), status_code=status_code.value)
def create_error_response(status_code: HTTPStatus, message: str ) ->JSONResponse: return JSONResponse(ErrorResponse(message=message, type= 'invalid_request_error').dict(), status_code=status_code.value)
null
forward
hidden_states, _ = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states, _ = self.fc_out(hidden_states) return hidden_states
def forward(self, hidden_states: torch.Tensor) ->torch.Tensor: hidden_states, _ = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states, _ = self.fc_out(hidden_states) return hidden_states
null
can_swap_out
blocks = self._get_physical_blocks(seq_group) return len(blocks) <= self.cpu_allocator.get_num_free_blocks()
def can_swap_out(self, seq_group: SequenceGroup) ->bool: blocks = self._get_physical_blocks(seq_group) return len(blocks) <= self.cpu_allocator.get_num_free_blocks()
null
forward
del position_ids qkv, _ = self.Wqkv(hidden_states) if self.clip_qkv is not None: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) if self.qk_ln: q = self.q_ln(q) k = self.k_ln(k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: del position_ids qkv, _ = self.Wqkv(hidden_states) if self.clip_qkv is not None: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) if self.qk_ln: q = self.q_ln(q) k = self.k_ln(k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
null
_process_sequence_group_outputs
prompt_logprobs = outputs.prompt_logprobs if prompt_logprobs is not None: seq_group.prompt_logprobs = prompt_logprobs samples = outputs.samples parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING) existing_finished_seqs = seq_group.get_finished_seqs() parent_child_dict = {parent_seq.seq_id: [] for parent_seq in parent_seqs} for sample in samples: parent_child_dict[sample.parent_seq_id].append(sample) child_seqs: List[Tuple[Sequence, Sequence]] = [] for parent in parent_seqs: child_samples: List[SequenceOutput] = parent_child_dict[parent.seq_id] if len(child_samples) == 0: parent.status = SequenceStatus.FINISHED_ABORTED seq_group.remove(parent.seq_id) self.scheduler.free_seq(parent) continue for child_sample in child_samples[:-1]: new_child_seq_id = next(self.seq_counter) child = parent.fork(new_child_seq_id) child.append_token_id(child_sample.output_token, child_sample.logprobs) child_seqs.append((child, parent)) last_child_sample = child_samples[-1] parent.append_token_id(last_child_sample.output_token, last_child_sample.logprobs) child_seqs.append((parent, parent)) for seq, _ in child_seqs: self._decode_sequence(seq, seq_group.sampling_params) self._check_stop(seq, seq_group.sampling_params) if not seq_group.sampling_params.use_beam_search: for seq, parent in child_seqs: if seq is not parent: seq_group.add(seq) if not seq.is_finished(): self.scheduler.fork_seq(parent, seq) for seq, parent in child_seqs: if seq is parent and seq.is_finished(): self.scheduler.free_seq(seq) return selected_child_seqs = [] unselected_child_seqs = [] beam_width = seq_group.sampling_params.best_of length_penalty = seq_group.sampling_params.length_penalty existing_finished_seqs = [(seq, None, False) for seq in existing_finished_seqs] new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs if seq.is_finished()] all_finished_seqs = existing_finished_seqs + new_finished_seqs all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score( length_penalty=length_penalty, eos_token_id=self.tokenizer.eos_token_id ), reverse=True) for seq, parent, is_new in all_finished_seqs[:beam_width]: if is_new: selected_child_seqs.append((seq, parent)) for seq, parent, is_new in all_finished_seqs[beam_width:]: if is_new: unselected_child_seqs.append((seq, parent)) else: seq_group.remove(seq.seq_id) running_child_seqs = [(seq, parent) for seq, parent in child_seqs if not seq.is_finished()] running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score( length_penalty=length_penalty, eos_token_id=self.tokenizer.eos_token_id ), reverse=True) if len(running_child_seqs) == 0: stop_beam_search = True elif len(all_finished_seqs) < beam_width: stop_beam_search = False else: best_running_seq = running_child_seqs[0][0] current_worst_seq = all_finished_seqs[beam_width - 1][0] stop_beam_search = self._check_beam_search_early_stopping(seq_group. sampling_params.early_stopping, seq_group.sampling_params, best_running_seq, current_worst_seq) if stop_beam_search: unselected_child_seqs.extend(running_child_seqs) else: selected_child_seqs.extend(running_child_seqs[:beam_width]) unselected_child_seqs.extend(running_child_seqs[beam_width:]) for seq, parent in selected_child_seqs: if seq is not parent: seq_group.add(seq) if not seq.is_finished(): self.scheduler.fork_seq(parent, seq) for seq, parent in selected_child_seqs: if seq is parent and seq.is_finished(): self.scheduler.free_seq(seq) for seq, parent in unselected_child_seqs: if seq is parent: seq_group.remove(seq.seq_id) self.scheduler.free_seq(seq)
def _process_sequence_group_outputs(self, seq_group: SequenceGroup, outputs: SequenceGroupOutput) ->None: prompt_logprobs = outputs.prompt_logprobs if prompt_logprobs is not None: seq_group.prompt_logprobs = prompt_logprobs samples = outputs.samples parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING) existing_finished_seqs = seq_group.get_finished_seqs() parent_child_dict = {parent_seq.seq_id: [] for parent_seq in parent_seqs} for sample in samples: parent_child_dict[sample.parent_seq_id].append(sample) child_seqs: List[Tuple[Sequence, Sequence]] = [] for parent in parent_seqs: child_samples: List[SequenceOutput] = parent_child_dict[parent.seq_id] if len(child_samples) == 0: parent.status = SequenceStatus.FINISHED_ABORTED seq_group.remove(parent.seq_id) self.scheduler.free_seq(parent) continue for child_sample in child_samples[:-1]: new_child_seq_id = next(self.seq_counter) child = parent.fork(new_child_seq_id) child.append_token_id(child_sample.output_token, child_sample. logprobs) child_seqs.append((child, parent)) last_child_sample = child_samples[-1] parent.append_token_id(last_child_sample.output_token, last_child_sample.logprobs) child_seqs.append((parent, parent)) for seq, _ in child_seqs: self._decode_sequence(seq, seq_group.sampling_params) self._check_stop(seq, seq_group.sampling_params) if not seq_group.sampling_params.use_beam_search: for seq, parent in child_seqs: if seq is not parent: seq_group.add(seq) if not seq.is_finished(): self.scheduler.fork_seq(parent, seq) for seq, parent in child_seqs: if seq is parent and seq.is_finished(): self.scheduler.free_seq(seq) return selected_child_seqs = [] unselected_child_seqs = [] beam_width = seq_group.sampling_params.best_of length_penalty = seq_group.sampling_params.length_penalty existing_finished_seqs = [(seq, None, False) for seq in existing_finished_seqs] new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs if seq.is_finished()] all_finished_seqs = existing_finished_seqs + new_finished_seqs all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score( length_penalty=length_penalty, eos_token_id=self.tokenizer. eos_token_id), reverse=True) for seq, parent, is_new in all_finished_seqs[:beam_width]: if is_new: selected_child_seqs.append((seq, parent)) for seq, parent, is_new in all_finished_seqs[beam_width:]: if is_new: unselected_child_seqs.append((seq, parent)) else: seq_group.remove(seq.seq_id) running_child_seqs = [(seq, parent) for seq, parent in child_seqs if not seq.is_finished()] running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score( length_penalty=length_penalty, eos_token_id=self.tokenizer. eos_token_id), reverse=True) if len(running_child_seqs) == 0: stop_beam_search = True elif len(all_finished_seqs) < beam_width: stop_beam_search = False else: best_running_seq = running_child_seqs[0][0] current_worst_seq = all_finished_seqs[beam_width - 1][0] stop_beam_search = self._check_beam_search_early_stopping(seq_group .sampling_params.early_stopping, seq_group.sampling_params, best_running_seq, current_worst_seq) if stop_beam_search: unselected_child_seqs.extend(running_child_seqs) else: selected_child_seqs.extend(running_child_seqs[:beam_width]) unselected_child_seqs.extend(running_child_seqs[beam_width:]) for seq, parent in selected_child_seqs: if seq is not parent: seq_group.add(seq) if not seq.is_finished(): self.scheduler.fork_seq(parent, seq) for seq, parent in selected_child_seqs: if seq is parent and seq.is_finished(): self.scheduler.free_seq(seq) for seq, parent in unselected_child_seqs: if seq is parent: seq_group.remove(seq.seq_id) self.scheduler.free_seq(seq)
null
destroy_model_parallel
"""Set the groups to none.""" global _TENSOR_MODEL_PARALLEL_GROUP _TENSOR_MODEL_PARALLEL_GROUP = None global _PIPELINE_MODEL_PARALLEL_GROUP _PIPELINE_MODEL_PARALLEL_GROUP = None global _PIPELINE_GLOBAL_RANKS _PIPELINE_GLOBAL_RANKS = None
def destroy_model_parallel(): """Set the groups to none.""" global _TENSOR_MODEL_PARALLEL_GROUP _TENSOR_MODEL_PARALLEL_GROUP = None global _PIPELINE_MODEL_PARALLEL_GROUP _PIPELINE_MODEL_PARALLEL_GROUP = None global _PIPELINE_GLOBAL_RANKS _PIPELINE_GLOBAL_RANKS = None
Set the groups to none.
generate
req_outputs = self.model.generate(prompts, sampling_params=sampling_params) outputs = [] for req_output in req_outputs: prompt_str = req_output.prompt prompt_ids = req_output.prompt_token_ids req_sample_output_ids = [] req_sample_output_strs = [] for sample in req_output.outputs: output_str = sample.text output_ids = sample.token_ids req_sample_output_ids.append(prompt_ids + output_ids) req_sample_output_strs.append(prompt_str + output_str) outputs.append((req_sample_output_ids, req_sample_output_strs)) return outputs
def generate(self, prompts: List[str], sampling_params: SamplingParams) ->List[ Tuple[List[int], str]]: req_outputs = self.model.generate(prompts, sampling_params=sampling_params) outputs = [] for req_output in req_outputs: prompt_str = req_output.prompt prompt_ids = req_output.prompt_token_ids req_sample_output_ids = [] req_sample_output_strs = [] for sample in req_output.outputs: output_str = sample.text output_ids = sample.token_ids req_sample_output_ids.append(prompt_ids + output_ids) req_sample_output_strs.append(prompt_str + output_str) outputs.append((req_sample_output_ids, req_sample_output_strs)) return outputs
null
_preempt
if preemption_mode is None: if seq_group.get_max_num_running_seqs() == 1: preemption_mode = PreemptionMode.RECOMPUTE else: preemption_mode = PreemptionMode.SWAP if preemption_mode == PreemptionMode.RECOMPUTE: self._preempt_by_recompute(seq_group) elif preemption_mode == PreemptionMode.SWAP: self._preempt_by_swap(seq_group, blocks_to_swap_out) else: raise AssertionError('Invalid preemption mode.')
def _preempt(self, seq_group: SequenceGroup, blocks_to_swap_out: Dict[int, int], preemption_mode: Optional[PreemptionMode]=None) ->None: if preemption_mode is None: if seq_group.get_max_num_running_seqs() == 1: preemption_mode = PreemptionMode.RECOMPUTE else: preemption_mode = PreemptionMode.SWAP if preemption_mode == PreemptionMode.RECOMPUTE: self._preempt_by_recompute(seq_group) elif preemption_mode == PreemptionMode.SWAP: self._preempt_by_swap(seq_group, blocks_to_swap_out) else: raise AssertionError('Invalid preemption mode.')
null
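A minimal sketch of the default-mode rule in _preempt above: a group that can only ever have one running sequence is recomputed, anything else is swapped out. The enum mirrors the PreemptionMode referenced by the method; it is redefined here only so the sketch is self-contained.
from enum import Enum, auto

class PreemptionMode(Enum):
    SWAP = auto()
    RECOMPUTE = auto()

def default_preemption_mode(max_num_running_seqs: int) -> PreemptionMode:
    # single-sequence groups are recomputed; multi-sequence groups are swapped
    return PreemptionMode.RECOMPUTE if max_num_running_seqs == 1 else PreemptionMode.SWAP

assert default_preemption_mode(1) is PreemptionMode.RECOMPUTE
assert default_preemption_mode(4) is PreemptionMode.SWAP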
get_response
data = json.loads(response.content) output = data['text'] return output
def get_response(response: requests.Response) ->List[str]: data = json.loads(response.content) output = data['text'] return output
null
__repr__
return f'RequestOutput(request_id={self.request_id}, prompt={self.prompt!r}, prompt_token_ids={self.prompt_token_ids}, prompt_logprobs={self.prompt_logprobs}, outputs={self.outputs}, finished={self.finished})'
def __repr__(self) ->str: return ( f'RequestOutput(request_id={self.request_id}, prompt={self.prompt!r}, prompt_token_ids={self.prompt_token_ids}, prompt_logprobs={self.prompt_logprobs}, outputs={self.outputs}, finished={self.finished})' )
null
forward
gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
def forward(self, x): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
null
__init__
self.scheduled_seq_groups = scheduled_seq_groups self.prompt_run = prompt_run self.num_batched_tokens = num_batched_tokens self.blocks_to_swap_in = blocks_to_swap_in self.blocks_to_swap_out = blocks_to_swap_out self.blocks_to_copy = blocks_to_copy assert not (blocks_to_swap_in and blocks_to_swap_out) self.ignored_seq_groups = ignored_seq_groups
def __init__(self, scheduled_seq_groups: List[SequenceGroup], prompt_run: bool, num_batched_tokens: int, blocks_to_swap_in: Dict[int, int], blocks_to_swap_out: Dict[int, int], blocks_to_copy: Dict[int, List[int] ], ignored_seq_groups: List[SequenceGroup]) ->None: self.scheduled_seq_groups = scheduled_seq_groups self.prompt_run = prompt_run self.num_batched_tokens = num_batched_tokens self.blocks_to_swap_in = blocks_to_swap_in self.blocks_to_swap_out = blocks_to_swap_out self.blocks_to_copy = blocks_to_copy assert not (blocks_to_swap_in and blocks_to_swap_out) self.ignored_seq_groups = ignored_seq_groups
null
get_num_kv_heads
"""Returns the number of KV heads per GPU.""" total_num_kv_heads = self.get_total_num_kv_heads() return max(1, total_num_kv_heads // parallel_config.tensor_parallel_size)
def get_num_kv_heads(self, parallel_config: 'ParallelConfig') ->int: """Returns the number of KV heads per GPU.""" total_num_kv_heads = self.get_total_num_kv_heads() return max(1, total_num_kv_heads // parallel_config.tensor_parallel_size)
Returns the number of KV heads per GPU.
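A worked example of the per-GPU KV-head arithmetic in get_num_kv_heads above; the head counts and tensor-parallel sizes are illustrative.
def num_kv_heads_per_gpu(total_num_kv_heads: int, tensor_parallel_size: int) -> int:
    # replicate rather than shard when there are more GPUs than KV heads
    return max(1, total_num_kv_heads // tensor_parallel_size)

assert num_kv_heads_per_gpu(8, 4) == 2   # 8 KV heads split across 4 GPUs
assert num_kv_heads_per_gpu(8, 16) == 1  # clamped: each GPU keeps at least one head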
__init__
super().__init__() self.hidden_size = hidden_size tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.head_dim = hidden_size // self.total_num_heads self.position_embedding = position_embedding self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.W_pack = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_heads, bias=False, linear_method= linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) if self.position_embedding == 'ALIBI': tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads) alibi_slopes = alibi_slopes[head_start:head_end].tolist() scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes) else: self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=self.max_position_embeddings, base=self.rope_theta) self.scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
def __init__(self, hidden_size: int, num_heads: int, position_embedding: str, rope_theta: float=10000, max_position_embeddings: int=8192, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.hidden_size = hidden_size tensor_model_parallel_world_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads assert self.total_num_heads % tensor_model_parallel_world_size == 0 self.num_heads = self.total_num_heads // tensor_model_parallel_world_size self.head_dim = hidden_size // self.total_num_heads self.position_embedding = position_embedding self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.W_pack = QKVParallelLinear(hidden_size, self.head_dim, self. total_num_heads, self.total_num_heads, bias=False, linear_method= linear_method) self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method) if self.position_embedding == 'ALIBI': tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads) alibi_slopes = alibi_slopes[head_start:head_end].tolist() scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes) else: self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim, max_position=self.max_position_embeddings, base=self.rope_theta) self.scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
null
__init__
super().__init__() self.config: ChatGLMConfig = config self.linear_method = linear_method self.transformer = ChatGLMModel(config, linear_method) self.lm_head_weight = self.transformer.output_layer.weight self.sampler = Sampler(config.padded_vocab_size)
def __init__(self, config: ChatGLMConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config: ChatGLMConfig = config self.linear_method = linear_method self.transformer = ChatGLMModel(config, linear_method) self.lm_head_weight = self.transformer.output_layer.weight self.sampler = Sampler(config.padded_vocab_size)
null
test_load_chat_template
template = '../../examples/template_chatml.jinja' mock_args = Namespace(chat_template=template) tokenizer = MockTokenizer() load_chat_template(mock_args, tokenizer) template_content = tokenizer.chat_template assert template_content is not None assert template_content == """{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %} {% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}"""
def test_load_chat_template(): template = '../../examples/template_chatml.jinja' mock_args = Namespace(chat_template=template) tokenizer = MockTokenizer() load_chat_template(mock_args, tokenizer) template_content = tokenizer.chat_template assert template_content is not None assert template_content == """{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %} {% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}"""
null
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
forward
gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
def forward(self, x): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
null
get_key_block_shape
element_size = torch.tensor([], dtype=self.dtype).element_size() x = 16 // element_size return self.num_heads, self.head_size // x, self.block_size, x
def get_key_block_shape(self) ->Tuple[int, int, int, int]: element_size = torch.tensor([], dtype=self.dtype).element_size() x = 16 // element_size return self.num_heads, self.head_size // x, self.block_size, x
null
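A worked example of the key-block shape computed by get_key_block_shape above, assuming a float16 cache (element_size = 2 bytes, so x = 16 // 2 = 8) with 32 heads, head_size 128 and block_size 16.
import torch

def key_block_shape(num_heads, head_size, block_size, dtype):
    element_size = torch.tensor([], dtype=dtype).element_size()
    x = 16 // element_size  # pack 16 bytes of the head dimension together
    return (num_heads, head_size // x, block_size, x)

assert key_block_shape(32, 128, 16, torch.float16) == (32, 16, 16, 8)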
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
__init__
logger.info( f'Initializing an LLM engine with config: model={model_config.model!r}, tokenizer={model_config.tokenizer!r}, tokenizer_mode={model_config.tokenizer_mode}, revision={model_config.revision}, tokenizer_revision={model_config.tokenizer_revision}, trust_remote_code={model_config.trust_remote_code}, dtype={model_config.dtype}, max_seq_len={model_config.max_model_len}, download_dir={model_config.download_dir!r}, load_format={model_config.load_format}, tensor_parallel_size={parallel_config.tensor_parallel_size}, quantization={model_config.quantization}, enforce_eager={model_config.enforce_eager}, seed={model_config.seed})' ) self.model_config = model_config self.cache_config = cache_config self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.log_stats = log_stats self._verify_args() self.tokenizer = get_tokenizer(model_config.tokenizer, tokenizer_mode= model_config.tokenizer_mode, trust_remote_code=model_config. trust_remote_code, tokenizer_revision=model_config.tokenizer_revision, revision=model_config.revision) self.seq_counter = Counter() if self.parallel_config.worker_use_ray: ray_usage = os.environ.get('RAY_USAGE_STATS_ENABLED', '0') if ray_usage != '1': os.environ['RAY_USAGE_STATS_ENABLED'] = '0' self._init_workers_ray(placement_group) else: self._init_workers() self._init_cache() self.scheduler = Scheduler(scheduler_config, cache_config) self.last_logging_time = 0.0 self.num_prompt_tokens: List[Tuple[float, int]] = [] self.num_generation_tokens: List[Tuple[float, int]] = []
def __init__(self, model_config: ModelConfig, cache_config: CacheConfig, parallel_config: ParallelConfig, scheduler_config: SchedulerConfig, placement_group: Optional['PlacementGroup'], log_stats: bool) ->None: logger.info( f'Initializing an LLM engine with config: model={model_config.model!r}, tokenizer={model_config.tokenizer!r}, tokenizer_mode={model_config.tokenizer_mode}, revision={model_config.revision}, tokenizer_revision={model_config.tokenizer_revision}, trust_remote_code={model_config.trust_remote_code}, dtype={model_config.dtype}, max_seq_len={model_config.max_model_len}, download_dir={model_config.download_dir!r}, load_format={model_config.load_format}, tensor_parallel_size={parallel_config.tensor_parallel_size}, quantization={model_config.quantization}, enforce_eager={model_config.enforce_eager}, seed={model_config.seed})' ) self.model_config = model_config self.cache_config = cache_config self.parallel_config = parallel_config self.scheduler_config = scheduler_config self.log_stats = log_stats self._verify_args() self.tokenizer = get_tokenizer(model_config.tokenizer, tokenizer_mode= model_config.tokenizer_mode, trust_remote_code=model_config. trust_remote_code, tokenizer_revision=model_config. tokenizer_revision, revision=model_config.revision) self.seq_counter = Counter() if self.parallel_config.worker_use_ray: ray_usage = os.environ.get('RAY_USAGE_STATS_ENABLED', '0') if ray_usage != '1': os.environ['RAY_USAGE_STATS_ENABLED'] = '0' self._init_workers_ray(placement_group) else: self._init_workers() self._init_cache() self.scheduler = Scheduler(scheduler_config, cache_config) self.last_logging_time = 0.0 self.num_prompt_tokens: List[Tuple[float, int]] = [] self.num_generation_tokens: List[Tuple[float, int]] = []
null
forward
qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(position_ids, q, k) key_cache, value_cache = kv_cache context_layer = self.attn(q, k, v, key_cache, value_cache, input_metadata) attn_output, _ = self.dense(context_layer) return attn_output
def forward(self, hidden_states: torch.Tensor, position_ids: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(position_ids, q, k) key_cache, value_cache = kv_cache context_layer = self.attn(q, k, v, key_cache, value_cache, input_metadata) attn_output, _ = self.dense(context_layer) return attn_output
null
_get_bin_counts_and_mask
bin_counts = torch.zeros((num_seqs, vocab_size + 1), dtype=torch.long, device=tokens.device) bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens)) bin_counts = bin_counts[:, :vocab_size] mask = bin_counts > 0 return bin_counts, mask
def _get_bin_counts_and_mask(tokens: torch.Tensor, vocab_size: int, num_seqs: int) ->Tuple[torch.Tensor, torch.Tensor]: bin_counts = torch.zeros((num_seqs, vocab_size + 1), dtype=torch.long, device=tokens.device) bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens)) bin_counts = bin_counts[:, :vocab_size] mask = bin_counts > 0 return bin_counts, mask
null
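A small worked example of the bin counting in _get_bin_counts_and_mask above; the extra vocab_size-th column gives out-of-range padding ids somewhere to land before it is sliced away. The token ids and vocab size are illustrative.
import torch

tokens = torch.tensor([[1, 1, 3, 4], [0, 4, 4, 4]])  # 2 sequences, vocab_size = 5
num_seqs, vocab_size = tokens.shape[0], 5
bin_counts = torch.zeros((num_seqs, vocab_size + 1), dtype=torch.long)
bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
bin_counts = bin_counts[:, :vocab_size]
mask = bin_counts > 0
assert bin_counts[0].tolist() == [0, 2, 0, 1, 1]
assert mask[1].tolist() == [True, False, False, False, True]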
__init__
super().__init__() self.config = config assert not config.add_cross_attention self.embed_dim = config.hidden_size self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.h = nn.ModuleList([GPTBigCodeBlock(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
def __init__(self, config: GPTBigCodeConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config assert not config.add_cross_attention self.embed_dim = config.hidden_size self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.h = nn.ModuleList([GPTBigCodeBlock(config, linear_method) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
null
forward
if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual ) hidden_states = self.mlp(hidden_states) return hidden_states, residual
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[ torch.Tensor]) ->Tuple[torch.Tensor, torch.Tensor]: if residual is None: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) else: hidden_states, residual = self.input_layernorm(hidden_states, residual) hidden_states = self.self_attn(positions=positions, hidden_states= hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.post_attention_layernorm(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
null
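A plain-PyTorch sketch of the residual threading in the decoder-layer forward above, assuming an RMSNorm whose two-argument form adds the residual before normalizing and returns the pre-norm sum as the next residual; vLLM fuses this, the sketch only shows the dataflow.
import torch

def add_rms_norm(hidden_states, residual, weight, eps=1e-6):
    hidden_states = hidden_states + residual
    residual = hidden_states  # the pre-norm sum becomes the next residual
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps) * weight
    return hidden_states, residual

h, r, w = torch.randn(2, 4, 8), torch.randn(2, 4, 8), torch.ones(8)
out, new_r = add_rms_norm(h, r, w)
assert torch.allclose(new_r, h + r)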
get_block_table
block_table = self.block_tables[seq.seq_id] return [block.block_number for block in block_table]
def get_block_table(self, seq: Sequence) ->List[int]: block_table = self.block_tables[seq.seq_id] return [block.block_number for block in block_table]
null
__init__
self.counter = start
def __init__(self, start: int=0) ->None: self.counter = start
null
__post_init__
if self.tokenizer is None: self.tokenizer = self.model
def __post_init__(self): if self.tokenizer is None: self.tokenizer = self.model
null
get_linear_method
return AWQLinearMethod(self)
def get_linear_method(self) ->'AWQLinearMethod': return AWQLinearMethod(self)
null
get_pipeline_model_parallel_last_rank
"""Return the global rank of the last process in the pipeline for the current tensor parallel group""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' last_rank_local = get_pipeline_model_parallel_world_size() - 1 return _PIPELINE_GLOBAL_RANKS[last_rank_local]
def get_pipeline_model_parallel_last_rank(): """Return the global rank of the last process in the pipeline for the current tensor parallel group""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' last_rank_local = get_pipeline_model_parallel_world_size() - 1 return _PIPELINE_GLOBAL_RANKS[last_rank_local]
Return the global rank of the last process in the pipeline for the current tensor parallel group
num_seqs
return len(self.get_seqs(status))
def num_seqs(self, status: Optional[SequenceStatus]=None) ->int: return len(self.get_seqs(status))
null
get_tokenizer
"""Gets a tokenizer for the given model name via Huggingface.""" if tokenizer_mode == 'slow': if kwargs.get('use_fast', False): raise ValueError( 'Cannot use the fast tokenizer in slow tokenizer mode.') kwargs['use_fast'] = False try: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, *args, trust_remote_code=trust_remote_code, tokenizer_revision= tokenizer_revision, **kwargs) except ValueError as e: if not trust_remote_code and ( 'does not exist or is not currently imported.' in str(e) or 'requires you to execute the tokenizer file' in str(e)): err_msg = ( 'Failed to load the tokenizer. If the tokenizer is a custom tokenizer not yet available in the HuggingFace transformers library, consider setting `trust_remote_code=True` in LLM or using the `--trust-remote-code` flag in the CLI.' ) raise RuntimeError(err_msg) from e else: raise e except AttributeError as e: if 'BaichuanTokenizer' in str(e): tokenizer = BaichuanTokenizer.from_pretrained(tokenizer_name, *args, trust_remote_code=trust_remote_code, tokenizer_revision= tokenizer_revision, **kwargs) else: raise e if not isinstance(tokenizer, PreTrainedTokenizerFast): logger.warning( 'Using a slow tokenizer. This might cause a significant slowdown. Consider using a fast tokenizer instead.' ) return tokenizer
def get_tokenizer(tokenizer_name: str, *args, tokenizer_mode: str='auto', trust_remote_code: bool=False, tokenizer_revision: Optional[str]=None, **kwargs) ->Union[PreTrainedTokenizer, PreTrainedTokenizerFast]: """Gets a tokenizer for the given model name via Huggingface.""" if tokenizer_mode == 'slow': if kwargs.get('use_fast', False): raise ValueError( 'Cannot use the fast tokenizer in slow tokenizer mode.') kwargs['use_fast'] = False try: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, *args, trust_remote_code=trust_remote_code, tokenizer_revision= tokenizer_revision, **kwargs) except ValueError as e: if not trust_remote_code and ( 'does not exist or is not currently imported.' in str(e) or 'requires you to execute the tokenizer file' in str(e)): err_msg = ( 'Failed to load the tokenizer. If the tokenizer is a custom tokenizer not yet available in the HuggingFace transformers library, consider setting `trust_remote_code=True` in LLM or using the `--trust-remote-code` flag in the CLI.' ) raise RuntimeError(err_msg) from e else: raise e except AttributeError as e: if 'BaichuanTokenizer' in str(e): tokenizer = BaichuanTokenizer.from_pretrained(tokenizer_name, * args, trust_remote_code=trust_remote_code, tokenizer_revision=tokenizer_revision, **kwargs) else: raise e if not isinstance(tokenizer, PreTrainedTokenizerFast): logger.warning( 'Using a slow tokenizer. This might cause a significant slowdown. Consider using a fast tokenizer instead.' ) return tokenizer
Gets a tokenizer for the given model name via Huggingface.
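A hedged usage sketch for get_tokenizer above; the import path and the model name are assumptions for illustration.
from vllm.transformers_utils.tokenizer import get_tokenizer  # assumed import path

tokenizer = get_tokenizer("facebook/opt-125m", tokenizer_mode="auto")  # illustrative model
print(tokenizer("Hello, world!").input_ids)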
__init__
self.weight_bits = weight_bits self.group_size = group_size self.zero_point = zero_point if self.weight_bits != 4: raise ValueError( f'Currently, only 4-bit weight quantization is supported for AWQ, but got {self.weight_bits} bits.' ) self.pack_factor = 32 // self.weight_bits
def __init__(self, weight_bits: int, group_size: int, zero_point: bool) ->None: self.weight_bits = weight_bits self.group_size = group_size self.zero_point = zero_point if self.weight_bits != 4: raise ValueError( f'Currently, only 4-bit weight quantization is supported for AWQ, but got {self.weight_bits} bits.' ) self.pack_factor = 32 // self.weight_bits
null
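A worked example of the pack-factor arithmetic in the AWQ config above: 4-bit weights pack eight quantized values into each 32-bit word, and any other bit width is rejected. The helper name is hypothetical.
def awq_pack_factor(weight_bits: int) -> int:
    if weight_bits != 4:
        raise ValueError(
            f'Currently, only 4-bit weight quantization is supported for AWQ, but got {weight_bits} bits.')
    return 32 // weight_bits

assert awq_pack_factor(4) == 8  # eight 4-bit values per 32-bit word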
apply_weights
qweight = weights['qweight'] lookup_table = weights['lookup_table'] out_shape = x.shape[:-1] + (qweight.shape[-1],) reshaped_x = x.reshape(-1, x.shape[-1]) if is_hip(): out_f = torch.zeros(out_shape, device='cuda', dtype=torch.float) ops.squeezellm_gemm(reshaped_x, qweight, out_f, lookup_table) out = out_f.to(dtype=torch.float16) else: out = torch.zeros(out_shape, device='cuda', dtype=torch.float16) ops.squeezellm_gemm(reshaped_x, qweight, out, lookup_table) if bias is not None: out = out + bias return out.reshape(out_shape)
def apply_weights(self, weights: Dict[str, Any], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: qweight = weights['qweight'] lookup_table = weights['lookup_table'] out_shape = x.shape[:-1] + (qweight.shape[-1],) reshaped_x = x.reshape(-1, x.shape[-1]) if is_hip(): out_f = torch.zeros(out_shape, device='cuda', dtype=torch.float) ops.squeezellm_gemm(reshaped_x, qweight, out_f, lookup_table) out = out_f.to(dtype=torch.float16) else: out = torch.zeros(out_shape, device='cuda', dtype=torch.float16) ops.squeezellm_gemm(reshaped_x, qweight, out, lookup_table) if bias is not None: out = out + bias return out.reshape(out_shape)
null
get_output_len
return self.data.get_output_len()
def get_output_len(self) ->int: return self.data.get_output_len()
null
__init__
super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = YiAttention(hidden_size=self.hidden_size, num_heads=config .num_attention_heads, num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, rope_scaling=rope_scaling, max_position_embeddings=max_position_embeddings, linear_method= linear_method) self.mlp = YiMLP(hidden_size=self.hidden_size, intermediate_size=config. intermediate_size, hidden_act=config.hidden_act, linear_method= linear_method) self.ln1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.ln2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: YiConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = YiAttention(hidden_size=self.hidden_size, num_heads= config.num_attention_heads, num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, rope_scaling=rope_scaling, max_position_embeddings=max_position_embeddings, linear_method= linear_method) self.mlp = YiMLP(hidden_size=self.hidden_size, intermediate_size=config .intermediate_size, hidden_act=config.hidden_act, linear_method= linear_method) self.ln1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.ln2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
head_dim
return self.hidden_size // self.n_head
@property def head_dim(self): return self.hidden_size // self.n_head
null
_append_slot
for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): ret = self.block_manager.append_slot(seq) if ret is not None: src_block, dst_block = ret if src_block in blocks_to_copy: blocks_to_copy[src_block].append(dst_block) else: blocks_to_copy[src_block] = [dst_block]
def _append_slot(self, seq_group: SequenceGroup, blocks_to_copy: Dict[int, List[int]]) ->None: for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): ret = self.block_manager.append_slot(seq) if ret is not None: src_block, dst_block = ret if src_block in blocks_to_copy: blocks_to_copy[src_block].append(dst_block) else: blocks_to_copy[src_block] = [dst_block]
null
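An equivalent sketch of the copy-on-write bookkeeping in _append_slot above, written with dict.setdefault; the source and destination block numbers are illustrative.
blocks_to_copy = {}
for src_block, dst_block in [(7, 12), (7, 13), (9, 20)]:  # hypothetical (src, dst) pairs
    blocks_to_copy.setdefault(src_block, []).append(dst_block)
assert blocks_to_copy == {7: [12, 13], 9: [20]}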