diff --git a/triton_models/interactive/1/placeholder b/triton_models/interactive/1/placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/triton_models/interactive/1/weights b/triton_models/interactive/1/weights new file mode 100644 index 0000000000000000000000000000000000000000..05546b6f3227d07ee91d76d14c794fe34bc3aac2 --- /dev/null +++ b/triton_models/interactive/1/weights @@ -0,0 +1 @@ +../../weights \ No newline at end of file diff --git a/triton_models/interactive/config.pbtxt b/triton_models/interactive/config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..e0fc8839fdd64477c68e2943d094014e1dd06a32 --- /dev/null +++ b/triton_models/interactive/config.pbtxt @@ -0,0 +1,293 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +name: "turbomind" +backend: "turbomind" +default_model_filename: "weights" +max_batch_size: 1 + +model_transaction_policy { + decoupled: True +} + +instance_group [ + { + # max concurrent instances + count: 48 + kind: KIND_CPU + } +] + +input [ + { + name: "input_ids" + data_type: TYPE_UINT32 + dims: [ -1 ] + # allow_ragged_batch: true + }, + { + name: "input_lengths" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + }, + { + name: "request_output_len" + data_type: TYPE_UINT32 + dims: [ -1 ] + }, + { + name: "input_embeddings" + data_type: TYPE_INT8 + dims: [ -1 ] + optional: true + }, + { + name: "input_embedding_ranges" + data_type: TYPE_UINT32 + dims: [ -1, 2 ] + optional: true + }, + { + name: "step" + data_type: TYPE_INT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "session_len" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "runtime_top_k" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "runtime_top_p" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "beam_search_diversity_rate" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "temperature" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "len_penalty" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "repetition_penalty" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "random_seed" + data_type: TYPE_UINT64 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "is_return_log_probs" + data_type: TYPE_BOOL + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "beam_width" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "start_id" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "end_id" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "bad_words_list" + data_type: TYPE_INT32 + dims: [ 2, -1 ] + optional: true + }, + { + name: "stop_words_list" + data_type: TYPE_INT32 + dims: [ 2, -1 ] + optional: true + }, + { + name: "prompt_learning_task_name_ids" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "top_p_decay" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "top_p_min" + data_type: TYPE_FP32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "top_p_reset_ids" + data_type: TYPE_UINT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "START" + data_type: TYPE_INT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "END" + data_type: TYPE_INT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "STOP" + data_type: TYPE_INT32 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + }, + { + name: "CORRID" + data_type: TYPE_UINT64 + dims: [ 1 ] + reshape: { shape: [ ] } + optional: true + } +] +output [ + { + name: "output_ids" + data_type: TYPE_UINT32 + dims: [ -1, -1 ] + }, + { + name: "sequence_length" + data_type: TYPE_UINT32 + dims: [ -1 ] + }, + { + name: "cum_log_probs" + data_type: TYPE_FP32 + dims: [ -1 ] + }, + { + name: "output_log_probs" + data_type: TYPE_FP32 + 
dims: [ -1, -1 ] + } +] + +parameters { + key: "pipeline_para_size" + value: { + string_value: "1" + } +} +parameters { + key: "data_type" + value: { + string_value: "fp16" + } +} +parameters { + key: "model_type" + value: { + string_value: "Llama" + } +} + +parameters { + key: "enable_custom_all_reduce" + value: { + string_value: "0" + } +} +parameters { + key: "tensor_para_size" + value: { + string_value: "1" + } +} +parameters { + key: "model_name" + value: { + string_value: "internlm-chat-7b" + } +} diff --git a/triton_models/postprocessing/1/__pycache__/model.cpython-310.pyc b/triton_models/postprocessing/1/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02fb8dcd78a10733c2eedd62b97afb338f24c73e Binary files /dev/null and b/triton_models/postprocessing/1/__pycache__/model.cpython-310.pyc differ diff --git a/triton_models/postprocessing/1/model.py b/triton_models/postprocessing/1/model.py new file mode 100644 index 0000000000000000000000000000000000000000..20de97595195da5dedc044a31c6086c1f49892da --- /dev/null +++ b/triton_models/postprocessing/1/model.py @@ -0,0 +1,129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from pathlib import Path + +import numpy as np +import triton_python_backend_utils as pb_utils + +# This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served +# by triton inference server, it has to be converted first by running +# `python lmdeploy/serve/turbomind/deploy.py`. Then +# `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py` +from .tokenizer.tokenizer import Tokenizer + + +class TritonPythonModel: + """Your Python model must use the same class name. + + Every Python model that is created must have "TritonPythonModel" as the + class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. This function allows + the model to initialize any state associated with this model. + Parameters + ---------- + args : dict + Both keys and values are strings. The dictionary keys and values are: + * model_config: A JSON string containing the model configuration + * model_instance_kind: A string containing model instance kind + * model_instance_device_id: A string containing model instance device + ID + * model_repository: Model repository path + * model_version: Model version + * model_name: Model name + """ + # Parse model configs + self.model_config = model_config = json.loads(args['model_config']) + + # Parse model output configs + output_config = pb_utils.get_output_config_by_name( + model_config, 'OUTPUT') + + # Convert Triton types to numpy types + self.output_dtype = pb_utils.triton_string_to_numpy( + output_config['data_type']) + + cur_folder = Path(__file__).parent + + self.tokenizer = Tokenizer( + osp.join( + cur_folder, self.model_config['parameters']['tokenizer_path'] + ['string_value'])) + + def execute(self, requests): + """`execute` must be implemented in every Python model. `execute` + function receives a list of pb_utils.InferenceRequest as the only + argument. This function is called when an inference is requested + for this model. Depending on the batching configuration (e.g. Dynamic + Batching) used, `requests` may contain multiple requests. Every + Python model, must create one pb_utils.InferenceResponse for every + pb_utils.InferenceRequest in `requests`. 
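The `turbomind` model above is decoupled (`decoupled: True`), so completions stream back through gRPC callbacks rather than in one blocking response. A minimal client sketch against just this model, assuming a server at `localhost:33337` and already-tokenized ids (the URL, port, and token ids are assumptions; session-control tensors such as START/END/CORRID are elided, and real deployments normally go through the preprocessing/postprocessing models as well):

```python
# Sketch only: streams results from the decoupled "turbomind" model above.
import queue

import numpy as np
import tritonclient.grpc as grpcclient

results = queue.Queue()

def on_result(result, error):
    # Decoupled models deliver every partial result through this callback.
    results.put(error if error is not None else result)

input_ids = np.array([[1, 333, 4452]], dtype=np.uint32)            # [1, seq_len]
input_lengths = np.array([[input_ids.shape[1]]], dtype=np.uint32)  # [1, 1]
output_len = np.array([[512]], dtype=np.uint32)                    # [1, 1]

inputs = []
for name, data in (('input_ids', input_ids),
                   ('input_lengths', input_lengths),
                   ('request_output_len', output_len)):
    tensor = grpcclient.InferInput(name, list(data.shape), 'UINT32')
    tensor.set_data_from_numpy(data)
    inputs.append(tensor)

client = grpcclient.InferenceServerClient('localhost:33337')
client.start_stream(callback=on_result)
client.async_stream_infer('turbomind', inputs)
first = results.get()  # blocks until the first output_ids/sequence_length batch
client.stop_stream()
```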
If there is an error, you can
+        set the error argument when creating a pb_utils.InferenceResponse.
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for idx, request in enumerate(requests):
+            # Get input tensors
+            tokens_batch = pb_utils.get_input_tensor_by_name(
+                request, 'TOKENS_BATCH').as_numpy()
+            sequence_length = pb_utils.get_input_tensor_by_name(
+                request, 'sequence_length').as_numpy()
+
+            # Postprocess the output data.
+            outputs = self._postprocessing(tokens_batch.tolist(),
+                                           sequence_length)
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            output_tensor = pb_utils.Tensor(
+                'OUTPUT',
+                np.array(outputs).astype(self.output_dtype))
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #    output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(
+                output_tensors=[output_tensor])
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+
+        Implementing `finalize` function is optional. This function allows the
+        model to perform any necessary cleanup before exit.
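The commented example in the loop above passes `TritonError` positionally after a keyword argument, which as written would be a syntax error; the Python backend takes it through the `error` keyword. An illustrative variant of the response construction (the `try/except` placement is an assumption, not part of the shipped model):

```python
# Illustrative variant of the loop body above: report a per-request
# failure via `error=` instead of raising out of execute().
try:
    outputs = self._postprocessing(tokens_batch.tolist(), sequence_length)
    inference_response = pb_utils.InferenceResponse(output_tensors=[
        pb_utils.Tensor('OUTPUT', np.array(outputs).astype(self.output_dtype))
    ])
except Exception as e:
    inference_response = pb_utils.InferenceResponse(
        output_tensors=[],
        error=pb_utils.TritonError(f'postprocessing failed: {e}'))
responses.append(inference_response)
```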
+ """ + print('Cleaning up...') + + def _postprocessing(self, tokens_batch, sequence_length): + """decode token ids into texts.""" + outputs = [] + for beam_tokens, beam_len in zip(tokens_batch, sequence_length): + for tokens, _len in zip(beam_tokens, beam_len): + output = self.tokenizer.decode(tokens, _len) + output = output.encode('utf8') + outputs.append(output) + return outputs diff --git a/triton_models/postprocessing/1/tokenizer b/triton_models/postprocessing/1/tokenizer new file mode 100644 index 0000000000000000000000000000000000000000..a92be7faecb29307d691683e022915639ca09f22 --- /dev/null +++ b/triton_models/postprocessing/1/tokenizer @@ -0,0 +1 @@ +../../tokenizer \ No newline at end of file diff --git a/triton_models/postprocessing/config.pbtxt b/triton_models/postprocessing/config.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..a4c3fd1041dcd03dc5c18b3fc28533cb82ac5653 --- /dev/null +++ b/triton_models/postprocessing/config.pbtxt @@ -0,0 +1,36 @@ +name: "postprocessing" +backend: "python" +max_batch_size: 1 +input [ + { + name: "TOKENS_BATCH" + data_type: TYPE_UINT32 + dims: [ -1, -1 ] + }, + { + name: "sequence_length" + data_type: TYPE_UINT32 + dims: [ -1 ] + } +] +output [ + { + name: "OUTPUT" + data_type: TYPE_STRING + dims: [ -1, -1 ] + } +] + +instance_group [ + { + count: 16 + kind: KIND_CPU + } +] + +parameters { + key: "tokenizer_path" + value: { + string_value: "tokenizer/tokenizer.model" + } +} diff --git a/triton_models/preprocessing/1/__pycache__/model.cpython-310.pyc b/triton_models/preprocessing/1/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b90f143dd1332eb9093f1e876123608ded922b8d Binary files /dev/null and b/triton_models/preprocessing/1/__pycache__/model.cpython-310.pyc differ diff --git a/triton_models/preprocessing/1/model.py b/triton_models/preprocessing/1/model.py new file mode 100644 index 0000000000000000000000000000000000000000..7e659fbae01737bd0a83980faf0e1eff9e607c3f --- /dev/null +++ b/triton_models/preprocessing/1/model.py @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from pathlib import Path + +import numpy as np +import torch +import triton_python_backend_utils as pb_utils +from torch.nn.utils.rnn import pad_sequence + +# This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served +# by triton inference server, it has to be converted first by running +# `python lmdeploy/serve/turbomind/deploy.py`. Then +# `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py` +from .tokenizer.tokenizer import Tokenizer + + +class TritonPythonModel: + """Your Python model must use the same class name. + + Every Python model that is created must have "TritonPythonModel" as the + class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. This function allows + the model to initialize any state associated with this model. + Parameters + ---------- + args : dict + Both keys and values are strings. 
The dictionary keys and values are: + * model_config: A JSON string containing the model configuration + * model_instance_kind: A string containing model instance kind + * model_instance_device_id: A string containing model instance device + ID + * model_repository: Model repository path + * model_version: Model version + * model_name: Model name + """ + # Parse model configs + self.model_config = model_config = json.loads(args['model_config']) + + # Parse model output configs and convert Triton types to numpy types + input_names = ['INPUT_ID', 'REQUEST_INPUT_LEN'] + for input_name in input_names: + setattr( + self, + input_name.lower() + '_dtype', + pb_utils.triton_string_to_numpy( + pb_utils.get_output_config_by_name( + model_config, input_name)['data_type'])) + + cur_folder = Path(__file__).parent + self.tokenizer = Tokenizer( + osp.join( + cur_folder, self.model_config['parameters']['tokenizer_path'] + ['string_value'])) + self.start_id = self.tokenizer.bos_token_id + self.end_id = self.tokenizer.eos_token_id + + def execute(self, requests): + """`execute` must be implemented in every Python model. `execute` + function receives a list of pb_utils.InferenceRequest as the only + argument. This function is called when an inference is requested + for this model. Depending on the batching configuration (e.g. Dynamic + Batching) used, `requests` may contain multiple requests. Every + Python model, must create one pb_utils.InferenceResponse for every + pb_utils.InferenceRequest in `requests`. If there is an error, you can + set the error argument when creating a pb_utils.InferenceResponse. + Parameters + ---------- + requests : list + A list of pb_utils.InferenceRequest + Returns + ------- + list + A list of pb_utils.InferenceResponse. The length of this list must + be the same as `requests` + """ + + responses = [] + + # Every Python backend must iterate over everyone of the requests + # and create a pb_utils.InferenceResponse for each of them. + for idx, request in enumerate(requests): + # Get input tensors + query = pb_utils.get_input_tensor_by_name(request, + 'QUERY').as_numpy() + + # Preprocessing input data. + input_id, request_input_len = self._create_request(query) + + # Create output tensors. You need pb_utils.Tensor + # objects to create pb_utils.InferenceResponse. + input_id_tensor = pb_utils.Tensor( + 'INPUT_ID', + np.array(input_id).astype(self.input_id_dtype)) + request_input_len_tensor = pb_utils.Tensor( + 'REQUEST_INPUT_LEN', + np.array(request_input_len).astype( + self.request_input_len_dtype)) + + # Create InferenceResponse. You can set an error here in case + # there was a problem with handling this inference request. + # Below is an example of how you can set errors in inference + # response: + # + # pb_utils.InferenceResponse( + # output_tensors=..., TritonError("An error occurred")) + inference_response = pb_utils.InferenceResponse( + output_tensors=[input_id_tensor, request_input_len_tensor]) + responses.append(inference_response) + + # You should return a list of pb_utils.InferenceResponse. Length + # of this list must match the length of `requests` list. + return responses + + def finalize(self): + """`finalize` is called only once when the model is being unloaded. + + Implementing `finalize` function is optional. This function allows the + model to perform any necessary clean ups before exit. + """ + print('Cleaning up...') + + def _create_request(self, query): + """Tokenize prompts and return the token ids and their length. 
+
+        Args:
+            query (List[str]): a list of prompt
+        Returns:
+            tuple: token ids and their length
+        """
+        start_ids = []
+        for s in query:
+            _s = s[0].decode()
+            if _s == '<BOS>':
+                start_id = [self.start_id
+                            ] if self.start_id is not None else [-1]
+            elif _s == '<EOS>':
+                start_id = [self.end_id] if self.end_id is not None else [-1]
+            else:
+                start_id = self.tokenizer.encode(_s)
+            start_ids.append(torch.IntTensor(start_id))
+
+        start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
+        start_ids = pad_sequence(start_ids,
+                                 batch_first=True,
+                                 padding_value=self.end_id)
+        return start_ids, start_lengths
diff --git a/triton_models/preprocessing/1/tokenizer b/triton_models/preprocessing/1/tokenizer
new file mode 100644
index 0000000000000000000000000000000000000000..a92be7faecb29307d691683e022915639ca09f22
--- /dev/null
+++ b/triton_models/preprocessing/1/tokenizer
@@ -0,0 +1 @@
+../../tokenizer
\ No newline at end of file
diff --git a/triton_models/preprocessing/config.pbtxt b/triton_models/preprocessing/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..997ba399ba04f1f521bdbf088815d1dd3c26f696
--- /dev/null
+++ b/triton_models/preprocessing/config.pbtxt
@@ -0,0 +1,37 @@
+name: "preprocessing"
+backend: "python"
+max_batch_size: 1
+
+input [
+    {
+        name: "QUERY"
+        data_type: TYPE_STRING
+        dims: [ -1 ]
+    }
+]
+output [
+    {
+        name: "INPUT_ID"
+        data_type: TYPE_UINT32
+        dims: [ -1 ]
+    },
+    {
+        name: "REQUEST_INPUT_LEN"
+        data_type: TYPE_UINT32
+        dims: [ 1 ]
+    }
+]
+
+instance_group [
+    {
+        count: 4
+        kind: KIND_CPU
+    }
+]
+
+parameters {
+  key: "tokenizer_path"
+  value: {
+    string_value: "tokenizer/tokenizer.model"
+  }
+}
diff --git a/triton_models/tokenizer/config.json b/triton_models/tokenizer/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2a508ab0f1e8f080c4318609590933754feb4a20
--- /dev/null
+++ b/triton_models/tokenizer/config.json
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "/root/share/temp/model_repos/internlm-chat-7b/",
+  "architectures": [
+    "InternLMForCausalLM"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_internlm.InternLMConfig",
+    "AutoModel": "modeling_internlm.InternLMForCausalLM",
+    "AutoModelForCausalLM": "modeling_internlm.InternLMForCausalLM"
+  },
+  "bias": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "fp16": true,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 2048,
+  "model_type": "internlm",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "pad_token_id": 2,
+  "rms_norm_eps": 1e-06,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.33.1",
+  "use_cache": false,
+  "vocab_size": 103168
+}
diff --git a/triton_models/tokenizer/configuration_internlm.py b/triton_models/tokenizer/configuration_internlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..298f91319529e9b3034bcb74bb428d610534a0ba
--- /dev/null
+++ b/triton_models/tokenizer/configuration_internlm.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
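The `_create_request` above right-pads the ragged batch with the EOS id (2, per `tokenizer/config.json`) and reports per-prompt lengths separately so padding can be ignored downstream. A self-contained sketch of that contract, with invented token ids:

```python
# Standalone illustration of the padding in _create_request above;
# the token ids are invented, only shapes/dtypes mirror the model.
import torch
from torch.nn.utils.rnn import pad_sequence

end_id = 2  # eos_token_id from tokenizer/config.json
start_ids = [torch.IntTensor([1, 400, 573]),  # prompt A: 3 tokens
             torch.IntTensor([1, 92])]        # prompt B: 2 tokens

start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
padded = pad_sequence(start_ids, batch_first=True, padding_value=end_id)
print(padded.tolist())         # [[1, 400, 573], [1, 92, 2]]
print(start_lengths.tolist())  # [[3], [2]]
```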
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" InternLM model configuration""" + +from transformers.utils import logging +from transformers.configuration_utils import PretrainedConfig + + +logger = logging.get_logger(__name__) + +INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {} + + +class InternLMConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the InternLM-7B. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`InternLMModel`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. 
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + Example: + + ```python + >>> from transformers import InternLMModel, InternLMConfig + + >>> # Initializing a InternLM internlm-7b style configuration + >>> configuration = InternLMConfig() + + >>> # Initializing a model from the internlm-7b style configuration + >>> model = InternLMModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "internlm" + _auto_class = "AutoConfig" + + def __init__( + self, + vocab_size=103168, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + tie_word_embeddings=False, + bias=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.bias = bias + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git a/triton_models/tokenizer/generation_config.json b/triton_models/tokenizer/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..c29a44e1d9a440dc9ae06421d0fbedc6507e6760 --- /dev/null +++ b/triton_models/tokenizer/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 1, + "eos_token_id": 2, + "pad_token_id": 2, + "transformers_version": "4.33.1" +} diff --git a/triton_models/tokenizer/modeling_internlm.py b/triton_models/tokenizer/modeling_internlm.py new file mode 100644 index 0000000000000000000000000000000000000000..442706ca3f030a64df5825d75c231d7ff203d18c --- /dev/null +++ b/triton_models/tokenizer/modeling_internlm.py @@ -0,0 +1,1015 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
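For reference, the `tokenizer/config.json` shown earlier maps one-to-one onto this class; a sketch that rebuilds the same configuration in code (assuming `configuration_internlm.py` is importable from the working directory — values copied from that JSON):

```python
# Rebuild the internlm-chat-7b configuration from the JSON shown above.
from configuration_internlm import InternLMConfig

config = InternLMConfig(
    vocab_size=103168,
    hidden_size=4096,
    intermediate_size=11008,
    num_hidden_layers=32,
    num_attention_heads=32,
    max_position_embeddings=2048,
    rms_norm_eps=1e-6,
    pad_token_id=2,
    bos_token_id=1,
    eos_token_id=2,
    bias=True,
)
print(config.model_type)  # "internlm"
```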
+""" PyTorch InternLM model.""" +import math +from typing import List, Optional, Tuple, Union +import threading, queue + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.generation.streamers import BaseStreamer +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_internlm import InternLMConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "InternLMConfig" + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class InternLMRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + InternLMRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +class InternLMRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer("inv_freq", inv_freq) + + # Build here to make `torch.jit.trace` work. 
+ self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. + if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. + cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] + sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] + cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class InternLMMLP(nn.Module): + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + ): + super().__init__() + self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) + self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.act_fn = ACT2FN[hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +class InternLMAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: InternLMConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias) + self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + # [bsz, nh, t, hd] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class InternLMDecoderLayer(nn.Module): + def __init__(self, config: InternLMConfig): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = InternLMAttention(config=config) + self.mlp = InternLMMLP( + hidden_size=self.hidden_size, + 
intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + self.input_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +INTERNLM_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`InternLMConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
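Because the repository's `config.json` wires `auto_map` to these classes, the checkpoint is normally materialized through the `transformers` Auto classes with remote code enabled; a brief sketch (the local path is a placeholder):

```python
# Load the converted checkpoint via the auto_map entries in config.json.
# "path/to/internlm-chat-7b" is a placeholder for the local model directory.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = 'path/to/internlm-chat-7b'
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.float16, trust_remote_code=True)
model.eval()
```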
+""" + + +@add_start_docstrings( + "The bare InternLM Model outputting raw hidden-states without any specific head on top.", + INTERNLM_START_DOCSTRING, +) +class InternLMPreTrainedModel(PreTrainedModel): + config_class = InternLMConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["InternLMDecoderLayer"] + _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, InternLMModel): + module.gradient_checkpointing = value + + +INTERNLM_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare InternLM Model outputting raw hidden-states without any specific head on top.", + INTERNLM_START_DOCSTRING, +) +class InternLMModel(InternLMPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLMDecoderLayer`] + + Args: + config: InternLMConfig + """ + + _auto_class = "AutoModel" + + def __init__(self, config: InternLMConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = 
output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class InternLMForCausalLM(InternLMPreTrainedModel): + _auto_class = "AutoModelForCausalLM" + + def __init__(self, config): + super().__init__(config) + self.model = InternLMModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, InternLMForCausalLM + + >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you consciours? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += (tuple(past_state.index_select(0, 
beam_idx) for past_state in layer_past),)
+        return reordered_past
+
+    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = []):
+        prompt = ""
+        for record in history:
+            prompt += f"""<s><|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
+        if len(prompt) == 0:
+            prompt += "<s>"
+        prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
+        return tokenizer([prompt], return_tensors="pt")
+
+    @torch.no_grad()
+    def chat(
+        self,
+        tokenizer,
+        query: str,
+        history: List[Tuple[str, str]] = [],
+        streamer: Optional[BaseStreamer] = None,
+        max_new_tokens: int = 1024,
+        do_sample: bool = True,
+        temperature: float = 0.8,
+        top_p: float = 0.8,
+        **kwargs,
+    ):
+        inputs = self.build_inputs(tokenizer, query, history)
+        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
+        outputs = self.generate(
+            **inputs,
+            streamer=streamer,
+            max_new_tokens=max_new_tokens,
+            do_sample=do_sample,
+            temperature=temperature,
+            top_p=top_p,
+            **kwargs,
+        )
+        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
+        response = tokenizer.decode(outputs, skip_special_tokens=True)
+        response = response.split("<eoa>")[0]
+        history = history + [(query, response)]
+        return response, history
+
+    @torch.no_grad()
+    def stream_chat(
+        self,
+        tokenizer,
+        query: str,
+        history: List[Tuple[str, str]] = [],
+        max_new_tokens: int = 1024,
+        do_sample: bool = True,
+        temperature: float = 0.8,
+        top_p: float = 0.8,
+        **kwargs,
+    ):
+        """
+        Return a generator in format: (response, history)
+        Eg.
+        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
+        ('你好,有什么可以帮助您的吗？', [('你好', '你好,有什么可以帮助您的吗？')])
+        """
+
+        response_queue = queue.Queue(maxsize=20)
+
+        class ChatStreamer(BaseStreamer):
+            def __init__(self, tokenizer) -> None:
+                super().__init__()
+                self.tokenizer = tokenizer
+                self.queue = response_queue
+                self.query = query
+                self.history = history
+                self.response = ""
+                self.received_inputs = False
+                self.queue.put((self.response, history + [(self.query, self.response)]))
+
+            def put(self, value):
+                if len(value.shape) > 1 and value.shape[0] > 1:
+                    raise ValueError("ChatStreamer only supports batch size 1")
+                elif len(value.shape) > 1:
+                    value = value[0]
+
+                if not self.received_inputs:
+                    # The first received value is input_ids, ignore here
+                    self.received_inputs = True
+                    return
+
+                token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
+                if token.strip() != "<eoa>":
+                    self.response = self.response + token
+                    history = self.history + [(self.query, self.response)]
+                    self.queue.put((self.response, history))
+
+            def end(self):
+                self.queue.put(None)
+
+        def stream_producer():
+            return self.chat(
+                tokenizer=tokenizer,
+                query=query,
+                streamer=ChatStreamer(tokenizer=tokenizer),
+                history=history,
+                max_new_tokens=max_new_tokens,
+                do_sample=do_sample,
+                temperature=temperature,
+                top_p=top_p,
+                **kwargs,
+            )
+
+        def consumer():
+            producer = threading.Thread(target=stream_producer)
+            producer.start()
+            while True:
+                res = response_queue.get()
+                if res is None:
+                    return
+                yield res
+
+        return consumer()
+
+
+@add_start_docstrings(
+    """
+    The InternLM Model transformer with a sequence classification head on top (linear layer).
+
+    [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+    (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it requires to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
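`stream_chat` above returns a generator of `(response, history)` pairs, each carrying the text decoded so far; a short consumption sketch (assumes `model` and `tokenizer` were loaded as in the earlier Auto-classes example):

```python
# Drain the stream_chat generator defined above; `model`/`tokenizer`
# are assumed to be an InternLMForCausalLM and its tokenizer.
history = []
for response, history in model.stream_chat(tokenizer, 'Hello', history=history):
    print(response)  # prints the progressively longer partial response
```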
+
+
+@add_start_docstrings(
+    """
+    The InternLM Model transformer with a sequence classification head on top (linear layer).
+
+    [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+    (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+    each row of the batch).
+    """,
+    INTERNLM_START_DOCSTRING,
+)
+class InternLMForSequenceClassification(InternLMPreTrainedModel):
+    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.model = InternLMModel(config)
+        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
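The last-non-padding-token rule from the class docstring reduces to counting non-pad positions per row, then indexing one time step per sequence. A standalone sketch with invented inputs (`pad_token_id = 0` is an assumption for illustration; like the forward pass below, the rule presumes right padding):

```python
import torch

pad_token_id = 0  # assumed for this example
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 6, 2, 8]])
# Index of the last non-padding token in each row.
sequence_lengths = torch.ne(input_ids, pad_token_id).sum(-1) - 1  # tensor([2, 4])
logits = torch.randn(2, 5, 3)  # (batch, seq_len, num_labels)
pooled_logits = logits[torch.arange(2), sequence_lengths]  # shape (2, 3)
```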
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/triton_models/tokenizer/placeholder b/triton_models/tokenizer/placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/triton_models/tokenizer/pytorch_model.bin.index.json b/triton_models/tokenizer/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..cd43985019f6100d0b31b9ad72f18df5c17d0ed7 --- /dev/null +++ b/triton_models/tokenizer/pytorch_model.bin.index.json @@ -0,0 +1,906 @@ +{ + "metadata": { + "total_size": 5157568512 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00003-of-00003.bin", + "model.embed_tokens.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + 
"model.layers.0.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.0.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.q_proj.scales": 
"pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.1.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.10.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.10.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.10.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.10.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.10.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.10.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.post_attention_layernorm.weight": 
"pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.11.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.v_proj.qzeros": 
"pytorch_model-00002-of-00003.bin", + "model.layers.12.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.13.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", 
+ "model.layers.14.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.14.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.15.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + 
"model.layers.16.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.16.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + 
"model.layers.17.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.17.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.18.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + 
"model.layers.19.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.19.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + 
"model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.2.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.20.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.20.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + 
"model.layers.21.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.21.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.22.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + 
"model.layers.22.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.23.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + 
"model.layers.24.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.24.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.25.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + 
"model.layers.26.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.26.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + 
"model.layers.27.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.27.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.down_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.down_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.down_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.gate_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.gate_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.gate_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.up_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.up_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.mlp.up_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.k_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.k_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.k_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.o_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.o_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.o_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.o_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.q_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.q_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.q_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.v_proj.qweight": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.v_proj.qzeros": "pytorch_model-00002-of-00003.bin", + "model.layers.28.self_attn.v_proj.scales": "pytorch_model-00002-of-00003.bin", + "model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.down_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.down_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.down_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.gate_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.gate_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.gate_proj.scales": "pytorch_model-00003-of-00003.bin", + 
"model.layers.29.mlp.up_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.up_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.mlp.up_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.k_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.k_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.k_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.o_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.o_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.o_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.o_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin", + "model.layers.29.self_attn.q_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.q_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.q_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.v_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.v_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.29.self_attn.v_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + 
"model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.3.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.down_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.down_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.down_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.gate_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.gate_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.gate_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.up_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.up_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.mlp.up_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.k_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.k_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.k_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.o_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.o_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.o_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.o_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.q_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.q_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.q_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.v_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.v_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.30.self_attn.v_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.down_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.down_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.down_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.gate_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.gate_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.gate_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.up_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.up_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.mlp.up_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + 
"model.layers.31.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.k_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.k_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.k_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.o_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.o_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.o_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.o_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.q_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.q_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.q_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.v_proj.qweight": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.v_proj.qzeros": "pytorch_model-00003-of-00003.bin", + "model.layers.31.self_attn.v_proj.scales": "pytorch_model-00003-of-00003.bin", + "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.4.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + 
"model.layers.4.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.5.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + 
"model.layers.6.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.6.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.7.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.gate_proj.qweight": 
"pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.8.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.down_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.down_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.down_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.gate_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.gate_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.gate_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.up_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.up_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.mlp.up_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.k_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.k_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.k_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.o_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.o_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.o_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.o_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.q_proj.qweight": "pytorch_model-00001-of-00003.bin", + 
"model.layers.9.self_attn.q_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.q_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.v_proj.qweight": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.v_proj.qzeros": "pytorch_model-00001-of-00003.bin", + "model.layers.9.self_attn.v_proj.scales": "pytorch_model-00001-of-00003.bin", + "model.norm.weight": "pytorch_model-00003-of-00003.bin" + } +} diff --git a/triton_models/tokenizer/special_tokens_map.json b/triton_models/tokenizer/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..9bfed7513d3b1b65445af10c4571256f4a19b290 --- /dev/null +++ b/triton_models/tokenizer/special_tokens_map.json @@ -0,0 +1,6 @@ +{ + "bos_token": "", + "eos_token": "", + "pad_token": "", + "unk_token": "" +} diff --git a/triton_models/tokenizer/tokenization_internlm.py b/triton_models/tokenizer/tokenization_internlm.py new file mode 100644 index 0000000000000000000000000000000000000000..5ce1e66c330b09341a6465bc5514a12c92b5f478 --- /dev/null +++ b/triton_models/tokenizer/tokenization_internlm.py @@ -0,0 +1,242 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization classes for IntermLM.""" +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm + +from transformers.tokenization_utils import PreTrainedTokenizer +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"} + +PRETRAINED_VOCAB_FILES_MAP = {} + + +class InternLMTokenizer(PreTrainedTokenizer): + """ + Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + model_input_names = ["input_ids", "attention_mask"] + _auto_class = "AutoTokenizer" + + def __init__( + self, + vocab_file, + unk_token="", + bos_token="", + eos_token="", + pad_token="", + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + decode_with_prefix_space=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + self.vocab_file = vocab_file + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.decode_with_prefix_space = decode_with_prefix_space + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + self._no_prefix_space_tokens = None + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + """ Initialization""" + + @property + def no_prefix_space_tokens(self): + if self._no_prefix_space_tokens is None: + vocab = self.convert_ids_to_tokens(list(range(self.vocab_size))) + self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")} + return self._no_prefix_space_tokens + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + @property + def bos_token_id(self) -> Optional[int]: + return self.sp_model.bos_id() + + @property + def eos_token_id(self) -> Optional[int]: + return self.sp_model.eos_id() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text): + """Returns a tokenized string.""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def _maybe_add_prefix_space(self, tokens, decoded): + if tokens and tokens[0] not in self.no_prefix_space_tokens: + return " " + decoded + else: + return decoded + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens = [] + out_string = "" + prev_is_special = False + for token in tokens: + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + out_string = self.clean_up_tokenization(out_string) + out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string) + return out_string[1:] + + def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. 
+ """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is not None: + output = output + token_ids_1 + + if self.add_eos_token: + output = output + [self.eos_token_id] + + return output + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make + use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + eos = [self.eos_token_id] + + if token_ids_1 is None: + return len(token_ids_0 + eos) * [0] + return len(token_ids_0 + eos + token_ids_1 + eos) * [0] \ No newline at end of file diff --git a/triton_models/tokenizer/tokenizer.model b/triton_models/tokenizer/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..24f4d0607b1f6a966a5d653bb255813638de0bec --- /dev/null +++ b/triton_models/tokenizer/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aab622d98c98677a1a51f969e25765154487bf3e85c7819db105db2fcacba83f +size 1658691 diff --git a/triton_models/tokenizer/tokenizer.py b/triton_models/tokenizer/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..2ddecdfd40465f077cfab0544fbffbcd35c462c8 --- /dev/null +++ b/triton_models/tokenizer/tokenizer.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import json +import os +import os.path as osp +from typing import Optional, Sequence, Union + +import torch + + +class SentencePieceTokenizer: + """Tokenizer of sentencepiece. + + Args: + model_file (str): the path of the tokenizer model + """ + + def __init__(self, model_file: str): + from sentencepiece import SentencePieceProcessor + self.model = SentencePieceProcessor(model_file=model_file) + self._prefix_space_tokens = None + + @property + def vocab_size(self): + """vocabulary size.""" + return self.model.vocab_size() + + @property + def bos_token_id(self): + """beginning of the sentence token id.""" + return self.model.bos_id() + + @property + def eos_token_id(self): + """end of the sentence token id.""" + return self.model.eos_id() + + @property + def prefix_space_tokens(self): + """tokens that begin with a prefix space.""" + if self._prefix_space_tokens is None: + vocab = self.model.IdToPiece(list(range(self.vocab_size))) + self._prefix_space_tokens = { + i + for i, tok in enumerate(vocab) if tok.startswith('▁') + } + return self._prefix_space_tokens + + def _maybe_add_prefix_space(self, tokens, decoded): + """maybe add prefix space for incremental decoding.""" + if len(tokens) and not decoded.startswith(' ') and\ + tokens[0] in self.prefix_space_tokens: + return ' ' + decoded + else: + return decoded + + def encode(self, s: str, add_bos: bool = True, **kwargs): + """Tokenize a prompt. + + Args: + s (str): a prompt + Returns: + list[int]: token ids + """ + return self.model.Encode(s, add_bos=add_bos, **kwargs) + + def decode(self, t: Sequence[int], offset: Optional[int] = None): + """De-tokenize. + + Args: + t (List[int]): a list of token ids + offset (int): for incrementally decoding. Defaults to None, which + means it is not applied. + Returns: + str: text of decoded tokens + """ + if isinstance(t, torch.Tensor): + t = t.tolist() + t = t[offset:] + out_string = self.model.Decode(t) + if offset: + out_string = self._maybe_add_prefix_space(t, out_string) + return out_string + + def __call__(self, s: Union[str, Sequence[str]]): + """Tokenize prompts. + + Args: + s (str or Sequence[str]): prompts + Returns: + list[int]: token ids + """ + import addict + add_bos = False + add_eos = False + + input_ids = self.model.Encode(s, add_bos=add_bos, add_eos=add_eos) + return addict.Addict(input_ids=input_ids) + + +class HuggingFaceTokenizer: + """Tokenizer of huggingface. + + Args: + model_dir (str): the directory of the tokenizer model + """ + + def __init__(self, model_dir: str): + from transformers import AutoTokenizer + model_file = osp.join(model_dir, 'tokenizer.model') + backend_tokenizer_file = osp.join(model_dir, 'tokenizer.json') + model_file_exists = osp.exists(model_file) + if not osp.exists(backend_tokenizer_file) and model_file_exists: + print('WARNING: Cannot find tokenizer.json.
' + 'It may take a long time to initialize the tokenizer.') + self.model = AutoTokenizer.from_pretrained(model_dir, + trust_remote_code=True) + self._prefix_space_tokens = None + # save tokenizer.json to reuse + if not osp.exists(backend_tokenizer_file) and model_file_exists: + if hasattr(self.model, 'backend_tokenizer'): + if os.access(model_dir, os.W_OK): + self.model.backend_tokenizer.save(backend_tokenizer_file) + + if self.model.eos_token_id is None: + generation_config_file = osp.join(model_dir, + 'generation_config.json') + if osp.exists(generation_config_file): + with open(generation_config_file, 'r') as f: + cfg = json.load(f) + self.model.eos_token_id = cfg['eos_token_id'] + elif hasattr(self.model, 'eod_id'): # Qwen remote + self.model.eos_token_id = self.model.eod_id + + @property + def vocab_size(self): + """vocabulary size.""" + return self.model.vocab_size + + @property + def bos_token_id(self): + """beginning of the sentence token id.""" + return self.model.bos_token_id + + @property + def eos_token_id(self): + """end of the sentence token id.""" + return self.model.eos_token_id + + @property + def prefix_space_tokens(self): + """tokens that begin with a prefix space.""" + if self._prefix_space_tokens is None: + vocab = self.model.convert_ids_to_tokens( + list(range(self.vocab_size))) + self._prefix_space_tokens = { + i + for i, tok in enumerate(vocab) + if tok.startswith('▁' if isinstance(tok, str) else b' ') + } + return self._prefix_space_tokens + + def _maybe_add_prefix_space(self, tokens, decoded): + """maybe add prefix space for incremental decoding.""" + if len(tokens) and not decoded.startswith(' ') and\ + tokens[0] in self.prefix_space_tokens: + return ' ' + decoded + else: + return decoded + + def encode(self, s: str, add_bos: bool = True, **kwargs): + """Tokenize a prompt. + + Args: + s (str): a prompt + Returns: + list[int]: token ids + """ + encoded = self.model.encode(s, **kwargs) + if not add_bos: + # in the middle of a session + if len(encoded) and encoded[0] == self.bos_token_id: + encoded = encoded[1:] + return encoded + + def decode(self, t: Sequence[int], offset: Optional[int] = None): + """De-tokenize. + + Args: + t (List[int]): a list of token ids + offset (int): for incrementally decoding. Defaults to None, which + means it is not applied. + Returns: + str: text of decoded tokens + """ + skip_special_tokens = True + t = t[offset:] + out_string = self.model.decode(t, + skip_special_tokens=skip_special_tokens) + if offset: + out_string = self._maybe_add_prefix_space(t, out_string) + return out_string + + def __call__(self, s: Union[str, Sequence[str]]): + """Tokenize prompts. + + Args: + s (str or Sequence[str]): prompts + Returns: + list[int]: token ids + """ + add_special_tokens = False + return self.model(s, add_special_tokens=add_special_tokens) + + +class Tokenizer: + """Tokenize prompts or de-tokenize tokens into texts.
+ + Args: + model_file (str): the path of the tokenizer model + """ + + def __init__(self, model_file: str): + if model_file.endswith('.model'): + model_folder = osp.split(model_file)[0] + else: + model_folder = model_file + model_file = osp.join(model_folder, 'tokenizer.model') + tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json') + + model_file_exists = osp.exists(model_file) + config_exists = osp.exists(tokenizer_config_file) + # prefer the HF tokenizer whenever a config exists or the raw .model file is absent + use_hf_model = config_exists or not model_file_exists + + if not use_hf_model: + self.model = SentencePieceTokenizer(model_file) + else: + self.model = HuggingFaceTokenizer(model_folder) + + @property + def vocab_size(self): + """vocabulary size.""" + return self.model.vocab_size + + @property + def bos_token_id(self): + """beginning of the sentence token id.""" + return self.model.bos_token_id + + @property + def eos_token_id(self): + """end of the sentence token id.""" + return self.model.eos_token_id + + def encode(self, s: str, add_bos: bool = True, **kwargs): + """Tokenize a prompt. + + Args: + s (str): a prompt + Returns: + list[int]: token ids + """ + return self.model.encode(s, add_bos, **kwargs) + + def decode(self, t: Sequence[int], offset: Optional[int] = None): + """De-tokenize. + + Args: + t (List[int]): a list of token ids + offset (int): for incrementally decoding. Defaults to None, which + means it is not applied. + Returns: + str: text of decoded tokens + """ + return self.model.decode(t, offset) + + def __call__(self, s: Union[str, Sequence[str]]): + """Tokenize prompts. + + Args: + s (str or Sequence[str]): prompts + Returns: + list[int]: token ids + """ + return self.model(s) diff --git a/triton_models/tokenizer/tokenizer_config.json b/triton_models/tokenizer/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..857ab9eccafd9682a491c525f5ebdc206c607de7 --- /dev/null +++ b/triton_models/tokenizer/tokenizer_config.json @@ -0,0 +1,15 @@ +{ + "auto_map": { + "AutoTokenizer": [ + "tokenization_internlm.InternLMTokenizer", + null + ] + }, + "bos_token": "<s>", + "clean_up_tokenization_spaces": false, + "eos_token": "</s>", + "model_max_length": 1000000000000000019884624838656, + "pad_token": "</s>", + "tokenizer_class": "InternLMTokenizer", + "unk_token": "<unk>" +} diff --git a/triton_models/weights/config.ini b/triton_models/weights/config.ini new file mode 100644 index 0000000000000000000000000000000000000000..9423a18b17f5480f675d191eca772c6300ac1bb0 --- /dev/null +++ b/triton_models/weights/config.ini @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2225240c57583520fd37b972a4711b1c24c6976a94f2f2fe3448fe4cc7b0dd95 +size 569 diff --git a/triton_models/weights/layers.0.attention.w_qkv.0.bias b/triton_models/weights/layers.0.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..ebc8301a164cbf2d24a9d953ba7fb0c4c9893268 --- /dev/null +++ b/triton_models/weights/layers.0.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6ff758024bd03a02de7fe535c2444f8ae2f3dff22489f615276916295e7a030 +size 24576 diff --git a/triton_models/weights/layers.0.attention.w_qkv.0.qweight b/triton_models/weights/layers.0.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0729b93a2fa2dd9792f73fe307561792b40a6094 --- /dev/null +++ b/triton_models/weights/layers.0.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid
sha256:e1a13fdadc18dc30f5f9d9d63e253eabc48da9056c63933d564ce1a4eb7997ec +size 25165824 diff --git a/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..3f063d5281c60f5dca6fd786e2dcbd020d7fb392 --- /dev/null +++ b/triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b9b9da34d1a7142c8996a454564eb64b261882146b5bf206fa3163ca374e979 +size 1572864 diff --git a/triton_models/weights/layers.0.attention.wo.0.bias b/triton_models/weights/layers.0.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..0a12465a388ce384fa68401fceebf69fefb96264 --- /dev/null +++ b/triton_models/weights/layers.0.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a69fe7d23dc53aa57b7af4f70e111782bd4078aa6d7e3a18905a32ffbc902f03 +size 8192 diff --git a/triton_models/weights/layers.0.attention.wo.0.qweight b/triton_models/weights/layers.0.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..88a10a600212a32c66e9d226d2ef8751236c6af7 --- /dev/null +++ b/triton_models/weights/layers.0.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac488432e41a88838f2ce198af1c7f8116553d8ebcced3ddf0ffe2c8469c1add +size 8388608 diff --git a/triton_models/weights/layers.0.attention.wo.0.scales_zeros b/triton_models/weights/layers.0.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..56ecc261c8ae4c819ef5fa45fe7f8793eb0a8628 --- /dev/null +++ b/triton_models/weights/layers.0.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4796983817d8541461aa5f90f6d593c2ebe804f780bcac58541e1015ebc19f77 +size 524288 diff --git a/triton_models/weights/layers.0.attention_norm.weight b/triton_models/weights/layers.0.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..fc111e2cb0b52d8c42bab20a89e8884d65f442e4 --- /dev/null +++ b/triton_models/weights/layers.0.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:167aa8ae66096ed9c9321f5887cdf09872aab99a6a95bafef099eb554b441285 +size 8192 diff --git a/triton_models/weights/layers.0.feed_forward.w13.0.qweight b/triton_models/weights/layers.0.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..4c7a6b9909f87fcadd7fd1c2c25f79a0580efd11 --- /dev/null +++ b/triton_models/weights/layers.0.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f751265c923dfbd9dbf7274da735809d467ea7d7f54c86e95caf1e4a9236c46 +size 45088768 diff --git a/triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..3e218428c2e0aa28ce1d1a520b6c0c4b1eaf2c5c --- /dev/null +++ b/triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b1e0e1dd6304ddfd10896506cf4391d3f2429f7374f38c8b8d17bf1bfeca371 +size 2818048 diff --git a/triton_models/weights/layers.0.feed_forward.w2.0.qweight b/triton_models/weights/layers.0.feed_forward.w2.0.qweight new file mode 100644 index 
0000000000000000000000000000000000000000..e74401a5460427edf79537ec2ce89135a41339a2 --- /dev/null +++ b/triton_models/weights/layers.0.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a367d78e2ebd36bb3f77c1f54b6b326b97b1f3f45b45b816662e9ec16b73740 +size 22544384 diff --git a/triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..8981c9d29cda5788c688f37ae0795daf176319cc --- /dev/null +++ b/triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b3769b39af4c8d9ceb99c60e0253be6c1b013432cff60743a5657790143b365 +size 1409024 diff --git a/triton_models/weights/layers.0.ffn_norm.weight b/triton_models/weights/layers.0.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..07be3ed18e4d31ef5971590ec2f101e7daee2cab --- /dev/null +++ b/triton_models/weights/layers.0.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a5545531f94a148dbcfe487c5ebd0309909caaf171ccab67e591fc11dd961d7 +size 8192 diff --git a/triton_models/weights/layers.0.past_kv_scale.0.weight b/triton_models/weights/layers.0.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..6581915c794067070e46e0abd0efc72a6012fccb --- /dev/null +++ b/triton_models/weights/layers.0.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3187a04fe1c87c67d881dd5c80d9a662bdc8f45bd77dc056be1299882dcd2fe5 +size 16 diff --git a/triton_models/weights/layers.1.attention.w_qkv.0.bias b/triton_models/weights/layers.1.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..8a6fc4cba28172d15f0e5302f9d7f92122215f66 --- /dev/null +++ b/triton_models/weights/layers.1.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e66a70472f91b5b3faeefd7276720c5408229920d84cf866e2f24f0c65f01ddf +size 24576 diff --git a/triton_models/weights/layers.1.attention.w_qkv.0.qweight b/triton_models/weights/layers.1.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..f2e6b5e65393286d434af89d8ad56eb58ae081ae --- /dev/null +++ b/triton_models/weights/layers.1.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2edbd7c7226da471fac7cfffc6fc87e6c871ea574b4ddb8cbd0796743a7b017d +size 25165824 diff --git a/triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..998cf227560fbb25179becefcb37a8968aed713a --- /dev/null +++ b/triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0d4d0715c18158514b8de14ca3fd27148873f532891a2a22a9100d9aa878adc +size 1572864 diff --git a/triton_models/weights/layers.1.attention.wo.0.bias b/triton_models/weights/layers.1.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..4694affde42b82c677266fa9322cf5ce659e2ba0 --- /dev/null +++ b/triton_models/weights/layers.1.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebda96ed0c22ebf73ba53a08b0d8caaf77916f512c8a7ac39386d28c59bfa765 +size 8192 diff --git 
a/triton_models/weights/layers.1.attention.wo.0.qweight b/triton_models/weights/layers.1.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3a6a78628442f992e478e0baa5f62e5f928bebc8 --- /dev/null +++ b/triton_models/weights/layers.1.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41ffef64917bff14b0a30236b043a27901dcd858218bb6fde0098ed3b5748ee4 +size 8388608 diff --git a/triton_models/weights/layers.1.attention.wo.0.scales_zeros b/triton_models/weights/layers.1.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..4badd6171caff65214299f484fd001ff639f8543 --- /dev/null +++ b/triton_models/weights/layers.1.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:054be90f7a0566c0e0bfa57f31b509ce07ecb3be2b96cace35fd0a95a77f9eb4 +size 524288 diff --git a/triton_models/weights/layers.1.attention_norm.weight b/triton_models/weights/layers.1.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..be10a1e8485e5be76ed1467df8ac721aa7144594 --- /dev/null +++ b/triton_models/weights/layers.1.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dc094f300eb9e2f7041e5f197fc0928fb06ee8b508293b3bd2cdcdcd1324451 +size 8192 diff --git a/triton_models/weights/layers.1.feed_forward.w13.0.qweight b/triton_models/weights/layers.1.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..4a72c18ceaa4bf72aa4a86246454b15c7b92a98e --- /dev/null +++ b/triton_models/weights/layers.1.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1860883e47c667d7bae113f120790dcb0a417781d8c64bd1289b1f94db71772a +size 45088768 diff --git a/triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..8267b53f4f80f1ded1e49bbe2d532ed498d49057 --- /dev/null +++ b/triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:010bb3584b26d2a2a3320934627f4695658c7b56b95582c14bf7429b289250d6 +size 2818048 diff --git a/triton_models/weights/layers.1.feed_forward.w2.0.qweight b/triton_models/weights/layers.1.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..f69bb86e6bde96b435f86deda89f5a643bc632ae --- /dev/null +++ b/triton_models/weights/layers.1.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ce34ebc1973deee89490d2967238163f8862749a626e0c6182b0a5f306aaf45 +size 22544384 diff --git a/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..f68b9b2ff1c863624c9dbf14831e68e2232f7774 --- /dev/null +++ b/triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96174efad333213a160dcd306293f18fb1664a2839e14a442770fda21236368c +size 1409024 diff --git a/triton_models/weights/layers.1.ffn_norm.weight b/triton_models/weights/layers.1.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..71892ddb8b2a28ab4d8cde214485c7f91e1413c1 --- /dev/null +++ 
b/triton_models/weights/layers.1.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aedc0167c3ce9b7d0c724d90434cc8cbafe6aa14009e3ab9231fc8ad53a02ea4 +size 8192 diff --git a/triton_models/weights/layers.1.past_kv_scale.0.weight b/triton_models/weights/layers.1.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..8e5edec51b56e3e5b0c65ed2cb5c4b78c904bc5e --- /dev/null +++ b/triton_models/weights/layers.1.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eba9e1a54861b9d99efdd0be0ac2a35e1d824a7177049454da9a3f191c88038 +size 16 diff --git a/triton_models/weights/layers.10.attention.w_qkv.0.bias b/triton_models/weights/layers.10.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..f584a99969e287d7aae10747338c135eceb8004a --- /dev/null +++ b/triton_models/weights/layers.10.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d700319871b959816fbc688e59c2449494bdbbf5f904fd08e89f3409e8e0ce67 +size 24576 diff --git a/triton_models/weights/layers.10.attention.w_qkv.0.qweight b/triton_models/weights/layers.10.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..994a3f83e704342e597a9dec57c5681175ffe0a3 --- /dev/null +++ b/triton_models/weights/layers.10.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe5a7afc6dc3d504783975b63c7c38c714cd85e5fbde1d8ec22e9ac8469bd1c +size 25165824 diff --git a/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..ec946f2f794a0a7b4e701e5aba5565213ed3329b --- /dev/null +++ b/triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26cabd3fe90de0012b44144d38315322b0f9d7e7ff05dcea1dbf6b463c7b4feb +size 1572864 diff --git a/triton_models/weights/layers.10.attention.wo.0.bias b/triton_models/weights/layers.10.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..9910971351a1d83337e5f0339d357e2ef2c6a8a3 --- /dev/null +++ b/triton_models/weights/layers.10.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15412cdc14e8e65bdd82887f4b5659a2040da09888ab6bf99e55331224fce5ce +size 8192 diff --git a/triton_models/weights/layers.10.attention.wo.0.qweight b/triton_models/weights/layers.10.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..6f79d0a59dd38cb469eb15d05a932c2fbc3ba8d3 --- /dev/null +++ b/triton_models/weights/layers.10.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:886864ad73c3647aa786e22447398f1041494aff7d03bba85a6496788b0e476b +size 8388608 diff --git a/triton_models/weights/layers.10.attention.wo.0.scales_zeros b/triton_models/weights/layers.10.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..768576998ce309a73739ba62e56fb6c64dee0980 --- /dev/null +++ b/triton_models/weights/layers.10.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2064eb4add9118b764540ff6187d378e4240443340b986ac8180f62ce5c5cc3 +size 524288 diff --git a/triton_models/weights/layers.10.attention_norm.weight 
b/triton_models/weights/layers.10.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..b83005ae96ad5184ae67c29df72e0e12ed2582f6 --- /dev/null +++ b/triton_models/weights/layers.10.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:086da7560d9cb5813f2233547a6e855775efe4362001ec16ae1217d6e5906906 +size 8192 diff --git a/triton_models/weights/layers.10.feed_forward.w13.0.qweight b/triton_models/weights/layers.10.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..c35db2dee71ea7600bfc79f3a2b986c97286a7dc --- /dev/null +++ b/triton_models/weights/layers.10.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd8a7400456c3a6e78d893b6bc7cb409db3d312ef4f8e31b3f88b05f4ea9900 +size 45088768 diff --git a/triton_models/weights/layers.10.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.10.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..0d3951727686bf22e314ab4bdb91006a7802765d --- /dev/null +++ b/triton_models/weights/layers.10.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c445a2e2d9ddacc3eb13f454538e87cb810a2c4997fe2a00eab61be79a2cf8e3 +size 2818048 diff --git a/triton_models/weights/layers.10.feed_forward.w2.0.qweight b/triton_models/weights/layers.10.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..acddb9d4683177cf06bce59c3c21adfb7c7c042f --- /dev/null +++ b/triton_models/weights/layers.10.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7beeabd2905606a84083c196c7934afc90da3157e8ad7fc39e0fb0b2c1b87e4 +size 22544384 diff --git a/triton_models/weights/layers.10.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.10.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..c9681db9cc53b865e58d3b1e35c73921effa74a6 --- /dev/null +++ b/triton_models/weights/layers.10.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3e68691d9e996efdab729202171427c6778f6193f5c3ee0bc8e5cdacebc358b +size 1409024 diff --git a/triton_models/weights/layers.10.ffn_norm.weight b/triton_models/weights/layers.10.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..b954f72aaeb78323bda07e15ab925686488a435b --- /dev/null +++ b/triton_models/weights/layers.10.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e21a448e4c58b33dc56be6e7fd7f8bfee00dc8b7fc36c89c713efd05212ae78 +size 8192 diff --git a/triton_models/weights/layers.10.past_kv_scale.0.weight b/triton_models/weights/layers.10.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..6c0576bfe23608a6b2a4afeac624e29ed14d3fc7 --- /dev/null +++ b/triton_models/weights/layers.10.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a68d01a3baba1a0cb89d1fd554fcfae918634105d2779c68cd586ea7daab2b3 +size 16 diff --git a/triton_models/weights/layers.11.attention.w_qkv.0.bias b/triton_models/weights/layers.11.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..d4cf7a239c1dc8d96b1f604b62d1f8c76d46b1ba --- /dev/null +++ b/triton_models/weights/layers.11.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2557c839c183e4ada69c01795d9987fe7355b4810e04d11c93b39a602319dbce +size 24576 diff --git a/triton_models/weights/layers.11.attention.w_qkv.0.qweight b/triton_models/weights/layers.11.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..96e5567608fb182debd580604da98938078651a6 --- /dev/null +++ b/triton_models/weights/layers.11.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2979c232e8634f03888993ea1c34f03145f685dfefdd40edc301660561b02a02 +size 25165824 diff --git a/triton_models/weights/layers.11.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.11.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..01276aaf342b8e0d79c4969c1f899c51730e5108 --- /dev/null +++ b/triton_models/weights/layers.11.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:590537ac84874822dd1b29dbc7d39b2eef0d9f69b740b6f157e48ef64093e46c +size 1572864 diff --git a/triton_models/weights/layers.11.attention.wo.0.bias b/triton_models/weights/layers.11.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..d7dfa08d80a6ab466c12bff09f154eb46c00be97 --- /dev/null +++ b/triton_models/weights/layers.11.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee1d2a26b88ab112c7c7b484e1741d8e9bd96d9865971b804ca417a6aedffaeb +size 8192 diff --git a/triton_models/weights/layers.11.attention.wo.0.qweight b/triton_models/weights/layers.11.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..278afa4fd629c2fb0c3a495d5bce48ab9e08f5ce --- /dev/null +++ b/triton_models/weights/layers.11.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da55bd7ed408efbe196c3313c0f57f9b8b09fffe977a6f6f875e7cfff74a8c36 +size 8388608 diff --git a/triton_models/weights/layers.11.attention.wo.0.scales_zeros b/triton_models/weights/layers.11.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..89ebc9683e0ce78273331163e3a32c81e8ed677a --- /dev/null +++ b/triton_models/weights/layers.11.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ead5440400d48ecb41d7e4435f159fda0ca83a9c5a4799bf4f99f1146c1773ad +size 524288 diff --git a/triton_models/weights/layers.11.attention_norm.weight b/triton_models/weights/layers.11.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..e9e400b7986c4cdaab07095eafc61d349d6a3699 --- /dev/null +++ b/triton_models/weights/layers.11.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:607f33d6d2e396401a11d445ad227d9f00ef0430feec3c2437086dfc655c0c14 +size 8192 diff --git a/triton_models/weights/layers.11.feed_forward.w13.0.qweight b/triton_models/weights/layers.11.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..516337cb13b71a63f1c756cca4806ae58c215bc9 --- /dev/null +++ b/triton_models/weights/layers.11.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ca5f2ae270296540555f04fc74ebbdec348242e389da4f3cfbb681518836806 +size 45088768 diff --git a/triton_models/weights/layers.11.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.11.feed_forward.w13.0.scales_zeros new file mode 
100644 index 0000000000000000000000000000000000000000..082974b0b1fbffbed1f8636360d461b6ac23cd35 --- /dev/null +++ b/triton_models/weights/layers.11.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74113c724f2befca508733be33f587b89eede7c0b8dedcf9208eea230b0e2b38 +size 2818048 diff --git a/triton_models/weights/layers.11.feed_forward.w2.0.qweight b/triton_models/weights/layers.11.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..eed33f7909adc88cee6787b8a1a2f31494c527f3 --- /dev/null +++ b/triton_models/weights/layers.11.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:007d84feaba8891dd2b5da8d63bd3e5521c73acc3c2c686d00867f85096944d6 +size 22544384 diff --git a/triton_models/weights/layers.11.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.11.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..b64203883f065d3f3e73c0d2a36663b6b3cbf9c3 --- /dev/null +++ b/triton_models/weights/layers.11.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8218d933dfab4ac53ee4dee01c72cde5e6445409cb55ea9deb8a9091228594a4 +size 1409024 diff --git a/triton_models/weights/layers.11.ffn_norm.weight b/triton_models/weights/layers.11.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..85946a3a9d808876bf97bbcc0d4270cd9c740a30 --- /dev/null +++ b/triton_models/weights/layers.11.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8bcc3dc030f1c0d42eb0283f7b4916831fb3d1cec49ed21c169cc6b51e2df27 +size 8192 diff --git a/triton_models/weights/layers.11.past_kv_scale.0.weight b/triton_models/weights/layers.11.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..bef803cfc8ca1c267538eeb51e36413c5debd3b4 --- /dev/null +++ b/triton_models/weights/layers.11.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c00976122f0613a4f1cbd6febd34941a493e56d11adc1f27a88f769a78ae929 +size 16 diff --git a/triton_models/weights/layers.12.attention.w_qkv.0.bias b/triton_models/weights/layers.12.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..59eb6a4491b6df7898cd7cbcf2ec2172e91a201f --- /dev/null +++ b/triton_models/weights/layers.12.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86f6533bbec2e8c87757da1559e834a254cdfb24ea00acbd567a3efb89af45e5 +size 24576 diff --git a/triton_models/weights/layers.12.attention.w_qkv.0.qweight b/triton_models/weights/layers.12.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..7f9f18f1b0506839478c0fc02a9d2eeb3f391c68 --- /dev/null +++ b/triton_models/weights/layers.12.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a20edc924a46fa0cf7ad4fd7660bd6d1df6549e78990f828623bb0262e9ee633 +size 25165824 diff --git a/triton_models/weights/layers.12.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.12.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..991c1edc06845e65efcaa19e2b0ca2e5e622d78a --- /dev/null +++ b/triton_models/weights/layers.12.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d2a20627c460cac6a0229d140a3bafe6d64b41384b6d574067c60b103395d33e +size 1572864 diff --git a/triton_models/weights/layers.12.attention.wo.0.bias b/triton_models/weights/layers.12.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..3817e04acdde232c455447f7e55d7fd714c12bfb --- /dev/null +++ b/triton_models/weights/layers.12.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54f549aea68e4e813e9c1290e706d8dabd7016f203dafe4e428c8f0d4051444a +size 8192 diff --git a/triton_models/weights/layers.12.attention.wo.0.qweight b/triton_models/weights/layers.12.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..fa3cbed6c6e556491172a111627089a84c9affff --- /dev/null +++ b/triton_models/weights/layers.12.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1133c018884409087dc3d46ca85379525178c5d23608e9005794c21b232cfbf9 +size 8388608 diff --git a/triton_models/weights/layers.12.attention.wo.0.scales_zeros b/triton_models/weights/layers.12.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..92dbc5ee70c9eb3c6e986287a2a9be95131b4801 --- /dev/null +++ b/triton_models/weights/layers.12.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35d6532736c316e8d0474f9641e7a2f851b25000212566723cd1be4eab1ef4b8 +size 524288 diff --git a/triton_models/weights/layers.12.attention_norm.weight b/triton_models/weights/layers.12.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..fe5e45650696e5801b3046c506f95668e6086846 --- /dev/null +++ b/triton_models/weights/layers.12.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56ba7d8bca14005b4d0d509094de38b8e7455fd15771cd9f4da16885150aa308 +size 8192 diff --git a/triton_models/weights/layers.12.feed_forward.w13.0.qweight b/triton_models/weights/layers.12.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..f3273d81dedd1419e1e4ed6c3e7fc52ba521ef9e --- /dev/null +++ b/triton_models/weights/layers.12.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f09c7ecabee7c60a4f9c7b7e9f060130e805630fc18b2c9e1c49e24fd42ae31 +size 45088768 diff --git a/triton_models/weights/layers.12.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.12.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..7d5c1261032545304120157e4eb1deca5fb2b86c --- /dev/null +++ b/triton_models/weights/layers.12.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc08b6b16a38b4a2bb89c7e5234afc390ce00833f93ad42d5c3d32005fd91cc5 +size 2818048 diff --git a/triton_models/weights/layers.12.feed_forward.w2.0.qweight b/triton_models/weights/layers.12.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..48cce8e6407111f37ab76e179660978994ea956b --- /dev/null +++ b/triton_models/weights/layers.12.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77a0de567c71e2c32d9f956deb27852bde05dae3b57d6ed06d799df40260866d +size 22544384 diff --git a/triton_models/weights/layers.12.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.12.feed_forward.w2.0.scales_zeros new file mode 100644 index 
0000000000000000000000000000000000000000..c38df0402d474e58d42ebefb9d31555aa8a003fc --- /dev/null +++ b/triton_models/weights/layers.12.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:382a5d5e10b9d2bf2480ce118388270ad2786321c2a6c50873558eea63d92ca0 +size 1409024 diff --git a/triton_models/weights/layers.12.ffn_norm.weight b/triton_models/weights/layers.12.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..7dc9e7f8492af6265d15ada256080991a674823b --- /dev/null +++ b/triton_models/weights/layers.12.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f27c1a0f51906a5bd0fa839561a32d202aaf6fe05081886ba42c7cc04662b64f +size 8192 diff --git a/triton_models/weights/layers.12.past_kv_scale.0.weight b/triton_models/weights/layers.12.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..7a897aacdc0d418f2eedd76b0c08973c3c9367e3 --- /dev/null +++ b/triton_models/weights/layers.12.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c95ba4cca470d8fc6dc8b3246ce6dfbde5769a0ddea53e74b6cf8b75ab8f184 +size 16 diff --git a/triton_models/weights/layers.13.attention.w_qkv.0.bias b/triton_models/weights/layers.13.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..997aecf79272b24eb8ed28be23663121ca6cb1a9 --- /dev/null +++ b/triton_models/weights/layers.13.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d36ba7adeef96ba2c0af2f576dcf5118a79b81f4751281059271f8794a5e9be1 +size 24576 diff --git a/triton_models/weights/layers.13.attention.w_qkv.0.qweight b/triton_models/weights/layers.13.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..8667beddee13cec696e2361f4b6e566a9db8490b --- /dev/null +++ b/triton_models/weights/layers.13.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60aa1e98d1f8cd558f0a76287060ee20afa459ecb6c8543547b951ccf02575b1 +size 25165824 diff --git a/triton_models/weights/layers.13.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.13.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..b1e480a59ee8018a657dbf58ebdfca30dd114327 --- /dev/null +++ b/triton_models/weights/layers.13.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:955d1315be418b1ffcca9da557b82e90e70e88ad6d56fbed28f09b8965ad9ef3 +size 1572864 diff --git a/triton_models/weights/layers.13.attention.wo.0.bias b/triton_models/weights/layers.13.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..1ffa81784f20b53676e5f56722acb38094ca1b3e --- /dev/null +++ b/triton_models/weights/layers.13.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3647ec4270d17de0bd67f701a679e9e44fd9cc791e4ea96ffe237f613fe5f4b2 +size 8192 diff --git a/triton_models/weights/layers.13.attention.wo.0.qweight b/triton_models/weights/layers.13.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..98431af3f11ac37a83a7d7339cccf56ee2292744 --- /dev/null +++ b/triton_models/weights/layers.13.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6353673ad77ef1c7c6789021f3002e56114186dc51a8d310801569099a77e49 +size 8388608 diff --git 
a/triton_models/weights/layers.13.attention.wo.0.scales_zeros b/triton_models/weights/layers.13.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..330d04e59f7f346adc1ab1225dcd80d716b857d9 --- /dev/null +++ b/triton_models/weights/layers.13.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:917ac4aab76d0c800dbb8bdbb3aabd85431963fca2b0c0593617c0e8560d9039 +size 524288 diff --git a/triton_models/weights/layers.13.attention_norm.weight b/triton_models/weights/layers.13.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..dcbfcebad1210b6070a5efd33c474c7e3786dcb8 --- /dev/null +++ b/triton_models/weights/layers.13.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ee031c37413b35150a4207105159431ae1a240a8c665385e45400bd58c0f02 +size 8192 diff --git a/triton_models/weights/layers.13.feed_forward.w13.0.qweight b/triton_models/weights/layers.13.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..185338f465ae1dc79249e0ce78f00bd3a06f4f51 --- /dev/null +++ b/triton_models/weights/layers.13.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64b0b5e363e6239c87425968d553dc875e83bc1ff229de0a1d58608b7a2ac749 +size 45088768 diff --git a/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..402eb95b25746b7ba0905844b4cf8fc21aa54146 --- /dev/null +++ b/triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:958188397797750482d3cf78bbe1cf1df0fbdde5556b19c5bb5001f630ab5fc9 +size 2818048 diff --git a/triton_models/weights/layers.13.feed_forward.w2.0.qweight b/triton_models/weights/layers.13.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3de6b4c53870b93f880eb5db078b8be8237b3a30 --- /dev/null +++ b/triton_models/weights/layers.13.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797e05e0bb43e5f982a0315ce764a753051ba940c127a794fc75a1310269ca71 +size 22544384 diff --git a/triton_models/weights/layers.13.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.13.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..173e92386e717ccd899a2fb212185fab88288197 --- /dev/null +++ b/triton_models/weights/layers.13.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49364ed082ef7e99406f138889c36de84bb167d1052b7ab282c159b1c082ffc4 +size 1409024 diff --git a/triton_models/weights/layers.13.ffn_norm.weight b/triton_models/weights/layers.13.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..97a8ea6d7eb4aae0d030e1b870ecee9eeb542e42 --- /dev/null +++ b/triton_models/weights/layers.13.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd2e72b777949a1b02d7f44421764ba0038d0d5b45a6d98ca398b54e1bb80a35 +size 8192 diff --git a/triton_models/weights/layers.13.past_kv_scale.0.weight b/triton_models/weights/layers.13.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..b8dcc59a510a4fea0ad159f6e7120c08f7e228c3 --- /dev/null +++ 
b/triton_models/weights/layers.13.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7678d2e25ba9cc87d055d76f7f908a418dcfa9a932cdde75cbfb9e60d45c8d2f +size 16 diff --git a/triton_models/weights/layers.14.attention.w_qkv.0.bias b/triton_models/weights/layers.14.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..37881763b00ee9d1f0733021d19edf7cbc0b4021 --- /dev/null +++ b/triton_models/weights/layers.14.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86d2e1a130d53b52ec01f0f6ab863bc094cac06531a7577cffdb518072baf20e +size 24576 diff --git a/triton_models/weights/layers.14.attention.w_qkv.0.qweight b/triton_models/weights/layers.14.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..011b060be351b18e636271aadbd5735a9ed94599 --- /dev/null +++ b/triton_models/weights/layers.14.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a35367a6174bc4b34c9d4e5c59af3b02519f71663c313312f581dee7b7d7e088 +size 25165824 diff --git a/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e63565cf63ac0e815f230deec3e9f25e595546ba --- /dev/null +++ b/triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05bc71c1003bfff95381e3f999fac62858370fc8856e1683653d61f48fb4b25d +size 1572864 diff --git a/triton_models/weights/layers.14.attention.wo.0.bias b/triton_models/weights/layers.14.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..ff703a16d09e518c875af1a54557eaba70b685bc --- /dev/null +++ b/triton_models/weights/layers.14.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26db72f7fb858e52142db63aeb895719068cc000007cba553ad9614f8115f435 +size 8192 diff --git a/triton_models/weights/layers.14.attention.wo.0.qweight b/triton_models/weights/layers.14.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..be4480508b4212ae1d4a5cffe145d08236cf30f7 --- /dev/null +++ b/triton_models/weights/layers.14.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bb34d8e38af35fdd0018653ced371f22cde73fc6b40f1b0f545feef9fee99af +size 8388608 diff --git a/triton_models/weights/layers.14.attention.wo.0.scales_zeros b/triton_models/weights/layers.14.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..59cd8128b06551659a8fe036ac386263efef80b9 --- /dev/null +++ b/triton_models/weights/layers.14.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f07ce272fbcae32d298aea56c4ed02a87e6afd02cc51025711d89a5e08ea8d8 +size 524288 diff --git a/triton_models/weights/layers.14.attention_norm.weight b/triton_models/weights/layers.14.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..7c43db20270fbc04a1fd4bc65e92652f30b9d7a7 --- /dev/null +++ b/triton_models/weights/layers.14.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69bbcf37d417ceab6343eece111d07f24f3fb989aa33dfbc51086468a03dc38e +size 8192 diff --git a/triton_models/weights/layers.14.feed_forward.w13.0.qweight 
b/triton_models/weights/layers.14.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..980b89904a0421c8d3cd85d6c3401e78f5a3dc55 --- /dev/null +++ b/triton_models/weights/layers.14.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8190a81ec139c530be327657440cc9c48e911e13fd684f05cff10cbeab74ae5a +size 45088768 diff --git a/triton_models/weights/layers.14.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.14.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..6b21b38e4b9d7de3a89175a16186253c412501ba --- /dev/null +++ b/triton_models/weights/layers.14.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d41dc3f9b4a321f006bfb62dae0dea7a9aabeccc3ed1a25fcd98850feaf105a0 +size 2818048 diff --git a/triton_models/weights/layers.14.feed_forward.w2.0.qweight b/triton_models/weights/layers.14.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..dbfa3a882b234023dab9769ab3ffc47b6ffb399f --- /dev/null +++ b/triton_models/weights/layers.14.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02a9552a0d7fb274b6e7fe4e5e7d5955363257c4706a6764254b83dd7ac1a84 +size 22544384 diff --git a/triton_models/weights/layers.14.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.14.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..557acab05f4841100f02a0bd3f88f6d11703465f --- /dev/null +++ b/triton_models/weights/layers.14.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e70eaaa7196a3ebc13b40a47cad49f833f8cab35bc093e3d2bcc3a2e28f294c7 +size 1409024 diff --git a/triton_models/weights/layers.14.ffn_norm.weight b/triton_models/weights/layers.14.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..313fe2aa84fdf97590f6568ff895e0a84eb81ba6 --- /dev/null +++ b/triton_models/weights/layers.14.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f4cf381df886f5d7db6975095a34fcd8cd98f7eb8f57953afd66eb12a0de5f1 +size 8192 diff --git a/triton_models/weights/layers.14.past_kv_scale.0.weight b/triton_models/weights/layers.14.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..226ec43e125130b99aee01493584869cc0503dd9 --- /dev/null +++ b/triton_models/weights/layers.14.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d3c10da50e41c70485b54c44a6164df52775732f931a971a7b5b775b4eb9ef9 +size 16 diff --git a/triton_models/weights/layers.15.attention.w_qkv.0.bias b/triton_models/weights/layers.15.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..2938cc147e571307d6bc102ec675bf7f322d7920 --- /dev/null +++ b/triton_models/weights/layers.15.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c10bfd0759deda84ec4009b198cf29bd7a31c003f37b2d34085057b678368f49 +size 24576 diff --git a/triton_models/weights/layers.15.attention.w_qkv.0.qweight b/triton_models/weights/layers.15.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..1e5f5784c6b772f60258b8e0277838448ae44cab --- /dev/null +++ b/triton_models/weights/layers.15.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:607571c17791c30b67977a25d4bd6b0570e86b077c48fffcff9c189ba87443ee +size 25165824 diff --git a/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..b93751b2b93fa07730c99e9e2720b6f0e761a8bb --- /dev/null +++ b/triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb738a4244f4765924668c1eb2300f01c6b128b57271a7bc2b0a10a866b89ac2 +size 1572864 diff --git a/triton_models/weights/layers.15.attention.wo.0.bias b/triton_models/weights/layers.15.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..8423f503cff026ac6629e35f2cd7f4553c295b05 --- /dev/null +++ b/triton_models/weights/layers.15.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:414b93bd142fac59b6af872ff26a6107dd920114d86812e02699c5edba87a69e +size 8192 diff --git a/triton_models/weights/layers.15.attention.wo.0.qweight b/triton_models/weights/layers.15.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..d3ce7d5e0ded4d02177247c247db239bd3b08f71 --- /dev/null +++ b/triton_models/weights/layers.15.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cab5edc367c964298ad54612d4f51f7c040972fceef1a7badf30fa9f899e4fa0 +size 8388608 diff --git a/triton_models/weights/layers.15.attention.wo.0.scales_zeros b/triton_models/weights/layers.15.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e94a7c6e36d68e9a9eae64722d6975800a64c552 --- /dev/null +++ b/triton_models/weights/layers.15.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b17e9273b7696696113a21c8ee1a76843e9fdb4eb93d5668fc864f72fb5f6b5c +size 524288 diff --git a/triton_models/weights/layers.15.attention_norm.weight b/triton_models/weights/layers.15.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..f056b2f1003ecaa5aa4dec58bd907373469a183c --- /dev/null +++ b/triton_models/weights/layers.15.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:164609ec0555c45b067445d3dda7f1077c73f8e370809e82fc3d5b3e32f701e1 +size 8192 diff --git a/triton_models/weights/layers.15.feed_forward.w13.0.qweight b/triton_models/weights/layers.15.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..593e17cbe1adf4c27e901314b3c7c6e3dbfcd9fe --- /dev/null +++ b/triton_models/weights/layers.15.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3808b3d91365cc65684c23c1cb876ec9cbefae6ce3587644c9d024ca56b5250 +size 45088768 diff --git a/triton_models/weights/layers.15.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.15.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..764131251739123a1efa22ec8af869e7d41f281f --- /dev/null +++ b/triton_models/weights/layers.15.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c68e98c46c08ffb5f1816d07f82ffd669fc029a9d2418919391538f09357a9a +size 2818048 diff --git a/triton_models/weights/layers.15.feed_forward.w2.0.qweight b/triton_models/weights/layers.15.feed_forward.w2.0.qweight new file 
mode 100644 index 0000000000000000000000000000000000000000..1f73992ff8d9033e6ef839d67e659c333bcfafa0 --- /dev/null +++ b/triton_models/weights/layers.15.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2271ca41681a03e6f907aeb89b53cce0ff1d7255675068bb119d00821523abba +size 22544384 diff --git a/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..a30a309e016f039639b32e903d7d9c2c32099597 --- /dev/null +++ b/triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:785aefd6e43b9764347f2712cb8b16049dbc86ae9b4f6e433019926428ddb477 +size 1409024 diff --git a/triton_models/weights/layers.15.ffn_norm.weight b/triton_models/weights/layers.15.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..0619f0621bf76b283897d46bfd4326ed64fb1210 --- /dev/null +++ b/triton_models/weights/layers.15.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14910838e6fc2bc44483100bf52ba76275f337b7a72ea98d1fe24a9c4f352649 +size 8192 diff --git a/triton_models/weights/layers.15.past_kv_scale.0.weight b/triton_models/weights/layers.15.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..961095560f89f18541102e66c7c338b3bbfb0e4b --- /dev/null +++ b/triton_models/weights/layers.15.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c10305fdaff37fbbe14c2d8dc88386116d1a2547e90692ce845d6d298f7563e +size 16 diff --git a/triton_models/weights/layers.16.attention.w_qkv.0.bias b/triton_models/weights/layers.16.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..6a62296514118ee4d51f46ca7dacc82d92f32465 --- /dev/null +++ b/triton_models/weights/layers.16.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d65d70d918899513c47a83199a258703fcdd4088f3368c3bea91a9f9bf82e152 +size 24576 diff --git a/triton_models/weights/layers.16.attention.w_qkv.0.qweight b/triton_models/weights/layers.16.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..41fb4075a1cca8a3905ed1b5fbcfdbfc88d9a934 --- /dev/null +++ b/triton_models/weights/layers.16.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15de82b4f7c469bd52821fba7d63ec4c593d05cc14ab52005baa0f8813374769 +size 25165824 diff --git a/triton_models/weights/layers.16.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.16.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..89c502cc0bf6c13ed9eedb9af4005e6bf101f7a7 --- /dev/null +++ b/triton_models/weights/layers.16.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74efd0e4ea6a9b7cb4dd86bdd5a8e379f21d75c3c9b424fb9cc52f9eaaf16236 +size 1572864 diff --git a/triton_models/weights/layers.16.attention.wo.0.bias b/triton_models/weights/layers.16.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..a2e8ec8f9fc602965742447fe31b4e94a1dba582 --- /dev/null +++ b/triton_models/weights/layers.16.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d57f660194c11a7650c497e17ca75245aa059c88079bcb832fdb5b847d215bfa +size 8192 diff --git a/triton_models/weights/layers.16.attention.wo.0.qweight b/triton_models/weights/layers.16.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..923562a3ee776c05239ba1acb90854c1932638ba --- /dev/null +++ b/triton_models/weights/layers.16.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f6b06fbbbf44fadfcedbd7eeedfc5be321435af53ed754d90062cbf0eec5734 +size 8388608 diff --git a/triton_models/weights/layers.16.attention.wo.0.scales_zeros b/triton_models/weights/layers.16.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..4ba624679bf7f76d1c839e50d39fba899257734f --- /dev/null +++ b/triton_models/weights/layers.16.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96cd46739e451f9d44fcf5b7a7f57f105ab6dd699c4c8611f1cc2dc07f57f929 +size 524288 diff --git a/triton_models/weights/layers.16.attention_norm.weight b/triton_models/weights/layers.16.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..61aa5e69a44ef2553ebb535c9323e53692e9aeb8 --- /dev/null +++ b/triton_models/weights/layers.16.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb9373479580d57c829b1aa711da596ca9bd4bfe6cf4f2b6467cfc32e51340d4 +size 8192 diff --git a/triton_models/weights/layers.16.feed_forward.w13.0.qweight b/triton_models/weights/layers.16.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..b8570e44a05d5503edc69a671fae805afc29a53b --- /dev/null +++ b/triton_models/weights/layers.16.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85c80ebc56cd2cb164f8e85b1a17d48b48076c3b9dba89da1e40fb081b38d20f +size 45088768 diff --git a/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..1732a5ca071c1cfb89d4b403606f7c438e227ead --- /dev/null +++ b/triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6d823a1adfe55b11e8c692d639139ffe0f9d4a445492c536068033d6131896b +size 2818048 diff --git a/triton_models/weights/layers.16.feed_forward.w2.0.qweight b/triton_models/weights/layers.16.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..f4c8ab4546b5dfc15074d9ffe45389fc9e906199 --- /dev/null +++ b/triton_models/weights/layers.16.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a4cb4fca660b2aa153b88577f411cc7862be750cf109979c414dc156b534eb +size 22544384 diff --git a/triton_models/weights/layers.16.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.16.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..4f25148c93862abc4a446fe7d1e4ffb540269e85 --- /dev/null +++ b/triton_models/weights/layers.16.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3c30db485cd2765f2b494e421b0c869004bc03feb6c888bc25a85cd65c9512 +size 1409024 diff --git a/triton_models/weights/layers.16.ffn_norm.weight b/triton_models/weights/layers.16.ffn_norm.weight new file mode 100644 index 
0000000000000000000000000000000000000000..03ba40357acef3887925d4fdbbd0a952c25ea695 --- /dev/null +++ b/triton_models/weights/layers.16.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f196c7b787f6e3a09818d7889ffef5190f477a56edd1ffadad6e008f8d57e322 +size 8192 diff --git a/triton_models/weights/layers.16.past_kv_scale.0.weight b/triton_models/weights/layers.16.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..464ff9bef33e2b5aaf8c4ab0b27eabe062a9c8cb --- /dev/null +++ b/triton_models/weights/layers.16.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:050a7a10dcd8679f610af486218aa4965c3e10d8cb1692107d821fb466e88905 +size 16 diff --git a/triton_models/weights/layers.17.attention.w_qkv.0.bias b/triton_models/weights/layers.17.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..d60b61b9e32d5439691689db7f2a122a6f3652f9 --- /dev/null +++ b/triton_models/weights/layers.17.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a2179d0d75ddc204ddd52bb3bbc5109d853906531391d5c065d1b16cd28959d +size 24576 diff --git a/triton_models/weights/layers.17.attention.w_qkv.0.qweight b/triton_models/weights/layers.17.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..c39107f4619ba01a99304d12ec8687851f16a4b7 --- /dev/null +++ b/triton_models/weights/layers.17.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfe591a12d05454dd51668d2d3ed10244644f216bb761b73e2745e31172ef858 +size 25165824 diff --git a/triton_models/weights/layers.17.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.17.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..8acd9cecd11786ed73cc5f27641dbea8bcf26c2d --- /dev/null +++ b/triton_models/weights/layers.17.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8246481124606548df9788893b7ce64a6ee0e1b61ac5eaec62b17cabcc394bf0 +size 1572864 diff --git a/triton_models/weights/layers.17.attention.wo.0.bias b/triton_models/weights/layers.17.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..b589c5242406e4ab9096d14c7d463ed2ac40fc10 --- /dev/null +++ b/triton_models/weights/layers.17.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4ebade21c9263a279588363c4bb14f6abce22421683a9d719552c6a6f6b03ad +size 8192 diff --git a/triton_models/weights/layers.17.attention.wo.0.qweight b/triton_models/weights/layers.17.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..8f4fc99d05cb6094717dfcb6574b41dfde25d4c7 --- /dev/null +++ b/triton_models/weights/layers.17.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcb20c6e798e352b3cc1b25c975f6df96f186ce5e002cc7ae1166f21315afef +size 8388608 diff --git a/triton_models/weights/layers.17.attention.wo.0.scales_zeros b/triton_models/weights/layers.17.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..7d79e6d97ae7d19d710676d97f06f2cc99961555 --- /dev/null +++ b/triton_models/weights/layers.17.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0de29f954d02984d8074ea5ec3a0090bf6e4d4cddade7d3efbc571598198e73 +size 524288 
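Every weight file in this diff is a Git LFS pointer rather than the tensor itself: three lines giving the spec version, a sha256 oid for the real blob, and its byte size. The sizes are internally consistent with a 4-bit, group-size-128 quantized model with hidden size 4096 and FFN intermediate size 11008 (attention_norm is 4096 fp16 values = 8192 bytes; w_qkv.qweight is 4096 × 12288 / 2 = 25,165,824 bytes; w13 fuses w1 and w3, giving 4096 × 22016 / 2 = 45,088,768 bytes); this layout is an inference from the numbers in the listing, not something the diff states. The minimal sketch below, with illustrative names (parse_lfs_pointer, verify_blob) that are not part of this repository, shows how a fetched blob can be checked against its pointer, and asserts the size arithmetic under those assumptions:

import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # A pointer file has exactly the three "key value" lines shown above:
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<hex digest>
    #   size <bytes>
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(pointer_path: Path, blob_path: Path) -> bool:
    # Compare a downloaded weight blob against its pointer: first the cheap
    # byte-count check, then the sha256 digest.
    meta = parse_lfs_pointer(pointer_path.read_text())
    data = blob_path.read_bytes()
    return len(data) == meta["size"] and hashlib.sha256(data).hexdigest() == meta["oid"]

# Byte sizes implied by the assumed w4 (4-bit), group-128 quantization:
HIDDEN, INTER, GROUP = 4096, 11008, 128
assert HIDDEN * 3 * HIDDEN // 2 == 25_165_824          # w_qkv qweight (q, k, v packed)
assert (HIDDEN // GROUP) * 3 * HIDDEN * 4 == 1_572_864  # w_qkv scales_zeros (fp16 scale + zero per group)
assert HIDDEN * HIDDEN // 2 == 8_388_608               # wo qweight
assert HIDDEN * 2 * INTER // 2 == 45_088_768           # w13 qweight (w1 and w3 fused)
assert INTER * HIDDEN // 2 == 22_544_384               # w2 qweight
assert (INTER // GROUP) * HIDDEN * 4 == 1_409_024      # w2 scales_zeros

Running verify_blob over each layers.*.qweight after fetching catches truncated or partial LFS downloads before the weights are handed to the serving runtime, where such corruption would otherwise tend to surface only as opaque load-time failures.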
diff --git a/triton_models/weights/layers.17.attention_norm.weight b/triton_models/weights/layers.17.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..a74b0aecad499d79f7c752b0c839dff6d407ff9f --- /dev/null +++ b/triton_models/weights/layers.17.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:784e28a72a8c4fb263b72867eee8af42dc62c065ac91c39d625805c5f71f2d66 +size 8192 diff --git a/triton_models/weights/layers.17.feed_forward.w13.0.qweight b/triton_models/weights/layers.17.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..5d1291db46b01a348d955941ebaabbea3d8912d5 --- /dev/null +++ b/triton_models/weights/layers.17.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3176d69c64547a8eddc6296bbf501dd1ce14552e7fc1d34a860d8e0c8221d1ab +size 45088768 diff --git a/triton_models/weights/layers.17.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.17.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..b87ba1ff4aad95ebe59dc34f134d7428486356c0 --- /dev/null +++ b/triton_models/weights/layers.17.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2894ce3531caa910f65fb3fd329e22a3e31da43a8b5334bc60effa97f3c4e27 +size 2818048 diff --git a/triton_models/weights/layers.17.feed_forward.w2.0.qweight b/triton_models/weights/layers.17.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0b5aab82b3634ca498ac691551f4264e35bef4d0 --- /dev/null +++ b/triton_models/weights/layers.17.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95248d41879f6c8664b39b15638ddbf13ab6d743453cbeeccea292df003b2094 +size 22544384 diff --git a/triton_models/weights/layers.17.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.17.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..cb6bab365baef15321ed1298285a8443a3360a3b --- /dev/null +++ b/triton_models/weights/layers.17.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44bea2b36379ff6d25d728fa8ae214d2b46eaadf5a28028f41f99464170750f7 +size 1409024 diff --git a/triton_models/weights/layers.17.ffn_norm.weight b/triton_models/weights/layers.17.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..9de4718abb0d57c63cfa8a19871a2fb317f01f12 --- /dev/null +++ b/triton_models/weights/layers.17.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1876b8219358a37f100abdbf56041e8425f2211ba15a84731cab6740388a1976 +size 8192 diff --git a/triton_models/weights/layers.17.past_kv_scale.0.weight b/triton_models/weights/layers.17.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..f4d9c128dd5f9a9cae810bddb60aa1d3e68d049e --- /dev/null +++ b/triton_models/weights/layers.17.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7dafce18169b48a0b7b8c6456b2d412361663888ffd8741ffe7557952bb6cf3 +size 16 diff --git a/triton_models/weights/layers.18.attention.w_qkv.0.bias b/triton_models/weights/layers.18.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..be9bc7c4e66d016381657dd2679e6b3730f54c3a --- /dev/null +++ 
b/triton_models/weights/layers.18.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49d60c508628974f6eeb8b27e65e6ffadbac242aae57cc140d8c965d84e4f98c +size 24576 diff --git a/triton_models/weights/layers.18.attention.w_qkv.0.qweight b/triton_models/weights/layers.18.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0616b67e084b2eaef2b6d652ec56917d76726fc4 --- /dev/null +++ b/triton_models/weights/layers.18.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e60816086c6e7bec05529e305e3a5f1d6abec94d094d0ff8698050ef1856b707 +size 25165824 diff --git a/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..5ca1702eba901f00f7dd1f825c962a277a4227d5 --- /dev/null +++ b/triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e519b524450a67ee294b883de648a7222413a3e72140a058131614c6fec681ae +size 1572864 diff --git a/triton_models/weights/layers.18.attention.wo.0.bias b/triton_models/weights/layers.18.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..8a9f2e1870cfcf749ab910ff41b6088a0af824d0 --- /dev/null +++ b/triton_models/weights/layers.18.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91a399f2bcc3f10cef1f695cea9284faefc91a8182d26fe7313d685e3a5e0ca1 +size 8192 diff --git a/triton_models/weights/layers.18.attention.wo.0.qweight b/triton_models/weights/layers.18.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..4bb0adbd7723f309a423986bae308bed646507ca --- /dev/null +++ b/triton_models/weights/layers.18.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac8aaf8d4ec6f17d23713738969b07d184000ec0ee9a9503a8ed7980fd4ab9f4 +size 8388608 diff --git a/triton_models/weights/layers.18.attention.wo.0.scales_zeros b/triton_models/weights/layers.18.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..bc398aecb85cd631d6ac7eb26022cfba28fc34d0 --- /dev/null +++ b/triton_models/weights/layers.18.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1094cded5ec9ade78ff64caa2cbb88881ab69cf8a892eb3c6c2ce6ed9a520dd +size 524288 diff --git a/triton_models/weights/layers.18.attention_norm.weight b/triton_models/weights/layers.18.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..3d32386bf7f5438f0ac4cae94a467ed1b2ca42bc --- /dev/null +++ b/triton_models/weights/layers.18.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3c8e716255d2c2a7544da2dc22f5b3fb361cc8a710f4d8fcc476eddc3e6904c +size 8192 diff --git a/triton_models/weights/layers.18.feed_forward.w13.0.qweight b/triton_models/weights/layers.18.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..97657f9915dd8306cc58c622c43d1a19aa9257f9 --- /dev/null +++ b/triton_models/weights/layers.18.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65e1d93b24c2edb75ad276274b81bb7d6b9ee73815e8e65e2cb5a732a4f69032 +size 45088768 diff --git a/triton_models/weights/layers.18.feed_forward.w13.0.scales_zeros 
b/triton_models/weights/layers.18.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..d2968ee7a69ee2821894604825fcd1eab6aaea56 --- /dev/null +++ b/triton_models/weights/layers.18.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d1940f1d18ca23c64cd74fa431a8ce0bfc6d5fc4033a4e0c678ca95b123d125 +size 2818048 diff --git a/triton_models/weights/layers.18.feed_forward.w2.0.qweight b/triton_models/weights/layers.18.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..2f24937d9bb151b156df2da53f89ce8758ad9572 --- /dev/null +++ b/triton_models/weights/layers.18.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0688cf733c626c0dacfb99d2615280bb7b4fddb8b46deadbb586dd35f8a07a09 +size 22544384 diff --git a/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..eec1c1f2b41b00fe617ad1681c7d7a873fbf923b --- /dev/null +++ b/triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:846ad4b8a0b65b7ba4cc822cd27aa7c7bc17fed626240804fe267c36ad423ffc +size 1409024 diff --git a/triton_models/weights/layers.18.ffn_norm.weight b/triton_models/weights/layers.18.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..a0bf9ef6f4a572799afd652a5833a1b8e16b5889 --- /dev/null +++ b/triton_models/weights/layers.18.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d11fddd8c18251c3691ee8c6ae3ba4114f6b2574933b8fa31292c14f27f0b1d3 +size 8192 diff --git a/triton_models/weights/layers.18.past_kv_scale.0.weight b/triton_models/weights/layers.18.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..ecf981e16888f249bb08af2f580180ec7abc9648 --- /dev/null +++ b/triton_models/weights/layers.18.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdcb73f6e448796034aa77e7ff76c4b6219e457aa4feb26457be2b9fffb12d58 +size 16 diff --git a/triton_models/weights/layers.19.attention.w_qkv.0.bias b/triton_models/weights/layers.19.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..af09b54307fd231b8ff34b32a18815c2aa548f4d --- /dev/null +++ b/triton_models/weights/layers.19.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b946ef1be0d023c71432c8faf6a0ab271fe477aaaf21ebcf5219dc5accfad72 +size 24576 diff --git a/triton_models/weights/layers.19.attention.w_qkv.0.qweight b/triton_models/weights/layers.19.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3cbb74bb09f6bb444ffff08d8bcc845030614a5c --- /dev/null +++ b/triton_models/weights/layers.19.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5f2288e68a653f9109ab23ff18dc633f24fe94905c622ea2076426c095a7d73 +size 25165824 diff --git a/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..17cc889fd83daf26d7710fe49277f837941bd94e --- /dev/null +++ b/triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:96935e0c7b1eff3de083fb75dcf39be00b8d62d13864f430da47e0389eee81f8 +size 1572864 diff --git a/triton_models/weights/layers.19.attention.wo.0.bias b/triton_models/weights/layers.19.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..02077ceaa660d94745e039bc037363b2902e28e9 --- /dev/null +++ b/triton_models/weights/layers.19.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f686a90f3e713e3309a477fd1a9d15a25153378a6dbed574c7a094db906f6f1a +size 8192 diff --git a/triton_models/weights/layers.19.attention.wo.0.qweight b/triton_models/weights/layers.19.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..4478bf1f05353eeacaf7291266646946206d21ac --- /dev/null +++ b/triton_models/weights/layers.19.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b0c1aea7c4243900269bb3e8df8e20bb5ea9516cdbccc5be7156926f7a0ea86 +size 8388608 diff --git a/triton_models/weights/layers.19.attention.wo.0.scales_zeros b/triton_models/weights/layers.19.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..db683c056d288368f9e93356362570ebae933f6b --- /dev/null +++ b/triton_models/weights/layers.19.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22249ddc25480d95450d3c2f2a0fc6f21e157d09d52b789d731e1e4bbd975c98 +size 524288 diff --git a/triton_models/weights/layers.19.attention_norm.weight b/triton_models/weights/layers.19.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..808f2cfe83ad61f455485dd97dcb209f1295cc61 --- /dev/null +++ b/triton_models/weights/layers.19.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a18b13501f194b39eef64a3cc9aff64ee1dd5c935da6ec06de0e1fd649637a07 +size 8192 diff --git a/triton_models/weights/layers.19.feed_forward.w13.0.qweight b/triton_models/weights/layers.19.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..c5cc153b3ba392a59dfac55b3dc63408db5185e2 --- /dev/null +++ b/triton_models/weights/layers.19.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06b883404c76eb602c3f0cb68c2bc08819887e70e3931c6186216c6a8e29a529 +size 45088768 diff --git a/triton_models/weights/layers.19.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.19.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..10eb1a723432667a529226b57a27625e89c3d552 --- /dev/null +++ b/triton_models/weights/layers.19.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ad9591edd41e1d7ea6b48e404ebf226de4b040b5b0e74c03f248d1a035d526 +size 2818048 diff --git a/triton_models/weights/layers.19.feed_forward.w2.0.qweight b/triton_models/weights/layers.19.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..a04a9a1001c776448d0e8addc6bd7dffcacf089a --- /dev/null +++ b/triton_models/weights/layers.19.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b889cb326841e68590e9b029292f76fecb53644c661e1794bc09720844966f6 +size 22544384 diff --git a/triton_models/weights/layers.19.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.19.feed_forward.w2.0.scales_zeros new file mode 
100644 index 0000000000000000000000000000000000000000..3e1853c5afc4a1f265516d152078fed0217893cf --- /dev/null +++ b/triton_models/weights/layers.19.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c078003b97305893e6b9017b81696fbd4fa3dd40863d5bc7db7afc0c1dceaf +size 1409024 diff --git a/triton_models/weights/layers.19.ffn_norm.weight b/triton_models/weights/layers.19.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..d09e970111a2b26b38eb07f889c90da010f5314d --- /dev/null +++ b/triton_models/weights/layers.19.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1ac482697c88855c30e6059ed707ddaf392cfa46ca16ce9b2211d9a90e58546 +size 8192 diff --git a/triton_models/weights/layers.19.past_kv_scale.0.weight b/triton_models/weights/layers.19.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..079471db7197701b627eaf1b039df941801a0a3d --- /dev/null +++ b/triton_models/weights/layers.19.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6597938745235df566a8c1c7f69c75781eff6a777ddb231116ab96d98427a7f1 +size 16 diff --git a/triton_models/weights/layers.2.attention.w_qkv.0.bias b/triton_models/weights/layers.2.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..742e9ee90bc14d2a9df368733a69215b43f3385d --- /dev/null +++ b/triton_models/weights/layers.2.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fc0c2bc1d2d127407557abd94fc48092e2c5869f01e5005fec63f6832490f61 +size 24576 diff --git a/triton_models/weights/layers.2.attention.w_qkv.0.qweight b/triton_models/weights/layers.2.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..694ff2975601312bd0669b975e8afec377bf0cff --- /dev/null +++ b/triton_models/weights/layers.2.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb6783e65472481ee3781c5c7af1be50df815e6be61565992c388c92867329b +size 25165824 diff --git a/triton_models/weights/layers.2.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.2.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..0d6875d0033b12529bd4dd6b35ecf02a9f4d157f --- /dev/null +++ b/triton_models/weights/layers.2.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9688598f0cdeee1c8a0f23304c295c518fa158536c967c0daed847b77267398c +size 1572864 diff --git a/triton_models/weights/layers.2.attention.wo.0.bias b/triton_models/weights/layers.2.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..1c0235aa242e5ce99af90854b38d450f386f99b0 --- /dev/null +++ b/triton_models/weights/layers.2.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9dd5d708b2273975f93a6deae53787e7f748a1ee4768702687d779324e9d0fe +size 8192 diff --git a/triton_models/weights/layers.2.attention.wo.0.qweight b/triton_models/weights/layers.2.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..e4db82040839389ba2d3d62000a90c0eff1c7bdf --- /dev/null +++ b/triton_models/weights/layers.2.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:241ed5c0ba532a7997a1d76144f82ff50061df3a49b4057f62e09cca3969fec1 +size 8388608 diff --git 
a/triton_models/weights/layers.2.attention.wo.0.scales_zeros b/triton_models/weights/layers.2.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e82768dd8732265ab2972a5e467a7787b8d795b1 --- /dev/null +++ b/triton_models/weights/layers.2.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:082782240466306abe04f98da00d70b4caefd9cae3aeacd43cc6eba0fdfbcedb +size 524288 diff --git a/triton_models/weights/layers.2.attention_norm.weight b/triton_models/weights/layers.2.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..a25f9eca31778b352339e8c1a29b0b5a74c6e26b --- /dev/null +++ b/triton_models/weights/layers.2.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64e9dd2464cc8f9cfb1c5e361c3bc2ec632916c0776a12e6b5bd9d2f52c77278 +size 8192 diff --git a/triton_models/weights/layers.2.feed_forward.w13.0.qweight b/triton_models/weights/layers.2.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..ed4d52cc11f86f4b1789c2ee9f027119b6979f01 --- /dev/null +++ b/triton_models/weights/layers.2.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60909c65ac9b524e518a54b408c6a17b29a91d091b41592afa6a7559855a3607 +size 45088768 diff --git a/triton_models/weights/layers.2.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.2.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..19283622618c202bbbcce2fdfdcd605a88f56387 --- /dev/null +++ b/triton_models/weights/layers.2.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47f523c4eb033d91eec0f4a95877d6f560416864affab4a1e86a0aa786e0f815 +size 2818048 diff --git a/triton_models/weights/layers.2.feed_forward.w2.0.qweight b/triton_models/weights/layers.2.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..d220a04ca0fbf07a8b8208c6a3bb790eaa1d72ec --- /dev/null +++ b/triton_models/weights/layers.2.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42fee9e2a14928d1e0cf7ec5d4ec29a74a4240c7150501288b2b9ad54a4d25bf +size 22544384 diff --git a/triton_models/weights/layers.2.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.2.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..9eda40179fbab165ff42b46e63a0b6e8c0e55100 --- /dev/null +++ b/triton_models/weights/layers.2.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ab4003a042c7d910bfcd984f5ac6f33ba6f601e9b4320cd52aaaa778c233f3e +size 1409024 diff --git a/triton_models/weights/layers.2.ffn_norm.weight b/triton_models/weights/layers.2.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..8269a859f1f9f0280495fd04e81a228c8850c700 --- /dev/null +++ b/triton_models/weights/layers.2.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf5e7977163c803d402d61d711fea495834f8e33a75621f991ee9c79f1bf6d02 +size 8192 diff --git a/triton_models/weights/layers.2.past_kv_scale.0.weight b/triton_models/weights/layers.2.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..035f3442ad6eaa21b056996cefb677b133c08f31 --- /dev/null +++ 
b/triton_models/weights/layers.2.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf881fe3d791f4d4f6eb74a55ecbe533c39682c17b7909f3ba2674d814667dc1 +size 16 diff --git a/triton_models/weights/layers.20.attention.w_qkv.0.bias b/triton_models/weights/layers.20.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..16f9fd1135e66dd89a327195fb56a87b427709b3 --- /dev/null +++ b/triton_models/weights/layers.20.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:106ffd602a53b71698a146f20d072604b098a4d371afec1395cab2ad836dd659 +size 24576 diff --git a/triton_models/weights/layers.20.attention.w_qkv.0.qweight b/triton_models/weights/layers.20.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3aa6e9cdf18dcbb6c6725e193855682f7224e594 --- /dev/null +++ b/triton_models/weights/layers.20.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a970f8599723aa1e40208d1d8e8ad060dacea7119e234242de394d566ca0a7d +size 25165824 diff --git a/triton_models/weights/layers.20.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.20.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..925740aae380cb016a77d647afc3269fc36dd4a0 --- /dev/null +++ b/triton_models/weights/layers.20.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a09c8b48352bb0ad80097ebc4c2fb3b13ba28c156f948cd5a4584553d4dd7f9e +size 1572864 diff --git a/triton_models/weights/layers.20.attention.wo.0.bias b/triton_models/weights/layers.20.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..7574dc9092064629f4e18c1d4ff168cac3dda353 --- /dev/null +++ b/triton_models/weights/layers.20.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51ff7a277d8471fb3f8546a8b595072e698bf9f0c179a8489699323d54692f9b +size 8192 diff --git a/triton_models/weights/layers.20.attention.wo.0.qweight b/triton_models/weights/layers.20.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..6c3b5001861ade81401f8253d46c13d5b53544ab --- /dev/null +++ b/triton_models/weights/layers.20.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b543904d385d9db1ea0d6922bc707e1f1cb7de2f12b7ab2594f2585c75fcdff6 +size 8388608 diff --git a/triton_models/weights/layers.20.attention.wo.0.scales_zeros b/triton_models/weights/layers.20.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..8b26b7da33425e4e03af1ecd044e12f179a0e321 --- /dev/null +++ b/triton_models/weights/layers.20.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecd2c5d66f613b1cfd433eea550795e54e6801851338cc9909b247f165503afe +size 524288 diff --git a/triton_models/weights/layers.20.attention_norm.weight b/triton_models/weights/layers.20.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..99ac3abd57e74d828e81e91846f1a0f4a1f71b55 --- /dev/null +++ b/triton_models/weights/layers.20.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec335262a29488b5632fe5b58d59551ac811455b843996fb3fc50ffa3f52e0ed +size 8192 diff --git a/triton_models/weights/layers.20.feed_forward.w13.0.qweight 
b/triton_models/weights/layers.20.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..53feabff3f4a71bd3cde5c37c8df0fb0b70b53f2 --- /dev/null +++ b/triton_models/weights/layers.20.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41fff459e9678f63f23f9e3fc311caa76ef85370e2b56980d92a5619309f68a6 +size 45088768 diff --git a/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..2e69e721c02a5e1d65cd2a68c495476f8f0ba87d --- /dev/null +++ b/triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1beb936cbf4a00d930c1076d71779dbbe1643294fe697609528edac755641b42 +size 2818048 diff --git a/triton_models/weights/layers.20.feed_forward.w2.0.qweight b/triton_models/weights/layers.20.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..e00171d7e12a481b15e7b990f1e64d388b9359fc --- /dev/null +++ b/triton_models/weights/layers.20.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24dfbb62b9f37779de793b049f017d22c6ab98ca64b022201d618b492780c019 +size 22544384 diff --git a/triton_models/weights/layers.20.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.20.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..9908df2c77f0dbb17d6b0a4347e2534667efee88 --- /dev/null +++ b/triton_models/weights/layers.20.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31c8a686589a0be433b043e3f051397af10e8146616c7427aef59a1629e420bd +size 1409024 diff --git a/triton_models/weights/layers.20.ffn_norm.weight b/triton_models/weights/layers.20.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..e97304eaaafd060252cfc94370cee4ede87be8ad --- /dev/null +++ b/triton_models/weights/layers.20.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bd784f60cfea7aebabd35177403560f8782ef08f92e2062036912548c78d8a1 +size 8192 diff --git a/triton_models/weights/layers.20.past_kv_scale.0.weight b/triton_models/weights/layers.20.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..2b4118b774fed252145f0bd427301c03005e31fc --- /dev/null +++ b/triton_models/weights/layers.20.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f74b53d063af0bbe4a5535d93f54c7f81b356d4ad50e1e94ce1cb54bc5959fb +size 16 diff --git a/triton_models/weights/layers.21.attention.w_qkv.0.bias b/triton_models/weights/layers.21.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..5132653220cb2f0d2508d2e311231983c53c51ff --- /dev/null +++ b/triton_models/weights/layers.21.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2c541a0f361acb95648b3529758d394e78aa9c2a92db9d2e30cd7806d5914a4 +size 24576 diff --git a/triton_models/weights/layers.21.attention.w_qkv.0.qweight b/triton_models/weights/layers.21.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..ece2cb9e105e811eafe53b9b3aa6cb149edbaf34 --- /dev/null +++ b/triton_models/weights/layers.21.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0aa9b16ac0b9760ad9ec96b434ddd9c83a2ccfbd4d5c03fa4d5c9b9c8aa7335c +size 25165824 diff --git a/triton_models/weights/layers.21.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.21.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..daa3317282b730fdd377031502121a510963ab03 --- /dev/null +++ b/triton_models/weights/layers.21.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b46837ba16fed5d04bdda691ccff588a396f35a3b8bbdaf8656a2e2ba1da7f7 +size 1572864 diff --git a/triton_models/weights/layers.21.attention.wo.0.bias b/triton_models/weights/layers.21.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..f92d849af35b6ef9a9c8843045bf9680eadaccf0 --- /dev/null +++ b/triton_models/weights/layers.21.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaae167dcc160ce716a161705ba574a0b66ede4a389e1d4829dcec96e3c5d721 +size 8192 diff --git a/triton_models/weights/layers.21.attention.wo.0.qweight b/triton_models/weights/layers.21.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..f21633ee10d0c2a9af7ada47c1be6dce16d9c0f3 --- /dev/null +++ b/triton_models/weights/layers.21.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df93e6bf1734fd5d564945c7d58e92354edcbfe18d0030f4bb2135ac3979c88f +size 8388608 diff --git a/triton_models/weights/layers.21.attention.wo.0.scales_zeros b/triton_models/weights/layers.21.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..d80ccbe286d6d8565391727798e06d99d70c91b1 --- /dev/null +++ b/triton_models/weights/layers.21.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff49b1cb016115c452623c3796c966f3b0fb23b451386f5ae2add4a87241d945 +size 524288 diff --git a/triton_models/weights/layers.21.attention_norm.weight b/triton_models/weights/layers.21.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..90d341016a8d2e510f74459b117704f8ad913a7e --- /dev/null +++ b/triton_models/weights/layers.21.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49dc68aa1d1aaa2178430b9f83bfff2a0d1bcc4b2dd599a6267fd2d665a346fa +size 8192 diff --git a/triton_models/weights/layers.21.feed_forward.w13.0.qweight b/triton_models/weights/layers.21.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..dfb4add86f0998acb8bea47428b45f2c7ddfc1ab --- /dev/null +++ b/triton_models/weights/layers.21.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e02b9f6777fa24fb975c8c9866830aad07573adf3f57292d411d9dae5bfb43a5 +size 45088768 diff --git a/triton_models/weights/layers.21.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.21.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..184211c7bf0943abc01e8eb072d11015d26d93e3 --- /dev/null +++ b/triton_models/weights/layers.21.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e40f8f7312e9c75a1bdef4aa7b5205131b99c227f66b9bbe08bc7061ceef28fe +size 2818048 diff --git a/triton_models/weights/layers.21.feed_forward.w2.0.qweight b/triton_models/weights/layers.21.feed_forward.w2.0.qweight new file 
mode 100644 index 0000000000000000000000000000000000000000..91c1ade334a9c831be1c86b976c29fb8930dd260 --- /dev/null +++ b/triton_models/weights/layers.21.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0da2ef61984e699e288c5493dbc7f07208dce4d545306bb5ad0041cb9158e78d +size 22544384 diff --git a/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..a22b7f7e5984ab49286d5891e3b828e8f84030c0 --- /dev/null +++ b/triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1a75db8fc894cf0c7b5f89eee9b5553b58e26659b2053629e4605a6bccd9209 +size 1409024 diff --git a/triton_models/weights/layers.21.ffn_norm.weight b/triton_models/weights/layers.21.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..d8f68fd2839d9e6df2770e4ba0f8ad8f28885428 --- /dev/null +++ b/triton_models/weights/layers.21.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cded2b93c18c0320467a1776bc717eafa5bf8c9b4bcd1ba9bfdc7855ff71328b +size 8192 diff --git a/triton_models/weights/layers.21.past_kv_scale.0.weight b/triton_models/weights/layers.21.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..a97919699a02fb82d12296b0adae91f3ca4a2e59 --- /dev/null +++ b/triton_models/weights/layers.21.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04e4f56944ddf7537a266266999f4b36b1141518efb4cfac3d6805f85f0157db +size 16 diff --git a/triton_models/weights/layers.22.attention.w_qkv.0.bias b/triton_models/weights/layers.22.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..0741ad78810d275f9159043254372d7946520363 --- /dev/null +++ b/triton_models/weights/layers.22.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c50b24433d106a83b8309a7abc048fc2d76cb0a120539a2c7d246023b3f4ed5b +size 24576 diff --git a/triton_models/weights/layers.22.attention.w_qkv.0.qweight b/triton_models/weights/layers.22.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..03d9dc05209c1b128bf583a1f0895ca2ad7532b5 --- /dev/null +++ b/triton_models/weights/layers.22.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f193011cb812136953c4cae908f68f8ff6530a46b259c32ee3691fcd3d8c3a8 +size 25165824 diff --git a/triton_models/weights/layers.22.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.22.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..f07bcf0766e13edf982f6ed5d67f4b62d4ab0c41 --- /dev/null +++ b/triton_models/weights/layers.22.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e0c1ed345dc7671f4bc23667eff69959b666fb02189da3b6560ceb2a142f768 +size 1572864 diff --git a/triton_models/weights/layers.22.attention.wo.0.bias b/triton_models/weights/layers.22.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..c8c2dde98b8e09a62e636386e08232f4555c0152 --- /dev/null +++ b/triton_models/weights/layers.22.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e8c65c8280f8fff1a24e23ea78a7036af94a602d478c0b0686b7324eb40b7c56 +size 8192 diff --git a/triton_models/weights/layers.22.attention.wo.0.qweight b/triton_models/weights/layers.22.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..02bef557847d8a6871534d65ee8e1aed96849226 --- /dev/null +++ b/triton_models/weights/layers.22.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:295a40ef3003245e91df16b75a4f600a61c3d2dd9c3b6d1763b4a0fc41bf17b6 +size 8388608 diff --git a/triton_models/weights/layers.22.attention.wo.0.scales_zeros b/triton_models/weights/layers.22.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e4c580b39bc934241715e86bf6888a57b1024803 --- /dev/null +++ b/triton_models/weights/layers.22.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b90275d1df3f9ac68e19bf2b7f5ae752a346714a0ee1da4d32100f375a7a0fd +size 524288 diff --git a/triton_models/weights/layers.22.attention_norm.weight b/triton_models/weights/layers.22.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..065524d4e948419b4ac8513875bf79edcc67249a --- /dev/null +++ b/triton_models/weights/layers.22.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cf6dede885c7f3b9dd01b7899634e1f8f4c69b7cb19645062343258f87a3f56 +size 8192 diff --git a/triton_models/weights/layers.22.feed_forward.w13.0.qweight b/triton_models/weights/layers.22.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..d9c464bb97123786a6cf2d5fb645256ffc1d020e --- /dev/null +++ b/triton_models/weights/layers.22.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cd6e3b1c20291fb3462c1f5454183231adadb19c7b684920735d37dd4f15d48 +size 45088768 diff --git a/triton_models/weights/layers.22.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.22.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..ae19ef3c8ac642cdd2b6a534573ad565f29728c3 --- /dev/null +++ b/triton_models/weights/layers.22.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f218bc98343e5854f9377cd1c5af020b5f247e645058a6fb0dfa699bfd1313f4 +size 2818048 diff --git a/triton_models/weights/layers.22.feed_forward.w2.0.qweight b/triton_models/weights/layers.22.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..a0aa3d771beab6a28af61d99c5babb88c7e5928b --- /dev/null +++ b/triton_models/weights/layers.22.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8014d0dc2a69bf23fa51da4f3eceee7bd29b517d3bcb3c7cde4ceb8b3732c99d +size 22544384 diff --git a/triton_models/weights/layers.22.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.22.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..7086085731f57998e726f68c381f2785291a517c --- /dev/null +++ b/triton_models/weights/layers.22.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15f95a21767057378bf7d44bac0005146e659fb87e01b683eff7bd56a77604cc +size 1409024 diff --git a/triton_models/weights/layers.22.ffn_norm.weight b/triton_models/weights/layers.22.ffn_norm.weight new file mode 100644 index 
0000000000000000000000000000000000000000..c80c2a28a88ea76b7c5679a1a360584b8e38ea61 --- /dev/null +++ b/triton_models/weights/layers.22.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f7559fc9efd499f320ff096cef3da24e0fe76d6934f81a55fc510163389ca35 +size 8192 diff --git a/triton_models/weights/layers.22.past_kv_scale.0.weight b/triton_models/weights/layers.22.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..576336abc2a405d3042f014a664c1dbb60529c85 --- /dev/null +++ b/triton_models/weights/layers.22.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8db2d8ec5a4dd18fb32bd41466517806853910b26687da56ae36ac042b47069 +size 16 diff --git a/triton_models/weights/layers.23.attention.w_qkv.0.bias b/triton_models/weights/layers.23.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..6d2d1e97fa09b62f3db3c0de6543b8c690e163ed --- /dev/null +++ b/triton_models/weights/layers.23.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b80a9d535a8aa6fb33f2850011a25ad759ccf012527b07134fd9b006329371c9 +size 24576 diff --git a/triton_models/weights/layers.23.attention.w_qkv.0.qweight b/triton_models/weights/layers.23.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..bc8d9e4a0ba158aebd25ce0bba176276e627db37 --- /dev/null +++ b/triton_models/weights/layers.23.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b766a8d345beaf464638447eb6dc868e5baf7dea23b7d8ca5bcd6ff5d1e63366 +size 25165824 diff --git a/triton_models/weights/layers.23.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.23.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..d7f697fb4ac4f046cae99a9466c9512ab78c835b --- /dev/null +++ b/triton_models/weights/layers.23.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:025ae6f852baa53d736408ee1987cbf51332a5c9f561b1ad2ca71b915d914e91 +size 1572864 diff --git a/triton_models/weights/layers.23.attention.wo.0.bias b/triton_models/weights/layers.23.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..36f012d0207097ee54844b3537009a9af81d87c0 --- /dev/null +++ b/triton_models/weights/layers.23.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcefacaab2e40107d558ab6a01f1c9b499f1d731c7a1f75e3cf3e332a6562acf +size 8192 diff --git a/triton_models/weights/layers.23.attention.wo.0.qweight b/triton_models/weights/layers.23.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..5aafcc9b438e872be2f8384d2cb38bf0f2d07798 --- /dev/null +++ b/triton_models/weights/layers.23.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cf8d0889b0486cc04c1ce2c1e89261ea9430945b4c7501b3cb7bcefdee5c4f6 +size 8388608 diff --git a/triton_models/weights/layers.23.attention.wo.0.scales_zeros b/triton_models/weights/layers.23.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..bfdd7feb37e7c9cb4f3b5bbe54a95763d0957abf --- /dev/null +++ b/triton_models/weights/layers.23.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:676ca88279cda7dea9ead23b24fcfd099061476a26de915384cdea248308aece +size 524288 
diff --git a/triton_models/weights/layers.23.attention_norm.weight b/triton_models/weights/layers.23.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..b1fa3fe6ec2b595f315f6abdc04ba5ebbefe2066 --- /dev/null +++ b/triton_models/weights/layers.23.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c51c37812622c5b4ab5849be939e7a162c56ef9e3a202aa62bf685773832d56c +size 8192 diff --git a/triton_models/weights/layers.23.feed_forward.w13.0.qweight b/triton_models/weights/layers.23.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..85cd46240fdafce2afb996ee1a8354a717a67bf4 --- /dev/null +++ b/triton_models/weights/layers.23.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca3ba4a0e72b6ed51341d6facc2e95027e6302f705808e72962438bfa0b82943 +size 45088768 diff --git a/triton_models/weights/layers.23.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.23.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..817ac4e5aaba0c7818aa15911774261a5dd05d0f --- /dev/null +++ b/triton_models/weights/layers.23.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab28f798f6cb914ef20441aab5fe7084c6881e6c856602fe4a24ec5bd8ec0e0a +size 2818048 diff --git a/triton_models/weights/layers.23.feed_forward.w2.0.qweight b/triton_models/weights/layers.23.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..5e5a937a925ec19b853878740bf0ae5aa6e179b3 --- /dev/null +++ b/triton_models/weights/layers.23.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0a1ce1e58adfa68bd2ad6889a686d3849fa91b02af033e89929b2acb2532829 +size 22544384 diff --git a/triton_models/weights/layers.23.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.23.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..cd6c90fde3cad513977969667d21a0505d250806 --- /dev/null +++ b/triton_models/weights/layers.23.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bb1a36cd18c5ba93b97413d50d0fe97f175db4452d7a760cdda3a52781f7ae1 +size 1409024 diff --git a/triton_models/weights/layers.23.ffn_norm.weight b/triton_models/weights/layers.23.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..dacc5086153f24a394297ee3a0022bc19204c4e8 --- /dev/null +++ b/triton_models/weights/layers.23.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd0bfb607e32571ea6793de0402613f6202bb83e294899a1fc3955a635951797 +size 8192 diff --git a/triton_models/weights/layers.23.past_kv_scale.0.weight b/triton_models/weights/layers.23.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..30d2ea794c2f03a0fce07bee51bd019492a57c7b --- /dev/null +++ b/triton_models/weights/layers.23.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95623d4f23883e072099e22994fc83a41d1f0ced9a5de61db6958406cc3eb006 +size 16 diff --git a/triton_models/weights/layers.24.attention.w_qkv.0.bias b/triton_models/weights/layers.24.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..a0c7c5340a5af2ceeeed37b14a33904c4a90dddc --- /dev/null +++ 
b/triton_models/weights/layers.24.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b826dfa75366146ceeb5e631aa035931a8009d3427f16e8b655839182887035a +size 24576 diff --git a/triton_models/weights/layers.24.attention.w_qkv.0.qweight b/triton_models/weights/layers.24.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..9295a0f91a6c7951f42f85dc2e682f47fc4cd910 --- /dev/null +++ b/triton_models/weights/layers.24.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc9f0c626d6ffd7c9b13657526990e917e31310279d5e7e9a720b24066cf89c3 +size 25165824 diff --git a/triton_models/weights/layers.24.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.24.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..899d395507c9325e01ae54ba80c94ea37ce720a8 --- /dev/null +++ b/triton_models/weights/layers.24.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25769bbad620ccf00b8c7b771297251a24de0680712ff83d718a6b7d6b7f3231 +size 1572864 diff --git a/triton_models/weights/layers.24.attention.wo.0.bias b/triton_models/weights/layers.24.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..25fefaf7d6de7439b72806422afa836a356300ac --- /dev/null +++ b/triton_models/weights/layers.24.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2aa762f4356c15774416599ba111a73e55941de729389be4e27160fe3e777dd +size 8192 diff --git a/triton_models/weights/layers.24.attention.wo.0.qweight b/triton_models/weights/layers.24.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..378de92e25dc0f161bbdae2a3f5d995bc10fe9e8 --- /dev/null +++ b/triton_models/weights/layers.24.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c989c17452181367b421f5a9e7f3ab377685dd2b1667549c97584e2e3774307 +size 8388608 diff --git a/triton_models/weights/layers.24.attention.wo.0.scales_zeros b/triton_models/weights/layers.24.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..44b014e48df86a0725d4deb2788e098e9a7bf57a --- /dev/null +++ b/triton_models/weights/layers.24.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b3f4316c5b7b6cfe04277caae3465516ad1fed06e08d6343ae6b988d4fd797e +size 524288 diff --git a/triton_models/weights/layers.24.attention_norm.weight b/triton_models/weights/layers.24.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..5b0e40bea2e82566f793b232359e34f13b7144e3 --- /dev/null +++ b/triton_models/weights/layers.24.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dc42abae914f97c261245d7261ccd456c3a9a459936ccaa322c2299ea72c560 +size 8192 diff --git a/triton_models/weights/layers.24.feed_forward.w13.0.qweight b/triton_models/weights/layers.24.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0a3ca71f2de38f50e060effeb3140ee05f6f4a6f --- /dev/null +++ b/triton_models/weights/layers.24.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3535d56b619e158a9b35648132a7a7a2475f7bbd134c29f88f4c1f9051ea086e +size 45088768 diff --git a/triton_models/weights/layers.24.feed_forward.w13.0.scales_zeros 
b/triton_models/weights/layers.24.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..f85f479398d4844fdd53400302853816fe93dd6c --- /dev/null +++ b/triton_models/weights/layers.24.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ab69a9f154e89a7fac73d10788e62c61dff83e00462da03d482331f54c6ca5a +size 2818048 diff --git a/triton_models/weights/layers.24.feed_forward.w2.0.qweight b/triton_models/weights/layers.24.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3f019302a6ac1ea3ea80f3cf5788c205e2af99bb --- /dev/null +++ b/triton_models/weights/layers.24.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8808f397a8376ea92afcbdb09b9b856cce3781131d3693d2f40c3c47239ead9d +size 22544384 diff --git a/triton_models/weights/layers.24.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.24.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..83ca9c7239bafef59b17a18e13a3505d2e4a6c8b --- /dev/null +++ b/triton_models/weights/layers.24.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee774332571dee38fee912da9a03d2fb23a6fad225af848573ca50dcc5de8b88 +size 1409024 diff --git a/triton_models/weights/layers.24.ffn_norm.weight b/triton_models/weights/layers.24.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..7b4e392c06f2b7bd7af49d96fe6c1429d3b5d27f --- /dev/null +++ b/triton_models/weights/layers.24.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab335f13c59daa7aecbc4c3625cc2a351a3cf5fbd25623abc19dcffd368d21c9 +size 8192 diff --git a/triton_models/weights/layers.24.past_kv_scale.0.weight b/triton_models/weights/layers.24.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..d77434061af53aa2feedd77fc76c2c0225475d57 --- /dev/null +++ b/triton_models/weights/layers.24.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b88d21c8c9bcb6c886d1d89a43e458c4047c51d73cc86e0e91e1040339f96a47 +size 16 diff --git a/triton_models/weights/layers.25.attention.w_qkv.0.bias b/triton_models/weights/layers.25.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..c1f3298da98fd52fa34e7e2151c2080a26ac4825 --- /dev/null +++ b/triton_models/weights/layers.25.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:053f16060fff184876d9d4531ec979045f74749806a711db95e05c220210e752 +size 24576 diff --git a/triton_models/weights/layers.25.attention.w_qkv.0.qweight b/triton_models/weights/layers.25.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..ff927418294bb6a4cc4d7b198f1b5b648bd15357 --- /dev/null +++ b/triton_models/weights/layers.25.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54fb8a22e07df3f5cbe7ef8cbc76ee51c73a91518c8aba78083d1746b7cdd3b7 +size 25165824 diff --git a/triton_models/weights/layers.25.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.25.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..0db15379bf4d395a9cca6e24a92929cef28e37ea --- /dev/null +++ b/triton_models/weights/layers.25.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:fa10f361409c2acda27681e17e61c3c7a39109da894aa2a05e7e4651d943933e +size 1572864 diff --git a/triton_models/weights/layers.25.attention.wo.0.bias b/triton_models/weights/layers.25.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..459d69c8ac3a61158a6fec055ad46cbc87159088 --- /dev/null +++ b/triton_models/weights/layers.25.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1121363e1bbe801b360eb994c519c01dae18729a65f61842f648bb31dc07e84b +size 8192 diff --git a/triton_models/weights/layers.25.attention.wo.0.qweight b/triton_models/weights/layers.25.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..92bc8f89cfb1f9b320abdf073abc7078342fd4f6 --- /dev/null +++ b/triton_models/weights/layers.25.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:508b92042c695031375738282ee88f8fcea20c16641d4b7c2a59da7b1e423688 +size 8388608 diff --git a/triton_models/weights/layers.25.attention.wo.0.scales_zeros b/triton_models/weights/layers.25.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..28fae08e1e4476a30d2ecc9362c476787ead6bad --- /dev/null +++ b/triton_models/weights/layers.25.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c967d6661ecb9c59da8cae1ff9c5a3427f444af857f0ad75a9186eb14ce736fc +size 524288 diff --git a/triton_models/weights/layers.25.attention_norm.weight b/triton_models/weights/layers.25.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..c6b51c0c80dfdffafd0ca000dc85b7cb60da5a6a --- /dev/null +++ b/triton_models/weights/layers.25.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25605e0cccff335b384737a0f2b9a4f7da2997f1cb2b88506f484bc15917aec9 +size 8192 diff --git a/triton_models/weights/layers.25.feed_forward.w13.0.qweight b/triton_models/weights/layers.25.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..2e6c84fd372b85c6f8ba441a21e1d82cd4e7548d --- /dev/null +++ b/triton_models/weights/layers.25.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5adcdb2dd1c875240a704357ee2a48b54b8c887013fdfa30983e2b9c6dfb1136 +size 45088768 diff --git a/triton_models/weights/layers.25.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.25.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..17082fb43a8b451388bb9bd6dc6131581e2d18d7 --- /dev/null +++ b/triton_models/weights/layers.25.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:189891c8cb1ca2fc9df8de8ed224ec89beb1dd505320a1339900e9150c6cd964 +size 2818048 diff --git a/triton_models/weights/layers.25.feed_forward.w2.0.qweight b/triton_models/weights/layers.25.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..79b0baa576df9863179a010ac042274f287b1663 --- /dev/null +++ b/triton_models/weights/layers.25.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5363fe77a29cf6f5f6e1859cd2337e1a1549751c0f39071b4ee43416146e9427 +size 22544384 diff --git a/triton_models/weights/layers.25.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.25.feed_forward.w2.0.scales_zeros new file mode 
100644 index 0000000000000000000000000000000000000000..e27117a8326056f94e5c05e5b14225e856d01c3c --- /dev/null +++ b/triton_models/weights/layers.25.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f71df91f3cd0c02e08110ba7393f8e5c0bbb472ad53b0ecb5adf225ff116153 +size 1409024 diff --git a/triton_models/weights/layers.25.ffn_norm.weight b/triton_models/weights/layers.25.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..3068e1e2ba3479db27e1f00360fcc7c3b86316c8 --- /dev/null +++ b/triton_models/weights/layers.25.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbdc95c2194b3dd60086d641f214995b465b285297c0f0e1a0477cbb0e8b6cc4 +size 8192 diff --git a/triton_models/weights/layers.25.past_kv_scale.0.weight b/triton_models/weights/layers.25.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..545dba25378eb4777bba8744c39e1a3bec25d359 --- /dev/null +++ b/triton_models/weights/layers.25.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e908c8c22ddfaa84cacd046ce4b7d397bf22b6c75655f368336aa075c1e9df6a +size 16 diff --git a/triton_models/weights/layers.26.attention.w_qkv.0.bias b/triton_models/weights/layers.26.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..789bfc1c2a40b92b266a56212292117edafed2a2 --- /dev/null +++ b/triton_models/weights/layers.26.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6627aa941c5cee7693414d3466abf9285f4fa56ec1f1f7693cde4063d478983 +size 24576 diff --git a/triton_models/weights/layers.26.attention.w_qkv.0.qweight b/triton_models/weights/layers.26.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..cf26f1df41a3549ae46cf8e3ceab4ab902093614 --- /dev/null +++ b/triton_models/weights/layers.26.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd6a2f4c5e673993ba46cafc59f77f61089bb3f3f11b563d9ba7fdeb86f00ae +size 25165824 diff --git a/triton_models/weights/layers.26.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.26.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..73481a898e4b708bab6c7dcfd054d43d56e29c4a --- /dev/null +++ b/triton_models/weights/layers.26.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af928ee7f2c8ec9da9def39a43cbc0c3bb99f950ee6a6f8e5a7bbac97d38a562 +size 1572864 diff --git a/triton_models/weights/layers.26.attention.wo.0.bias b/triton_models/weights/layers.26.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..88bf402d74ee45b96af1b0589f3c62259bddb011 --- /dev/null +++ b/triton_models/weights/layers.26.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc69426d2a21cd54667e20425acefafb5f7bff9b4e93784cee4789963642eff8 +size 8192 diff --git a/triton_models/weights/layers.26.attention.wo.0.qweight b/triton_models/weights/layers.26.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..040e53cc86fab23b9e1260acdd6ed205adb790b8 --- /dev/null +++ b/triton_models/weights/layers.26.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34f6431fce4f8d3733780676725e606ca1fbc79158cf19cdf55058538050b231 +size 8388608 diff 
--git a/triton_models/weights/layers.26.attention.wo.0.scales_zeros b/triton_models/weights/layers.26.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e39e3fd11ccc848770546ff72d92314e802e3ebb --- /dev/null +++ b/triton_models/weights/layers.26.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4541d29bdf2cca529d60fe33b25f3c80230b3177f60d354a8f6e597378be447 +size 524288 diff --git a/triton_models/weights/layers.26.attention_norm.weight b/triton_models/weights/layers.26.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..0826b621729bc0c72e55e5b9506e3d1fc9e2e86d --- /dev/null +++ b/triton_models/weights/layers.26.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1f1ad5c6049c28542102f897c0cc99dffcdea3d83f456c51bc53775de914365 +size 8192 diff --git a/triton_models/weights/layers.26.feed_forward.w13.0.qweight b/triton_models/weights/layers.26.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..7f7827a6332368a8135438fa49e45094c174b03c --- /dev/null +++ b/triton_models/weights/layers.26.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1f2260a7580796b6c4a443c96bb8c248400ef1749ab276270fa80747a442e97 +size 45088768 diff --git a/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..b729611c63b062dfe09d485408f54d974108edeb --- /dev/null +++ b/triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6f84547758cdc6a2072d2438a5cfaf7b97eb1e9f6aecebb523911f491ed43aa +size 2818048 diff --git a/triton_models/weights/layers.26.feed_forward.w2.0.qweight b/triton_models/weights/layers.26.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..04dfcdd574f11ad97cacd192ab0fb18233c7eb87 --- /dev/null +++ b/triton_models/weights/layers.26.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6674fa76726236e247f7b7c696951b5c763117239b09ba55418f65c9090f13fb +size 22544384 diff --git a/triton_models/weights/layers.26.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.26.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..717063cdcd085727a322dab541a2488c4a222b94 --- /dev/null +++ b/triton_models/weights/layers.26.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7fbde1ce0fedf1b8058406806d41c5354c68508a5b7f0bcc581759cabf5aabb +size 1409024 diff --git a/triton_models/weights/layers.26.ffn_norm.weight b/triton_models/weights/layers.26.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..bfd7ea14b9d695bf94957382471a22a99db8f45c --- /dev/null +++ b/triton_models/weights/layers.26.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:616c0d6639bfd15cdd01a5b8286a52b50a55f64ec559918ac404859ab8e9732d +size 8192 diff --git a/triton_models/weights/layers.26.past_kv_scale.0.weight b/triton_models/weights/layers.26.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..e5ee5b93ae4bd92de2dd7d186c67f16d1d7bc34a --- /dev/null +++ 
b/triton_models/weights/layers.26.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ca2efcd293e698e26cc6860ea03b171cb1a799fe097d69122a71a2383b4a190 +size 16 diff --git a/triton_models/weights/layers.27.attention.w_qkv.0.bias b/triton_models/weights/layers.27.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..85b479557220d682d3b721741129062eb4a036ed --- /dev/null +++ b/triton_models/weights/layers.27.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5c423d295b4d8953b4009382f5ff377739f8dc16b844b4df1cea2703d2093a6 +size 24576 diff --git a/triton_models/weights/layers.27.attention.w_qkv.0.qweight b/triton_models/weights/layers.27.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..2deb367cedb7a0f5c376985dcc8b1b711643e55e --- /dev/null +++ b/triton_models/weights/layers.27.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16bbb95870f70fd3761b77a0913beadb68e8136c91697edab0cd04b6407debc1 +size 25165824 diff --git a/triton_models/weights/layers.27.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.27.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e286c22fa8bcb9372d5511827735cdcd390b1a88 --- /dev/null +++ b/triton_models/weights/layers.27.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8972f9c5c9ad8b5d2e3f69a183c43e0ee8616413849517421f10d394a8cf500 +size 1572864 diff --git a/triton_models/weights/layers.27.attention.wo.0.bias b/triton_models/weights/layers.27.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..abf038c93a6f9fa79d3a7f2559fb4b0b08828ffa --- /dev/null +++ b/triton_models/weights/layers.27.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b5bb20ba3d7d9b28702aeb9cfa36beebc4d74a6354c77758a65954d8078a06 +size 8192 diff --git a/triton_models/weights/layers.27.attention.wo.0.qweight b/triton_models/weights/layers.27.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0155c6696b27843b40e468bc7dcea3a7bcea665f --- /dev/null +++ b/triton_models/weights/layers.27.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eafd0de77c97e76345653ad7f55024acf7aec16a18ea6195440b3d3050b8f3f4 +size 8388608 diff --git a/triton_models/weights/layers.27.attention.wo.0.scales_zeros b/triton_models/weights/layers.27.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..c9556708a36deac17f320c4082e07bb47ee2654a --- /dev/null +++ b/triton_models/weights/layers.27.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f74644d910f7ef9fa3b53f76004b390058c6310a434bb676db6a92bf89ad1b0 +size 524288 diff --git a/triton_models/weights/layers.27.attention_norm.weight b/triton_models/weights/layers.27.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..19546c58cce9795f28e93f418e144050c6b36472 --- /dev/null +++ b/triton_models/weights/layers.27.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e847ade0deb42634776b2f276d6a6fb95b14d76943d39edd04b6b6c49fb6bae5 +size 8192 diff --git a/triton_models/weights/layers.27.feed_forward.w13.0.qweight 
b/triton_models/weights/layers.27.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0eb873d13133a40d5210c6190468af7395777d75 --- /dev/null +++ b/triton_models/weights/layers.27.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:943944b4b5e3c0d2d74e8a3a029016bd5d3f75b49914a968467aa38c6238fc2a +size 45088768 diff --git a/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..5ea426fa478cf65519a5c8f82abf6d933372064a --- /dev/null +++ b/triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680200d9fba5cb44a37eec454f494ff4a9655e9ad1092a7ef57f05cf92fbf822 +size 2818048 diff --git a/triton_models/weights/layers.27.feed_forward.w2.0.qweight b/triton_models/weights/layers.27.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..3850b9bbb697c88d2752cbc82980902e534ec380 --- /dev/null +++ b/triton_models/weights/layers.27.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb7d170372b4ce254474a2f0cf746f3829c70b4e7b239bd000f7acd6c10bf2dc +size 22544384 diff --git a/triton_models/weights/layers.27.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.27.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..2b6f79f04a451aafd6225d373403eb91cb65031c --- /dev/null +++ b/triton_models/weights/layers.27.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40c05defc8a3b0bd405253b26bb686f0b5e09bd06152b8052cbb3f7679bf5df5 +size 1409024 diff --git a/triton_models/weights/layers.27.ffn_norm.weight b/triton_models/weights/layers.27.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..5cc44e5594ec6c48a557f23c45c7328face570eb --- /dev/null +++ b/triton_models/weights/layers.27.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2bfcf0d77e1a7c15c15aff2ca9c73b2437242480774222cae51011e55e8d789 +size 8192 diff --git a/triton_models/weights/layers.27.past_kv_scale.0.weight b/triton_models/weights/layers.27.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..0d367f62723f93d28641c2f2645b7f2c313b4140 --- /dev/null +++ b/triton_models/weights/layers.27.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55f717e4a4a894f924c933d649a5d237a2f62ddb11d72a9bb424bec19bcaa68d +size 16 diff --git a/triton_models/weights/layers.28.attention.w_qkv.0.bias b/triton_models/weights/layers.28.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..d49ef1c86e3cd9b7565fa93df3e36ad2edaca84f --- /dev/null +++ b/triton_models/weights/layers.28.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1370bb0fe84ded64e4b57fcdf8b95dd3c7b9b4581f0670fba8fc95310ec27fd6 +size 24576 diff --git a/triton_models/weights/layers.28.attention.w_qkv.0.qweight b/triton_models/weights/layers.28.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..49746186990cf32eede49d952206d1feba462796 --- /dev/null +++ b/triton_models/weights/layers.28.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b25eb3de7ea32b38096b3e00ed0cba3776a02506999d21e1aac1faf2dc090a3f +size 25165824 diff --git a/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..7d231cd2f53f08999a8c76a41d86a1fa54f2518e --- /dev/null +++ b/triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dbdafcfc3f1c617d96ea8a533ae1dd7e32de414b36891c70e96f58d6014a43d +size 1572864 diff --git a/triton_models/weights/layers.28.attention.wo.0.bias b/triton_models/weights/layers.28.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..a01e4f933fdd6e6ebfc1fd15c26d399ac302eed8 --- /dev/null +++ b/triton_models/weights/layers.28.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3676eb344f7dbf4767dccd3fbd5f2ab915eed828da3454e1a6fe8b45fc327d3 +size 8192 diff --git a/triton_models/weights/layers.28.attention.wo.0.qweight b/triton_models/weights/layers.28.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..0b6ff8abdb0f8237b12978995eda51dee4c940a2 --- /dev/null +++ b/triton_models/weights/layers.28.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe07050024a6dc946820aa80c254a15b05e69865328095adf5950f1f459fbaba +size 8388608 diff --git a/triton_models/weights/layers.28.attention.wo.0.scales_zeros b/triton_models/weights/layers.28.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..03fd0a419c58fa2d1506462d558ba4e139bab634 --- /dev/null +++ b/triton_models/weights/layers.28.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1dcc9867ca43a93ca8b97434a1061bc90318d6bcd605ff7bf090507c1fdad95 +size 524288 diff --git a/triton_models/weights/layers.28.attention_norm.weight b/triton_models/weights/layers.28.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..a940af255d3c389ecd572cdf2c90891555a2f4e2 --- /dev/null +++ b/triton_models/weights/layers.28.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9c773c0b9242d3033cdf41156b965ef5446593d655ef9a4353b7d4cd28123ec +size 8192 diff --git a/triton_models/weights/layers.28.feed_forward.w13.0.qweight b/triton_models/weights/layers.28.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..9054d2a06de7defd36ff531360bf3c9f5f9f112c --- /dev/null +++ b/triton_models/weights/layers.28.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d7e017904dda1afe2688d58a4852cadd9845ec985f82ae3d23261473b3bcbc4 +size 45088768 diff --git a/triton_models/weights/layers.28.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.28.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..4b075865fea91973e910c6709a6ad73e98604953 --- /dev/null +++ b/triton_models/weights/layers.28.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a66004f93f4594995a344e393b28babc53ad0a8c8817706e49dedc2216dd9c1 +size 2818048 diff --git a/triton_models/weights/layers.28.feed_forward.w2.0.qweight b/triton_models/weights/layers.28.feed_forward.w2.0.qweight new file 
mode 100644 index 0000000000000000000000000000000000000000..facf86d4b9083de78bc65fab89193de716115f00 --- /dev/null +++ b/triton_models/weights/layers.28.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87f80d245d8222d4fe58a7190c0943e1624ec291f6ff7fe594c730c89e3519a5 +size 22544384 diff --git a/triton_models/weights/layers.28.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.28.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e43078285c9f78bf1da393cb8c08b7452590f601 --- /dev/null +++ b/triton_models/weights/layers.28.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a77dfd247fa72595ddb901e08f110ea2ce539b90d99980b257dfb6d1ab1ccdf +size 1409024 diff --git a/triton_models/weights/layers.28.ffn_norm.weight b/triton_models/weights/layers.28.ffn_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..2c0d7236cc9802da8286f76f698fdcd7a71470b4 --- /dev/null +++ b/triton_models/weights/layers.28.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c76bd2f9107269483f38cd68945cd7299cf87e69f56e4cca1593885421636c69 +size 8192 diff --git a/triton_models/weights/layers.28.past_kv_scale.0.weight b/triton_models/weights/layers.28.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..0a87f1c15ea497bb3f1490be421d8bddb2f9ae46 --- /dev/null +++ b/triton_models/weights/layers.28.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfccd044a9082ca979255df5b3d65171ea7fdaf2ee643c42ccee1fc7f7317f2b +size 16 diff --git a/triton_models/weights/layers.29.attention.w_qkv.0.bias b/triton_models/weights/layers.29.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..c200723660ff99a5192ea7bf71636461999d9355 --- /dev/null +++ b/triton_models/weights/layers.29.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e6bed5467659bbabf34787198475b202b2ae191d1818fbfc077c697ada7962c +size 24576 diff --git a/triton_models/weights/layers.29.attention.w_qkv.0.qweight b/triton_models/weights/layers.29.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..2a5442661131e9a1ac2a3dd994b912e81606c8bd --- /dev/null +++ b/triton_models/weights/layers.29.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c5508257663699f2cb60d79b4625150a74fe0fbe6b50c585d16e88317c11cb1 +size 25165824 diff --git a/triton_models/weights/layers.29.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.29.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..0f37219e168e854b4d97c4311463b8f410b6d775 --- /dev/null +++ b/triton_models/weights/layers.29.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20687351ce34b0ccd9991d94044bf8571e3d62cdbe6d971f20ce45101d40682c +size 1572864 diff --git a/triton_models/weights/layers.29.attention.wo.0.bias b/triton_models/weights/layers.29.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..f7d6ed311fd160e505bf9a12888f2fdf1aaf30fb --- /dev/null +++ b/triton_models/weights/layers.29.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:951d50cf811932f1dcda694b74831787739c57fd3a2005c55bf0b827d40a1536 +size 8192 diff --git a/triton_models/weights/layers.29.attention.wo.0.qweight b/triton_models/weights/layers.29.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..c6aab0e4b2f6ec899b2fc7b5a86e492f975e19dc --- /dev/null +++ b/triton_models/weights/layers.29.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13b1fa4d643cca8da9af00f19ea9c15c1a6b95e784cd85826c83a06d2be4300f +size 8388608 diff --git a/triton_models/weights/layers.29.attention.wo.0.scales_zeros b/triton_models/weights/layers.29.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..45ae16e38e87dd0042219b7fa3e2ff6d2aeb081f --- /dev/null +++ b/triton_models/weights/layers.29.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:657487a87fb7d4401e2b20a277718bf587bccc053e771de22c646a3c2869983c +size 524288 diff --git a/triton_models/weights/layers.29.attention_norm.weight b/triton_models/weights/layers.29.attention_norm.weight new file mode 100644 index 0000000000000000000000000000000000000000..3b1e26182723bd384072e26c5957b974e89f3d9e --- /dev/null +++ b/triton_models/weights/layers.29.attention_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dc287615a28bb04e8a7857f1e263cbfe3502f2a8135f33fba88a5a2293c107b +size 8192 diff --git a/triton_models/weights/layers.29.feed_forward.w13.0.qweight b/triton_models/weights/layers.29.feed_forward.w13.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..10f657f37af6dbfef75e915d5aa4dbc73815563b --- /dev/null +++ b/triton_models/weights/layers.29.feed_forward.w13.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37278b2a74d9cde9a1806ac28a43f1a6ccffcf708369f5e2ae4c6fd7241ffe5a +size 45088768 diff --git a/triton_models/weights/layers.29.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.29.feed_forward.w13.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..1599cc3eae1deaf35f16ec01f87e242cc948ff19 --- /dev/null +++ b/triton_models/weights/layers.29.feed_forward.w13.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de07ab0f23d7dbaa3b8f5055eb2dd133106cba8dfb1778c56b7678957b7fe2b2 +size 2818048 diff --git a/triton_models/weights/layers.29.feed_forward.w2.0.qweight b/triton_models/weights/layers.29.feed_forward.w2.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..b4045ecd6ef3f576e57900c280bdef300d9be8c7 --- /dev/null +++ b/triton_models/weights/layers.29.feed_forward.w2.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:498f90a319c6a407095feca59829631811ff2789749f4e696ff16ce7a48ef610 +size 22544384 diff --git a/triton_models/weights/layers.29.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.29.feed_forward.w2.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..e4d2fe64a97ef76118fe8238b23b280acc497563 --- /dev/null +++ b/triton_models/weights/layers.29.feed_forward.w2.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941f4d428ab5bf09fdcfadfd85dcaa0fd55fffae878cac5fe3ad70ecdc6a1d53 +size 1409024 diff --git a/triton_models/weights/layers.29.ffn_norm.weight b/triton_models/weights/layers.29.ffn_norm.weight new file mode 100644 index 
0000000000000000000000000000000000000000..de7fbbdc1d0733845c2c0360ff62515c12dc49ce --- /dev/null +++ b/triton_models/weights/layers.29.ffn_norm.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3905443f02a3e9e4e0d273254aa271df8b36d443e2f554f40fba3cc93da92e6e +size 8192 diff --git a/triton_models/weights/layers.29.past_kv_scale.0.weight b/triton_models/weights/layers.29.past_kv_scale.0.weight new file mode 100644 index 0000000000000000000000000000000000000000..ed831d9796b24f8d322b3f1cd59b4d5dc379fca0 --- /dev/null +++ b/triton_models/weights/layers.29.past_kv_scale.0.weight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0a8ff9542f9d789e9b089fc23ca54f00438d765927a626e15aafd89c5dd7d63 +size 16 diff --git a/triton_models/weights/layers.3.attention.w_qkv.0.bias b/triton_models/weights/layers.3.attention.w_qkv.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..f7c9454fe583bfe15e2a49046752ecc8c4e31e0c --- /dev/null +++ b/triton_models/weights/layers.3.attention.w_qkv.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3290f29ab518d740d83e81648eecbd06c569222756c71521a95d8a90c91fcb0c +size 24576 diff --git a/triton_models/weights/layers.3.attention.w_qkv.0.qweight b/triton_models/weights/layers.3.attention.w_qkv.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..e93ee7faa87f02531f3519f5d8810479319e070e --- /dev/null +++ b/triton_models/weights/layers.3.attention.w_qkv.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48bf4742f261ed366a3a6cad7607a951d7e34556420c5f0868e5ab34ab7f921a +size 25165824 diff --git a/triton_models/weights/layers.3.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.3.attention.w_qkv.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..033128b5a868e37b0a774601b289ed69ba36839b --- /dev/null +++ b/triton_models/weights/layers.3.attention.w_qkv.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2288bb9a50b7172e16ed5fcf711eb6b96a7bdc3b0330f7089682716ea4050101 +size 1572864 diff --git a/triton_models/weights/layers.3.attention.wo.0.bias b/triton_models/weights/layers.3.attention.wo.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..c4e367709c65aabc47d3be6749690d29b44966f0 --- /dev/null +++ b/triton_models/weights/layers.3.attention.wo.0.bias @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcfadd5810916a471f7de4539288ed03403af765a86d8beac089600fca17070f +size 8192 diff --git a/triton_models/weights/layers.3.attention.wo.0.qweight b/triton_models/weights/layers.3.attention.wo.0.qweight new file mode 100644 index 0000000000000000000000000000000000000000..d5fcf42ad74da96e7ca3e41f61a12873d1fd7986 --- /dev/null +++ b/triton_models/weights/layers.3.attention.wo.0.qweight @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd06fa50ec9cb791a6b96fe7f78094bf107b4258a3abe8ffbd9350caa22bad40 +size 8388608 diff --git a/triton_models/weights/layers.3.attention.wo.0.scales_zeros b/triton_models/weights/layers.3.attention.wo.0.scales_zeros new file mode 100644 index 0000000000000000000000000000000000000000..4087fbd4af75ae75fe04e67f6a7663d5dbf23096 --- /dev/null +++ b/triton_models/weights/layers.3.attention.wo.0.scales_zeros @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cef60e9d46479ef3a9aa1ace90ec1853c7c2c1f7bdc93c7421fae0c6d92174d +size 524288 diff --git 
diff --git a/triton_models/weights/layers.3.attention_norm.weight b/triton_models/weights/layers.3.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..ce171c16f6a79dc8978ed083cf2850959a99a40d
--- /dev/null
+++ b/triton_models/weights/layers.3.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a74eac0852ad9ebda7f206cd0cb69a9b51ff086a0c358b87f97da68b488eb49
+size 8192
diff --git a/triton_models/weights/layers.3.feed_forward.w13.0.qweight b/triton_models/weights/layers.3.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..bcb72fc4600dfcdd868eeceb50b54a252d407a56
--- /dev/null
+++ b/triton_models/weights/layers.3.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4acd5e30af3d41e2b859eb5ec76ceec387d61de9a94660362ef39690a1731290
+size 45088768
diff --git a/triton_models/weights/layers.3.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.3.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..abfef4c05657293c7e3eb7df34ec70f4044325e7
--- /dev/null
+++ b/triton_models/weights/layers.3.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1031cead0d6d7d2b2e67f1d670437d55dd7f7cc3d381a1f5b87c211d0b4bb85f
+size 2818048
diff --git a/triton_models/weights/layers.3.feed_forward.w2.0.qweight b/triton_models/weights/layers.3.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..0d73de5684050415c0c5a8cf83826bf40dd9d884
--- /dev/null
+++ b/triton_models/weights/layers.3.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef324637abcad8b73c0b619edf7aeb19be6e66125351802e63f03cb5ded743d1
+size 22544384
diff --git a/triton_models/weights/layers.3.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.3.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..759504ec721b6ab4062132ed9cdb5f15a5350e99
--- /dev/null
+++ b/triton_models/weights/layers.3.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28883a771e83657dc5f70e84cd448235e82f923d81866a8216cf65f38c976d9f
+size 1409024
diff --git a/triton_models/weights/layers.3.ffn_norm.weight b/triton_models/weights/layers.3.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..ae0cd0e43de6a6c8b0280fa21724e08297efe866
--- /dev/null
+++ b/triton_models/weights/layers.3.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b783cbbd7f51461f235adacb21bf3efa7e40b894d8660893b910cfb946d7c8f4
+size 8192
diff --git a/triton_models/weights/layers.3.past_kv_scale.0.weight b/triton_models/weights/layers.3.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..e6017d4c3d6a6da9225c2620b82855419fe0ed93
--- /dev/null
+++ b/triton_models/weights/layers.3.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed0be339387b4f9957bf5ac833e0ba91f06393b7246f2187e96212207a9413a0
+size 16
diff --git a/triton_models/weights/layers.30.attention.w_qkv.0.bias b/triton_models/weights/layers.30.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..2215402826502962d56eddd6ed06dac278155d73
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:807c07d9a8992dfdc4b3208c0efdf2de914d2d1858b2d04fe87ace852d36a92f
+size 24576
diff --git a/triton_models/weights/layers.30.attention.w_qkv.0.qweight b/triton_models/weights/layers.30.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..ab5a95236fc486864a6413a2f9df7c7b508bc330
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b23444730a65cc5dff12fc4b286bdf473f32fd58e36c3a592080e04216281cb
+size 25165824
diff --git a/triton_models/weights/layers.30.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.30.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..02c718a472333c8cfed0cd42cc4a80e99eb39d4d
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0152f132778a5591009c2470e38c5a796c180d75fd5f18d8843722d4728d26c8
+size 1572864
diff --git a/triton_models/weights/layers.30.attention.wo.0.bias b/triton_models/weights/layers.30.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..d6fdf974af6f15f60300cf39a0c20f9a873b1891
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fca278c6bc108a8c5b7ee48ed3a11be369fbb09f110ae555ee81e4b921870f3b
+size 8192
diff --git a/triton_models/weights/layers.30.attention.wo.0.qweight b/triton_models/weights/layers.30.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..80321ac743ed39061f227954d7f1b4faa5b27612
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99d1ea16b6bcef71c2ae968db5029f4699a7f727ba1ea416e59f086cbe5c6c1f
+size 8388608
diff --git a/triton_models/weights/layers.30.attention.wo.0.scales_zeros b/triton_models/weights/layers.30.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..fadf2b134c51d0afb40f554e3a53edb297a4e233
--- /dev/null
+++ b/triton_models/weights/layers.30.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d53608cb20b90e7fca3138a82ee846cfbb0b4b518d48be4d050b0ec7fd048e2
+size 524288
diff --git a/triton_models/weights/layers.30.attention_norm.weight b/triton_models/weights/layers.30.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..bee594214395e9dc34517bc74bd009dafea48996
--- /dev/null
+++ b/triton_models/weights/layers.30.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbc581a2bec9eecd00d5974fb92ae7737807520e637b58a5268c21fba167d27a
+size 8192
diff --git a/triton_models/weights/layers.30.feed_forward.w13.0.qweight b/triton_models/weights/layers.30.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2d5573bfc0e06f5cc3f4e157fe446ee7299b45bf
--- /dev/null
+++ b/triton_models/weights/layers.30.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d265b256745b3210d8d84ece1b2fe760a9536eed2c88f1f45419c1827db27e7f
+size 45088768
diff --git a/triton_models/weights/layers.30.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.30.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..69a8c90e3e806d0617d31c5a3c061cf374658325
--- /dev/null
+++ b/triton_models/weights/layers.30.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:948b281f8ee71d66069cffde0eedfd824133956eba52879ab22fb63f8d7716ec
+size 2818048
diff --git a/triton_models/weights/layers.30.feed_forward.w2.0.qweight b/triton_models/weights/layers.30.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..047cdabce667db233b8412bd6d77f744e8212ffc
--- /dev/null
+++ b/triton_models/weights/layers.30.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c087e3424f590e2aad2ae4e6ba1939354887b4fba0c13367b31e8aa09d4937c3
+size 22544384
diff --git a/triton_models/weights/layers.30.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.30.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..dfff919f35e3933ca708cd2becd3783f764f1aab
--- /dev/null
+++ b/triton_models/weights/layers.30.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90d4b233bd9fb01ec7e2d2957591891409d5c8f15c1c194cad8185214bc47df1
+size 1409024
diff --git a/triton_models/weights/layers.30.ffn_norm.weight b/triton_models/weights/layers.30.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..631f1d21b4fe181c86d76a9f81531a6d55950837
--- /dev/null
+++ b/triton_models/weights/layers.30.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd3e1549357f71da5ab8ff6f87a3d026e9da72c0e252e133e02f58d383c79082
+size 8192
diff --git a/triton_models/weights/layers.30.past_kv_scale.0.weight b/triton_models/weights/layers.30.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..5b5e1c674cdd0b3ea4baabd7cf953e0cc88f4377
--- /dev/null
+++ b/triton_models/weights/layers.30.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32536ef8b1d8d0eb3193dfab67b55527611df0e75612d538dcf5cceae8d1779a
+size 16
diff --git a/triton_models/weights/layers.31.attention.w_qkv.0.bias b/triton_models/weights/layers.31.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..abfad2727b33b8d7dd851870c074705e64490e30
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b78de50784477b3a931ee2d2ad550e4bcd294c861d896f99e8ad664a7568d86d
+size 24576
diff --git a/triton_models/weights/layers.31.attention.w_qkv.0.qweight b/triton_models/weights/layers.31.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2833bc128422ddc17ad7f70d0b82c0df67369850
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cca9a6754dfa349f4fd8223f28a21c01948e5e24580973bb6f18e237e090ca71
+size 25165824
diff --git a/triton_models/weights/layers.31.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.31.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..cd2bb4f5b123021e511140ef1763bebdccbf99c5
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f703a83684372cacfc21cf828df1653fa835ebe075080f47a825571856f588f0
+size 1572864
diff --git a/triton_models/weights/layers.31.attention.wo.0.bias b/triton_models/weights/layers.31.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..cd1c322999035e82546eebc317d84cc23e4d6995
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48c99883cbca55f51af356c7ef6771ee7ae9e1f8d320940a3521f67b373a5d6c
+size 8192
diff --git a/triton_models/weights/layers.31.attention.wo.0.qweight b/triton_models/weights/layers.31.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..6c4e4f61b8a6fc1bbe871bdab9405bbd3655ea62
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98e99db0c40277bd0f6bf8f5b8149f39404c64e38cd1f53d846602f88ad7014f
+size 8388608
diff --git a/triton_models/weights/layers.31.attention.wo.0.scales_zeros b/triton_models/weights/layers.31.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..e9d428dfd72d22a8a383818d8e3b701382de00ad
--- /dev/null
+++ b/triton_models/weights/layers.31.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fce5f8fe95b602016f27a0ee584679f14aedf054e5d2ed38fafeebe2b41812ff
+size 524288
diff --git a/triton_models/weights/layers.31.attention_norm.weight b/triton_models/weights/layers.31.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..143b23c6082f0eafe98414f75c5db021a86e251b
--- /dev/null
+++ b/triton_models/weights/layers.31.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cefe5ebcd524968a8a78c57d7446b16d54f2840c7e08e0567c88c742b1713d5
+size 8192
diff --git a/triton_models/weights/layers.31.feed_forward.w13.0.qweight b/triton_models/weights/layers.31.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..66aff46a092340093930181cd0ad3aaa7b9488b8
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef4c58fb5bea89ca8073b5eb751f310694cb95ee4a1507afe1676b4d389b9688
+size 45088768
diff --git a/triton_models/weights/layers.31.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.31.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..bc49fb249afaf343068a796def4573877ec9da52
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbc283ecd2dd676a92550291ff8d389f07ba32db50731c9b695e35b7d11c53f4
+size 2818048
diff --git a/triton_models/weights/layers.31.feed_forward.w2.0.qweight b/triton_models/weights/layers.31.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..4906c3524c9a562eb7d62af858f1ea241fe5e522
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:244564556123760dca8dacdd5bd5aad65ad94842ff5221670c37cf6980a69fe5
+size 22544384
diff --git a/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..6cb2e96cc0102b40230ccd8c15b65df7ad04b7a0
--- /dev/null
+++ b/triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f20a1e55caeef7928fff3f69c7108603d09610798054dba7fa93e38be92a795b
+size 1409024
diff --git a/triton_models/weights/layers.31.ffn_norm.weight b/triton_models/weights/layers.31.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..074a8984f85c4d3ad3830ce6ebbf5b119c452784
--- /dev/null
+++ b/triton_models/weights/layers.31.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:618b76ad2af1439f16ac46489a828f1ccd8115254401d6b77b9badfef9e6928f
+size 8192
diff --git a/triton_models/weights/layers.31.past_kv_scale.0.weight b/triton_models/weights/layers.31.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..02dace066321413555cb7847b678c9f5623def77
--- /dev/null
+++ b/triton_models/weights/layers.31.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbaeeee2b1ea5a7cb6c53aff70ab6e9e39f628ba98ea5e796dc5c4ac6a5fa210
+size 16
diff --git a/triton_models/weights/layers.4.attention.w_qkv.0.bias b/triton_models/weights/layers.4.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..20739a9ab4c2a0db3cecb110557136d474e7b81d
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:125494cb18320aabc832ea35e672bc4bdfa2904a8a3795386183d2994649a91f
+size 24576
diff --git a/triton_models/weights/layers.4.attention.w_qkv.0.qweight b/triton_models/weights/layers.4.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..5ba5d78eae25463b89bfe2386cb1707703c08537
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:327241ad85de950f00947dbd1562d01cd541c4437a1214287372670bdbd4aae8
+size 25165824
diff --git a/triton_models/weights/layers.4.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.4.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..eb7bf64624114a531c9c484aa04e09f52bdb1493
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:844773221c2d44b86bfcd6e5952e8a2f40e6694e1737cc4e8dc838ca67d726dc
+size 1572864
diff --git a/triton_models/weights/layers.4.attention.wo.0.bias b/triton_models/weights/layers.4.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..7127cfac48210d8c4fac33881fc5ef23b90112b9
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63365eece8f4a469901b7afcd585a8e8f5e16b72cda859e63f12a58f68951d37
+size 8192
diff --git a/triton_models/weights/layers.4.attention.wo.0.qweight b/triton_models/weights/layers.4.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..bea3e52fdebc7d1aa7e05177838dd99a73f18f98
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4811058029734ab6454dd42a10f0781aa5dabdb387b2665d16cae4ac7ce4b13e
+size 8388608
diff --git a/triton_models/weights/layers.4.attention.wo.0.scales_zeros b/triton_models/weights/layers.4.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..ad827c8582f261da774c138ae281d816eba856cc
--- /dev/null
+++ b/triton_models/weights/layers.4.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f64b3a219fc538ae6db526f5bd062f0e7a260da07274086a05b5d428d71c9a46
+size 524288
diff --git a/triton_models/weights/layers.4.attention_norm.weight b/triton_models/weights/layers.4.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..db5fc6ada018b525895c91e894efac64f9d1d9da
--- /dev/null
+++ b/triton_models/weights/layers.4.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f5082b9d733ef9b70a9df03839c18873b5aa16357fc54dd4ac2128547499eec
+size 8192
diff --git a/triton_models/weights/layers.4.feed_forward.w13.0.qweight b/triton_models/weights/layers.4.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2d22e76228fbccec1ff85e8e79966ec5f64d252d
--- /dev/null
+++ b/triton_models/weights/layers.4.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a34d659e32a11c500c91cdc818b185cbb9b3b38798a982df96b8e8e73d99b06
+size 45088768
diff --git a/triton_models/weights/layers.4.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.4.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..75331ed972f07bc50353ae0172c02936a555f0ec
--- /dev/null
+++ b/triton_models/weights/layers.4.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93ef018516c85564a297b353bdb17bda0c6b019084d40bc954006c7d50be1453
+size 2818048
diff --git a/triton_models/weights/layers.4.feed_forward.w2.0.qweight b/triton_models/weights/layers.4.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..71282af9b726bcf7c1a6491b63a4fee095a74fd9
--- /dev/null
+++ b/triton_models/weights/layers.4.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d20868d3bdda39311a2845d5ffbee9410cfe74b1e5d45158891c384ffd3b614
+size 22544384
diff --git a/triton_models/weights/layers.4.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.4.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..2d0f846e47ac57dcafec72f7ab4830202c806dcd
--- /dev/null
+++ b/triton_models/weights/layers.4.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:120ed527c032672f64a5509cfef24929c8057808098e7797af7959746e76954d
+size 1409024
diff --git a/triton_models/weights/layers.4.ffn_norm.weight b/triton_models/weights/layers.4.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..ed47aaef0cb3880af6cd4d3da6313a7fb79400a1
--- /dev/null
+++ b/triton_models/weights/layers.4.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79f0cf03a3578853c14a43a9a26220e391c1b579e6c7fb51ecaa316f46eb4aa6
+size 8192
diff --git a/triton_models/weights/layers.4.past_kv_scale.0.weight b/triton_models/weights/layers.4.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..b47d34f35301e57baf06cbf78d7e427f31770b70
--- /dev/null
+++ b/triton_models/weights/layers.4.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b99b0ce9a526d637c8e82e9bdd5bf7ee625c1b7d8115160f11d55ba7c06c317b
+size 16
diff --git a/triton_models/weights/layers.5.attention.w_qkv.0.bias b/triton_models/weights/layers.5.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..2b01734560cc266fc697a0279aee3c58e4579db1
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b286f889ee5b357bc2688fbc088b52d5076142cb5d4cb9d01fa456dab6de9ce6
+size 24576
diff --git a/triton_models/weights/layers.5.attention.w_qkv.0.qweight b/triton_models/weights/layers.5.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..b6d34e066c5eaec2dc2643d2bdc355663aeb8707
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d965563daf96050a4aab6235ccdd6a98206fc282782b1c10d024cbdc8ede2373
+size 25165824
diff --git a/triton_models/weights/layers.5.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.5.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..b3ba6b2aa1d13d99287383e6c5b9d7f99c9ed67e
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f461da9d203ffd936394aa1be175e5b15547610e8b030d190ca9d056e4ba4ac4
+size 1572864
diff --git a/triton_models/weights/layers.5.attention.wo.0.bias b/triton_models/weights/layers.5.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..64d017aeaa5df05fa5a7158605ebc1bf42d324a2
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:988bd9ec95ebc33b42520204253e31693e0f4049cde0c79574720d9da5cc44ce
+size 8192
diff --git a/triton_models/weights/layers.5.attention.wo.0.qweight b/triton_models/weights/layers.5.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..58afe4c2e2e26269e87d3db427e05ae116e22693
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ea7f8ed14b3ba5a3ec645970a00dfc1023e6b1d2a2c7dc799b1bd3c934765c9
+size 8388608
diff --git a/triton_models/weights/layers.5.attention.wo.0.scales_zeros b/triton_models/weights/layers.5.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..837612ae0e27ca9e71fff0e8ba831c10310b9592
--- /dev/null
+++ b/triton_models/weights/layers.5.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:339b03dc1cd3a8844419052cf02671a6a5ca2877e18e816eadefc508f8a27b10
+size 524288
diff --git a/triton_models/weights/layers.5.attention_norm.weight b/triton_models/weights/layers.5.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..17f81bcc530940ed45cfab54d13872e97a25e859
--- /dev/null
+++ b/triton_models/weights/layers.5.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67f81de6d18c99d4f682ed7ac240a6f25f6175156e7cdafd255ce378ed77b125
+size 8192
diff --git a/triton_models/weights/layers.5.feed_forward.w13.0.qweight b/triton_models/weights/layers.5.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..1a517a17c30367ba0aba686968cf8f052c172886
--- /dev/null
+++ b/triton_models/weights/layers.5.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae68e9598ff12f2b1f4cb7322e9ae4f733e9782c8dc023551ee7b926a4a5d72e
+size 45088768
diff --git a/triton_models/weights/layers.5.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.5.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..1b9a2431000e739a4b70927bf199f8ff7b7aeb0e
--- /dev/null
+++ b/triton_models/weights/layers.5.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a701dd445fdd9e48698405d5bc67eed1a44f7e0c4d10cb2b59b0dd24c8112f26
+size 2818048
diff --git a/triton_models/weights/layers.5.feed_forward.w2.0.qweight b/triton_models/weights/layers.5.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..797724df711a5ac2b84e4df8cc712d7ed55475c1
--- /dev/null
+++ b/triton_models/weights/layers.5.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80da08e8118a2b745144d647e2ccd640958292d195502943bc1b492133a41577
+size 22544384
diff --git a/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..488884f48f0247535a67a3e9e165cc02c6360b01
--- /dev/null
+++ b/triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1799a7c0a3ac55d7fabba41c77d9af9b111bee863f5fca1a24c048d5fd0d9161
+size 1409024
diff --git a/triton_models/weights/layers.5.ffn_norm.weight b/triton_models/weights/layers.5.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..8648b1c5477b30145532376708b5b8f04163de06
--- /dev/null
+++ b/triton_models/weights/layers.5.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158c44f4474caf0b062bcd321ddbd36065b8adbf3ae9a0cb38fcaccfafeeb722
+size 8192
diff --git a/triton_models/weights/layers.5.past_kv_scale.0.weight b/triton_models/weights/layers.5.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..441a742c221915a70859870fd3d7e8afabc7c62b
--- /dev/null
+++ b/triton_models/weights/layers.5.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a63793b606b86e1c1fb53f9720880e2e922c51780d65bf3ab4e17050d76cbcb0
+size 16
diff --git a/triton_models/weights/layers.6.attention.w_qkv.0.bias b/triton_models/weights/layers.6.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..f83f8f95f5ebbd007b4d5f26175942c3f0c92619
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d3e96109ecfb29669d2503f63df1ad6ead50f1b397c4bb0496ee9362e2eda5a
+size 24576
diff --git a/triton_models/weights/layers.6.attention.w_qkv.0.qweight b/triton_models/weights/layers.6.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..158dfcb92ae54a1069ec5b7d6106116d1c1499f9
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e72aa3b20fdfed8c05a9e2b96135fb606c0e553eaad9e2dfb189c9b5ac8a0c97
+size 25165824
diff --git a/triton_models/weights/layers.6.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.6.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..a9044b02c4fb578e6b9877b9c9e96e7d149080e8
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74754499eed737dc955a256e6231f8afc221950754ebad0c7b67685737ef14de
+size 1572864
diff --git a/triton_models/weights/layers.6.attention.wo.0.bias b/triton_models/weights/layers.6.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..57f7bd6daea7eb8ffe07da4f8fc3ea068670ab36
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7d7c8da6d45dc89ff29e7a2a6f6777a1327b1b0751d404a1c499f842e726942
+size 8192
diff --git a/triton_models/weights/layers.6.attention.wo.0.qweight b/triton_models/weights/layers.6.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..c55ef26bad1d47412f0cec80db121f56c18460d0
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8c368ef3163bd0d22e61e44ad446201d8391580a62f12b72c8497973898dd0a
+size 8388608
diff --git a/triton_models/weights/layers.6.attention.wo.0.scales_zeros b/triton_models/weights/layers.6.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..dad2fb709498898d432ebeb1f06639c1c4fdf7b5
--- /dev/null
+++ b/triton_models/weights/layers.6.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94d3f550f3aaf06b542ae8261a834f204cbaa65d8651a71fce634aca09337054
+size 524288
diff --git a/triton_models/weights/layers.6.attention_norm.weight b/triton_models/weights/layers.6.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..c9a19070b455a6f40b2981919631cdd9675b862a
--- /dev/null
+++ b/triton_models/weights/layers.6.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b26bb218ea4796434897a524f37bf4923a91b4c68dc0b4a73cb2ec5c9a5c022d
+size 8192
diff --git a/triton_models/weights/layers.6.feed_forward.w13.0.qweight b/triton_models/weights/layers.6.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..cdec4bcb0392240846ebf008db0ad61ae922e063
--- /dev/null
+++ b/triton_models/weights/layers.6.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:713e8e39512a02b038bfc7770dffae22922b5325174bd29df4f3a5f49d184c29
+size 45088768
diff --git a/triton_models/weights/layers.6.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.6.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..1012cbba3ccf5fb19ddc780e8447ba4c8942afb3
--- /dev/null
+++ b/triton_models/weights/layers.6.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f03276163b5ac3dd174fe9d87cc38d618b0f4d429fef11dc61e49a21b3cd13e1
+size 2818048
diff --git a/triton_models/weights/layers.6.feed_forward.w2.0.qweight b/triton_models/weights/layers.6.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..71139d73b216e6b51ba8b37f956366c67399a43a
--- /dev/null
+++ b/triton_models/weights/layers.6.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3248a43676c17ce9a1fafa3d7ee2c2824d9eb7bb1004d386a1ef0cfa724f1e13
+size 22544384
diff --git a/triton_models/weights/layers.6.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.6.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..6cc44953ac494348f30471a47dc80cebd4fccee1
--- /dev/null
+++ b/triton_models/weights/layers.6.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c78e844582c65fb4fd3aaff5656ab6b6c20432ed85b1131f91efe10ef50b65cc
+size 1409024
diff --git a/triton_models/weights/layers.6.ffn_norm.weight b/triton_models/weights/layers.6.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..83e522ae53e874a0b605e06bc92de370d2040ad5
--- /dev/null
+++ b/triton_models/weights/layers.6.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ae3f29f5471c2b5d2520fae01a8cd46ca12e1b4328b0173d1ebfb67915d6c3f
+size 8192
diff --git a/triton_models/weights/layers.6.past_kv_scale.0.weight b/triton_models/weights/layers.6.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..35440e185ce9641d0154ce3e24860df760e1a2a6
--- /dev/null
+++ b/triton_models/weights/layers.6.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e323a55ab745ba195078bde609a0c9a3d63ba7bbfa662165e9f82c3d5fb094ee
+size 16
diff --git a/triton_models/weights/layers.7.attention.w_qkv.0.bias b/triton_models/weights/layers.7.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..048ef01ee748b7f46ff0f3876897a0e85751ba52
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f28a2fe6e7478b700192c057e47ff2dba077f64abfc82af77cc40df9a0aa45a8
+size 24576
diff --git a/triton_models/weights/layers.7.attention.w_qkv.0.qweight b/triton_models/weights/layers.7.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..6e471483e5eff237490aefa187a88ec01c17c0fa
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:947aa8ae7da47954f7ba4f939904ad357013175ec55987a238291cca87ddb1e1
+size 25165824
diff --git a/triton_models/weights/layers.7.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.7.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..9ac3fdfea863183bed43dacd48d231a9a18176e3
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad55e969486b7f27da777a13b798c9d729442049da9292e41cd4b792be497783
+size 1572864
diff --git a/triton_models/weights/layers.7.attention.wo.0.bias b/triton_models/weights/layers.7.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..32ff769b9fb2dcc53879261628735a7d592e7d2d
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:844ddf92e7e2169508db957d0ce752842a42d0c0e5b604429c51cf514fad2fb3
+size 8192
diff --git a/triton_models/weights/layers.7.attention.wo.0.qweight b/triton_models/weights/layers.7.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..762ab48740e01606a1f809edfe687698504610d8
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de703a8199ab042f6cec1ef40bc7eea7b0d38ea48e98013d9cee0dc4fc695c8b
+size 8388608
diff --git a/triton_models/weights/layers.7.attention.wo.0.scales_zeros b/triton_models/weights/layers.7.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..888e334c5fc081c4d8909ad80d76961b9d9cb5f4
--- /dev/null
+++ b/triton_models/weights/layers.7.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6da55818e028d02a877c396b564ec4e64759c978efaaa6b0985abe191d031b9f
+size 524288
diff --git a/triton_models/weights/layers.7.attention_norm.weight b/triton_models/weights/layers.7.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..28b456922c8ce491445b609daa555837d9db977c
--- /dev/null
+++ b/triton_models/weights/layers.7.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e01d51109876a4c166a5e4fbf644330cc88d5376feb919e6729992901adbb0e
+size 8192
diff --git a/triton_models/weights/layers.7.feed_forward.w13.0.qweight b/triton_models/weights/layers.7.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..eeb42a9ff02d0ee3917996b31417d155ffe137b6
--- /dev/null
+++ b/triton_models/weights/layers.7.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7690a7baffc196f323a9ee43b88a20449b13a01f596158acd44382d29b5ca9ed
+size 45088768
diff --git a/triton_models/weights/layers.7.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.7.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..1e32dfdd9740844976bc0e279129bae7f0b686f1
--- /dev/null
+++ b/triton_models/weights/layers.7.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cefcf6a90299085c9c89c081aa481a83ad46a0f373be0cf9448d753a59793767
+size 2818048
diff --git a/triton_models/weights/layers.7.feed_forward.w2.0.qweight b/triton_models/weights/layers.7.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..969c57a58f156df5d3c697397d52c2e795f5dbb9
--- /dev/null
+++ b/triton_models/weights/layers.7.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5460ef9f2f8821e2fd95fff2bfea553f1500898bbdd7c14c220b12bd2ed94e0a
+size 22544384
diff --git a/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..99d235d2a6519b983e80d6163a84ccf4929bbebc
--- /dev/null
+++ b/triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab405fa6ac96898a26c464ead14a074c414badbc2056c2f8e6e86d2dacf85c47
+size 1409024
diff --git a/triton_models/weights/layers.7.ffn_norm.weight b/triton_models/weights/layers.7.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..656cf3414022f1e898e670dedde8dc38715e1e1f
--- /dev/null
+++ b/triton_models/weights/layers.7.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:606717b1b3d8c0b3250f36f439b1f33fa5b84f94833b325fc1eaff258868c2f8
+size 8192
diff --git a/triton_models/weights/layers.7.past_kv_scale.0.weight b/triton_models/weights/layers.7.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..af5343b03cf3daabf91928026efdac223fd7193e
--- /dev/null
+++ b/triton_models/weights/layers.7.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b1f8b9043ee052cae201239fbe70db294a0d6de883452032b4e6cc09978c94c
+size 16
diff --git a/triton_models/weights/layers.8.attention.w_qkv.0.bias b/triton_models/weights/layers.8.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..d0f5d58826b057f555466cb5be0c9935a4038b3d
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2087041b7405fe5188c74554a1122d7ae6bf87f064216a5fbeda6c9e5e4f8dd
+size 24576
diff --git a/triton_models/weights/layers.8.attention.w_qkv.0.qweight b/triton_models/weights/layers.8.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..486fd72a2e002adeb8bfbc180e834365a0a44451
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcbd8efd61386db08402d12bcf81ca9fb1529a4a9fc318dd80dded1e25a3a479
+size 25165824
diff --git a/triton_models/weights/layers.8.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.8.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..ec0896b9b06aeaad709507b8d1738ac41e98351d
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea50b93c5938b6c28e46ea242a420111e3299b31a6309adba7a611c7ce411942
+size 1572864
diff --git a/triton_models/weights/layers.8.attention.wo.0.bias b/triton_models/weights/layers.8.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..31d5198b3d134e64505a1c7343372c050c9a948b
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81b34949117479fb052297d6ce3c77e3dd050b275d55183734f062804a3fc27f
+size 8192
diff --git a/triton_models/weights/layers.8.attention.wo.0.qweight b/triton_models/weights/layers.8.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..554b1062da4ebe47eaaeaecfb40c513554560184
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d444f9844af8c6ea6c6be91164753545cea8d74cc8d4202f08cc4a274128d6d4
+size 8388608
diff --git a/triton_models/weights/layers.8.attention.wo.0.scales_zeros b/triton_models/weights/layers.8.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..e17521743f037dc7d6b7a40ac972bb58accfb14e
--- /dev/null
+++ b/triton_models/weights/layers.8.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3fe177a4c8af0db44d0c7c5f74a3f5496e2345b35af889008860e43c9c1f061
+size 524288
diff --git a/triton_models/weights/layers.8.attention_norm.weight b/triton_models/weights/layers.8.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..7fbae2d9cd333dda0006b14715aa597933c1c046
--- /dev/null
+++ b/triton_models/weights/layers.8.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:238451897e0a7790bf2146f984544043b379a546dd61049118dabc0882a796e8
+size 8192
diff --git a/triton_models/weights/layers.8.feed_forward.w13.0.qweight b/triton_models/weights/layers.8.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..f6cc599e6f5ae4adbc0dfe3ddd44eb1eb6574c1c
--- /dev/null
+++ b/triton_models/weights/layers.8.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88b286b52baca6744de8e227093b4b6c454a770f3c856916bae17e7aeca780e9
+size 45088768
diff --git a/triton_models/weights/layers.8.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.8.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..65bdc81e4a424de9a5848804bc5c1afd7a2ba1a1
--- /dev/null
+++ b/triton_models/weights/layers.8.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a5f078552755eeb9afeb659b42b3a8e82ee4f6b9dc3b02d4d7ad66326e09b70
+size 2818048
diff --git a/triton_models/weights/layers.8.feed_forward.w2.0.qweight b/triton_models/weights/layers.8.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..ef75b96093fe0334e9c3784716d5c2c67cb75b40
--- /dev/null
+++ b/triton_models/weights/layers.8.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ea684f1ca7783802a4f8096832d327afd3a0663de43df48173de7643c3f990
+size 22544384
diff --git a/triton_models/weights/layers.8.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.8.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..e0cfb63a841f60e221df3f67615234c072ebe7a1
--- /dev/null
+++ b/triton_models/weights/layers.8.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee506648b60789b3005afb7624d4fb7fb46a8a6970cff5beda701e041bc0b2b8
+size 1409024
diff --git a/triton_models/weights/layers.8.ffn_norm.weight b/triton_models/weights/layers.8.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..e55811cee50b9868772b85639fa44347b2f52bc5
--- /dev/null
+++ b/triton_models/weights/layers.8.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce835596c22d41ba45a0939568c90a9c46c48399881c10aed54d55745edc8dbd
+size 8192
diff --git a/triton_models/weights/layers.8.past_kv_scale.0.weight b/triton_models/weights/layers.8.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..16eb7f2a23a2dc3ffd37eb174c0e3cc6c4ec3ef7
--- /dev/null
+++ b/triton_models/weights/layers.8.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e9c1f1e271a8e921cbd8a8bc072a773cc2d3f83cfdc3ba60bb275275aaec788
+size 16
diff --git a/triton_models/weights/layers.9.attention.w_qkv.0.bias b/triton_models/weights/layers.9.attention.w_qkv.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..d25aa59b24e1ca2b70b2c93fcc1233e7a7e4212d
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.w_qkv.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd5ec7e6c49994771ea5032fbfd64b009c8d42b9991da2008a6ebd6e5c559dd1
+size 24576
diff --git a/triton_models/weights/layers.9.attention.w_qkv.0.qweight b/triton_models/weights/layers.9.attention.w_qkv.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..7f5c15ae3cc7dd38d2ec5e4354edbef77cdfbc10
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.w_qkv.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afafadab5f90f93c2dcaf6740c2df9fb4e8366fd88b57fb6d9957e46037c01bd
+size 25165824
diff --git a/triton_models/weights/layers.9.attention.w_qkv.0.scales_zeros b/triton_models/weights/layers.9.attention.w_qkv.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..5d92921c8e696feabfb6914473b4dc1b20846c77
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.w_qkv.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3eda73930e0497f43b77bf192114cd4b682bf2be3caf19192edcec849a4a0e2
+size 1572864
diff --git a/triton_models/weights/layers.9.attention.wo.0.bias b/triton_models/weights/layers.9.attention.wo.0.bias
new file mode 100644
index 0000000000000000000000000000000000000000..66229394dee013c0d4cdb81c82e1314e278599a2
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.wo.0.bias
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afec140a00ba47d0e4a3df93db74560338a22929a6d7784a56d72970915eaca3
+size 8192
diff --git a/triton_models/weights/layers.9.attention.wo.0.qweight b/triton_models/weights/layers.9.attention.wo.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..2a03a2b9a65fe22ea66eac1d9eecc0398d90ebfe
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.wo.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:747ab44ba328d92663a56c95a12a9ab208e815fc564104646b3b6223f806ed1d
+size 8388608
diff --git a/triton_models/weights/layers.9.attention.wo.0.scales_zeros b/triton_models/weights/layers.9.attention.wo.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..d3ff7d3f8c3ca39ee6cbd1bc2c87f8222c25eae4
--- /dev/null
+++ b/triton_models/weights/layers.9.attention.wo.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d62b84e68bc884570d27fad5b4256235d60861b6dedbae63e49e5122e1b9fb60
+size 524288
diff --git a/triton_models/weights/layers.9.attention_norm.weight b/triton_models/weights/layers.9.attention_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..7eda0da0cade4eb2a79896cbe9ed53587f87b335
--- /dev/null
+++ b/triton_models/weights/layers.9.attention_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cd0a4a62d2854435d51ca76ffd49df792941449366353116573f4a3f32acf08
+size 8192
diff --git a/triton_models/weights/layers.9.feed_forward.w13.0.qweight b/triton_models/weights/layers.9.feed_forward.w13.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..59aad85cb6871b35126d3e89810a941dfea214bd
--- /dev/null
+++ b/triton_models/weights/layers.9.feed_forward.w13.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb54b8ce63c2afce9e7c5688927294e9d55af3aa17885737cdc82d2bde3c9918
+size 45088768
diff --git a/triton_models/weights/layers.9.feed_forward.w13.0.scales_zeros b/triton_models/weights/layers.9.feed_forward.w13.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..a7f277062a5addad389425f8f5598dda3d3f0f57
--- /dev/null
+++ b/triton_models/weights/layers.9.feed_forward.w13.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29faf68ec5a03b68537086a0e4b0a491a6b82920434553f8d9ba707121a60ba0
+size 2818048
diff --git a/triton_models/weights/layers.9.feed_forward.w2.0.qweight b/triton_models/weights/layers.9.feed_forward.w2.0.qweight
new file mode 100644
index 0000000000000000000000000000000000000000..6384aec9d819f5277beecc4677cb610ea74d6a3f
--- /dev/null
+++ b/triton_models/weights/layers.9.feed_forward.w2.0.qweight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45830ef70a543cc4ace23eed9ebb1e2e157cf3f76d68fd2c05d5b2b48818ebcf
+size 22544384
diff --git a/triton_models/weights/layers.9.feed_forward.w2.0.scales_zeros b/triton_models/weights/layers.9.feed_forward.w2.0.scales_zeros
new file mode 100644
index 0000000000000000000000000000000000000000..60f592095648078bb93ce21310f9b64b39d50d56
--- /dev/null
+++ b/triton_models/weights/layers.9.feed_forward.w2.0.scales_zeros
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0b08c6861870dcec01c413b6615d6f34a94b821bd80d781492c94e0b7de5f0f
+size 1409024
diff --git a/triton_models/weights/layers.9.ffn_norm.weight b/triton_models/weights/layers.9.ffn_norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..405e5d8a09736aae87984511d94ead85b25ad267
--- /dev/null
+++ b/triton_models/weights/layers.9.ffn_norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10f42bbfaa05a01f23f8742d573c0f33ec9741611d916c1c42f2472ebb18e130
+size 8192
diff --git a/triton_models/weights/layers.9.past_kv_scale.0.weight b/triton_models/weights/layers.9.past_kv_scale.0.weight
new file mode 100644
index 0000000000000000000000000000000000000000..56be89dbdf3083fb2ce214ad70ed2a31f915a938
--- /dev/null
+++ b/triton_models/weights/layers.9.past_kv_scale.0.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa6384d8208ab51862a5740bd4641f378a385d9d801b3e61cf6462e53e783c65
+size 16
diff --git a/triton_models/weights/norm.weight b/triton_models/weights/norm.weight
new file mode 100644
index 0000000000000000000000000000000000000000..7824b504ccafa3d192a632b8c65b9a2de30eff9f
--- /dev/null
+++ b/triton_models/weights/norm.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cf93fbae00bdf7813e35622207692a0a1d9cc9126b0b7dfe5a409e159137b4d
+size 8192
diff --git a/triton_models/weights/output.weight b/triton_models/weights/output.weight
new file mode 100644
index 0000000000000000000000000000000000000000..08fd1ba720843a9f76b57b9ff848e6993596cf24
--- /dev/null
+++ b/triton_models/weights/output.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab444c32fdc131cfc88c9fb9b40e06aa02b93eaa31fcd20adff909b781d0a46e
+size 845152256
diff --git a/triton_models/weights/tok_embeddings.weight b/triton_models/weights/tok_embeddings.weight
new file mode 100644
index 0000000000000000000000000000000000000000..840b0423fbff06f7a51629b1e40efad2b45b2667
--- /dev/null
+++ b/triton_models/weights/tok_embeddings.weight
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4608d68b0bf8724710e27ace3a28a621e1b1d8335689da9d937bfe325e423a7f
+size 845152256
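
Note: every weight file added above is a Git LFS v1 pointer, not the binary payload itself; each pointer records only the spec version, a sha256 object id, and the byte size of the real blob. The recorded sizes are internally consistent with 4-bit weight quantization of a 7B-class model with hidden size 4096 and FFN size 11008: for example, each attention wo qweight holds 4096 x 4096 weights at 4 bits = 8388608 bytes, each feed_forward w2 qweight holds 4096 x 11008 at 4 bits = 22544384 bytes, and the accompanying scales_zeros sizes match fp16 scale/zero pairs at a quantization group size of 128 (e.g. for wo: 4096/128 groups x 4096 outputs x 2 values x 2 bytes = 524288). The Python sketch below is not part of the repository; it assumes the blobs have been materialized (e.g. via `git lfs pull`) and uses hypothetical paths, and simply checks a downloaded weight file against its pointer:

    import hashlib
    from pathlib import Path


    def parse_lfs_pointer(text: str) -> dict:
        # A v1 pointer is three "key value" lines: version, oid, size.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        assert fields["version"] == "https://git-lfs.github.com/spec/v1"
        return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}


    def verify_weight(pointer_path: Path, blob_path: Path) -> bool:
        # Compare the fetched blob's byte size and sha256 against the pointer.
        meta = parse_lfs_pointer(pointer_path.read_text())
        if blob_path.stat().st_size != meta["size"]:
            return False
        digest = hashlib.sha256()
        with blob_path.open("rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
                digest.update(chunk)
        return digest.hexdigest() == meta["oid"]


    # Hypothetical usage: "pointers/norm.weight" is a pointer file kept aside,
    # "triton_models/weights/norm.weight" is the materialized 8192-byte blob.
    # print(verify_weight(Path("pointers/norm.weight"), Path("triton_models/weights/norm.weight")))

Serving does not require this step; it is only a sanity check that an LFS fetch completed without truncation or corruption before the TurboMind backend loads the weights.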