ccwu0918 committed
Commit 1a91b52
Parent: fb6920e

Upload 13 files

README.md ADDED
@@ -0,0 +1,34 @@
+ # The Xunzi Series of Large Language Models
+
+ With the rapid development of technology, artificial intelligence has spread into every field. In response to the call to revitalize the use of ancient books, and to promote the deep integration of large language models with ancient-text processing, this project, aimed at research on the intelligent processing of ancient texts, releases a series of large language models for this domain: the Xunzi family of ancient-text models. Xunzi was not only a great materialist thinker of China's pre-Qin period but also a master of prose, and a pioneer and founder in the exposition of linguistic theory. The Xunzi series is designed specifically for the intelligent processing of ancient texts; its release will drive new progress in the study and preservation of ancient books and raise the efficiency and quality of transmitting traditional Chinese culture.
+
+ This open-source release of the Xunzi series consists of two parts: the base model [**XunziALLM**](https://modelscope.cn/models/Xunzillm4cc/Xunzi-Qwen) and the chat model [**XunziChat**](https://modelscope.cn/models/Xunzillm4cc/Xunzi-Qwen-Chat). The models are invoked in the same way as Alibaba Cloud's Qwen series of large models.
+
+ ## Highlights of the Xunzi series
+
+ * Intelligent indexing of ancient texts: the Xunzi models have strong indexing capabilities and can produce high-quality subject indexing of the contents of ancient books, helping researchers quickly grasp what a text is about.
+
+ * Information extraction from ancient texts: the models automatically extract key information such as people, events, and places, greatly reducing the time researchers spend organizing material.
+
+ * Poetry generation: given a topic or keywords, the models can generate classical poems that conform to grammatical and metrical requirements, offering inspiration to poetry lovers.
+
+ * High-quality translation of ancient texts: for documents that are hard to understand, the models provide high-quality translation, helping researchers better grasp the meaning of the original.
+
+ * Reading comprehension: the models can analyze and explain a given classical passage, enabling automatic reading of ancient texts.
+
+ * Lexical analysis: the models perform automatic word segmentation and part-of-speech tagging of ancient texts, effectively improving the productivity of linguists.
+
+ * Automatic punctuation: the models quickly segment and punctuate ancient texts, improving the reading experience for researchers and hobbyists alike.
+
+ Since the base model is released as well, users can also fine-tune it on their own local training corpora so that it performs even better on downstream ancient-text processing tasks.
+
+ ## Disclaimer
+
+ The huge parameter count of a large language model also brings more randomness. Although we did our best to ensure the compliance of the training data during selection, unavoidable issues may still arise given the complexity of the data and the model. We therefore accept no liability for any problems caused by the use of this open-source model, including but not limited to data-security issues, public-opinion risks, or any risks and problems arising from the model being misled, abused, disseminated, or improperly exploited.
+
+ In addition, in accordance with the [Interim Measures for the Administration of Generative Artificial Intelligence Services](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm) jointly issued by the Cyberspace Administration of China and six other authorities, please observe the relevant laws and regulations when training or using this model and other generative models, and work with us to build a harmonious, healthy, and sustainable generative-AI community.
+
+ If you run into any problems while using the model, feel free to contact us ([email protected]).
+
+ ```bash
+ git clone https://www.modelscope.cn/Xunzillm4cc/Xunzi-Qwen-Chat.git
+ ```
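
The README ends with the clone command but no loading example. The following is a minimal sketch of invoking the chat model through the Qwen-style interface implemented by `modeling_qwen.py` in this commit; the local path assumes the `git clone` above was run in the current directory, and the sample query is purely illustrative:

```python
# Minimal usage sketch (not an official example from this repo).
# trust_remote_code=True is required because config.json routes loading
# to the custom QWen classes shipped in this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "Xunzi-Qwen-Chat"  # assumed: path created by the clone above

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path, device_map="auto", trust_remote_code=True
).eval()

# model.chat() is defined in modeling_qwen.py below; the query here
# (a punctuation request, one of the README's listed tasks) is illustrative.
response, history = model.chat(tokenizer, "请标点:天命之谓性率性之谓道", history=None)
print(response)
```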
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_name_or_path": "/home/gpuall/pretrained_models/Qwen_final/",
+   "architectures": [
+     "QWenLMHeadModel"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_qwen.QWenConfig",
+     "AutoModel": "modeling_qwen.QWenLMHeadModel",
+     "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": false,
+   "emb_dropout_prob": 0.0,
+   "fp16": true,
+   "fp32": false,
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 22016,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 8192,
+   "model_type": "qwen",
+   "no_bias": true,
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "onnx_safe": null,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 2048,
+   "tie_word_embeddings": false,
+   "tokenizer_type": "QWenTokenizer",
+   "torch_dtype": "float16",
+   "transformers_version": "4.32.0",
+   "use_cache": true,
+   "use_dynamic_ntk": true,
+   "use_flash_attn": false,
+   "use_logn_attn": true,
+   "vocab_size": 151936
+ }
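
For orientation, the `auto_map` block above is what routes `trust_remote_code` loading to the custom classes in this repository. A small inspection sketch (the local path is an assumption carried over from the README example):

```python
# Sketch: AutoConfig resolves to QWenConfig via the auto_map above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Xunzi-Qwen-Chat", trust_remote_code=True)
print(config.model_type)    # "qwen"
print(config.hidden_size)   # 4096
print(config.seq_length)    # 2048 (native length; use_dynamic_ntk extends it at inference)
```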
configuration_qwen.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from transformers import PretrainedConfig
+
+
+ class QWenConfig(PretrainedConfig):
+     model_type = "qwen"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         emb_dropout_prob=0.0,
+         attn_dropout_prob=0.0,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         max_position_embeddings=8192,
+         scale_attn_weights=True,
+         use_cache=True,
+         bf16=False,
+         fp16=False,
+         fp32=False,
+         kv_channels=128,
+         rotary_pct=1.0,
+         rotary_emb_base=10000,
+         use_dynamic_ntk=True,
+         use_logn_attn=True,
+         use_flash_attn="auto",
+         intermediate_size=22016,
+         no_bias=True,
+         tie_word_embeddings=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.emb_dropout_prob = emb_dropout_prob
+         self.attn_dropout_prob = attn_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.max_position_embeddings = max_position_embeddings
+         self.bf16 = bf16
+         self.fp16 = fp16
+         self.fp32 = fp32
+         self.kv_channels = kv_channels
+         self.rotary_pct = rotary_pct
+         self.rotary_emb_base = rotary_emb_base
+         self.use_dynamic_ntk = use_dynamic_ntk
+         self.use_logn_attn = use_logn_attn
+         self.use_flash_attn = use_flash_attn
+         self.no_bias = no_bias
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs
+         )
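
A quick sketch of using this class directly rather than through `AutoConfig`; the import assumes you run from the cloned repository directory, and the overridden values are illustrative:

```python
# Sketch: any kwarg not consumed by __init__ above falls through to
# PretrainedConfig via **kwargs. Overrides here are illustrative.
from configuration_qwen import QWenConfig  # assumes CWD is the repo root

config = QWenConfig(fp16=True, use_flash_attn=False)
print(config.intermediate_size)  # 22016 (class default)
print(config.use_flash_attn)     # False (overridden from the "auto" default)
```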
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "chat_format": "chatml",
+   "do_sample": true,
+   "max_new_tokens": 512,
+   "eos_token_id": 151643,
+   "pad_token_id": 151643,
+   "max_window_size": 6144,
+   "stop_words_ids": [
+     [
+       151643
+     ]
+   ],
+   "top_k": 0,
+   "top_p": 0.8,
+   "transformers_version": "4.32.0"
+ }
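
These are the sampling defaults that `model.chat()` picks up. A sketch of loading and overriding them explicitly (local path assumed, as before):

```python
# Sketch: model.chat() in modeling_qwen.py accepts a generation_config
# argument, so the defaults above can be overridden per call.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Xunzi-Qwen-Chat")
gen_cfg.top_p = 0.9  # e.g., sample a little more broadly than the 0.8 default
# response, history = model.chat(
#     tokenizer, query, history=None, generation_config=gen_cfg
# )
```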
logo.png ADDED
mdl ADDED
Binary file (50 Bytes).
 
modeling_qwen.py ADDED
@@ -0,0 +1,1232 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ import importlib
7
+ import math
8
+ from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+ import torch.utils.checkpoint
13
+ from torch.cuda.amp import autocast
14
+
15
+ from torch.nn import CrossEntropyLoss
16
+ from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
17
+ from transformers.generation.logits_process import LogitsProcessorList
18
+
19
+ if TYPE_CHECKING:
20
+ from transformers.generation.streamers import BaseStreamer
21
+ from transformers.generation.utils import GenerateOutput
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ CausalLMOutputWithPast,
25
+ )
26
+ from transformers.modeling_utils import PreTrainedModel
27
+ from transformers.utils import logging
28
+
29
+ try:
30
+ from einops import rearrange
31
+ except ImportError:
32
+ rearrange = None
33
+ from torch import nn
34
+
35
+ SUPPORT_CUDA = torch.cuda.is_available()
36
+ SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
37
+ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
38
+
39
+ from .configuration_qwen import QWenConfig
40
+ from .qwen_generation_utils import (
41
+ HistoryType,
42
+ make_context,
43
+ decode_tokens,
44
+ get_stop_words_ids,
45
+ StopWordsLogitsProcessor,
46
+ )
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "qwen"
52
+ _CONFIG_FOR_DOC = "QWenConfig"
53
+
54
+ QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
55
+
56
+ _ERROR_BAD_CHAT_FORMAT = """\
57
+ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
58
+ If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
59
+ 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
60
+ 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
61
+ """
62
+
63
+ _SENTINEL = object()
64
+ _ERROR_STREAM_IN_CHAT = """\
65
+ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
66
+ 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
67
+ """
68
+
69
+ _ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
70
+ We detect you have activated flash attention support, but running model computation on CPU. Please make sure that your input data has been placed on GPU. If you actually want to run CPU computation, please following the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained).
71
+ 检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
72
+ """
73
+
74
+ apply_rotary_emb_func = None
75
+ rms_norm = None
76
+ flash_attn_unpadded_func = None
77
+
78
+
79
+ def _import_flash_attn():
80
+ global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func
81
+ try:
82
+ from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
83
+ apply_rotary_emb_func = __apply_rotary_emb_func
84
+ except ImportError:
85
+ logger.warn(
86
+ "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
87
+ "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
88
+ )
89
+
90
+ try:
91
+ from flash_attn.ops.rms_norm import rms_norm as __rms_norm
92
+ rms_norm = __rms_norm
93
+ except ImportError:
94
+ logger.warn(
95
+ "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
96
+ "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
97
+ )
98
+
99
+ try:
100
+ import flash_attn
101
+ if not hasattr(flash_attn, '__version__'):
102
+ from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
103
+ else:
104
+ if int(flash_attn.__version__.split(".")[0]) >= 2:
105
+ from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
106
+ else:
107
+ from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
108
+ flash_attn_unpadded_func = __flash_attn_unpadded_func
109
+ except ImportError:
110
+ logger.warn(
111
+ "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
112
+ "https://github.com/Dao-AILab/flash-attention"
113
+ )
114
+
115
+
116
+ class FlashSelfAttention(torch.nn.Module):
117
+ def __init__(
118
+ self,
119
+ causal=False,
120
+ softmax_scale=None,
121
+ attention_dropout=0.0,
122
+ ):
123
+ super().__init__()
124
+ assert flash_attn_unpadded_func is not None, (
125
+ "Please install FlashAttention first, " "e.g., with pip install flash-attn"
126
+ )
127
+ assert (
128
+ rearrange is not None
129
+ ), "Please install einops first, e.g., with pip install einops"
130
+ self.causal = causal
131
+ self.softmax_scale = softmax_scale
132
+ self.dropout_p = attention_dropout
133
+
134
+ def forward(self, q, k, v):
135
+ assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
136
+ assert all((i.is_cuda for i in (q, k, v)))
137
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
138
+ seqlen_k = k.shape[1]
139
+
140
+ q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
141
+ cu_seqlens_q = torch.arange(
142
+ 0,
143
+ (batch_size + 1) * seqlen_q,
144
+ step=seqlen_q,
145
+ dtype=torch.int32,
146
+ device=q.device,
147
+ )
148
+
149
+ if self.training:
150
+ assert seqlen_k == seqlen_q
151
+
152
+ is_causal = self.causal
153
+ cu_seqlens_k = cu_seqlens_q
154
+ else:
155
+ is_causal = seqlen_q == seqlen_k
156
+ cu_seqlens_k = torch.arange(
157
+ 0,
158
+ (batch_size + 1) * seqlen_k,
159
+ step=seqlen_k,
160
+ dtype=torch.int32,
161
+ device=q.device,
162
+ )
163
+ self.dropout_p = 0
164
+
165
+ output = flash_attn_unpadded_func(
166
+ q,
167
+ k,
168
+ v,
169
+ cu_seqlens_q,
170
+ cu_seqlens_k,
171
+ seqlen_q,
172
+ seqlen_k,
173
+ self.dropout_p,
174
+ softmax_scale=self.softmax_scale,
175
+ causal=is_causal,
176
+ )
177
+
178
+ new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:]
179
+ output = output.view(new_shape)
180
+ return output
181
+
182
+
183
+ class QWenAttention(nn.Module):
184
+ def __init__(self, config):
185
+ super().__init__()
186
+
187
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
188
+ self.seq_length = config.seq_length
189
+
190
+ self.hidden_size = config.hidden_size
191
+ self.split_size = config.hidden_size
192
+ self.num_heads = config.num_attention_heads
193
+ self.head_dim = self.hidden_size // self.num_heads
194
+
195
+ self.use_flash_attn = config.use_flash_attn
196
+ self.scale_attn_weights = True
197
+
198
+ self.projection_size = config.kv_channels * config.num_attention_heads
199
+
200
+ assert self.projection_size % config.num_attention_heads == 0
201
+ self.hidden_size_per_attention_head = (
202
+ self.projection_size // config.num_attention_heads
203
+ )
204
+
205
+ self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
206
+
207
+ self.c_proj = nn.Linear(
208
+ config.hidden_size, self.projection_size, bias=not config.no_bias
209
+ )
210
+
211
+ self.is_fp32 = not (config.bf16 or config.fp16)
212
+ if (
213
+ self.use_flash_attn
214
+ and flash_attn_unpadded_func is not None
215
+ and not self.is_fp32
216
+ ):
217
+ self.core_attention_flash = FlashSelfAttention(
218
+ causal=True, attention_dropout=config.attn_dropout_prob
219
+ )
220
+ self.bf16 = config.bf16
221
+
222
+ self.use_dynamic_ntk = config.use_dynamic_ntk
223
+ self.use_logn_attn = config.use_logn_attn
224
+
225
+ logn_list = [
226
+ math.log(i, self.seq_length) if i > self.seq_length else 1
227
+ for i in range(1, 32768)
228
+ ]
229
+ self.logn_tensor = torch.tensor(logn_list)[None, :, None, None]
230
+
231
+ self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
232
+
233
+ def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
234
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
235
+
236
+ if self.scale_attn_weights:
237
+ attn_weights = attn_weights / torch.full(
238
+ [],
239
+ value.size(-1) ** 0.5,
240
+ dtype=attn_weights.dtype,
241
+ device=attn_weights.device,
242
+ )
243
+
244
+ query_length, key_length = query.size(-2), key.size(-2)
245
+ causal_mask = registered_causal_mask[
246
+ :, :, key_length - query_length : key_length, :key_length
247
+ ]
248
+ mask_value = torch.finfo(attn_weights.dtype).min
249
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
250
+ attn_weights.device
251
+ )
252
+ attn_weights = torch.where(
253
+ causal_mask, attn_weights.to(attn_weights.dtype), mask_value
254
+ )
255
+
256
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
257
+
258
+ attn_weights = attn_weights.type(value.dtype)
259
+ attn_weights = self.attn_dropout(attn_weights)
260
+
261
+ if head_mask is not None:
262
+ attn_weights = attn_weights * head_mask
263
+
264
+ attn_output = torch.matmul(attn_weights, value)
265
+ attn_output = attn_output.transpose(1, 2)
266
+
267
+ return attn_output, attn_weights
268
+
269
+ def _upcast_and_reordered_attn(
270
+ self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
271
+ ):
272
+ bsz, num_heads, q_seq_len, dk = query.size()
273
+ _, _, k_seq_len, _ = key.size()
274
+
275
+ attn_weights = torch.empty(
276
+ bsz * num_heads,
277
+ q_seq_len,
278
+ k_seq_len,
279
+ dtype=torch.float32,
280
+ device=query.device,
281
+ )
282
+
283
+ scale_factor = 1.0
284
+ if self.scale_attn_weights:
285
+ scale_factor /= float(value.size(-1)) ** 0.5
286
+
287
+ with autocast(enabled=False):
288
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
289
+ -1, dk, k_seq_len
290
+ )
291
+ attn_weights = torch.baddbmm(
292
+ attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
293
+ )
294
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
295
+
296
+ query_length, key_length = query.size(-2), key.size(-2)
297
+ causal_mask = registered_causal_mask[
298
+ :, :, key_length - query_length : key_length, :key_length
299
+ ]
300
+ mask_value = torch.finfo(attn_weights.dtype).min
301
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
302
+ attn_weights.device
303
+ )
304
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
305
+
306
+ if attention_mask is not None:
307
+ attn_weights = attn_weights + attention_mask
308
+
309
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
310
+
311
+ if attn_weights.dtype != torch.float32:
312
+ raise RuntimeError(
313
+ "Error with upcasting, attn_weights does not have dtype torch.float32"
314
+ )
315
+ attn_weights = attn_weights.type(value.dtype)
316
+ attn_weights = self.attn_dropout(attn_weights)
317
+
318
+ if head_mask is not None:
319
+ attn_weights = attn_weights * head_mask
320
+
321
+ attn_output = torch.matmul(attn_weights, value)
322
+
323
+ return attn_output, attn_weights
324
+
325
+ def _split_heads(self, tensor, num_heads, attn_head_size):
326
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
327
+ tensor = tensor.view(new_shape)
328
+ return tensor
329
+
330
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
331
+ tensor = tensor.contiguous()
332
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
333
+ return tensor.view(new_shape)
334
+
335
+ def forward(
336
+ self,
337
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
338
+ rotary_pos_emb: Optional[List[torch.Tensor]] = None,
339
+ registered_causal_mask: Optional[torch.Tensor] = None,
340
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
341
+ attention_mask: Optional[torch.FloatTensor] = None,
342
+ head_mask: Optional[torch.FloatTensor] = None,
343
+ encoder_hidden_states: Optional[torch.Tensor] = None,
344
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
345
+ output_attentions: Optional[bool] = False,
346
+ use_cache: Optional[bool] = False,
347
+ ):
348
+
349
+ mixed_x_layer = self.c_attn(hidden_states)
350
+
351
+ query, key, value = mixed_x_layer.split(self.split_size, dim=2)
352
+
353
+ query = self._split_heads(query, self.num_heads, self.head_dim)
354
+ key = self._split_heads(key, self.num_heads, self.head_dim)
355
+ value = self._split_heads(value, self.num_heads, self.head_dim)
356
+
357
+ if rotary_pos_emb is not None:
358
+ cur_len = query.shape[1]
359
+ rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
360
+ rotary_pos_emb = (rotary_pos_emb,) * 2
361
+ q_pos_emb, k_pos_emb = rotary_pos_emb
362
+ # Slice the pos emb for current inference
363
+ query = apply_rotary_pos_emb(query, q_pos_emb)
364
+ key = apply_rotary_pos_emb(key, k_pos_emb)
365
+
366
+ if layer_past is not None:
367
+ past_key, past_value = layer_past[0], layer_past[1]
368
+ key = torch.cat((past_key, key), dim=1)
369
+ value = torch.cat((past_value, value), dim=1)
370
+
371
+ if use_cache:
372
+ present = (key, value)
373
+ else:
374
+ present = None
375
+
376
+ if self.use_logn_attn and not self.training:
377
+ if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype:
378
+ self.logn_tensor = self.logn_tensor.to(query.device).type_as(query)
379
+ seq_start = key.size(1) - query.size(1)
380
+ seq_end = key.size(1)
381
+ logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
382
+ query = query * logn_tensor.expand_as(query)
383
+
384
+ if (
385
+ self.use_flash_attn
386
+ and flash_attn_unpadded_func is not None
387
+ and not self.is_fp32
388
+ and query.is_cuda
389
+ ):
390
+ q, k, v = query, key, value
391
+ context_layer = self.core_attention_flash(q, k, v)
392
+
393
+ # b s h d -> b s (h d)
394
+ context_layer = context_layer.flatten(2,3).contiguous()
395
+
396
+ else:
397
+ query = query.permute(0, 2, 1, 3)
398
+ key = key.permute(0, 2, 1, 3)
399
+ value = value.permute(0, 2, 1, 3)
400
+ if (
401
+ registered_causal_mask is None
402
+ and self.use_flash_attn
403
+ and flash_attn_unpadded_func is not None
404
+ and not self.is_fp32
405
+ and not query.is_cuda
406
+ ):
407
+ raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)
408
+ attn_output, attn_weight = self._attn(
409
+ query, key, value, registered_causal_mask, attention_mask, head_mask
410
+ )
411
+ context_layer = self._merge_heads(
412
+ attn_output, self.num_heads, self.head_dim
413
+ )
414
+
415
+ attn_output = self.c_proj(context_layer)
416
+
417
+ outputs = (attn_output, present)
418
+ if output_attentions:
419
+ if (
420
+ self.use_flash_attn
421
+ and flash_attn_unpadded_func is not None
422
+ and not self.is_fp32
423
+ ):
424
+ raise ValueError("Cannot output attentions while using flash-attn")
425
+ else:
426
+ outputs += (attn_weight,)
427
+
428
+ return outputs
429
+
430
+
431
+ class QWenMLP(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.w1 = nn.Linear(
435
+ config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
436
+ )
437
+ self.w2 = nn.Linear(
438
+ config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
439
+ )
440
+ ff_dim_in = config.intermediate_size // 2
441
+ self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
442
+
443
+ def forward(self, hidden_states):
444
+ a1 = self.w1(hidden_states)
445
+ a2 = self.w2(hidden_states)
446
+ intermediate_parallel = a1 * F.silu(a2)
447
+ output = self.c_proj(intermediate_parallel)
448
+ return output
449
+
450
+ class QWenBlock(nn.Module):
451
+ def __init__(self, config):
452
+ super().__init__()
453
+ hidden_size = config.hidden_size
454
+ self.bf16 = config.bf16
455
+
456
+ self.ln_1 = RMSNorm(
457
+ hidden_size,
458
+ eps=config.layer_norm_epsilon,
459
+ )
460
+ self.attn = QWenAttention(config)
461
+ self.ln_2 = RMSNorm(
462
+ hidden_size,
463
+ eps=config.layer_norm_epsilon,
464
+ )
465
+
466
+ self.mlp = QWenMLP(config)
467
+
468
+ def forward(
469
+ self,
470
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
471
+ rotary_pos_emb: Optional[List[torch.Tensor]] = None,
472
+ registered_causal_mask: Optional[torch.Tensor] = None,
473
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
474
+ attention_mask: Optional[torch.FloatTensor] = None,
475
+ head_mask: Optional[torch.FloatTensor] = None,
476
+ encoder_hidden_states: Optional[torch.Tensor] = None,
477
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
478
+ use_cache: Optional[bool] = False,
479
+ output_attentions: Optional[bool] = False,
480
+ ):
481
+ layernorm_output = self.ln_1(hidden_states)
482
+
483
+ attn_outputs = self.attn(
484
+ layernorm_output,
485
+ rotary_pos_emb,
486
+ registered_causal_mask=registered_causal_mask,
487
+ layer_past=layer_past,
488
+ attention_mask=attention_mask,
489
+ head_mask=head_mask,
490
+ use_cache=use_cache,
491
+ output_attentions=output_attentions,
492
+ )
493
+ attn_output = attn_outputs[0]
494
+
495
+ outputs = attn_outputs[1:]
496
+
497
+ residual = hidden_states
498
+ layernorm_input = attn_output + residual
499
+
500
+ layernorm_output = self.ln_2(layernorm_input)
501
+
502
+ residual = layernorm_input
503
+ mlp_output = self.mlp(layernorm_output)
504
+ hidden_states = residual + mlp_output
505
+
506
+ if use_cache:
507
+ outputs = (hidden_states,) + outputs
508
+ else:
509
+ outputs = (hidden_states,) + outputs[1:]
510
+
511
+ return outputs
512
+
513
+
514
+ class QWenPreTrainedModel(PreTrainedModel):
515
+ config_class = QWenConfig
516
+ base_model_prefix = "transformer"
517
+ is_parallelizable = False
518
+ supports_gradient_checkpointing = True
519
+ _no_split_modules = ["QWenBlock"]
520
+
521
+ def __init__(self, *inputs, **kwargs):
522
+ super().__init__(*inputs, **kwargs)
523
+
524
+ def _init_weights(self, module):
525
+ """Initialize the weights."""
526
+ if isinstance(module, nn.Linear):
527
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
528
+ if module.bias is not None:
529
+ module.bias.data.zero_()
530
+ elif isinstance(module, nn.Embedding):
531
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
532
+ if module.padding_idx is not None:
533
+ module.weight.data[module.padding_idx].zero_()
534
+ elif isinstance(module, RMSNorm):
535
+ module.weight.data.fill_(1.0)
536
+
537
+ for name, p in module.named_parameters():
538
+ if name == "c_proj.weight":
539
+ p.data.normal_(
540
+ mean=0.0,
541
+ std=(
542
+ self.config.initializer_range
543
+ / math.sqrt(2 * self.config.num_hidden_layers)
544
+ ),
545
+ )
546
+
547
+ def _set_gradient_checkpointing(self, module, value=False):
548
+ if isinstance(module, QWenModel):
549
+ module.gradient_checkpointing = value
550
+
551
+
552
+ class QWenModel(QWenPreTrainedModel):
553
+ _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
554
+
555
+ def __init__(self, config):
556
+ super().__init__(config)
557
+ self.vocab_size = config.vocab_size
558
+ self.num_hidden_layers = config.num_hidden_layers
559
+ self.embed_dim = config.hidden_size
560
+
561
+ self.gradient_checkpointing = False
562
+ self.use_dynamic_ntk = config.use_dynamic_ntk
563
+ self.seq_length = config.seq_length
564
+
565
+ self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
566
+
567
+ self.drop = nn.Dropout(config.emb_dropout_prob)
568
+
569
+ if config.rotary_pct == 1.0:
570
+ self.rotary_ndims = None
571
+ else:
572
+ assert config.rotary_pct < 1
573
+ self.rotary_ndims = int(
574
+ config.kv_channels * config.rotary_pct
575
+ )
576
+ dim = (
577
+ self.rotary_ndims
578
+ if self.rotary_ndims is not None
579
+ else config.kv_channels
580
+ )
581
+ self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
582
+
583
+ self.use_flash_attn = config.use_flash_attn
584
+ self.is_fp32 = not (config.bf16 or config.fp16)
585
+ if (
586
+ self.use_flash_attn
587
+ and flash_attn_unpadded_func is not None
588
+ and not self.is_fp32
589
+ ):
590
+ self.registered_causal_mask = None
591
+ else:
592
+ max_positions = config.max_position_embeddings
593
+ self.register_buffer(
594
+ "registered_causal_mask",
595
+ torch.tril(
596
+ torch.ones((max_positions, max_positions), dtype=torch.bool)
597
+ ).view(1, 1, max_positions, max_positions),
598
+ persistent=False,
599
+ )
600
+
601
+ self.h = nn.ModuleList(
602
+ [
603
+ QWenBlock(
604
+ config
605
+ )
606
+ for i in range(config.num_hidden_layers)
607
+ ]
608
+ )
609
+ self.ln_f = RMSNorm(
610
+ self.embed_dim,
611
+ eps=config.layer_norm_epsilon,
612
+ )
613
+
614
+ self.post_init()
615
+
616
+ def get_input_embeddings(self):
617
+ return self.wte
618
+
619
+ def set_input_embeddings(self, new_embeddings):
620
+ self.wte = new_embeddings
621
+
622
+ def forward(
623
+ self,
624
+ input_ids: Optional[torch.LongTensor] = None,
625
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
626
+ attention_mask: Optional[torch.FloatTensor] = None,
627
+ token_type_ids: Optional[torch.LongTensor] = None,
628
+ position_ids: Optional[torch.LongTensor] = None,
629
+ head_mask: Optional[torch.FloatTensor] = None,
630
+ inputs_embeds: Optional[torch.FloatTensor] = None,
631
+ encoder_hidden_states: Optional[torch.Tensor] = None,
632
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
633
+ use_cache: Optional[bool] = None,
634
+ output_attentions: Optional[bool] = None,
635
+ output_hidden_states: Optional[bool] = None,
636
+ return_dict: Optional[bool] = None,
637
+ ):
638
+ output_attentions = (
639
+ output_attentions
640
+ if output_attentions is not None
641
+ else self.config.output_attentions
642
+ )
643
+ output_hidden_states = (
644
+ output_hidden_states
645
+ if output_hidden_states is not None
646
+ else self.config.output_hidden_states
647
+ )
648
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
649
+ return_dict = (
650
+ return_dict if return_dict is not None else self.config.use_return_dict
651
+ )
652
+
653
+ if input_ids is not None and inputs_embeds is not None:
654
+ raise ValueError(
655
+ "You cannot specify both input_ids and inputs_embeds at the same time"
656
+ )
657
+ elif input_ids is not None:
658
+ input_shape = input_ids.size()
659
+ input_ids = input_ids.view(-1, input_shape[-1])
660
+ batch_size = input_ids.shape[0]
661
+ elif inputs_embeds is not None:
662
+ input_shape = inputs_embeds.size()[:-1]
663
+ batch_size = inputs_embeds.shape[0]
664
+ else:
665
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
666
+
667
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
668
+
669
+ if token_type_ids is not None:
670
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
671
+ if position_ids is not None:
672
+ position_ids = position_ids.view(-1, input_shape[-1])
673
+
674
+ if past_key_values is None:
675
+ past_length = 0
676
+ past_key_values = tuple([None] * len(self.h))
677
+ else:
678
+ past_length = past_key_values[0][0].size(-2)
679
+
680
+ if position_ids is None:
681
+ position_ids = torch.arange(
682
+ past_length,
683
+ input_shape[-1] + past_length,
684
+ dtype=torch.long,
685
+ device=device,
686
+ )
687
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
688
+
689
+ if attention_mask is not None:
690
+ if batch_size <= 0:
691
+ raise ValueError("batch_size has to be defined and > 0")
692
+ attention_mask = attention_mask.view(batch_size, -1)
693
+ attention_mask = attention_mask[:, None, None, :]
694
+ attention_mask = attention_mask.to(dtype=self.dtype)
695
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
696
+
697
+ encoder_attention_mask = None
698
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
699
+
700
+ if inputs_embeds is None:
701
+ inputs_embeds = self.wte(input_ids)
702
+ hidden_states = inputs_embeds
703
+
704
+ kv_seq_len = hidden_states.size()[1]
705
+ if past_key_values[0] is not None:
706
+ # past key values[0][0] shape: bs * seq_len * head_num * dim
707
+ kv_seq_len += past_key_values[0][0].shape[1]
708
+ if (
709
+ self.use_dynamic_ntk
710
+ and kv_seq_len == hidden_states.size()[1]
711
+ and not self.training
712
+ ):
713
+ context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
714
+ ntk_alpha = 2 ** math.ceil(context_value) - 1
715
+ ntk_alpha = max(ntk_alpha, 1)
716
+ else:
717
+ ntk_alpha = self.rotary_emb._ntk_alpha_cached
718
+
719
+ rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
720
+ for idx in range(len(rotary_pos_emb)):
721
+ rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device)
722
+
723
+ hidden_states = self.drop(hidden_states)
724
+ output_shape = input_shape + (hidden_states.size(-1),)
725
+
726
+ if self.gradient_checkpointing and self.training:
727
+ if use_cache:
728
+ logger.warning_once(
729
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
730
+ )
731
+ use_cache = False
732
+
733
+ presents = () if use_cache else None
734
+ all_self_attentions = () if output_attentions else None
735
+ all_hidden_states = () if output_hidden_states else None
736
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
737
+
738
+ if output_hidden_states:
739
+ all_hidden_states = all_hidden_states + (hidden_states,)
740
+
741
+ if self.gradient_checkpointing and self.training:
742
+
743
+ def create_custom_forward(module):
744
+ def custom_forward(*inputs):
745
+ # None for past_key_value
746
+ return module(*inputs, use_cache, output_attentions)
747
+
748
+ return custom_forward
749
+
750
+ outputs = torch.utils.checkpoint.checkpoint(
751
+ create_custom_forward(block),
752
+ hidden_states,
753
+ rotary_pos_emb,
754
+ self.registered_causal_mask,
755
+ None,
756
+ attention_mask,
757
+ head_mask[i],
758
+ encoder_hidden_states,
759
+ encoder_attention_mask,
760
+ )
761
+ else:
762
+ outputs = block(
763
+ hidden_states,
764
+ layer_past=layer_past,
765
+ rotary_pos_emb=rotary_pos_emb,
766
+ registered_causal_mask=self.registered_causal_mask,
767
+ attention_mask=attention_mask,
768
+ head_mask=head_mask[i],
769
+ encoder_hidden_states=encoder_hidden_states,
770
+ encoder_attention_mask=encoder_attention_mask,
771
+ use_cache=use_cache,
772
+ output_attentions=output_attentions,
773
+ )
774
+
775
+ hidden_states = outputs[0]
776
+ if use_cache is True:
777
+ presents = presents + (outputs[1],)
778
+
779
+ if output_attentions:
780
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
781
+
782
+ hidden_states = self.ln_f(hidden_states)
783
+ hidden_states = hidden_states.view(output_shape)
784
+ # Add last hidden state
785
+ if output_hidden_states:
786
+ all_hidden_states = all_hidden_states + (hidden_states,)
787
+
788
+ if not return_dict:
789
+ return tuple(
790
+ v for v in [hidden_states, presents, all_hidden_states] if v is not None
791
+ )
792
+
793
+ return BaseModelOutputWithPast(
794
+ last_hidden_state=hidden_states,
795
+ past_key_values=presents,
796
+ hidden_states=all_hidden_states,
797
+ attentions=all_self_attentions,
798
+ )
799
+
800
+
801
+ class QWenLMHeadModel(QWenPreTrainedModel):
802
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
803
+ _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
804
+
805
+ def __init__(self, config):
806
+ super().__init__(config)
807
+ assert (
808
+ config.bf16 + config.fp16 + config.fp32 <= 1
809
+ ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
810
+
811
+ autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
812
+
813
+ if autoset_precision:
814
+ if SUPPORT_BF16:
815
+ logger.warn(
816
+ "The model is automatically converting to bf16 for faster inference. "
817
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
818
+ )
819
+ config.bf16 = True
820
+ elif SUPPORT_FP16:
821
+ logger.warn(
822
+ "The model is automatically converting to fp16 for faster inference. "
823
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
824
+ )
825
+ config.fp16 = True
826
+ else:
827
+ config.fp32 = True
828
+
829
+ if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
830
+ logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
831
+ if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
832
+ logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
833
+ if config.fp32:
834
+ if SUPPORT_BF16:
835
+ logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
836
+ elif SUPPORT_FP16:
837
+ logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
838
+
839
+ if config.use_flash_attn == "auto":
840
+ if config.bf16 or config.fp16:
841
+ logger.warn("Try importing flash-attention for faster inference...")
842
+ config.use_flash_attn = True
843
+ else:
844
+ config.use_flash_attn = False
845
+ if config.use_flash_attn and config.fp32:
846
+ logger.warn("Flash attention will be disabled because it does NOT support fp32.")
847
+
848
+ if config.use_flash_attn:
849
+ _import_flash_attn()
850
+
851
+ self.transformer = QWenModel(config)
852
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
853
+
854
+ if config.bf16:
855
+ self.transformer.bfloat16()
856
+ self.lm_head.bfloat16()
857
+ if config.fp16:
858
+ self.transformer.half()
859
+ self.lm_head.half()
860
+ self.post_init()
861
+
862
+ def get_output_embeddings(self):
863
+ return self.lm_head
864
+
865
+ def set_output_embeddings(self, new_embeddings):
866
+ self.lm_head = new_embeddings
867
+
868
+ def prepare_inputs_for_generation(
869
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
870
+ ):
871
+ token_type_ids = kwargs.get("token_type_ids", None)
872
+ if past_key_values:
873
+ input_ids = input_ids[:, -1].unsqueeze(-1)
874
+ if token_type_ids is not None:
875
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
876
+
877
+ attention_mask = kwargs.get("attention_mask", None)
878
+ position_ids = kwargs.get("position_ids", None)
879
+
880
+ if attention_mask is not None and position_ids is None:
881
+ position_ids = attention_mask.long().cumsum(-1) - 1
882
+ position_ids.masked_fill_(attention_mask == 0, 1)
883
+ if past_key_values:
884
+ position_ids = position_ids[:, -1].unsqueeze(-1)
885
+ else:
886
+ position_ids = None
887
+
888
+ if inputs_embeds is not None and past_key_values is None:
889
+ model_inputs = {"inputs_embeds": inputs_embeds}
890
+ else:
891
+ model_inputs = {"input_ids": input_ids}
892
+
893
+ model_inputs.update(
894
+ {
895
+ "past_key_values": past_key_values,
896
+ "use_cache": kwargs.get("use_cache"),
897
+ "position_ids": position_ids,
898
+ "attention_mask": attention_mask,
899
+ "token_type_ids": token_type_ids,
900
+ }
901
+ )
902
+ return model_inputs
903
+
904
+ def forward(
905
+ self,
906
+ input_ids: Optional[torch.LongTensor] = None,
907
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
908
+ attention_mask: Optional[torch.FloatTensor] = None,
909
+ token_type_ids: Optional[torch.LongTensor] = None,
910
+ position_ids: Optional[torch.LongTensor] = None,
911
+ head_mask: Optional[torch.FloatTensor] = None,
912
+ inputs_embeds: Optional[torch.FloatTensor] = None,
913
+ encoder_hidden_states: Optional[torch.Tensor] = None,
914
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
915
+ labels: Optional[torch.LongTensor] = None,
916
+ use_cache: Optional[bool] = None,
917
+ output_attentions: Optional[bool] = None,
918
+ output_hidden_states: Optional[bool] = None,
919
+ return_dict: Optional[bool] = None,
920
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
921
+
922
+ return_dict = (
923
+ return_dict if return_dict is not None else self.config.use_return_dict
924
+ )
925
+
926
+ transformer_outputs = self.transformer(
927
+ input_ids,
928
+ past_key_values=past_key_values,
929
+ attention_mask=attention_mask,
930
+ token_type_ids=token_type_ids,
931
+ position_ids=position_ids,
932
+ head_mask=head_mask,
933
+ inputs_embeds=inputs_embeds,
934
+ encoder_hidden_states=encoder_hidden_states,
935
+ encoder_attention_mask=encoder_attention_mask,
936
+ use_cache=use_cache,
937
+ output_attentions=output_attentions,
938
+ output_hidden_states=output_hidden_states,
939
+ return_dict=return_dict,
940
+ )
941
+ hidden_states = transformer_outputs[0]
942
+
943
+ lm_logits = self.lm_head(hidden_states)
944
+
945
+ loss = None
946
+ if labels is not None:
947
+ labels = labels.to(lm_logits.device)
948
+ shift_logits = lm_logits[..., :-1, :].contiguous()
949
+ shift_labels = labels[..., 1:].contiguous()
950
+ loss_fct = CrossEntropyLoss()
951
+ loss = loss_fct(
952
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
953
+ )
954
+
955
+ if not return_dict:
956
+ output = (lm_logits,) + transformer_outputs[1:]
957
+ return ((loss,) + output) if loss is not None else output
958
+
959
+ return CausalLMOutputWithPast(
960
+ loss=loss,
961
+ logits=lm_logits,
962
+ past_key_values=transformer_outputs.past_key_values,
963
+ hidden_states=transformer_outputs.hidden_states,
964
+ attentions=transformer_outputs.attentions,
965
+ )
966
+
967
+ @staticmethod
968
+ def _reorder_cache(
969
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
970
+ ) -> Tuple[Tuple[torch.Tensor]]:
971
+
972
+ return tuple(
973
+ tuple(
974
+ past_state.index_select(0, beam_idx.to(past_state.device))
975
+ for past_state in layer_past
976
+ )
977
+ for layer_past in past_key_values
978
+ )
979
+
980
+ def chat(
981
+ self,
982
+ tokenizer: PreTrainedTokenizer,
983
+ query: str,
984
+ history: Optional[HistoryType],
985
+ system: str = "You are a helpful assistant.",
986
+ append_history: bool = True,
987
+ stream: Optional[bool] = _SENTINEL,
988
+ stop_words_ids: Optional[List[List[int]]] = None,
989
+ generation_config: Optional[GenerationConfig] = None,
990
+ **kwargs,
991
+ ) -> Tuple[str, HistoryType]:
992
+ generation_config = generation_config if generation_config is not None else self.generation_config
993
+
994
+ assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
995
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
996
+ if history is None:
997
+ history = []
998
+ if stop_words_ids is None:
999
+ stop_words_ids = []
1000
+
1001
+ max_window_size = kwargs.get('max_window_size', None)
1002
+ if max_window_size is None:
1003
+ max_window_size = generation_config.max_window_size
1004
+ raw_text, context_tokens = make_context(
1005
+ tokenizer,
1006
+ query,
1007
+ history=history,
1008
+ system=system,
1009
+ max_window_size=max_window_size,
1010
+ chat_format=generation_config.chat_format,
1011
+ )
1012
+
1013
+ stop_words_ids.extend(get_stop_words_ids(
1014
+ generation_config.chat_format, tokenizer
1015
+ ))
1016
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1017
+ outputs = self.generate(
1018
+ input_ids,
1019
+ stop_words_ids=stop_words_ids,
1020
+ return_dict_in_generate=False,
1021
+ generation_config=generation_config,
1022
+ **kwargs,
1023
+ )
1024
+
1025
+ response = decode_tokens(
1026
+ outputs[0],
1027
+ tokenizer,
1028
+ raw_text_len=len(raw_text),
1029
+ context_length=len(context_tokens),
1030
+ chat_format=generation_config.chat_format,
1031
+ verbose=False,
1032
+ errors='replace'
1033
+ )
1034
+
1035
+ if append_history:
1036
+ history.append((query, response))
1037
+
1038
+ return response, history
1039
+
1040
+ def chat_stream(
1041
+ self,
1042
+ tokenizer: PreTrainedTokenizer,
1043
+ query: str,
1044
+ history: Optional[HistoryType],
1045
+ system: str = "You are a helpful assistant.",
1046
+ stop_words_ids: Optional[List[List[int]]] = None,
1047
+ logits_processor: Optional[LogitsProcessorList] = None,
1048
+ generation_config: Optional[GenerationConfig] = None,
1049
+ **kwargs,
1050
+ ) -> Generator[str, Any, None]:
1051
+ generation_config = generation_config if generation_config is not None else self.generation_config
1052
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
1053
+ if history is None:
1054
+ history = []
1055
+ if stop_words_ids is None:
1056
+ stop_words_ids = []
1057
+
1058
+ max_window_size = kwargs.get('max_window_size', None)
1059
+ if max_window_size is None:
1060
+ max_window_size = generation_config.max_window_size
1061
+ raw_text, context_tokens = make_context(
1062
+ tokenizer,
1063
+ query,
1064
+ history=history,
1065
+ system=system,
1066
+ max_window_size=max_window_size,
1067
+ chat_format=generation_config.chat_format,
1068
+ )
1069
+
1070
+ stop_words_ids.extend(get_stop_words_ids(
1071
+ generation_config.chat_format, tokenizer
1072
+ ))
1073
+ if stop_words_ids is not None:
1074
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1075
+ stop_words_ids=stop_words_ids,
1076
+ eos_token_id=generation_config.eos_token_id,
1077
+ )
1078
+ if logits_processor is None:
1079
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1080
+ else:
1081
+ logits_processor.append(stop_words_logits_processor)
1082
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1083
+
1084
+ from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
1085
+ self.__class__.generate_stream = NewGenerationMixin.generate
1086
+ self.__class__.sample_stream = NewGenerationMixin.sample_stream
1087
+ stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
1088
+
1089
+ def stream_generator():
1090
+ outputs = []
1091
+ for token in self.generate_stream(
1092
+ input_ids,
1093
+ return_dict_in_generate=False,
1094
+ generation_config=stream_config,
1095
+ logits_processor=logits_processor,
1096
+ seed=-1,
1097
+ **kwargs):
1098
+ outputs.append(token.item())
1099
+ yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')
1100
+
1101
+ return stream_generator()
1102
+
1103
+ def generate(
1104
+ self,
1105
+ inputs: Optional[torch.Tensor] = None,
1106
+ generation_config: Optional[GenerationConfig] = None,
1107
+ logits_processor: Optional[LogitsProcessorList] = None,
1108
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1109
+ prefix_allowed_tokens_fn: Optional[
1110
+ Callable[[int, torch.Tensor], List[int]]
1111
+ ] = None,
1112
+ synced_gpus: Optional[bool] = None,
1113
+ assistant_model: Optional["PreTrainedModel"] = None,
1114
+ streamer: Optional["BaseStreamer"] = None,
1115
+ **kwargs,
1116
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1117
+ generation_config = generation_config if generation_config is not None else self.generation_config
1118
+
1119
+ # Process stop_words_ids.
1120
+ stop_words_ids = kwargs.pop("stop_words_ids", None)
1121
+ if stop_words_ids is None and generation_config is not None:
1122
+ stop_words_ids = getattr(generation_config, "stop_words_ids", None)
1123
+ if stop_words_ids is None:
1124
+ stop_words_ids = getattr(generation_config, "stop_words_ids", None)
1125
+
1126
+ if stop_words_ids is not None:
1127
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1128
+ stop_words_ids=stop_words_ids,
1129
+ eos_token_id=generation_config.eos_token_id,
1130
+ )
1131
+ if logits_processor is None:
1132
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1133
+ else:
1134
+ logits_processor.append(stop_words_logits_processor)
1135
+
1136
+ return super().generate(
1137
+ inputs,
1138
+ generation_config=generation_config,
1139
+ logits_processor=logits_processor,
1140
+ stopping_criteria=stopping_criteria,
1141
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1142
+ synced_gpus=synced_gpus,
1143
+ assistant_model=assistant_model,
1144
+ streamer=streamer,
1145
+ **kwargs,
1146
+ )
1147
+
1148
+
1149
+ class RotaryEmbedding(torch.nn.Module):
1150
+ def __init__(self, dim, base=10000):
1151
+ super().__init__()
1152
+ self.dim = dim
1153
+ self.base = base
1154
+ self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
1155
+ if importlib.util.find_spec("einops") is None:
1156
+ raise RuntimeError("einops is required for Rotary Embedding")
1157
+
1158
+ self._rotary_pos_emb_cache = None
1159
+ self._seq_len_cached = 0
1160
+ self._ntk_alpha_cached = 1.0
1161
+
1162
+ def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
1163
+ seqlen = max_seq_len + offset
1164
+ if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
1165
+ base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
1166
+ self.inv_freq = 1.0 / (
1167
+ base
1168
+ ** (
1169
+ torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
1170
+ / self.dim
1171
+ )
1172
+ )
1173
+ self._seq_len_cached = max(2 * seqlen, 16)
1174
+ self._ntk_alpha_cached = ntk_alpha
1175
+ seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
1176
+ freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
1177
+
1178
+ emb = torch.cat((freqs, freqs), dim=-1)
1179
+ from einops import rearrange
1180
+
1181
+ emb = rearrange(emb, "n d -> 1 n 1 d")
1182
+
1183
+ cos, sin = emb.cos(), emb.sin()
1184
+ self._rotary_pos_emb_cache = [cos, sin]
1185
+
1186
+ def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
1187
+ self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
1188
+ cos, sin = self._rotary_pos_emb_cache
1189
+ return [cos[:, offset : offset + max_seq_len], sin[:, offset : offset + max_seq_len]]
1190
+
1191
+
1192
+ def _rotate_half(x):
1193
+ from einops import rearrange
1194
+
1195
+ x = rearrange(x, "... (j d) -> ... j d", j=2)
1196
+ x1, x2 = x.unbind(dim=-2)
1197
+ return torch.cat((-x2, x1), dim=-1)
1198
+
1199
+
1200
+ def apply_rotary_pos_emb(t, freqs):
1201
+ cos, sin = freqs
1202
+ if apply_rotary_emb_func is not None and t.is_cuda:
1203
+ t_ = t.float()
1204
+ cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]
1205
+ sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]
1206
+ output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
1207
+ return output
1208
+ else:
1209
+ rot_dim = freqs[0].shape[-1]
1210
+ cos, sin = freqs
1211
+ t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
1212
+ t_ = t_.float()
1213
+ t_pass_ = t_pass_.float()
1214
+ t_ = (t_ * cos) + (_rotate_half(t_) * sin)
1215
+ return torch.cat((t_, t_pass_), dim=-1).type_as(t)
1216
+
1217
+
1218
+ class RMSNorm(torch.nn.Module):
1219
+ def __init__(self, dim: int, eps: float = 1e-6):
1220
+ super().__init__()
1221
+ self.eps = eps
1222
+ self.weight = nn.Parameter(torch.ones(dim))
1223
+
1224
+ def _norm(self, x):
1225
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
1226
+
1227
+ def forward(self, x):
1228
+ if rms_norm is not None and x.is_cuda:
1229
+ return rms_norm(x, self.weight, self.eps)
1230
+ else:
1231
+ output = self._norm(x.float()).type_as(x)
1232
+ return output * self.weight
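
One note on the interface above: besides `chat()`, the file defines `chat_stream()`, a generator that yields progressively longer decoded strings as tokens arrive (it imports the `transformers_stream_generator` package inside the method, so that dependency must be installed). A minimal sketch, reusing the `model` and `tokenizer` from the README example; the query is illustrative:

```python
# Sketch: incremental decoding via chat_stream() defined above; each
# yielded string is the full response decoded so far.
history = []
for partial_response in model.chat_stream(tokenizer, "先帝创业未半", history=history):
    print(partial_response, end="\r")
print()
```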
msc ADDED
Binary file (378 Bytes).
 
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,266 @@
+ {
+   "metadata": {
+     "total_size": 15442649088
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+     "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.0.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.1.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.10.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.11.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.12.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.13.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.14.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.15.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.16.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.17.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.18.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.19.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00002.bin",
+     "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
109
+ "transformer.h.2.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
110
+ "transformer.h.2.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
111
+ "transformer.h.20.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
112
+ "transformer.h.20.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
113
+ "transformer.h.20.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
114
+ "transformer.h.20.ln_1.weight": "pytorch_model-00001-of-00002.bin",
115
+ "transformer.h.20.ln_2.weight": "pytorch_model-00001-of-00002.bin",
116
+ "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
117
+ "transformer.h.20.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
118
+ "transformer.h.20.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
119
+ "transformer.h.21.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
120
+ "transformer.h.21.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
121
+ "transformer.h.21.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
122
+ "transformer.h.21.ln_1.weight": "pytorch_model-00001-of-00002.bin",
123
+ "transformer.h.21.ln_2.weight": "pytorch_model-00001-of-00002.bin",
124
+ "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
125
+ "transformer.h.21.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
126
+ "transformer.h.21.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
127
+ "transformer.h.22.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
128
+ "transformer.h.22.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
129
+ "transformer.h.22.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
130
+ "transformer.h.22.ln_1.weight": "pytorch_model-00002-of-00002.bin",
131
+ "transformer.h.22.ln_2.weight": "pytorch_model-00002-of-00002.bin",
132
+ "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
133
+ "transformer.h.22.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
134
+ "transformer.h.22.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
135
+ "transformer.h.23.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
136
+ "transformer.h.23.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
137
+ "transformer.h.23.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
138
+ "transformer.h.23.ln_1.weight": "pytorch_model-00002-of-00002.bin",
139
+ "transformer.h.23.ln_2.weight": "pytorch_model-00002-of-00002.bin",
140
+ "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
141
+ "transformer.h.23.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
142
+ "transformer.h.23.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
143
+ "transformer.h.24.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
144
+ "transformer.h.24.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
145
+ "transformer.h.24.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
146
+ "transformer.h.24.ln_1.weight": "pytorch_model-00002-of-00002.bin",
147
+ "transformer.h.24.ln_2.weight": "pytorch_model-00002-of-00002.bin",
148
+ "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
149
+ "transformer.h.24.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
150
+ "transformer.h.24.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
151
+ "transformer.h.25.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
152
+ "transformer.h.25.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
153
+ "transformer.h.25.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
154
+ "transformer.h.25.ln_1.weight": "pytorch_model-00002-of-00002.bin",
155
+ "transformer.h.25.ln_2.weight": "pytorch_model-00002-of-00002.bin",
156
+ "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
157
+ "transformer.h.25.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
158
+ "transformer.h.25.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
159
+ "transformer.h.26.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
160
+ "transformer.h.26.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
161
+ "transformer.h.26.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
162
+ "transformer.h.26.ln_1.weight": "pytorch_model-00002-of-00002.bin",
163
+ "transformer.h.26.ln_2.weight": "pytorch_model-00002-of-00002.bin",
164
+ "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
165
+ "transformer.h.26.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
166
+ "transformer.h.26.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
167
+ "transformer.h.27.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
168
+ "transformer.h.27.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
169
+ "transformer.h.27.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
170
+ "transformer.h.27.ln_1.weight": "pytorch_model-00002-of-00002.bin",
171
+ "transformer.h.27.ln_2.weight": "pytorch_model-00002-of-00002.bin",
172
+ "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
173
+ "transformer.h.27.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
174
+ "transformer.h.27.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
175
+ "transformer.h.28.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
176
+ "transformer.h.28.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
177
+ "transformer.h.28.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
178
+ "transformer.h.28.ln_1.weight": "pytorch_model-00002-of-00002.bin",
179
+ "transformer.h.28.ln_2.weight": "pytorch_model-00002-of-00002.bin",
180
+ "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
181
+ "transformer.h.28.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
182
+ "transformer.h.28.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
183
+ "transformer.h.29.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
184
+ "transformer.h.29.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
185
+ "transformer.h.29.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
186
+ "transformer.h.29.ln_1.weight": "pytorch_model-00002-of-00002.bin",
187
+ "transformer.h.29.ln_2.weight": "pytorch_model-00002-of-00002.bin",
188
+ "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
189
+ "transformer.h.29.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
190
+ "transformer.h.29.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
191
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
192
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
193
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
194
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00002.bin",
195
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00002.bin",
196
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
197
+ "transformer.h.3.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
198
+ "transformer.h.3.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
199
+ "transformer.h.30.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
200
+ "transformer.h.30.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
201
+ "transformer.h.30.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
202
+ "transformer.h.30.ln_1.weight": "pytorch_model-00002-of-00002.bin",
203
+ "transformer.h.30.ln_2.weight": "pytorch_model-00002-of-00002.bin",
204
+ "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
205
+ "transformer.h.30.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
206
+ "transformer.h.30.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
207
+ "transformer.h.31.attn.c_attn.bias": "pytorch_model-00002-of-00002.bin",
208
+ "transformer.h.31.attn.c_attn.weight": "pytorch_model-00002-of-00002.bin",
209
+ "transformer.h.31.attn.c_proj.weight": "pytorch_model-00002-of-00002.bin",
210
+ "transformer.h.31.ln_1.weight": "pytorch_model-00002-of-00002.bin",
211
+ "transformer.h.31.ln_2.weight": "pytorch_model-00002-of-00002.bin",
212
+ "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00002-of-00002.bin",
213
+ "transformer.h.31.mlp.w1.weight": "pytorch_model-00002-of-00002.bin",
214
+ "transformer.h.31.mlp.w2.weight": "pytorch_model-00002-of-00002.bin",
215
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
216
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
217
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
218
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00002.bin",
219
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00002.bin",
220
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
221
+ "transformer.h.4.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
222
+ "transformer.h.4.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
223
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
224
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
225
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
226
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00002.bin",
227
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00002.bin",
228
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
229
+ "transformer.h.5.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
230
+ "transformer.h.5.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
231
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
232
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
233
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
234
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00002.bin",
235
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00002.bin",
236
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
237
+ "transformer.h.6.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
238
+ "transformer.h.6.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
239
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
240
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
241
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
242
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00002.bin",
243
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00002.bin",
244
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
245
+ "transformer.h.7.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
246
+ "transformer.h.7.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
247
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
248
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
249
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
250
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00002.bin",
251
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00002.bin",
252
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
253
+ "transformer.h.8.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
254
+ "transformer.h.8.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
255
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00002.bin",
256
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00002.bin",
257
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00002.bin",
258
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00002.bin",
259
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00002.bin",
260
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00002.bin",
261
+ "transformer.h.9.mlp.w1.weight": "pytorch_model-00001-of-00002.bin",
262
+ "transformer.h.9.mlp.w2.weight": "pytorch_model-00001-of-00002.bin",
263
+ "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
264
+ "transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
265
+ }
266
+ }
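The `weight_map` above is what lets `transformers` resolve each parameter name to one of the two checkpoint shards, so the sharded checkpoint loads like any single-file model. A minimal loading sketch follows; the local directory path is an assumption, and `trust_remote_code=True` is needed because `config.json` maps the model and config classes to the Python files in this repo.

```python
# Minimal loading sketch; "./Xunzi-Qwen-Chat" is an assumed local clone path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "./Xunzi-Qwen-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype=torch.float16,  # config.json sets torch_dtype to float16
    trust_remote_code=True,     # resolves the auto_map entries to local classes
).eval()
# from_pretrained reads pytorch_model.bin.index.json and pulls each tensor
# from the shard listed in weight_map.
```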
qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
qwen_generation_utils.py ADDED
@@ -0,0 +1,416 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Generation support."""
+
+ from typing import Tuple, List, Union, Iterable
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from transformers import PreTrainedTokenizer
+ from transformers import logging
+ from transformers.generation import LogitsProcessor
+
+ logger = logging.get_logger(__name__)
+
+ # Types.
+ HistoryType = List[Tuple[str, str]]
+ TokensType = List[int]
+ BatchTokensType = List[List[int]]
+
+
+ def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
+     for tokens in batch:
+         context_length = len(tokens)
+         if context_length < seq_length:
+             tokens.extend([pad_id] * (seq_length - context_length))
+     return batch
+
+
+ def get_ltor_masks_and_position_ids(
+     data,
+     eod_token,
+     reset_position_ids,
+     reset_attention_mask,
+     eod_mask_loss,
+ ):
+     """Build masks and position ids for a left-to-right model."""
+
+     # Extract batch size and sequence length.
+     micro_batch_size, seq_length = data.size()
+
+     # Attention mask (lower triangular).
+     if reset_attention_mask:
+         att_mask_batch = micro_batch_size
+     else:
+         att_mask_batch = 1
+     attention_mask = torch.tril(
+         torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
+     ).view(att_mask_batch, 1, seq_length, seq_length)
+
+     # Loss mask.
+     loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
+     if eod_mask_loss:
+         loss_mask[data == eod_token] = 0.0
+
+     # Position ids.
+     position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
+     position_ids = position_ids.unsqueeze(0).expand_as(data)
+     # We need to clone as the ids will be modified based on batch index.
+     if reset_position_ids:
+         position_ids = position_ids.clone()
+
+     if reset_position_ids or reset_attention_mask:
+         # Loop through the batches:
+         for b in range(micro_batch_size):
+
+             # Find indices where the EOD token is.
+             eod_index = position_ids[b, data[b] == eod_token]
+             # Detach indices from positions if going to modify positions.
+             if reset_position_ids:
+                 eod_index = eod_index.clone()
+
+             # Loop through EOD indices:
+             prev_index = 0
+             for j in range(eod_index.size()[0]):
+                 i = eod_index[j]
+                 # Mask attention loss.
+                 if reset_attention_mask:
+                     attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
+                 # Reset positions.
+                 if reset_position_ids:
+                     position_ids[b, (i + 1) :] -= i + 1 - prev_index
+                     prev_index = i + 1
+
+     # Convert attention mask to binary:
+     attention_mask = attention_mask < 0.5
+
+     return attention_mask, loss_mask, position_ids
+
+
+ def get_batch(context_tokens: torch.LongTensor, eod_id: int):
+     """Generate a batch from context tokens."""
+     # Ensure the token tensor is contiguous (no device move is performed here).
+     tokens = context_tokens.contiguous().to(context_tokens.device)
+     # Get the attention mask and position ids.
+     attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
+         tokens,
+         eod_id,
+         reset_position_ids=False,
+         reset_attention_mask=False,
+         eod_mask_loss=False,
+     )
+     return tokens, attention_mask, position_ids
+
+
+ def get_stop_words_ids(chat_format, tokenizer):
+     if chat_format == "raw":
+         stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
+     elif chat_format == "chatml":
+         stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+     return stop_words_ids
+
+
+ def make_context(
+     tokenizer: PreTrainedTokenizer,
+     query: str,
+     history: List[Tuple[str, str]] = None,
+     system: str = "",
+     max_window_size: int = 6144,
+     chat_format: str = "chatml",
+ ):
+     if history is None:
+         history = []
+
+     if chat_format == "chatml":
+         im_start, im_end = "<|im_start|>", "<|im_end|>"
+         im_start_tokens = [tokenizer.im_start_id]
+         im_end_tokens = [tokenizer.im_end_id]
+         nl_tokens = tokenizer.encode("\n")
+
+         def _tokenize_str(role, content):
+             return f"{role}\n{content}", tokenizer.encode(
+                 role, allowed_special=set()
+             ) + nl_tokens + tokenizer.encode(content, allowed_special=set())
+
+         system_text, system_tokens_part = _tokenize_str("system", system)
+         system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
+
+         raw_text = ""
+         context_tokens = []
+
+         for turn_query, turn_response in reversed(history):
+             query_text, query_tokens_part = _tokenize_str("user", turn_query)
+             query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
+             response_text, response_tokens_part = _tokenize_str(
+                 "assistant", turn_response
+             )
+             response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
+
+             next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
+             prev_chat = (
+                 f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
+             )
+
+             current_context_size = (
+                 len(system_tokens) + len(next_context_tokens) + len(context_tokens)
+             )
+             if current_context_size < max_window_size:
+                 context_tokens = next_context_tokens + context_tokens
+                 raw_text = prev_chat + raw_text
+             else:
+                 break
+
+         context_tokens = system_tokens + context_tokens
+         raw_text = f"{im_start}{system_text}{im_end}" + raw_text
+         context_tokens += (
+             nl_tokens
+             + im_start_tokens
+             + _tokenize_str("user", query)[1]
+             + im_end_tokens
+             + nl_tokens
+             + im_start_tokens
+             + tokenizer.encode("assistant")
+             + nl_tokens
+         )
+         raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
+
+     elif chat_format == "raw":
+         raw_text = query
+         context_tokens = tokenizer.encode(raw_text)
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+
+     return raw_text, context_tokens
+
+
+ def _decode_default(
+     tokens: List[int],
+     *,
+     stop_words: List[str],
+     eod_words: List[str],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = "replace",
+ ):
+     trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
+     if verbose:
+         print("\nRaw Generate: ", trim_decode_tokens)
+
+     end_reason = f"Gen length {len(tokens)}"
+     for stop_word in stop_words:
+         trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
+     for eod_word in eod_words:
+         if eod_word in trim_decode_tokens:
+             end_reason = f"Gen {eod_word!r}"
+         trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
+     trim_decode_tokens = trim_decode_tokens.strip()
+     if verbose:
+         print("\nEnd Reason:", end_reason)
+         print("\nGenerate: ", trim_decode_tokens)
+
+     if return_end_reason:
+         return trim_decode_tokens, end_reason
+     else:
+         return trim_decode_tokens
+
+
+ def _decode_chatml(
+     tokens: List[int],
+     *,
+     stop_words: List[str],
+     eod_token_ids: List[int],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     context_length: int,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = "replace",
+ ):
+     end_reason = f"Gen length {len(tokens)}"
+     eod_token_idx = context_length
+     for eod_token_idx in range(context_length, len(tokens)):
+         if tokens[eod_token_idx] in eod_token_ids:
+             end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
+             break
+
+     trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
+     if verbose:
+         print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
+         print("\nRaw Generate:", trim_decode_tokens)
+         print("\nEnd Reason:", end_reason)
+     for stop_word in stop_words:
+         trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
+     trim_decode_tokens = trim_decode_tokens.strip()
+     if verbose:
+         print("\nGenerate:", trim_decode_tokens)
+
+     if return_end_reason:
+         return trim_decode_tokens, end_reason
+     else:
+         return trim_decode_tokens
+
+
+ def decode_tokens(
+     tokens: Union[torch.LongTensor, TokensType],
+     tokenizer: PreTrainedTokenizer,
+     raw_text_len: int,
+     context_length: int,
+     chat_format: str,
+     verbose: bool = False,
+     return_end_reason: bool = False,
+     errors: str = "replace",
+ ) -> str:
+     if torch.is_tensor(tokens):
+         tokens = tokens.cpu().numpy().tolist()
+
+     if chat_format == "chatml":
+         return _decode_chatml(
+             tokens,
+             stop_words=[],
+             eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
+             tokenizer=tokenizer,
+             raw_text_len=raw_text_len,
+             context_length=context_length,
+             verbose=verbose,
+             return_end_reason=return_end_reason,
+             errors=errors,
+         )
+     elif chat_format == "raw":
+         return _decode_default(
+             tokens,
+             stop_words=["<|endoftext|>"],
+             eod_words=["<|endoftext|>"],
+             tokenizer=tokenizer,
+             raw_text_len=raw_text_len,
+             verbose=verbose,
+             return_end_reason=return_end_reason,
+             errors=errors,
+         )
+     else:
+         raise NotImplementedError(f"Unknown chat format {chat_format!r}")
+
+
+ class StopWordsLogitsProcessor(LogitsProcessor):
+     """
+     :class:`transformers.LogitsProcessor` that stops generation when the specified sequences appear.
+
+     Args:
+         stop_words_ids (:obj:`List[List[int]]`):
+             List of token-id lists for the stop sequences. To get the token ids of the words
+             that should stop generation, use :obj:`tokenizer(stop_word,
+             add_prefix_space=True).input_ids`.
+         eos_token_id (:obj:`int`):
+             The id of the `end-of-sequence` token.
+     """
+
+     def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
+
+         if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
+             raise ValueError(
+                 f"`stop_words_ids` has to be a non-empty list, but is {stop_words_ids}."
+             )
+         if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
+             raise ValueError(
+                 f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
+             )
+         if any(
+             any(
+                 (not isinstance(token_id, (int, np.integer)) or token_id < 0)
+                 for token_id in stop_word_ids
+             )
+             for stop_word_ids in stop_words_ids
+         ):
+             raise ValueError(
+                 f"Each list in `stop_words_ids` has to be a list of non-negative integers, but is {stop_words_ids}."
+             )
+
+         self.stop_words_ids = list(
+             filter(
+                 lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
+             )
+         )
+         self.eos_token_id = eos_token_id
+         for stop_token_seq in self.stop_words_ids:
+             assert (
+                 len(stop_token_seq) > 0
+             ), "Stop words token sequences {} cannot have an empty list".format(
+                 stop_words_ids
+             )
+
+     def __call__(
+         self, input_ids: torch.LongTensor, scores: torch.FloatTensor
+     ) -> torch.FloatTensor:
+         stopped_samples = self._calc_stopped_samples(input_ids)
+         for i, should_stop in enumerate(stopped_samples):
+             if should_stop:
+                 scores[i, self.eos_token_id] = float(2**15)
+         return scores
+
+     def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
+         if len(tokens) == 0:
+             # an empty stop sequence matches anything
+             return True
+         elif len(tokens) > len(prev_tokens):
+             # a stop sequence longer than the context cannot match
+             return False
+         elif prev_tokens[-len(tokens) :].tolist() == tokens:
+             # the tail of the context equals the stop sequence
+             return True
+         else:
+             return False
+
+     def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
+         stopped_samples = []
+         for prev_input_ids_slice in prev_input_ids:
+             match = False
+             for stop_token_seq in self.stop_words_ids:
+                 if self._tokens_match(prev_input_ids_slice, stop_token_seq):
+                     # a stop sequence matched; mark this sample as stopped
+                     match = True
+                     break
+             stopped_samples.append(match)
+
+         return stopped_samples
+
+
+ def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
+     """This function has been mostly taken from huggingface conversational
+     ai code at
+     https://medium.com/huggingface/how-to-build-a-state-of-the-art-
+     conversational-ai-with-transfer-learning-2d818ac26313"""
+
+     if top_k > 0:
+         # Remove all tokens with a probability less than the
+         # last token of the top-k
+         indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+         logits[indices_to_remove] = filter_value
+
+     if top_p > 0.0:
+         # Sort the logits in descending order along the vocab dimension
+         sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
+         cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+         # Remove tokens with cumulative probability above the threshold
+         sorted_indices_to_remove = cumulative_probs > top_p
+         # Shift the indices to the right to keep also the first token
+         # above the threshold
+         sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+         sorted_indices_to_remove[..., 0] = 0
+         for i in range(sorted_indices.size(0)):
+             indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
+             logits[i][indices_to_remove] = filter_value
+
+     return logits
+
+
+ def switch(val1, val2, boolean):
+     boolean = boolean.type_as(val1)
+     return (1 - boolean) * val1 + boolean * val2
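Together these helpers implement one chat turn: `make_context` assembles the ChatML prompt, `StopWordsLogitsProcessor` forces the end-of-sequence token once a stop sequence has been generated, and `decode_tokens` strips the prompt and stop words from the output. A minimal sketch of how they can be wired up follows; `model` and `tokenizer` are assumed to be already loaded from this repo, and the query string is a made-up example.

```python
# Sketch of one chat turn using the helpers above; model and tokenizer are
# assumed to be loaded with trust_remote_code=True, the query is illustrative.
import torch
from transformers.generation import LogitsProcessorList
from qwen_generation_utils import (
    StopWordsLogitsProcessor,
    decode_tokens,
    get_stop_words_ids,
    make_context,
)

query = "为下列古文断句并标点:天行有常不为尧存不为桀亡"  # hypothetical prompt
raw_text, context_tokens = make_context(
    tokenizer, query, history=[], system="You are a helpful assistant.",
    chat_format="chatml",
)
stop_words_ids = get_stop_words_ids("chatml", tokenizer)
logits_processor = LogitsProcessorList(
    [StopWordsLogitsProcessor(stop_words_ids, eos_token_id=tokenizer.eod_id)]
)
input_ids = torch.tensor([context_tokens], device=model.device)
outputs = model.generate(
    input_ids, logits_processor=logits_processor, max_new_tokens=512
)
response = decode_tokens(
    outputs[0],
    tokenizer,
    raw_text_len=len(raw_text),
    context_length=len(context_tokens),
    chat_format="chatml",
)
```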
tokenization_qwen.py ADDED
@@ -0,0 +1,246 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Tokenization classes for QWen."""
+
+ import base64
+ import logging
+ import os
+ import unicodedata
+ from typing import Collection, Dict, List, Set, Tuple, Union
+
+ import tiktoken
+ from transformers import PreTrainedTokenizer, AddedToken
+
+ logger = logging.getLogger(__name__)
+
+
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
+
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
+ ENDOFTEXT = "<|endoftext|>"
+ IMSTART = "<|im_start|>"
+ IMEND = "<|im_end|>"
+ # as the default behavior is changed to allow special tokens in
+ # regular texts, the surface forms of special tokens need to be
+ # as different as possible to minimize the impact
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
+ SPECIAL_TOKENS = (
+     ENDOFTEXT,
+     IMSTART,
+     IMEND,
+ ) + EXTRAS
+
+
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
+     with open(tiktoken_bpe_file, "rb") as f:
+         contents = f.read()
+     return {
+         base64.b64decode(token): int(rank)
+         for token, rank in (line.split() for line in contents.splitlines() if line)
+     }
+
+
+ class QWenTokenizer(PreTrainedTokenizer):
+     """QWen tokenizer."""
+
+     vocab_files_names = VOCAB_FILES_NAMES
+
+     def __init__(
+         self,
+         vocab_file,
+         errors="replace",
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.errors = errors  # how to handle errors in decoding
+
+         self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)  # type: dict[bytes, int]
+         self.special_tokens = {
+             token: index
+             for index, token in enumerate(
+                 SPECIAL_TOKENS, start=len(self.mergeable_ranks)
+             )
+         }
+
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         assert (
+             len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
+         ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
+
+         self.decoder = {
+             v: k for k, v in self.mergeable_ranks.items()
+         }  # type: dict[int, bytes|str]
+         self.decoder.update({v: k for k, v in self.special_tokens.items()})
+
+         self.tokenizer = enc  # type: tiktoken.Encoding
+
+         self.eod_id = self.tokenizer.eot_token
+         self.im_start_id = self.special_tokens[IMSTART]
+         self.im_end_id = self.special_tokens[IMEND]
+
+     def __getstate__(self):
+         # support pickling: the tiktoken encoding is not picklable, so drop it
+         state = self.__dict__.copy()
+         del state['tokenizer']
+         return state
+
+     def __setstate__(self, state):
+         # rebuild the tiktoken encoding that was dropped in __getstate__
+         self.__dict__.update(state)
+         enc = tiktoken.Encoding(
+             "Qwen",
+             pat_str=PAT_STR,
+             mergeable_ranks=self.mergeable_ranks,
+             special_tokens=self.special_tokens,
+         )
+         self.tokenizer = enc
+
+     def __len__(self) -> int:
+         return self.tokenizer.n_vocab
+
+     def get_vocab(self) -> Dict[bytes, int]:
+         return self.mergeable_ranks
+
+     def convert_tokens_to_ids(
+         self, tokens: Union[bytes, str, List[Union[bytes, str]]]
+     ) -> Union[int, List[int]]:
+         ids = []
+         if isinstance(tokens, (str, bytes)):
+             if tokens in self.special_tokens:
+                 return self.special_tokens[tokens]
+             else:
+                 return self.mergeable_ranks.get(tokens)
+         for token in tokens:
+             if token in self.special_tokens:
+                 ids.append(self.special_tokens[token])
+             else:
+                 ids.append(self.mergeable_ranks.get(token))
+         return ids
+
+     def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
+         if not special_tokens and new_tokens:
+             raise ValueError('Adding regular tokens is not supported')
+         for token in new_tokens:
+             surface_form = token.content if isinstance(token, AddedToken) else token
+             if surface_form not in SPECIAL_TOKENS:
+                 raise ValueError('Adding unknown special tokens is not supported')
+         return 0
+
+     def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
+         """
+         Save only the vocabulary of the tokenizer.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         file_path = os.path.join(save_directory, "qwen.tiktoken")
+         with open(file_path, "w", encoding="utf8") as w:
+             for k, v in self.mergeable_ranks.items():
+                 line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
+                 w.write(line)
+         return (file_path,)
+
+     def tokenize(
+         self,
+         text: str,
+         allowed_special: Union[Set, str] = "all",
+         disallowed_special: Union[Collection, str] = (),
+         **kwargs,
+     ) -> List[Union[bytes, str]]:
+         """
+         Converts a string into a sequence of tokens.
+
+         Args:
+             text (`str`):
+                 The sequence to be encoded.
+             allowed_special (`Literal["all"]` or `set`):
+                 The surface forms of the tokens to be encoded as special tokens in regular texts.
+                 Defaults to "all".
+             disallowed_special (`Literal["all"]` or `Collection`):
+                 The surface forms of the tokens that should not be in regular texts and trigger errors.
+                 Defaults to an empty tuple.
+
+             kwargs (additional keyword arguments, *optional*):
+                 Will be passed to the underlying model specific encode method.
+
+         Returns:
+             `List[bytes|str]`: The list of tokens.
+         """
+         tokens = []
+         text = unicodedata.normalize("NFC", text)
+
+         # this implementation takes a detour: text -> token id -> token surface forms
+         for t in self.tokenizer.encode(
+             text, allowed_special=allowed_special, disallowed_special=disallowed_special
+         ):
+             tokens.append(self.decoder[t])
+         return tokens
+
+     def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+         """
+         Converts a sequence of tokens into a single string.
+         """
+         text = ""
+         temp = b""
+         for t in tokens:
+             if isinstance(t, str):
+                 if temp:
+                     text += temp.decode("utf-8", errors=self.errors)
+                     temp = b""
+                 text += t
+             elif isinstance(t, bytes):
+                 temp += t
+             else:
+                 raise TypeError("token should only be of type bytes or str")
+         if temp:
+             text += temp.decode("utf-8", errors=self.errors)
+         return text
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_vocab
+
+     def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
+         """Converts an id to a token, special tokens included"""
+         if index in self.decoder:
+             return self.decoder[index]
+         raise ValueError("unknown ids")
+
+     def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
+         """Converts a token to an id using the vocab, special tokens included"""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         if token in self.mergeable_ranks:
+             return self.mergeable_ranks[token]
+         raise ValueError("unknown token")
+
+     def _tokenize(self, text: str, **kwargs):
+         """
+         Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
+         vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
+
+         Do NOT take care of added tokens.
+         """
+         raise NotImplementedError
+
+     def _decode(
+         self,
+         token_ids: Union[int, List[int]],
+         skip_special_tokens: bool = False,
+         errors: str = None,
+         **kwargs,
+     ) -> str:
+         if isinstance(token_ids, int):
+             token_ids = [token_ids]
+         if skip_special_tokens:
+             token_ids = [i for i in token_ids if i < self.eod_id]
+         return self.tokenizer.decode(token_ids, errors=errors or self.errors)
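Because this tokenizer is a byte-level BPE over a tiktoken vocabulary, encoding and decoding round-trip arbitrary text. A small round-trip sketch, assuming the `qwen.tiktoken` file uploaded above sits in the working directory:

```python
# Round-trip sketch for QWenTokenizer; assumes qwen.tiktoken is in the CWD.
from tokenization_qwen import QWenTokenizer

tokenizer = QWenTokenizer("qwen.tiktoken")
text = "天行有常,不为尧存,不为桀亡。"
ids = tokenizer.encode(text)       # str -> token ids
tokens = tokenizer.tokenize(text)  # surface forms (bytes for most CJK pieces)
assert tokenizer.decode(ids) == text
```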
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_qwen.QWenTokenizer",
+ null
+ ]
+ },
+ "clean_up_tokenization_spaces": true,
+ "model_max_length": 8192,
+ "padding_side": "left",
+ "split_special_tokens": false,
+ "tokenizer_class": "QWenTokenizer"
+ }