# File: candle-main/candle-pyo3/_additional_typing/__init__.py
from typing import Union, Sequence

class Tensor:
    def __add__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __radd__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __sub__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __truediv__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __mul__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __rmul__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __richcmp__(self, rhs: Union['Tensor', 'Scalar'], op) -> 'Tensor':
        pass

    def __getitem__(self, index: Union['Index', 'Tensor', Sequence['Index']]) -> 'Tensor':
        pass

    def __eq__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __ne__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __lt__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __le__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __gt__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass

    def __ge__(self, rhs: Union['Tensor', 'Scalar']) -> 'Tensor':
        pass
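# Editor's note: the class above is a typing-only shim and is never executed;
# stub.py (at the end of this dump) imports this module and splices these
# signatures into the generated __init__.pyi via
# ADDITIONAL_TYPEHINTS = extract_additional_types(_additional_typing).
# A minimal sketch of that collection step, mirroring extract_additional_types:
#
#     import inspect
#     import _additional_typing
#     hints = {member.__name__: member
#              for _, member in inspect.getmembers(_additional_typing)
#              if inspect.isclass(member)}
#     # hints['Tensor'] now carries the operator signatures declared above.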
# File: candle-main/candle-pyo3/e5.py
from candle.utils import load_safetensors, save_gguf, load_gguf
from candle.models.bert import BertModel, Config
import json
from candle import Tensor
from tqdm import tqdm
from dataclasses import fields
import os
import time
from huggingface_hub import hf_hub_download
from transformers import BertTokenizer, AutoModel
import torch

if __name__ == '__main__':
    model_name = 'intfloat/e5-small-v2'
    model_file = hf_hub_download(repo_id=model_name, filename='model.safetensors')
    config_file = hf_hub_download(repo_id=model_name, filename='config.json')
    tensors = load_safetensors(model_file)
    config = Config()
    with open(config_file, 'r') as f:
        raw_config = json.load(f)
        for field in fields(config):
            if field.name in raw_config:
                setattr(config, field.name, raw_config[field.name])
    model = BertModel(config)
    model.load_state_dict(tensors)
    hf_model = AutoModel.from_pretrained(model_name)
    tokenizer = BertTokenizer.from_pretrained(model_name)
    sentences = [
        'The cat sits outside',
        'A man is playing guitar',
        'I love pasta',
        'The new movie is awesome',
        'The cat plays in the garden',
        'A woman watches TV',
        'The new movie is so great',
        'Do you like pizza?',
    ]

    def average_pool(last_hidden_states: torch.Tensor, attention_mask: torch.Tensor):
        last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
        return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
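    # average_pool implements masked mean pooling: hidden states at padded
    # positions are zeroed via masked_fill, then the per-sentence sum is
    # divided by the number of real tokens. For hidden states h_1..h_T with
    # mask m_1..m_T this is
    #     pooled = sum_t(m_t * h_t) / sum_t(m_t)
    # so padding tokens contribute nothing to the sentence embedding.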
    tokenized = tokenizer(sentences, padding=True)
    tokens = Tensor(tokenized['input_ids'])
    token_type_ids = Tensor(tokenized['token_type_ids'])
    attention_mask = Tensor(tokenized['attention_mask'])
    (encoder_out, _) = model.forward(tokens, token_type_ids, attention_mask=attention_mask)
    hf_tokenized = tokenizer(sentences, padding=True, return_tensors='pt')
    hf_result = hf_model(**hf_tokenized)['last_hidden_state']
    hf_pooled = average_pool(hf_result, hf_tokenized['attention_mask'])
    candle_pooled = average_pool(torch.tensor(encoder_out.values()), hf_tokenized['attention_mask'])
    loss = torch.nn.L1Loss()
    error = loss(hf_pooled, candle_pooled).mean().item()
    print(f'Mean error between torch-reference and candle: {error}')

    quantized_tensors = {}
    for (name, tensor) in tqdm(tensors.items(), desc='Quantizing tensors'):
        if name.endswith('weight') and ('attention' in name or 'intermediate' in name or 'output' in name):
            if tensor.shape[-1] % 256 == 0:
                new_tensor = tensor.quantize('q4k')
            else:
                new_tensor = tensor.quantize('q5_0')
            quantized_tensors[name] = new_tensor
        else:
            quantized_tensors[name] = tensor.quantize('q8_0')
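    # The branch on shape[-1] % 256 reflects the quantization block sizes:
    # k-quant formats such as q4k pack weights in 256-element super-blocks,
    # so a row length that is not a multiple of 256 falls back to q5_0
    # (32-element blocks). Everything else (embeddings, norms) is kept at
    # the higher-precision q8_0.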
    print('Saving quantized tensors')
    config_to_save = {k: v for (k, v) in config.__dict__.items() if v is not None}
    quantized_model_file = 'e5_small.gguf'
    save_gguf(quantized_model_file, quantized_tensors, config_to_save)
    file_size_mb = os.path.getsize(model_file) / 1024 / 1024
    file_size_mb_compressed = os.path.getsize(quantized_model_file) / 1024 / 1024
    print(f'Compressed model from {file_size_mb:.2f} MB to {file_size_mb_compressed:.2f} MB')

    (tensors, raw_config) = load_gguf(quantized_model_file)
    config = Config()
    for field in fields(config):
        if field.name in raw_config:
            setattr(config, field.name, raw_config[field.name])
    model = BertModel(config)
    model.load_state_dict(tensors, strict=False)
    (encoder_out_2, pooled_output_2) = model.forward(tokens, token_type_ids)
    (encoder_out_2, pooled_output_2) = (encoder_out_2.to_device('cpu'), pooled_output_2.to_device('cpu'))
    candle_pooled_2 = average_pool(torch.tensor(encoder_out_2.values()), hf_tokenized['attention_mask'])
    error = loss(hf_pooled, candle_pooled_2).mean().item()
    print(f'Mean error between torch-reference and quantized-candle: {error}')
# File: candle-main/candle-pyo3/py_src/candle/__init__.py
import logging

try:
    from .candle import *
except ImportError as e:
    logging.warning('DLLs were not bundled with this package. Trying to locate them...')
    import os
    import platform

    def locate_cuda_dlls():
        logging.warning('Locating CUDA DLLs...')
        cuda_path = os.environ.get('CUDA_PATH', None)
        if cuda_path:
            logging.warning(f'Found CUDA_PATH environment variable: {cuda_path}')
            if platform.system() == 'Windows':
                cuda_path = os.path.join(cuda_path, 'bin')
            else:
                cuda_path = os.path.join(cuda_path, 'lib64')
            logging.warning(f'Adding {cuda_path} to DLL search path...')
            os.add_dll_directory(cuda_path)
        else:
            logging.warning('CUDA_PATH environment variable not found!')

    def locate_mkl_dlls():
        oneapi_root = os.environ.get('ONEAPI_ROOT', None)
        if oneapi_root:
            if platform.system() == 'Windows':
                mkl_path = os.path.join(oneapi_root, 'compiler', 'latest', 'windows', 'redist', 'intel64_win', 'compiler')
            else:
                mkl_path = os.path.join(oneapi_root, 'mkl', 'latest', 'lib', 'intel64')
            logging.warning(f'Adding {mkl_path} to DLL search path...')
            os.add_dll_directory(mkl_path)
        else:
            logging.warning('ONEAPI_ROOT environment variable not found!')

    locate_cuda_dlls()
    locate_mkl_dlls()
    try:
        from .candle import *
    except ImportError as inner_e:
        raise ImportError('Could not locate DLLs. Please check the documentation for more information.') from inner_e

__doc__ = candle.__doc__
if hasattr(candle, '__all__'):
    __all__ = candle.__all__
# File: candle-main/candle-pyo3/py_src/candle/models/bert.py
from dataclasses import dataclass
from typing import Tuple, Optional
from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList
from candle import Tensor
import candle
import candle.functional as F

@dataclass
class Config:
    vocab_size: int = 30522
    hidden_size: int = 768
    num_hidden_layers: int = 12
    num_attention_heads: int = 12
    intermediate_size: int = 3072
    hidden_act: str = 'gelu'
    hidden_dropout_prob: float = 0.1
    max_position_embeddings: int = 512
    type_vocab_size: int = 2
    initializer_range: float = 0.02
    layer_norm_eps: float = 1e-12
    pad_token_id: int = 0
    position_embedding_type: str = 'absolute'
    use_cache: bool = True
    classifier_dropout: Optional[float] = None
    model_type: Optional[str] = 'bert'

class BertSelfAttention(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
        all_head_size = int(config.num_attention_heads * self.attention_head_size)
        hidden_size = config.hidden_size
        self.query = Linear(hidden_size, all_head_size)
        self.key = Linear(hidden_size, all_head_size)
        self.value = Linear(hidden_size, all_head_size)

    def transpose_for_scores(self, x: Tensor) -> Tensor:
        new_x_shape = x.shape[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.reshape(new_x_shape).transpose(1, 2)
        return x.contiguous()

    def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        query = self.query.forward(hidden_states)
        key = self.key.forward(hidden_states)
        value = self.value.forward(hidden_states)
        query = self.transpose_for_scores(query)
        key = self.transpose_for_scores(key)
        value = self.transpose_for_scores(value)
        attention_scores = query.matmul(key.t())
        attention_scores = attention_scores / float(self.attention_head_size) ** 0.5
        if attention_mask is not None:
            (b_size, _, _, last_dim) = attention_scores.shape
            attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim)))
        attention_probs = F.softmax(attention_scores, dim=-1)
        context_layer = attention_probs.matmul(value)
        context_layer = context_layer.transpose(1, 2).contiguous()
        context_layer = context_layer.flatten_from(-2)
        return context_layer
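# Shape flow through BertSelfAttention, for reference (B = batch, T = sequence
# length, H = num_attention_heads, D = attention_head_size, H*D = hidden_size):
#     hidden_states            (B, T, H*D)
#     transpose_for_scores ->  (B, H, T, D)
#     q.matmul(k.t())      ->  (B, H, T, T)   raw attention scores
#     probs.matmul(v)      ->  (B, H, T, D)
#     transpose + flatten  ->  (B, T, H*D)    back to the input layout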
class BertSelfOutput(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
        hidden_states = self.dense.forward(hidden_states)
        return self.LayerNorm.forward(hidden_states + input_tensor)

class BertAttention(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask)
        attention_output = self.output.forward(self_outputs, hidden_states)
        return attention_output

class BertIntermediate(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        self.act = F.gelu if config.hidden_act == 'gelu' else F.relu

    def forward(self, hidden_states: Tensor) -> Tensor:
        hidden_states = self.dense.forward(hidden_states)
        return self.act(hidden_states)

class BertOutput(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor:
        hidden_states = self.dense.forward(hidden_states)
        return self.LayerNorm.forward(hidden_states + input_tensor)

class BertLayer(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask)
        intermediate_output = self.intermediate.forward(attention_output)
        layer_output = self.output.forward(intermediate_output, attention_output)
        return layer_output

class BertEncoder(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.layer = ModuleList()
        for _ in range(config.num_hidden_layers):
            self.layer.append(BertLayer(config))

    def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        for l in self.layer:
            hidden_states = l.forward(hidden_states, attention_mask=attention_mask)
        return hidden_states

class BertEmbeddings(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape((1, config.max_position_embeddings))

    def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor:
        (_batch_size, seq_len) = input_ids.shape
        input_embeddings = self.word_embeddings.forward(input_ids)
        token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)
        embeddings: Tensor = input_embeddings + token_type_embeddings
        position_ids = list(range(seq_len))
        position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device)
        embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids))
        embeddings = self.LayerNorm(embeddings)
        return embeddings

class BertPooler(Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh

    def forward(self, hidden_states: Tensor) -> Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense.forward(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output

def masked_fill(on_false: float, mask: Tensor, on_true: float):
    shape = mask.shape
    on_true = candle.tensor(on_true).broadcast_as(shape)
    on_false = candle.tensor(on_false).broadcast_as(shape)
    return mask.where_cond(on_true, on_false)
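# where_cond selects elementwise: result = on_true where mask is non-zero,
# on_false elsewhere. BertModel.forward below uses this to turn a 0/1
# attention mask into an additive mask: attended positions become 1.0 and
# padded positions become -inf, so once the mask is added to the raw scores,
# softmax gives padded keys zero weight (the uniform 1.0 shift on the
# remaining positions cancels out in softmax).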
class BertModel(Module):
    def __init__(self, config: Config, add_pooling_layer=True) -> None:
        super().__init__()
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config) if add_pooling_layer else None

    def forward(self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None) -> Tuple[Tensor, Optional[Tensor]]:
        if attention_mask is not None:
            attention_mask = masked_fill(float('-inf'), attention_mask, 1.0)
        embeddings = self.embeddings.forward(input_ids, token_type_ids)
        encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask)
        pooled_output = self.pooler(encoder_out) if self.pooler is not None else None
        return (encoder_out, pooled_output)
# File: candle-main/candle-pyo3/py_src/candle/models/llama.py
import candle
from typing import Dict, Tuple, Any
from candle import Tensor, QTensor, utils, nn
from candle.nn import Module, ModuleList

def masked_fill(on_false: Tensor, mask: Tensor, on_true: float):
    shape = mask.shape
    on_true = candle.tensor(on_true).broadcast_as(shape)
    return mask.where_cond(on_true, on_false)

def precompute_freqs_cis(hparams: Dict[str, Any], freq_base: float, max_seq_len: int):
    head_dim = hparams['n_embd'] // hparams['n_head']
    theta = [1.0 / freq_base ** (i / head_dim) for i in range(0, head_dim, 2)]
    theta = candle.tensor(theta)
    idx_theta = [float(i) for i in range(max_seq_len)]
    idx_theta = candle.tensor(idx_theta).reshape((max_seq_len, 1))
    m = idx_theta.matmul(theta.unsqueeze(0))
    return (m.cos(), m.sin())
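# precompute_freqs_cis builds the rotary-embedding (RoPE) angle table once:
# frequencies theta_i = freq_base^(-2i/head_dim) for each even dimension
# index i, combined with every position p via the outer product
#     m[p, i] = p * theta_i        (shape (max_seq_len, head_dim // 2))
# Caching m.cos() and m.sin() lets apply_rotary_emb below index the table
# by position instead of recomputing trigonometry on every forward pass.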
class RmsNorm(Module):
    def __init__(self, qtensor: QTensor):
        super().__init__()
        self.weight = qtensor.dequantize()

    def forward(self, x: Tensor) -> Tensor:
        (b_size, seq_len, hidden_size) = x.shape
        norm_x = x.sqr().sum_keepdim(2) / hidden_size
        x_normed = x.broadcast_div((norm_x + 1e-05).sqrt())
        return x_normed.broadcast_mul(self.weight)
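# RmsNorm normalizes by the root mean square of the last dimension only:
#     y = x / sqrt(mean(x^2) + eps) * weight
# Unlike LayerNorm it subtracts no mean and adds no bias, which is the
# normalization Llama uses. The norm weight is stored quantized in the GGUF
# file, so it is dequantized once at construction time.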
class QuantizedLayer(Module):
    def __init__(self, layer_idx: int, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor], cos_sin: Tuple[Tensor, Tensor]):
        super().__init__()
        p = f'layers.{layer_idx}'
        self.attention_wq = all_tensors[f'{p}.attention.wq.weight']
        self.attention_wk = all_tensors[f'{p}.attention.wk.weight']
        self.attention_wv = all_tensors[f'{p}.attention.wv.weight']
        self.attention_wo = all_tensors[f'{p}.attention.wo.weight']
        self.ffw1 = all_tensors[f'{p}.feed_forward.w1.weight']
        self.ffw2 = all_tensors[f'{p}.feed_forward.w2.weight']
        self.ffw3 = all_tensors[f'{p}.feed_forward.w3.weight']
        self.attn_norm = RmsNorm(all_tensors[f'{p}.attention_norm.weight'])
        self.ffn_norm = RmsNorm(all_tensors[f'{p}.ffn_norm.weight'])
        self.n_head = hparams['n_head']
        self.n_kv_head = self.n_head
        self.head_dim = hparams['n_embd'] // self.n_head
        self.kv_cache = None
        self.cos = cos_sin[0]
        self.sin = cos_sin[1]
        self._non_persistent_buffers_set.add('cos')
        self._non_persistent_buffers_set.add('sin')

    def forward(self, x: Tensor, mask: Tensor, index_pos: int) -> Tensor:
        residual = x
        x = self.attn_norm(x)
        attn = self.forward_attn(x, mask, index_pos)
        x = attn + residual
        residual = x
        x = self.ffn_norm(x)
        w1 = self.ffw1.matmul_t(x)
        w3 = self.ffw3.matmul_t(x)
        mlp = self.ffw2.matmul_t(nn.silu(w1) * w3)
        return mlp + residual

    def forward_attn(self, x: Tensor, mask: Tensor, index_pos: int):
        (b_size, seq_len, n_embd) = x.shape
        q = self.attention_wq.matmul_t(x)
        k = self.attention_wk.matmul_t(x)
        v = self.attention_wv.matmul_t(x)
        q = q.reshape((b_size, seq_len, self.n_head, self.head_dim)).transpose(1, 2)
        k = k.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2)
        v = v.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2)
        q = self.apply_rotary_emb(q, index_pos)
        k = self.apply_rotary_emb(k, index_pos)
        if self.kv_cache is not None and index_pos > 0:
            (prev_k, prev_v) = self.kv_cache
            k = candle.cat([prev_k, k], 2).contiguous()
            v = candle.cat([prev_v, v], 2).contiguous()
        self.kv_cache = (k, v)
        att = q.matmul(k.t()) / self.head_dim ** 0.5
        mask = mask.broadcast_as(att.shape)
        att = masked_fill(att, mask, float('-inf'))
        att = nn.softmax(att, -1)
        y = att.matmul(v.contiguous())
        y = y.transpose(1, 2).reshape((b_size, seq_len, n_embd))
        return self.attention_wo.matmul_t(y)

    def apply_rotary_emb(self, x: Tensor, index_pos: int):
        (b_size, n_head, seq_len, n_embd) = x.shape
        cos = self.cos.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1))
        sin = self.sin.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1))
        x = x.reshape((b_size, n_head, seq_len, n_embd // 2, 2))
        x0 = x.narrow(-1, 0, 1)
        x1 = x.narrow(-1, 1, 1)
        y0 = x0.broadcast_mul(cos) - x1.broadcast_mul(sin)
        y1 = x0.broadcast_mul(sin) + x1.broadcast_mul(cos)
        rope = candle.cat([y0, y1], -1)
        return rope.flatten_from(-2)
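# apply_rotary_emb treats consecutive feature pairs (x0, x1) as 2-D points
# and rotates each by the position-dependent angle p * theta_i from the
# precomputed table:
#     y0 = x0 * cos - x1 * sin
#     y1 = x0 * sin + x1 * cos
# narrow(0, index_pos, seq_len) slides the table to the current decoding
# offset, which is what keeps the kv_cache in forward_attn consistent across
# incremental single-token steps.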
class QuantizedLlama(Module):
    def __init__(self, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor]):
        super().__init__()
        self.tok_embeddings = all_tensors['tok_embeddings.weight'].dequantize()
        self.norm = RmsNorm(all_tensors['norm.weight'])
        self.output = all_tensors['output.weight']
        self.layers = ModuleList()
        rope_freq = hparams.get('rope_freq', 10000.0)
        cos_sin = precompute_freqs_cis(hparams, rope_freq, hparams['context_length'])
        for layer_idx in range(hparams['n_layer']):
            layer = QuantizedLayer(layer_idx, hparams, all_tensors, cos_sin)
            self.layers.append(layer)

    def forward(self, token: Tensor, index_pos: int) -> Tensor:
        (b_size, seq_len) = token.shape
        (vocab_size, hidden_size) = self.tok_embeddings.shape
        token = token.reshape((b_size * seq_len,))
        x = self.tok_embeddings.index_select(token, 0)
        x = x.reshape((b_size, seq_len, hidden_size))
        # Causal mask: entry (i, j) is 1 when key position j lies in the
        # future of query position i, so those scores are set to -inf.
        mask = [int(j > i) for i in range(seq_len) for j in range(seq_len)]
        mask = candle.tensor(mask).reshape((seq_len, seq_len))
        for layer in self.layers:
            x = layer(x, mask, index_pos)
        x = self.norm(x)
        x = x.narrow(1, -1, 1).squeeze(1)
        x = self.output.matmul_t(x)
        return x
# File: candle-main/candle-pyo3/py_src/candle/nn/container.py
from .module import Module
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
from collections import OrderedDict, abc as container_abcs
import operator
from itertools import chain, islice

__all__ = ['Sequential', 'ModuleList', 'ModuleDict']
T = TypeVar('T', bound=Module)

def _addindent(s_: str, numSpaces: int):
    s = s_.split('\n')
    if len(s) == 1:
        return s_
    first = s.pop(0)
    s = [numSpaces * ' ' + line for line in s]
    s = '\n'.join(s)
    s = first + '\n' + s
    return s

class Sequential(Module):
    _modules: Dict[str, Module]

    @overload
    def __init__(self, *args: Module) -> None:
        ...

    @overload
    def __init__(self, arg: 'OrderedDict[str, Module]') -> None:
        ...

    def __init__(self, *args):
        super().__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for (key, module) in args[0].items():
                self.add_module(key, module)
        else:
            for (idx, module) in enumerate(args):
                self.add_module(str(idx), module)

    def _get_item_by_idx(self, iterator, idx) -> T:
        size = len(self)
        idx = operator.index(idx)
        if not -size <= idx < size:
            raise IndexError('index {} is out of range'.format(idx))
        idx %= size
        return next(islice(iterator, idx, None))

    def __getitem__(self, idx: Union[slice, int]) -> Union['Sequential', T]:
        if isinstance(idx, slice):
            return self.__class__(OrderedDict(list(self._modules.items())[idx]))
        else:
            return self._get_item_by_idx(self._modules.values(), idx)

    def __setitem__(self, idx: int, module: Module) -> None:
        key: str = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, key, module)

    def __delitem__(self, idx: Union[slice, int]) -> None:
        if isinstance(idx, slice):
            for key in list(self._modules.keys())[idx]:
                delattr(self, key)
        else:
            key = self._get_item_by_idx(self._modules.keys(), idx)
            delattr(self, key)
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))

    def __len__(self) -> int:
        return len(self._modules)

    def __add__(self, other) -> 'Sequential':
        if isinstance(other, Sequential):
            ret = Sequential()
            for layer in self:
                ret.append(layer)
            for layer in other:
                ret.append(layer)
            return ret
        else:
            raise ValueError('add operator supports only objects of Sequential class, but {} is given.'.format(str(type(other))))

    def pop(self, key: Union[int, slice]) -> Module:
        v = self[key]
        del self[key]
        return v

    def __iadd__(self, other) -> 'Sequential':
        if isinstance(other, Sequential):
            offset = len(self)
            for (i, module) in enumerate(other):
                self.add_module(str(i + offset), module)
            return self
        else:
            raise ValueError('add operator supports only objects of Sequential class, but {} is given.'.format(str(type(other))))

    def __mul__(self, other: int) -> 'Sequential':
        if not isinstance(other, int):
            raise TypeError(f'unsupported operand type(s) for *: {type(self)} and {type(other)}')
        elif other <= 0:
            raise ValueError(f'Non-positive multiplication factor {other} for {type(self)}')
        else:
            combined = Sequential()
            offset = 0
            for _ in range(other):
                for module in self:
                    combined.add_module(str(offset), module)
                    offset += 1
            return combined

    def __rmul__(self, other: int) -> 'Sequential':
        return self.__mul__(other)

    def __imul__(self, other: int) -> 'Sequential':
        if not isinstance(other, int):
            raise TypeError(f'unsupported operand type(s) for *: {type(self)} and {type(other)}')
        elif other <= 0:
            raise ValueError(f'Non-positive multiplication factor {other} for {type(self)}')
        else:
            len_original = len(self)
            offset = len(self)
            for _ in range(other - 1):
                for i in range(len_original):
                    self.add_module(str(i + offset), self._modules[str(i)])
                offset += len_original
            return self

    def __dir__(self):
        keys = super().__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    def __iter__(self) -> Iterator[Module]:
        return iter(self._modules.values())

    def forward(self, input):
        for module in self:
            input = module(input)
        return input

    def append(self, module: Module) -> 'Sequential':
        self.add_module(str(len(self)), module)
        return self

    def insert(self, index: int, module: Module) -> 'Sequential':
        if not isinstance(module, Module):
            raise AssertionError('module should be of type: {}'.format(Module))
        n = len(self._modules)
        if not -n <= index <= n:
            raise IndexError('Index out of range: {}'.format(index))
        if index < 0:
            index += n
        for i in range(n, index, -1):
            self._modules[str(i)] = self._modules[str(i - 1)]
        self._modules[str(index)] = module
        return self

    def extend(self, sequential) -> 'Sequential':
        for layer in sequential:
            self.append(layer)
        return self
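# Sequential mirrors torch.nn.Sequential on top of candle's Module: children
# are stored under stringified integer keys in _modules and applied in
# insertion order. A minimal usage sketch, assuming candle.nn.Linear as
# defined later in this dump:
#
#     from candle.nn import Sequential, Linear
#     net = Sequential(Linear(16, 32), Linear(32, 4))
#     y = net(x)  # equivalent to Linear(32, 4)(Linear(16, 32)(x))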
class ModuleList(Module):
    _modules: Dict[str, Module]

    def __init__(self, modules: Optional[Iterable[Module]]=None) -> None:
        super().__init__()
        if modules is not None:
            self += modules

    def _get_abs_string_index(self, idx):
        idx = operator.index(idx)
        if not -len(self) <= idx < len(self):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        return str(idx)

    def __getitem__(self, idx: Union[int, slice]) -> Union[Module, 'ModuleList']:
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]

    def __setitem__(self, idx: int, module: Module) -> None:
        idx = self._get_abs_string_index(idx)
        return setattr(self, str(idx), module)

    def __delitem__(self, idx: Union[int, slice]) -> None:
        if isinstance(idx, slice):
            for k in range(len(self._modules))[idx]:
                delattr(self, str(k))
        else:
            delattr(self, self._get_abs_string_index(idx))
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))

    def __len__(self) -> int:
        return len(self._modules)

    def __iter__(self) -> Iterator[Module]:
        return iter(self._modules.values())

    def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':
        return self.extend(modules)

    def __add__(self, other: Iterable[Module]) -> 'ModuleList':
        combined = ModuleList()
        for (i, module) in enumerate(chain(self, other)):
            combined.add_module(str(i), module)
        return combined

    def __repr__(self):
        list_of_reprs = [repr(item) for item in self]
        if len(list_of_reprs) == 0:
            return self._get_name() + '()'
        start_end_indices = [[0, 0]]
        repeated_blocks = [list_of_reprs[0]]
        for (i, r) in enumerate(list_of_reprs[1:], 1):
            if r == repeated_blocks[-1]:
                start_end_indices[-1][1] += 1
                continue
            start_end_indices.append([i, i])
            repeated_blocks.append(r)
        lines = []
        main_str = self._get_name() + '('
        for ((start_id, end_id), b) in zip(start_end_indices, repeated_blocks):
            local_repr = f'({start_id}): {b}'
            if start_id != end_id:
                n = end_id - start_id + 1
                local_repr = f'({start_id}-{end_id}): {n} x {b}'
            local_repr = _addindent(local_repr, 2)
            lines.append(local_repr)
        main_str += '\n  ' + '\n  '.join(lines) + '\n'
        main_str += ')'
        return main_str

    def __dir__(self):
        keys = super().__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    def insert(self, index: int, module: Module) -> None:
        for i in range(len(self._modules), index, -1):
            self._modules[str(i)] = self._modules[str(i - 1)]
        self._modules[str(index)] = module

    def append(self, module: Module) -> 'ModuleList':
        self.add_module(str(len(self)), module)
        return self

    def pop(self, key: Union[int, slice]) -> Module:
        v = self[key]
        del self[key]
        return v

    def extend(self, modules: Iterable[Module]) -> 'ModuleList':
        if not isinstance(modules, container_abcs.Iterable):
            raise TypeError('ModuleList.extend should be called with an iterable, but got ' + type(modules).__name__)
        offset = len(self)
        for (i, module) in enumerate(modules):
            self.add_module(str(offset + i), module)
        return self

class ModuleDict(Module):
    _modules: Dict[str, Module]

    def __init__(self, modules: Optional[Mapping[str, Module]]=None) -> None:
        super().__init__()
        if modules is not None:
            self.update(modules)

    def __getitem__(self, key: str) -> Module:
        return self._modules[key]

    def __setitem__(self, key: str, module: Module) -> None:
        self.add_module(key, module)

    def __delitem__(self, key: str) -> None:
        del self._modules[key]

    def __len__(self) -> int:
        return len(self._modules)

    def __iter__(self) -> Iterator[str]:
        return iter(self._modules)

    def __contains__(self, key: str) -> bool:
        return key in self._modules

    def clear(self) -> None:
        self._modules.clear()

    def pop(self, key: str) -> Module:
        v = self[key]
        del self[key]
        return v

    def keys(self) -> Iterable[str]:
        return self._modules.keys()

    def items(self) -> Iterable[Tuple[str, Module]]:
        return self._modules.items()

    def values(self) -> Iterable[Module]:
        return self._modules.values()

    def update(self, modules: Mapping[str, Module]) -> None:
        if not isinstance(modules, container_abcs.Iterable):
            raise TypeError('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__)
        if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
            for (key, module) in modules.items():
                self[key] = module
        else:
            for (j, m) in enumerate(modules):
                if not isinstance(m, container_abcs.Iterable):
                    raise TypeError('ModuleDict update sequence element #' + str(j) + ' should be Iterable; is ' + type(m).__name__)
                if not len(m) == 2:
                    raise ValueError('ModuleDict update sequence element #' + str(j) + ' has length ' + str(len(m)) + '; 2 is required')
                self[m[0]] = m[1]
# File: candle-main/candle-pyo3/py_src/candle/nn/linear.py
import math
from typing import Any
import candle
from candle import Tensor
from .module import Module

class Identity(Module):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__()

    def forward(self, input: Tensor) -> Tensor:
        return input

class Linear(Module):
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self._quantizable_buffers.add('weight')
        self.in_features = in_features
        self.out_features = out_features
        self.weight = candle.ones((out_features, in_features), **factory_kwargs)
        if bias:
            self.bias = candle.zeros((out_features,), **factory_kwargs)
        else:
            self.bias = None

    def forward(self, x: Tensor) -> Tensor:
        dims = x.shape
        last_dim = dims[-1]
        if isinstance(self.weight, candle.QTensor):
            if len(dims) < 3:
                matmul_result = self.weight.matmul_t(x)
            elif len(dims) == 3:
                (b, n, m) = dims
                output_shape = (b, n, self.out_features)
                re = x.reshape((b * n, m))
                matmul_result = self.weight.matmul_t(re).reshape(output_shape)
            else:
                raise NotImplementedError("'QTensor.matmul_t' is not implemented for more than 3 dimensions")
            # Add the bias exactly once, and only when it exists.
            if self.bias is not None:
                matmul_result = matmul_result.broadcast_add(self.bias)
            return matmul_result
        else:
            if self.weight.shape[-1] == last_dim and len(dims) < 3:
                w = self.weight.t()
            else:
                batch_size = dims[0]
                w = self.weight.broadcast_left((batch_size,)).t()
            x = x.matmul(w)
            if self.bias is not None:
                x = x.broadcast_add(self.bias)
            return x

    def extra_repr(self) -> str:
        return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
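# Both branches compute y = x @ W^T (+ bias): the weight is stored with shape
# (out_features, in_features), so the quantized path uses QTensor.matmul_t
# (multiply by the transposed weight while keeping it in its packed quantized
# layout), while the float path transposes explicitly with t(). Registering
# 'weight' in _quantizable_buffers lets Module.__setattr__ below keep a
# QTensor assigned to this field quantized instead of eagerly dequantizing it.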
# File: candle-main/candle-pyo3/py_src/candle/nn/module.py
from candle import Tensor, QTensor, DType
from typing import Dict, Tuple, Any, Optional, Union, Iterator, Set, overload, Mapping, TypeVar, List
from collections import OrderedDict, namedtuple

TensorLike = Union[Tensor, QTensor]
T = TypeVar('T', bound='Module')

class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
    def __repr__(self):
        if not self.missing_keys and (not self.unexpected_keys):
            return '<All keys matched successfully>'
        return super().__repr__()

    __str__ = __repr__

class Module:
    _modules: Dict[str, Optional['Module']]
    _buffers: Dict[str, Optional[TensorLike]]
    _non_persistent_buffers_set: Set[str]
    _quantizable_buffers: Set[str]
    _version: int = 1

    def __init__(self, *args, **kwargs) -> None:
        super().__setattr__('_modules', OrderedDict())
        super().__setattr__('_buffers', OrderedDict())
        super().__setattr__('_non_persistent_buffers_set', set())
        super().__setattr__('_quantizable_buffers', set())

    def __call__(self, *input):
        return self.forward(*input)

    def forward(self, *input):
        pass

    def children(self) -> Iterator['Module']:
        for (name, module) in self.named_children():
            yield module

    def named_children(self) -> Iterator[Tuple[str, 'Module']]:
        memo = set()
        for (name, module) in self._modules.items():
            if module is not None and module not in memo:
                memo.add(module)
                yield (name, module)

    def add_module(self, name: str, module: Optional['Module']) -> None:
        if not isinstance(module, Module) and module is not None:
            raise TypeError(f'{str(module)} is not a Module subclass')
        elif not isinstance(name, str):
            raise TypeError(f'module name should be a string. Got {name}')
        elif hasattr(self, name) and name not in self._modules:
            raise KeyError(f"attribute '{name}' already exists")
        elif '.' in name:
            raise KeyError(f"""module name can't contain ".", got: {name}""")
        elif name == '':
            raise KeyError('module name can\'t be empty string ""')
        self._modules[name] = module

    def register_module(self, name: str, module: Optional['Module']) -> None:
        self.add_module(name, module)

    def modules(self) -> Iterator['Module']:
        for (_, module) in self.named_modules():
            yield module

    def named_modules(self, memo: Optional[Set['Module']]=None, prefix: str='', remove_duplicate: bool=True):
        if memo is None:
            memo = set()
        if self not in memo:
            if remove_duplicate:
                memo.add(self)
            yield (prefix, self)
            for (name, module) in self._modules.items():
                if module is None:
                    continue
                submodule_prefix = prefix + ('.' if prefix else '') + name
                for m in module.named_modules(memo, submodule_prefix, remove_duplicate):
                    yield m

    def buffers(self, recurse: bool=True) -> Iterator[TensorLike]:
        for (name, buf) in self.named_buffers(recurse=recurse):
            yield buf

    def named_buffers(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[str, TensorLike]]:
        gen = self._named_members(lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
        yield from gen
    T_destination = TypeVar('T_destination', bound=Dict[str, Any])

    @overload
    def state_dict(self, *, destination: T_destination, prefix: str=..., keep_vars: bool=...) -> T_destination:
        ...

    @overload
    def state_dict(self, *, prefix: str=..., keep_vars: bool=...) -> Dict[str, Any]:
        ...

    def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
        if len(args) > 0:
            if destination is None:
                destination = args[0]
            if len(args) > 1 and prefix == '':
                prefix = args[1]
            if len(args) > 2 and keep_vars is False:
                keep_vars = args[2]
        if destination is None:
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        local_metadata = dict(version=self._version)
        if hasattr(destination, '_metadata'):
            destination._metadata[prefix[:-1]] = local_metadata
        self._save_to_state_dict(destination, prefix, keep_vars)
        for (name, module) in self._modules.items():
            if module is not None:
                module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
        return destination

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        for (name, buf) in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                if isinstance(buf, Tensor):
                    destination[prefix + name] = buf if keep_vars else buf.detach()
                else:
                    destination[prefix + name] = buf

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool=True, assign: bool=False):
        if not isinstance(state_dict, Mapping):
            raise TypeError(f'Expected state_dict to be dict-like, got {type(state_dict)}.')
        missing_keys: List[str] = []
        unexpected_keys: List[str] = []
        error_msgs: List[str] = []
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = OrderedDict(state_dict)
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, local_state_dict, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            if assign:
                local_metadata['assign_to_params_buffers'] = assign
            module._load_from_state_dict(local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if child is not None:
                    child_prefix = prefix + name + '.'
                    child_state_dict = {k: v for (k, v) in local_state_dict.items() if k.startswith(child_prefix)}
                    load(child, child_state_dict, child_prefix)

        load(self, state_dict)
        del load
        if strict:
            if len(unexpected_keys) > 0:
                error_msgs.insert(0, 'Unexpected key(s) in state_dict: {}. '.format(', '.join((f'"{k}"' for k in unexpected_keys))))
            if len(missing_keys) > 0:
                error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.format(', '.join((f'"{k}"' for k in missing_keys))))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(self.__class__.__name__, '\n\t'.join(error_msgs)))
        return _IncompatibleKeys(missing_keys, unexpected_keys)
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        persistent_buffers = {k: v for (k, v) in self._buffers.items() if k not in self._non_persistent_buffers_set}
        local_name_params = persistent_buffers.items()
        local_state = {k: v for (k, v) in local_name_params if v is not None}
        for (name, param) in local_state.items():
            key = prefix + name
            if key in state_dict:
                input_param = state_dict[key]
                if not isinstance(input_param, (Tensor, QTensor)):
                    error_msgs.append(f'While copying the parameter named "{key}", expected Tensor-like object from checkpoint but received {type(input_param)}')
                    continue
                if input_param.shape != param.shape:
                    error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, the shape in current model is {}.'.format(key, input_param.shape, param.shape))
                    continue
                try:
                    setattr(self, name, input_param)
                except Exception as ex:
                    error_msgs.append(f'While copying the parameter named "{key}", whose dimensions in the model are {param.shape} and whose dimensions in the checkpoint are {input_param.shape}, an exception occurred : {ex.args}.')
            elif strict:
                missing_keys.append(key)
        if strict:
            for key in state_dict.keys():
                if key.startswith(prefix):
                    input_name = key[len(prefix):]
                    input_name = input_name.split('.', 1)[0]
                    if input_name not in self._modules and input_name not in local_state:
                        unexpected_keys.append(key)

    def _named_members(self, get_members_fn, prefix='', recurse=True, remove_duplicate: bool=True):
        memo = set()
        modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)]
        for (module_prefix, module) in modules:
            members = get_members_fn(module)
            for (k, v) in members:
                if v is None or v in memo:
                    continue
                if remove_duplicate:
                    memo.add(v)
                name = module_prefix + ('.' if module_prefix else '') + k
                yield (name, v)

    def _get_name(self):
        return self.__class__.__name__

    def _apply(self, fn):
        for module in self.children():
            module._apply(fn)
        for (key, buf) in self._buffers.items():
            if buf is not None:
                self._buffers[key] = fn(buf)
        return self
    def __move_tensor_to_device(self, tensor: TensorLike, device: str):
        if isinstance(tensor, Tensor):
            return tensor.to_device(device)
        else:
            raise NotImplementedError('Cannot offload QTensor to cuda, yet!')

    def device(self) -> str:
        tensor = next(self.buffers())
        if isinstance(tensor, Tensor):
            return tensor.device
        else:
            return 'cpu'

    def cuda(self: T) -> T:
        def to_cuda(t: TensorLike):
            return self.__move_tensor_to_device(t, 'cuda')
        return self._apply(to_cuda)

    def cpu(self: T) -> T:
        def to_cpu(t: TensorLike):
            return self.__move_tensor_to_device(t, 'cpu')
        return self._apply(to_cpu)

    def __cast_tensor(self, tensor: TensorLike, dtype: Union[DType, str]):
        if isinstance(tensor, Tensor):
            return tensor.to_dtype(dtype)
        else:
            raise TypeError('candle.Module.to only accepts Tensor dtypes, but got desired dtype={}'.format(dtype))

    def type(self: T, dst_type: Union[DType, str]) -> T:
        def cast(t: TensorLike):
            return self.__cast_tensor(t, dst_type)
        return self._apply(cast)

    @overload
    def to(self: T, device: str=..., dtype: Optional[Union[DType, str]]=...) -> T:
        ...

    @overload
    def to(self: T, dtype: Union[DType, str]) -> T:
        ...

    def to(self, *args, **kwargs):
        device = None
        dtype = None
        if args:
            for arg in args:
                if isinstance(arg, str):
                    lower_arg = str(arg).lower()
                    if lower_arg.startswith('cuda') or lower_arg == 'cpu':
                        device = lower_arg
                    else:
                        dtype = arg
                elif isinstance(arg, DType):
                    dtype = str(arg)
                else:
                    raise TypeError('Module.to() received an invalid combination of arguments. Got: {}'.format(args))
        if kwargs:
            device = kwargs.get('device', device)
            # Only stringify the dtype when one was actually supplied;
            # str(None) would otherwise trip the dtype check below.
            if kwargs.get('dtype', dtype) is not None:
                dtype = str(kwargs.get('dtype', dtype))
        if device:
            device = device.lower()
        if dtype:
            dtype = dtype.lower()
            if dtype not in ['f32', 'f16', 'f64']:
                raise TypeError('candle.Module.to only accepts floating point dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):
            if dtype:
                t = self.__cast_tensor(t, dtype)
            if device:
                t = self.__move_tensor_to_device(t, device)
            return t

        return self._apply(convert)
    def __setattr__(self, __name: str, __value: Any) -> None:
        if isinstance(__value, Module):
            self._modules[__name] = __value
        elif isinstance(__value, QTensor):
            if __name in self._quantizable_buffers:
                type = __value.ggml_dtype.lower()
                if type in ['f32', 'f16']:
                    dequant = __value.dequantize()
                    if type == 'f16':
                        dequant = dequant.to_dtype('f16')
                    self._buffers[__name] = dequant
                else:
                    self._buffers[__name] = __value
            else:
                self._buffers[__name] = __value.dequantize()
        elif isinstance(__value, Tensor):
            self._buffers[__name] = __value
        else:
            super().__setattr__(__name, __value)

    def __getattr__(self, __name: str) -> Any:
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if __name in modules:
                return modules[__name]
        if '_buffers' in self.__dict__:
            tensors = self.__dict__['_buffers']
            if __name in tensors:
                return tensors[__name]
        return super().__getattribute__(__name)

    def __delattr__(self, name):
        if name in self._buffers:
            del self._buffers[name]
        elif name in self._modules:
            del self._modules[name]
        else:
            super().__delattr__(name)
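# Unlike torch.nn.Module there is no Parameter class here: every Tensor or
# QTensor assigned to an attribute is routed into _buffers by __setattr__,
# and submodules land in _modules. Assigning a QTensor to a name that is not
# registered in _quantizable_buffers dequantizes it on the spot, which is
# how load_state_dict(..., strict=False) can pull quantized GGUF tensors
# into float-only fields (the path e5.py above relies on).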
# File: candle-main/candle-pyo3/py_src/candle/nn/normalization.py
import candle
from candle import Tensor
from .module import Module
from typing import Union, List, Tuple, Optional, Any
import numbers

_shape_t = Union[int, List[int]]

class LayerNorm(Module):
    __constants__ = ['normalized_shape', 'eps']
    normalized_shape: Tuple[int, ...]
    eps: float

    def __init__(self, normalized_shape: _shape_t, eps: float=1e-05, bias: bool=True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.weight = candle.ones(normalized_shape, **factory_kwargs)
        if bias:
            self.bias = candle.zeros(normalized_shape, **factory_kwargs)
        else:
            self.bias = None

    def forward(self, input: Tensor) -> Tensor:
        mean_x = input.sum_keepdim(2) / float(self.normalized_shape[-1])
        x = input.broadcast_sub(mean_x)
        norm_x = x.sqr().sum_keepdim(2) / float(self.normalized_shape[-1])
        x_normed = x.broadcast_div((norm_x + self.eps).sqrt())
        x = x_normed.broadcast_mul(self.weight)
        if self.bias is not None:
            x = x.broadcast_add(self.bias)
        return x

    def extra_repr(self) -> str:
        # The original template referenced an elementwise_affine attribute
        # that this class never sets (it would raise a KeyError); report the
        # bias flag instead.
        return f'{self.normalized_shape}, eps={self.eps}, bias={self.bias is not None}'
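# This is the standard layer normalization over the last dimension,
#     y = (x - mean(x)) / sqrt(var(x) + eps) * weight + bias,
# computed with sum_keepdim over dim 2, so it assumes a rank-3
# (batch, seq, hidden) input rather than deriving the axis from
# normalized_shape the way torch.nn.LayerNorm does.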
# File: candle-main/candle-pyo3/py_src/candle/nn/sparse.py
from .module import Module
from typing import Optional, Tuple, Any
from candle import Tensor
import candle

class Embedding(Module):
    def __init__(self, num_embeddings: int, embedding_dim: int, device=None) -> None:
        factory_kwargs = {'device': device}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.weight = candle.randn((num_embeddings, embedding_dim), **factory_kwargs)

    def forward(self, indexes: Tensor) -> Tensor:
        final_dims = list(indexes.shape)
        final_dims.append(self.embedding_dim)
        indexes = indexes.flatten_all()
        values = self.weight.index_select(indexes, 0)
        return values.reshape(final_dims)
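# The embedding lookup is a gather along dim 0 of the (num_embeddings,
# embedding_dim) weight: indexes of shape (B, T) are flattened to (B*T,),
# index_select pulls the matching rows to give (B*T, embedding_dim), and
# the final reshape restores (B, T, embedding_dim).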
# File: candle-main/candle-pyo3/py_src/candle/typing/__init__.py
from typing import TypeVar, Union, Sequence
_T = TypeVar('_T')
_ArrayLike = Union[_T, Sequence[_T], Sequence[Sequence[_T]], Sequence[Sequence[Sequence[_T]]], Sequence[Sequence[Sequence[Sequence[_T]]]]]
CPU: str = 'cpu'
CUDA: str = 'cuda'
Device = TypeVar('Device', CPU, CUDA)
Scalar = Union[int, float]
Index = Union[int, slice, None, 'Ellipsis']
Shape = Union[int, Sequence[int]]
# File: candle-main/candle-pyo3/quant-llama.py
import sys
from typing import Dict, Tuple, Any
import candle
from candle.models.llama import QuantizedLlama
from candle import utils

MAX_SEQ_LEN = 4096

def gguf_rename(tensor_name: str):
    if tensor_name == 'token_embd.weight':
        return 'tok_embeddings.weight'
    if tensor_name == 'output_norm.weight':
        return 'norm.weight'
    tensor_name = tensor_name.replace('blk.', 'layers.')
    tensor_name = tensor_name.replace('.attn_q.', '.attention.wq.')
    tensor_name = tensor_name.replace('.attn_k.', '.attention.wk.')
    tensor_name = tensor_name.replace('.attn_v.', '.attention.wv.')
    tensor_name = tensor_name.replace('.attn_output.', '.attention.wo.')
    tensor_name = tensor_name.replace('.ffn_gate.', '.feed_forward.w1.')
    tensor_name = tensor_name.replace('.ffn_down.', '.feed_forward.w2.')
    tensor_name = tensor_name.replace('.ffn_up.', '.feed_forward.w3.')
    tensor_name = tensor_name.replace('.attn_norm.', '.attention_norm.')
    return tensor_name

def main():
    if len(sys.argv) < 2:
        raise ValueError('missing weight file argument')
    filename = sys.argv[1]
    print(f'reading model file {filename}')
    if filename.endswith('gguf'):
        (all_tensors, metadata) = utils.load_gguf(filename)
        vocab = metadata['tokenizer.ggml.tokens']
        for (i, v) in enumerate(vocab):
            vocab[i] = '\n' if v == '<0x0A>' else v.replace('▁', ' ')
        hparams = {k: v for (k, v) in metadata.items() if not k.startswith('tokenizer')}
        print(hparams)
        hparams = {
            'n_vocab': len(vocab),
            'n_embd': metadata['llama.embedding_length'],
            'n_mult': 256,
            'n_head': metadata['llama.attention.head_count'],
            'n_head_kv': metadata['llama.attention.head_count_kv'],
            'n_layer': metadata['llama.block_count'],
            'n_rot': metadata['llama.rope.dimension_count'],
            'rope_freq': metadata.get('llama.rope.freq_base', 10000.0),
            'ftype': metadata['general.file_type'],
            'context_length': metadata['llama.context_length'],
        }
        all_tensors = {gguf_rename(k): v for (k, v) in all_tensors.items()}
    else:
        (all_tensors, hparams, vocab) = utils.load_ggml(filename)
        hparams['context_length'] = 2048
    print(hparams)
    model = QuantizedLlama(hparams, all_tensors)
    print('model built, starting inference')
    tokens = [1]
    for token_idx in range(500):
        last_token = tokens[-1]
        lt = candle.tensor([last_token]).unsqueeze(0)
        logits = model.forward(lt, len(tokens))
        m = logits.get(0).argmax_keepdim(-1)
        next_token = m.values()[0]
        print(vocab[next_token], end='', flush=True)
        tokens.append(next_token)

if __name__ == '__main__':
    main()
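# Invocation sketch (the .gguf filename below is a placeholder; any
# llama-style GGUF or legacy GGML checkpoint should work):
#
#     python quant-llama.py llama-2-7b.Q4_0.gguf
#
# Decoding is greedy: starting from the BOS token (id 1), the script feeds
# one token per step, relies on each layer's kv_cache for earlier positions,
# and picks the argmax of the final-position logits for 500 steps.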
# File: candle-main/candle-pyo3/stub.py
import argparse
import inspect
import os
from typing import Optional
import black
from pathlib import Path
import re

INDENT = ' ' * 4
GENERATED_COMMENT = '# Generated content DO NOT EDIT\n'
TYPING = 'from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence\nfrom os import PathLike\n'
CANDLE_SPECIFIC_TYPING = 'from candle.typing import _ArrayLike, Device, Scalar, Index, Shape\n'
CANDLE_TENSOR_IMPORTS = 'from candle import Tensor,DType,QTensor\n'
RETURN_TYPE_MARKER = '&RETURNS&: '
ADDITIONAL_TYPEHINTS = {}
FORWARD_REF_PATTERN = re.compile("ForwardRef\\('([^']+)'\\)")

def do_indent(text: Optional[str], indent: str):
    if text is None:
        return ''
    return text.replace('\n', f'\n{indent}')

def function(obj, indent: str, text_signature: Optional[str]=None):
    if text_signature is None:
        text_signature = obj.__text_signature__
    text_signature = text_signature.replace('$self', 'self').lstrip().rstrip()
    doc_string = obj.__doc__
    if doc_string is None:
        doc_string = ''
    return_type = None
    doc_lines = doc_string.split('\n')
    if doc_lines[-1].lstrip().startswith(RETURN_TYPE_MARKER):
        return_type = doc_lines[-1].lstrip()[len(RETURN_TYPE_MARKER):].strip()
        doc_string = '\n'.join(doc_lines[:-1])
    string = ''
    if return_type:
        string += f'{indent}def {obj.__name__}{text_signature} -> {return_type}:\n'
    else:
        string += f'{indent}def {obj.__name__}{text_signature}:\n'
    indent += INDENT
    string += f'{indent}"""\n'
    string += f'{indent}{do_indent(doc_string, indent)}\n'
    string += f'{indent}"""\n'
    string += f'{indent}pass\n'
    string += '\n'
    string += '\n'
    return string

def member_sort(member):
    if inspect.isclass(member):
        value = 10 + len(inspect.getmro(member))
    else:
        value = 1
    return value

def fn_predicate(obj):
    value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
    if value:
        return obj.__text_signature__ and (not obj.__name__.startswith('_'))
    if inspect.isgetsetdescriptor(obj):
        return not obj.__name__.startswith('_')
    return False

def get_module_members(module):
    members = [member for (name, member) in inspect.getmembers(module) if not name.startswith('_') and (not inspect.ismodule(member))]
    members.sort(key=member_sort)
    return members

def pyi_file(obj, indent=''):
    string = ''
    if inspect.ismodule(obj):
        string += GENERATED_COMMENT
        string += TYPING
        string += CANDLE_SPECIFIC_TYPING
        if obj.__name__ != 'candle.candle':
            string += CANDLE_TENSOR_IMPORTS
        members = get_module_members(obj)
        for member in members:
            string += pyi_file(member, indent)
    elif inspect.isclass(obj):
        indent += INDENT
        mro = inspect.getmro(obj)
        if len(mro) > 2:
            inherit = f'({mro[1].__name__})'
        else:
            inherit = ''
        string += f'class {obj.__name__}{inherit}:\n'
        body = ''
        if obj.__doc__:
            body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
        fns = inspect.getmembers(obj, fn_predicate)
        if obj.__text_signature__:
            body += f'{indent}def __init__{obj.__text_signature__}:\n'
            body += f'{indent + INDENT}pass\n'
            body += '\n'
        if obj.__name__ in ADDITIONAL_TYPEHINTS:
            additional_members = inspect.getmembers(ADDITIONAL_TYPEHINTS[obj.__name__])
            additional_functions = []
            for (name, member) in additional_members:
                if inspect.isfunction(member):
                    additional_functions.append((name, member))

            def process_additional_function(fn):
                signature = inspect.signature(fn)
                cleaned_signature = re.sub(FORWARD_REF_PATTERN, '\\1', str(signature))
                string = f'{indent}def {fn.__name__}{cleaned_signature}:\n'
                string += f'{indent + INDENT}"""{indent + INDENT}{do_indent(fn.__doc__, indent + INDENT)}{indent + INDENT}"""\n'
                string += f'{indent + INDENT}pass\n'
                string += '\n'
                return string

            for (name, fn) in additional_functions:
                body += process_additional_function(fn)
        for (name, fn) in fns:
            body += pyi_file(fn, indent=indent)
        if not body:
            body += f'{indent}pass\n'
        string += body
        string += '\n\n'
    elif inspect.isbuiltin(obj):
        string += f'{indent}@staticmethod\n'
        string += function(obj, indent)
    elif inspect.ismethoddescriptor(obj):
        string += function(obj, indent)
    elif inspect.isgetsetdescriptor(obj):
        string += f'{indent}@property\n'
        string += function(obj, indent, text_signature='(self)')
    elif obj.__class__.__name__ == 'DType':
        string += f'class {str(obj).lower()}(DType):\n'
        string += f'{indent + INDENT}pass\n'
    else:
        raise Exception(f'Object {obj} is not supported')
    return string

def py_file(module, origin):
    members = get_module_members(module)
    string = GENERATED_COMMENT
    string += f'from .. import {origin}\n'
    string += '\n'
    for member in members:
        if hasattr(member, '__name__'):
            name = member.__name__
        else:
            name = str(member)
        string += f'{name} = {origin}.{name}\n'
    return string

def do_black(content, is_pyi):
    mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119, is_pyi=is_pyi, string_normalization=True)
    try:
        return black.format_file_contents(content, fast=True, mode=mode)
    except black.NothingChanged:
        return content

def write(module, directory, origin, check=False):
    submodules = [(name, member) for (name, member) in inspect.getmembers(module) if inspect.ismodule(member)]
    filename = os.path.join(directory, '__init__.pyi')
    pyi_content = pyi_file(module)
    pyi_content = do_black(pyi_content, is_pyi=True)
    os.makedirs(directory, exist_ok=True)
    if check:
        with open(filename, 'r') as f:
            data = f.read()
            print('generated content')
            print(pyi_content)
            assert data == pyi_content, f'The content of {filename} seems outdated, please run `python stub.py`'
    else:
        with open(filename, 'w') as f:
            f.write(pyi_content)
    filename = os.path.join(directory, '__init__.py')
    py_content = py_file(module, origin)
    py_content = do_black(py_content, is_pyi=False)
    os.makedirs(directory, exist_ok=True)
    is_auto = False
    if not os.path.exists(filename):
        is_auto = True
    else:
        with open(filename, 'r') as f:
            line = f.readline()
            if line == GENERATED_COMMENT:
                is_auto = True
    if is_auto:
        if check:
            with open(filename, 'r') as f:
                data = f.read()
                print('generated content')
                print(py_content)
                assert data == py_content, f'The content of {filename} seems outdated, please run `python stub.py`'
        else:
            with open(filename, 'w') as f:
                f.write(py_content)
    for (name, submodule) in submodules:
        write(submodule, os.path.join(directory, name), f'{name}', check=check)

def extract_additional_types(module):
    additional_types = {}
    for (name, member) in inspect.getmembers(module):
        if inspect.isclass(member):
            if hasattr(member, '__name__'):
                name = member.__name__
            else:
                name = str(member)
            if name not in additional_types:
                additional_types[name] = member
    return additional_types

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--check', action='store_true')
    args = parser.parse_args()
    cwd = Path.cwd()
    directory = 'py_src/candle/'
    if cwd.name != 'candle-pyo3':
        directory = f'candle-pyo3/{directory}'
    import candle
    import _additional_typing
    ADDITIONAL_TYPEHINTS = extract_additional_types(_additional_typing)
    write(candle.candle, directory, 'candle', check=args.check)
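# Regenerating the type stubs, as implied by the assert messages above:
#
#     python stub.py          # rewrite __init__.pyi / generated __init__.py files
#     python stub.py --check  # CI mode: fail if the committed stubs are stale
#
# The script introspects the compiled candle.candle extension module, so the
# package must be built and importable before running it.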