|
# File: tokenizers-main/bindings/python/py_src/tokenizers/__init__.py |
|
from enum import Enum |
|
from typing import List, Tuple, Union |
|
Offsets = Tuple[int, int] |
|
TextInputSequence = str |
|
"""A :obj:`str` that represents an input sequence."""
|
PreTokenizedInputSequence = Union[List[str], Tuple[str]] |
|
"""A pre-tokenized input sequence: a list or tuple of :obj:`str`."""
|
TextEncodeInput = Union[TextInputSequence, Tuple[TextInputSequence, TextInputSequence], List[TextInputSequence]] |
|
"""A textual input for encoding: a single sequence or a pair of sequences."""
|
PreTokenizedEncodeInput = Union[PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], List[PreTokenizedInputSequence]] |
|
"""A pre-tokenized input for encoding: a single pre-tokenized sequence or a pair of them."""
|
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence] |
|
"""Any single input sequence, either textual or pre-tokenized."""
|
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput] |
|
"""Any input accepted by the encoding methods, either textual or pre-tokenized."""
|
|
|
class OffsetReferential(Enum): |
|
ORIGINAL = 'original' |
|
NORMALIZED = 'normalized' |
|
|
|
class OffsetType(Enum): |
|
BYTE = 'byte' |
|
CHAR = 'char' |
|
|
|
class SplitDelimiterBehavior(Enum): |
|
REMOVED = 'removed' |
|
ISOLATED = 'isolated' |
|
MERGED_WITH_PREVIOUS = 'merged_with_previous' |
|
MERGED_WITH_NEXT = 'merged_with_next' |
|
CONTIGUOUS = 'contiguous' |
|
from .tokenizers import AddedToken, Encoding, NormalizedString, PreTokenizedString, Regex, Token, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers, __version__ |
|
from .implementations import BertWordPieceTokenizer, ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer, SentencePieceUnigramTokenizer |
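# Illustrative usage sketch (not part of the original module): the classes
# re-exported above are typically combined as below; the tiny training corpus is
# a hypothetical placeholder.
def _example_quicktour():
    tokenizer = Tokenizer(models.BPE(unk_token='<unk>'))
    tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
    tokenizer.train_from_iterator(
        ['hello world', 'hello tokenizers'],
        trainer=trainers.BpeTrainer(special_tokens=['<unk>']),
    )
    return tokenizer.encode('hello world').tokens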
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/decoders/__init__.py |
|
from .. import decoders |
|
Decoder = decoders.Decoder |
|
ByteLevel = decoders.ByteLevel |
|
Replace = decoders.Replace |
|
WordPiece = decoders.WordPiece |
|
ByteFallback = decoders.ByteFallback |
|
Fuse = decoders.Fuse |
|
Strip = decoders.Strip |
|
Metaspace = decoders.Metaspace |
|
BPEDecoder = decoders.BPEDecoder |
|
CTC = decoders.CTC |
|
Sequence = decoders.Sequence |
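# Illustrative sketch (not part of the original module): a decoder turns a list
# of tokens back into a display string; WordPiece strips the '##' continuation
# prefix and cleans up spacing.
def _example_wordpiece_decode():
    decoder = WordPiece(prefix='##')
    return decoder.decode(['token', '##izer'])  # -> 'tokenizer'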
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py |
|
from typing import Dict, List, Optional, Tuple, Union |
|
from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer |
|
from tokenizers.decoders import Decoder |
|
from tokenizers.models import Model |
|
from tokenizers.normalizers import Normalizer |
|
from tokenizers.pre_tokenizers import PreTokenizer |
|
from tokenizers.processors import PostProcessor |
|
Offsets = Tuple[int, int] |
|
|
|
class BaseTokenizer: |
|
|
|
def __init__(self, tokenizer: Tokenizer, parameters=None): |
|
self._tokenizer = tokenizer |
|
self._parameters = parameters if parameters is not None else {} |
|
|
|
def __repr__(self): |
|
return 'Tokenizer(vocabulary_size={}, {})'.format(self._tokenizer.get_vocab_size(), ', '.join((k + '=' + str(v) for (k, v) in self._parameters.items()))) |
|
|
|
def num_special_tokens_to_add(self, is_pair: bool) -> int: |
|
return self._tokenizer.num_special_tokens_to_add(is_pair) |
|
|
|
def get_vocab(self, with_added_tokens: bool=True) -> Dict[str, int]: |
|
return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens) |
|
|
|
def get_added_tokens_decoder(self) -> Dict[int, AddedToken]: |
|
return self._tokenizer.get_added_tokens_decoder() |
|
|
|
def get_vocab_size(self, with_added_tokens: bool=True) -> int: |
|
return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens) |
|
|
|
def enable_padding(self, direction: Optional[str]='right', pad_to_multiple_of: Optional[int]=None, pad_id: Optional[int]=0, pad_type_id: Optional[int]=0, pad_token: Optional[str]='[PAD]', length: Optional[int]=None): |
|
return self._tokenizer.enable_padding(direction=direction, pad_to_multiple_of=pad_to_multiple_of, pad_id=pad_id, pad_type_id=pad_type_id, pad_token=pad_token, length=length) |
|
|
|
def no_padding(self): |
|
return self._tokenizer.no_padding() |
|
|
|
@property |
|
def padding(self) -> Optional[dict]: |
|
return self._tokenizer.padding |
|
|
|
def enable_truncation(self, max_length: int, stride: Optional[int]=0, strategy: Optional[str]='longest_first'): |
|
return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) |
|
|
|
def no_truncation(self): |
|
return self._tokenizer.no_truncation() |
|
|
|
@property |
|
def truncation(self) -> Optional[dict]: |
|
return self._tokenizer.truncation |
|
|
|
def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int: |
|
return self._tokenizer.add_tokens(tokens) |
|
|
|
def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int: |
|
return self._tokenizer.add_special_tokens(special_tokens) |
|
|
|
def normalize(self, sequence: str) -> str: |
|
return self._tokenizer.normalize(sequence) |
|
|
|
def encode(self, sequence: InputSequence, pair: Optional[InputSequence]=None, is_pretokenized: bool=False, add_special_tokens: bool=True) -> Encoding: |
|
if sequence is None: |
|
raise ValueError("encode: `sequence` can't be `None`") |
|
return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens) |
|
|
|
def encode_batch(self, inputs: List[EncodeInput], is_pretokenized: bool=False, add_special_tokens: bool=True) -> List[Encoding]: |
|
if inputs is None: |
|
raise ValueError("encode_batch: `inputs` can't be `None`") |
|
return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens) |
|
|
|
def decode(self, ids: List[int], skip_special_tokens: Optional[bool]=True) -> str: |
|
if ids is None: |
|
raise ValueError('None input is not valid. Should be a list of integers.') |
|
return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens) |
|
|
|
def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool]=True) -> str: |
|
if sequences is None: |
|
raise ValueError('None input is not valid. Should be list of list of integers.') |
|
return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens) |
|
|
|
def token_to_id(self, token: str) -> Optional[int]: |
|
return self._tokenizer.token_to_id(token) |
|
|
|
def id_to_token(self, id: int) -> Optional[str]: |
|
return self._tokenizer.id_to_token(id) |
|
|
|
def save_model(self, directory: str, prefix: Optional[str]=None): |
|
return self._tokenizer.model.save(directory, prefix=prefix) |
|
|
|
def save(self, path: str, pretty: bool=True): |
|
return self._tokenizer.save(path, pretty) |
|
|
|
def to_str(self, pretty: bool=False): |
|
return self._tokenizer.to_str(pretty) |
|
|
|
def post_process(self, encoding: Encoding, pair: Optional[Encoding]=None, add_special_tokens: bool=True) -> Encoding: |
|
return self._tokenizer.post_process(encoding, pair, add_special_tokens) |
|
|
|
@property |
|
def model(self) -> Model: |
|
return self._tokenizer.model |
|
|
|
@model.setter |
|
def model(self, model: Model): |
|
self._tokenizer.model = model |
|
|
|
@property |
|
def normalizer(self) -> Normalizer: |
|
return self._tokenizer.normalizer |
|
|
|
@normalizer.setter |
|
def normalizer(self, normalizer: Normalizer): |
|
self._tokenizer.normalizer = normalizer |
|
|
|
@property |
|
def pre_tokenizer(self) -> PreTokenizer: |
|
return self._tokenizer.pre_tokenizer |
|
|
|
@pre_tokenizer.setter |
|
def pre_tokenizer(self, pre_tokenizer: PreTokenizer): |
|
self._tokenizer.pre_tokenizer = pre_tokenizer |
|
|
|
@property |
|
def post_processor(self) -> PostProcessor: |
|
return self._tokenizer.post_processor |
|
|
|
@post_processor.setter |
|
def post_processor(self, post_processor: PostProcessor): |
|
self._tokenizer.post_processor = post_processor |
|
|
|
@property |
|
def decoder(self) -> Decoder: |
|
return self._tokenizer.decoder |
|
|
|
@decoder.setter |
|
def decoder(self, decoder: Decoder): |
|
self._tokenizer.decoder = decoder |
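# Illustrative sketch (not part of the original module): BaseTokenizer is a thin
# wrapper that forwards every call to the core `Tokenizer` it holds. The corpus
# and the '[PAD]' token below are hypothetical placeholders.
def _example_wrap_core_tokenizer():
    from tokenizers import Tokenizer, pre_tokenizers, trainers
    from tokenizers.models import BPE

    core = Tokenizer(BPE(unk_token='<unk>'))
    core.pre_tokenizer = pre_tokenizers.Whitespace()
    core.train_from_iterator(
        ['hello world', 'hello tokenizers'],
        trainer=trainers.BpeTrainer(special_tokens=['<unk>', '[PAD]']),
    )
    wrapped = BaseTokenizer(core, parameters={'model': 'BPE'})
    wrapped.enable_truncation(max_length=8)
    wrapped.enable_padding(pad_token='[PAD]', pad_id=wrapped.token_to_id('[PAD]'))
    return [e.tokens for e in wrapped.encode_batch(['hello world', 'hello'])]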
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py |
|
from typing import Dict, Iterator, List, Optional, Union |
|
from tokenizers import AddedToken, Tokenizer, decoders, trainers |
|
from tokenizers.models import WordPiece |
|
from tokenizers.normalizers import BertNormalizer |
|
from tokenizers.pre_tokenizers import BertPreTokenizer |
|
from tokenizers.processors import BertProcessing |
|
from .base_tokenizer import BaseTokenizer |
|
|
|
class BertWordPieceTokenizer(BaseTokenizer): |
|
|
|
def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, unk_token: Union[str, AddedToken]='[UNK]', sep_token: Union[str, AddedToken]='[SEP]', cls_token: Union[str, AddedToken]='[CLS]', pad_token: Union[str, AddedToken]='[PAD]', mask_token: Union[str, AddedToken]='[MASK]', clean_text: bool=True, handle_chinese_chars: bool=True, strip_accents: Optional[bool]=None, lowercase: bool=True, wordpieces_prefix: str='##'): |
|
if vocab is not None: |
|
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token))) |
|
else: |
|
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token))) |
|
if tokenizer.token_to_id(str(unk_token)) is not None: |
|
tokenizer.add_special_tokens([str(unk_token)]) |
|
if tokenizer.token_to_id(str(sep_token)) is not None: |
|
tokenizer.add_special_tokens([str(sep_token)]) |
|
if tokenizer.token_to_id(str(cls_token)) is not None: |
|
tokenizer.add_special_tokens([str(cls_token)]) |
|
if tokenizer.token_to_id(str(pad_token)) is not None: |
|
tokenizer.add_special_tokens([str(pad_token)]) |
|
if tokenizer.token_to_id(str(mask_token)) is not None: |
|
tokenizer.add_special_tokens([str(mask_token)]) |
|
tokenizer.normalizer = BertNormalizer(clean_text=clean_text, handle_chinese_chars=handle_chinese_chars, strip_accents=strip_accents, lowercase=lowercase) |
|
tokenizer.pre_tokenizer = BertPreTokenizer() |
|
if vocab is not None: |
|
sep_token_id = tokenizer.token_to_id(str(sep_token)) |
|
if sep_token_id is None: |
|
raise TypeError('sep_token not found in the vocabulary') |
|
cls_token_id = tokenizer.token_to_id(str(cls_token)) |
|
if cls_token_id is None: |
|
raise TypeError('cls_token not found in the vocabulary') |
|
tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id)) |
|
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix) |
|
parameters = {'model': 'BertWordPiece', 'unk_token': unk_token, 'sep_token': sep_token, 'cls_token': cls_token, 'pad_token': pad_token, 'mask_token': mask_token, 'clean_text': clean_text, 'handle_chinese_chars': handle_chinese_chars, 'strip_accents': strip_accents, 'lowercase': lowercase, 'wordpieces_prefix': wordpieces_prefix} |
|
super().__init__(tokenizer, parameters) |
|
|
|
@staticmethod |
|
def from_file(vocab: str, **kwargs): |
|
vocab = WordPiece.read_file(vocab) |
|
return BertWordPieceTokenizer(vocab, **kwargs) |
|
|
|
def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, limit_alphabet: int=1000, initial_alphabet: List[str]=[], special_tokens: List[Union[str, AddedToken]]=['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'], show_progress: bool=True, wordpieces_prefix: str='##'): |
|
trainer = trainers.WordPieceTrainer(vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix) |
|
if isinstance(files, str): |
|
files = [files] |
|
self._tokenizer.train(files, trainer=trainer) |
|
|
|
def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, limit_alphabet: int=1000, initial_alphabet: List[str]=[], special_tokens: List[Union[str, AddedToken]]=['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'], show_progress: bool=True, wordpieces_prefix: str='##', length: Optional[int]=None): |
|
trainer = trainers.WordPieceTrainer(vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix) |
|
self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length) |
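# Illustrative sketch (not part of the original module): training a
# BertWordPieceTokenizer from an in-memory iterator; the corpus and vocabulary
# size are hypothetical placeholders.
def _example_bert_wordpiece():
    bert_tokenizer = BertWordPieceTokenizer(lowercase=True)
    bert_tokenizer.train_from_iterator(
        ['Welcome to the Tokenizers library.', 'The Tokenizers library is fast.'],
        vocab_size=200,
    )
    return bert_tokenizer.encode('Welcome to the library.').tokens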
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py |
|
from typing import Dict, Iterator, List, Optional, Tuple, Union |
|
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers |
|
from tokenizers.models import BPE |
|
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str |
|
from .base_tokenizer import BaseTokenizer |
|
|
|
class ByteLevelBPETokenizer(BaseTokenizer): |
|
|
|
def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, add_prefix_space: bool=False, lowercase: bool=False, dropout: Optional[float]=None, unicode_normalizer: Optional[str]=None, continuing_subword_prefix: Optional[str]=None, end_of_word_suffix: Optional[str]=None, trim_offsets: bool=False): |
|
if vocab is not None and merges is not None: |
|
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, continuing_subword_prefix=continuing_subword_prefix or '', end_of_word_suffix=end_of_word_suffix or '')) |
|
else: |
|
tokenizer = Tokenizer(BPE()) |
|
normalizers = [] |
|
if unicode_normalizer: |
|
normalizers += [unicode_normalizer_from_str(unicode_normalizer)] |
|
if lowercase: |
|
normalizers += [Lowercase()] |
|
if len(normalizers) > 0: |
|
if len(normalizers) > 1: |
|
tokenizer.normalizer = Sequence(normalizers) |
|
else: |
|
tokenizer.normalizer = normalizers[0] |
|
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) |
|
tokenizer.decoder = decoders.ByteLevel() |
|
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) |
|
parameters = {'model': 'ByteLevelBPE', 'add_prefix_space': add_prefix_space, 'lowercase': lowercase, 'dropout': dropout, 'unicode_normalizer': unicode_normalizer, 'continuing_subword_prefix': continuing_subword_prefix, 'end_of_word_suffix': end_of_word_suffix, 'trim_offsets': trim_offsets} |
|
super().__init__(tokenizer, parameters) |
|
|
|
@staticmethod |
|
def from_file(vocab_filename: str, merges_filename: str, **kwargs): |
|
(vocab, merges) = BPE.read_file(vocab_filename, merges_filename) |
|
return ByteLevelBPETokenizer(vocab, merges, **kwargs) |
|
|
|
def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, show_progress: bool=True, special_tokens: List[Union[str, AddedToken]]=[]): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet()) |
|
if isinstance(files, str): |
|
files = [files] |
|
self._tokenizer.train(files, trainer=trainer) |
|
|
|
def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, show_progress: bool=True, special_tokens: List[Union[str, AddedToken]]=[], length: Optional[int]=None): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet()) |
|
self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length) |
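# Illustrative sketch (not part of the original module): byte-level BPE keeps the
# full 256-byte initial alphabet, so the requested vocabulary must be at least
# that large. The corpus and sizes below are hypothetical placeholders.
def _example_byte_level_bpe():
    bl_tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
    bl_tokenizer.train_from_iterator(
        ['hello world', 'hello tokenizers'],
        vocab_size=300,
        min_frequency=1,
    )
    return bl_tokenizer.encode('hello world').tokens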
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py |
|
from typing import Dict, Iterator, List, Optional, Tuple, Union |
|
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers |
|
from ..models import BPE |
|
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str |
|
from .base_tokenizer import BaseTokenizer |
|
|
|
class CharBPETokenizer(BaseTokenizer): |
|
|
|
def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, unk_token: Union[str, AddedToken]='<unk>', suffix: str='</w>', dropout: Optional[float]=None, lowercase: bool=False, unicode_normalizer: Optional[str]=None, bert_normalizer: bool=True, split_on_whitespace_only: bool=False): |
|
if vocab is not None and merges is not None: |
|
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=str(unk_token), end_of_word_suffix=suffix)) |
|
else: |
|
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix)) |
|
if tokenizer.token_to_id(str(unk_token)) is not None: |
|
tokenizer.add_special_tokens([str(unk_token)]) |
|
normalizers = [] |
|
if unicode_normalizer: |
|
normalizers += [unicode_normalizer_from_str(unicode_normalizer)] |
|
if bert_normalizer: |
|
normalizers += [BertNormalizer(lowercase=False)] |
|
if lowercase: |
|
normalizers += [Lowercase()] |
|
if len(normalizers) > 0: |
|
if len(normalizers) > 1: |
|
tokenizer.normalizer = Sequence(normalizers) |
|
else: |
|
tokenizer.normalizer = normalizers[0] |
|
if split_on_whitespace_only: |
|
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit() |
|
else: |
|
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() |
|
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix) |
|
parameters = {'model': 'BPE', 'unk_token': unk_token, 'suffix': suffix, 'dropout': dropout, 'lowercase': lowercase, 'unicode_normalizer': unicode_normalizer, 'bert_normalizer': bert_normalizer, 'split_on_whitespace_only': split_on_whitespace_only} |
|
super().__init__(tokenizer, parameters) |
|
|
|
@staticmethod |
|
def from_file(vocab_filename: str, merges_filename: str, **kwargs): |
|
(vocab, merges) = BPE.read_file(vocab_filename, merges_filename) |
|
return CharBPETokenizer(vocab, merges, **kwargs) |
|
|
|
def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], suffix: Optional[str]='</w>', show_progress: bool=True): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress) |
|
if isinstance(files, str): |
|
files = [files] |
|
self._tokenizer.train(files, trainer=trainer) |
|
|
|
def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], suffix: Optional[str]='</w>', show_progress: bool=True, length: Optional[int]=None): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress) |
|
self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length) |
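# Illustrative sketch (not part of the original module): the character-level BPE
# marks word-final subwords with the '</w>' suffix. The corpus below is a
# hypothetical placeholder.
def _example_char_bpe():
    char_tokenizer = CharBPETokenizer(lowercase=True)
    char_tokenizer.train_from_iterator(
        ['hello world', 'hello tokenizers'],
        vocab_size=100,
        min_frequency=1,
    )
    return char_tokenizer.encode('Hello world').tokens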
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py |
|
from typing import Dict, Iterator, List, Optional, Tuple, Union |
|
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers |
|
from tokenizers.models import BPE |
|
from tokenizers.normalizers import NFKC |
|
from .base_tokenizer import BaseTokenizer |
|
|
|
class SentencePieceBPETokenizer(BaseTokenizer): |
|
|
|
def __init__(self, vocab: Optional[Union[str, Dict[str, int]]]=None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]]=None, unk_token: Union[str, AddedToken]='<unk>', replacement: str='▁', add_prefix_space: bool=True, dropout: Optional[float]=None, fuse_unk: Optional[bool]=False): |
|
if vocab is not None and merges is not None: |
|
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) |
|
else: |
|
tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) |
|
if tokenizer.token_to_id(str(unk_token)) is not None: |
|
tokenizer.add_special_tokens([str(unk_token)]) |
|
tokenizer.normalizer = NFKC() |
|
prepend_scheme = 'always' if add_prefix_space else 'never' |
|
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
parameters = {'model': 'SentencePieceBPE', 'unk_token': unk_token, 'replacement': replacement, 'add_prefix_space': add_prefix_space, 'dropout': dropout} |
|
super().__init__(tokenizer, parameters) |
|
|
|
@staticmethod |
|
def from_file(vocab_filename: str, merges_filename: str, **kwargs): |
|
(vocab, merges) = BPE.read_file(vocab_filename, merges_filename) |
|
return SentencePieceBPETokenizer(vocab, merges, **kwargs) |
|
|
|
def train(self, files: Union[str, List[str]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], show_progress: bool=True): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress) |
|
if isinstance(files, str): |
|
files = [files] |
|
self._tokenizer.train(files, trainer=trainer) |
|
|
|
def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=30000, min_frequency: int=2, special_tokens: List[Union[str, AddedToken]]=['<unk>'], limit_alphabet: int=1000, initial_alphabet: List[str]=[], show_progress: bool=True, length: Optional[int]=None): |
|
trainer = trainers.BpeTrainer(vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress) |
|
self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length) |
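# Illustrative sketch (not part of the original module): the SentencePiece-style
# BPE replaces spaces with '▁' through the Metaspace pre-tokenizer. The corpus
# below is a hypothetical placeholder.
def _example_sentencepiece_bpe():
    sp_tokenizer = SentencePieceBPETokenizer()
    sp_tokenizer.train_from_iterator(
        ['hello world', 'hello tokenizers'],
        vocab_size=100,
        min_frequency=1,
    )
    return sp_tokenizer.encode('hello world').tokens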
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py |
|
import json |
|
import os |
|
from typing import Iterator, List, Optional, Union, Tuple |
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers |
|
from tokenizers.models import Unigram |
|
from .base_tokenizer import BaseTokenizer |
|
|
|
class SentencePieceUnigramTokenizer(BaseTokenizer): |
|
|
|
def __init__(self, vocab: Optional[List[Tuple[str, float]]]=None, replacement: str='▁', add_prefix_space: bool=True): |
|
if vocab is not None: |
|
tokenizer = Tokenizer(Unigram(vocab)) |
|
else: |
|
tokenizer = Tokenizer(Unigram()) |
|
tokenizer.normalizer = normalizers.Sequence([normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}'), ' ')]) |
|
prepend_scheme = 'always' if add_prefix_space else 'never' |
|
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
parameters = {'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space} |
|
super().__init__(tokenizer, parameters) |
|
|
|
def train(self, files: Union[str, List[str]], vocab_size: int=8000, show_progress: bool=True, special_tokens: Optional[List[Union[str, AddedToken]]]=None, initial_alphabet: Optional[List[str]]=None, unk_token: Optional[str]=None): |
|
if special_tokens is None: |
|
special_tokens = [] |
|
if initial_alphabet is None: |
|
initial_alphabet = [] |
|
trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token) |
|
if isinstance(files, str): |
|
files = [files] |
|
self._tokenizer.train(files, trainer=trainer) |
|
|
|
def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int=8000, show_progress: bool=True, special_tokens: Optional[List[Union[str, AddedToken]]]=None, initial_alphabet: Optional[List[str]]=None, unk_token: Optional[str]=None, length: Optional[int]=None): |
|
if special_tokens is None: |
|
special_tokens = [] |
|
if initial_alphabet is None: |
|
initial_alphabet = [] |
|
trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token) |
|
self._tokenizer.train_from_iterator(iterator, trainer=trainer, length=length) |
|
|
|
@staticmethod |
|
def from_spm(filename: str): |
|
try: |
|
import sys |
|
sys.path.append('.') |
|
import sentencepiece_model_pb2 as model |
|
except Exception: |
|
raise Exception("You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required.") |
|
m = model.ModelProto() |
|
m.ParseFromString(open(filename, 'rb').read()) |
|
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap |
|
vocab = [(piece.piece, piece.score) for piece in m.pieces] |
|
unk_id = m.trainer_spec.unk_id |
|
model_type = m.trainer_spec.model_type |
|
byte_fallback = m.trainer_spec.byte_fallback |
|
if model_type != 1: |
|
raise Exception("You're trying to run a `Unigram` model but you're file was trained with a different algorithm") |
|
replacement = '▁' |
|
add_prefix_space = True |
|
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) |
|
if precompiled_charsmap: |
|
tokenizer.normalizer = normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(' {2,}'), ' ')]) |
|
else: |
|
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(' {2,}'), ' ')]) |
|
prepend_scheme = 'always' if add_prefix_space else 'never' |
|
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) |
|
parameters = {'model': 'SentencePieceUnigram'} |
|
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) |
|
BaseTokenizer.__init__(obj, tokenizer, parameters) |
|
return obj |
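# Illustrative sketch (not part of the original module): training the Unigram
# variant from an iterator; the trainer expects `unk_token` to also be listed in
# `special_tokens`. The corpus is a hypothetical placeholder.
def _example_sentencepiece_unigram():
    sp_tokenizer = SentencePieceUnigramTokenizer()
    sp_tokenizer.train_from_iterator(
        ['hello world', 'hello tokenizers', 'unigram models are probabilistic'],
        vocab_size=100,
        special_tokens=['<unk>'],
        unk_token='<unk>',
    )
    return sp_tokenizer.encode('hello world').tokens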
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/normalizers/__init__.py |
|
from .. import normalizers |
|
Normalizer = normalizers.Normalizer |
|
BertNormalizer = normalizers.BertNormalizer |
|
NFD = normalizers.NFD |
|
NFKD = normalizers.NFKD |
|
NFC = normalizers.NFC |
|
NFKC = normalizers.NFKC |
|
Sequence = normalizers.Sequence |
|
Lowercase = normalizers.Lowercase |
|
Prepend = normalizers.Prepend |
|
Strip = normalizers.Strip |
|
StripAccents = normalizers.StripAccents |
|
Nmt = normalizers.Nmt |
|
Precompiled = normalizers.Precompiled |
|
Replace = normalizers.Replace |
|
ByteLevel = normalizers.ByteLevel |
|
NORMALIZERS = {'nfc': NFC, 'nfd': NFD, 'nfkc': NFKC, 'nfkd': NFKD} |
|
|
|
def unicode_normalizer_from_str(normalizer: str) -> Normalizer: |
|
if normalizer not in NORMALIZERS: |
|
raise ValueError('{} is not a known unicode normalizer. Available are {}'.format(normalizer, NORMALIZERS.keys())) |
|
return NORMALIZERS[normalizer]() |
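# Illustrative sketch (not part of the original module): composing normalizers,
# including one resolved through unicode_normalizer_from_str.
def _example_normalizer_chain():
    chain = Sequence([unicode_normalizer_from_str('nfd'), StripAccents(), Lowercase()])
    return chain.normalize_str('Héllo Wörld')  # -> 'hello world'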
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py |
|
from .. import pre_tokenizers |
|
PreTokenizer = pre_tokenizers.PreTokenizer |
|
BertPreTokenizer = pre_tokenizers.BertPreTokenizer |
|
ByteLevel = pre_tokenizers.ByteLevel |
|
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit |
|
Digits = pre_tokenizers.Digits |
|
Metaspace = pre_tokenizers.Metaspace |
|
Punctuation = pre_tokenizers.Punctuation |
|
Sequence = pre_tokenizers.Sequence |
|
Split = pre_tokenizers.Split |
|
UnicodeScripts = pre_tokenizers.UnicodeScripts |
|
Whitespace = pre_tokenizers.Whitespace |
|
WhitespaceSplit = pre_tokenizers.WhitespaceSplit |
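# Illustrative sketch (not part of the original module): pre-tokenizers split raw
# text into (substring, offsets) pairs before the model runs.
def _example_pre_tokenize():
    splitter = Sequence([Whitespace(), Digits(individual_digits=True)])
    return splitter.pre_tokenize_str('Call 911 now')
    # e.g. [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('now', (9, 12))]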
|
|
|
# File: tokenizers-main/bindings/python/py_src/tokenizers/tools/visualizer.py |
|
import itertools |
|
import os |
|
import re |
|
from string import Template |
|
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple |
|
from tokenizers import Encoding, Tokenizer |
|
dirname = os.path.dirname(__file__) |
|
css_filename = os.path.join(dirname, 'visualizer-styles.css') |
|
with open(css_filename) as f: |
|
css = f.read() |
|
|
|
class Annotation: |
|
start: int |
|
end: int |
|
label: str
|
|
|
def __init__(self, start: int, end: int, label: str): |
|
self.start = start |
|
self.end = end |
|
self.label = label |
|
AnnotationList = List[Annotation] |
|
PartialIntList = List[Optional[int]] |
|
|
|
class CharStateKey(NamedTuple): |
|
token_ix: Optional[int] |
|
anno_ix: Optional[int] |
|
|
|
class CharState: |
|
char_ix: Optional[int] |
|
|
|
def __init__(self, char_ix): |
|
self.char_ix = char_ix |
|
self.anno_ix: Optional[int] = None |
|
self.tokens: List[int] = [] |
|
|
|
@property |
|
def token_ix(self): |
|
return self.tokens[0] if len(self.tokens) > 0 else None |
|
|
|
@property |
|
def is_multitoken(self): |
|
return len(self.tokens) > 1 |
|
|
|
def partition_key(self) -> CharStateKey: |
|
return CharStateKey(token_ix=self.token_ix, anno_ix=self.anno_ix) |
|
|
|
class Aligned: |
|
pass |
|
|
|
class EncodingVisualizer: |
|
unk_token_regex = re.compile(r'(.{1}\b)?(unk|oov)(\b.{1})?', flags=re.IGNORECASE)
|
|
|
def __init__(self, tokenizer: Tokenizer, default_to_notebook: bool=True, annotation_converter: Optional[Callable[[Any], Annotation]]=None): |
|
if default_to_notebook: |
|
try: |
|
from IPython.core.display import HTML, display |
|
except ImportError: |
|
raise Exception("We couldn't import IPython utils for html display.\n Are you running in a notebook?\n You can also pass `default_to_notebook=False` to get back raw HTML\n ") |
|
self.tokenizer = tokenizer |
|
self.default_to_notebook = default_to_notebook |
|
self.annotation_converter = annotation_converter
|
pass |
|
|
|
def __call__(self, text: str, annotations: AnnotationList=[], default_to_notebook: Optional[bool]=None) -> Optional[str]: |
|
final_default_to_notebook = self.default_to_notebook |
|
if default_to_notebook is not None: |
|
final_default_to_notebook = default_to_notebook |
|
if final_default_to_notebook: |
|
try: |
|
from IPython.core.display import HTML, display |
|
except ImportError: |
|
raise Exception("We couldn't import IPython utils for html display.\n Are you running in a notebook?") |
|
if self.annotation_converter is not None:
|
annotations = list(map(self.annotation_converter, annotations))
|
encoding = self.tokenizer.encode(text) |
|
html = EncodingVisualizer.__make_html(text, encoding, annotations) |
|
if final_default_to_notebook: |
|
display(HTML(html)) |
|
else: |
|
return html |
|
|
|
@staticmethod |
|
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: |
|
if len(annotations) == 0: |
|
return {} |
|
labels = set(map(lambda x: x.label, annotations)) |
|
num_labels = len(labels) |
|
h_step = int(255 / num_labels) |
|
if h_step < 20: |
|
h_step = 20 |
|
s = 32 |
|
l = 64 |
|
h = 10 |
|
colors = {} |
|
for label in sorted(labels): |
|
colors[label] = f'hsl({h},{s}%,{l}%)'
|
h += h_step |
|
return colors |
|
|
|
@staticmethod |
|
def consecutive_chars_to_html(consecutive_chars_list: List[CharState], text: str, encoding: Encoding): |
|
first = consecutive_chars_list[0] |
|
if first.char_ix is None: |
|
stoken = encoding.tokens[first.token_ix] |
|
return f'<span class="special-token" data-stoken={stoken}></span>' |
|
last = consecutive_chars_list[-1] |
|
start = first.char_ix |
|
end = last.char_ix + 1 |
|
span_text = text[start:end] |
|
css_classes = [] |
|
data_items = {} |
|
if first.token_ix is not None: |
|
css_classes.append('token') |
|
if first.is_multitoken: |
|
css_classes.append('multi-token') |
|
if first.token_ix % 2: |
|
css_classes.append('odd-token') |
|
else: |
|
css_classes.append('even-token') |
|
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: |
|
css_classes.append('special-token') |
|
data_items['stok'] = encoding.tokens[first.token_ix] |
|
else: |
|
css_classes.append('non-token') |
|
css = f'''class="{' '.join(css_classes)}"''' |
|
data = '' |
|
for (key, val) in data_items.items(): |
|
data += f' data-{key}="{val}"' |
|
return f'<span {css} {data} >{span_text}</span>' |
|
|
|
@staticmethod |
|
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: |
|
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) |
|
current_consecutive_chars = [char_states[0]] |
|
prev_anno_ix = char_states[0].anno_ix |
|
spans = [] |
|
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) |
|
cur_anno_ix = char_states[0].anno_ix |
|
if cur_anno_ix is not None: |
|
anno = annotations[cur_anno_ix] |
|
label = anno.label |
|
color = label_colors_dict[label] |
|
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') |
|
for cs in char_states[1:]: |
|
cur_anno_ix = cs.anno_ix |
|
if cur_anno_ix != prev_anno_ix: |
|
spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding)) |
|
current_consecutive_chars = [cs] |
|
if prev_anno_ix is not None: |
|
spans.append('</span>') |
|
if cur_anno_ix is not None: |
|
anno = annotations[cur_anno_ix] |
|
label = anno.label |
|
color = label_colors_dict[label] |
|
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') |
|
prev_anno_ix = cur_anno_ix |
|
if cs.partition_key() == current_consecutive_chars[0].partition_key(): |
|
current_consecutive_chars.append(cs) |
|
else: |
|
spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding)) |
|
current_consecutive_chars = [cs] |
|
spans.append(EncodingVisualizer.consecutive_chars_to_html(current_consecutive_chars, text=text, encoding=encoding)) |
|
res = HTMLBody(spans) |
|
return res |
|
|
|
@staticmethod |
|
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: |
|
annotation_map = [None] * len(text) |
|
for (anno_ix, a) in enumerate(annotations): |
|
for i in range(a.start, a.end): |
|
annotation_map[i] = anno_ix |
|
return annotation_map |
|
|
|
@staticmethod |
|
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: |
|
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) |
|
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] |
|
for (token_ix, token) in enumerate(encoding.tokens): |
|
offsets = encoding.token_to_chars(token_ix) |
|
if offsets is not None: |
|
(start, end) = offsets |
|
for i in range(start, end): |
|
char_states[i].tokens.append(token_ix) |
|
for (char_ix, anno_ix) in enumerate(annotation_map): |
|
char_states[char_ix].anno_ix = anno_ix |
|
return char_states |
|
|
|
def HTMLBody(children: List[str], css_styles=css) -> str: |
|
children_text = ''.join(children) |
|
return f'\n <html>\n <head>\n <style>\n {css_styles}\n </style>\n </head>\n <body>\n <div class="tokenized-text" dir=auto>\n {children_text}\n </div>\n </body>\n </html>\n ' |
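# Illustrative sketch (not part of the original module): rendering an encoding to
# raw HTML outside a notebook. Any trained Tokenizer can be passed in; the
# annotation span and label are hypothetical placeholders.
def _example_visualize(trained_tokenizer: Tokenizer) -> str:
    annotations = [Annotation(start=0, end=5, label='greeting')]
    visualizer = EncodingVisualizer(trained_tokenizer, default_to_notebook=False)
    return visualizer('Hello tokenizers', annotations=annotations)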
|
|
|
# File: tokenizers-main/bindings/python/stub.py |
|
import argparse |
|
import inspect |
|
import os |
|
from pathlib import Path |
|
INDENT = ' ' * 4 |
|
GENERATED_COMMENT = '# Generated content DO NOT EDIT\n' |
|
|
|
def do_indent(text: str, indent: str): |
|
return text.replace('\n', f'\n{indent}') |
|
|
|
def function(obj, indent, text_signature=None): |
|
if text_signature is None: |
|
text_signature = obj.__text_signature__ |
|
string = '' |
|
string += f'{indent}def {obj.__name__}{text_signature}:\n' |
|
indent += INDENT |
|
string += f'{indent}"""\n' |
|
string += f'{indent}{do_indent(obj.__doc__, indent)}\n' |
|
string += f'{indent}"""\n' |
|
string += f'{indent}pass\n' |
|
string += '\n' |
|
string += '\n' |
|
return string |
|
|
|
def member_sort(member): |
|
if inspect.isclass(member): |
|
value = 10 + len(inspect.getmro(member)) |
|
else: |
|
value = 1 |
|
return value |
|
|
|
def fn_predicate(obj): |
|
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj) |
|
if value: |
|
return obj.__doc__ and obj.__text_signature__ and (not obj.__name__.startswith('_')) |
|
if inspect.isgetsetdescriptor(obj): |
|
return obj.__doc__ and (not obj.__name__.startswith('_')) |
|
return False |
|
|
|
def get_module_members(module): |
|
members = [member for (name, member) in inspect.getmembers(module) if not name.startswith('_') and (not inspect.ismodule(member))] |
|
members.sort(key=member_sort) |
|
return members |
|
|
|
def pyi_file(obj, indent=''): |
|
string = '' |
|
if inspect.ismodule(obj): |
|
string += GENERATED_COMMENT |
|
members = get_module_members(obj) |
|
for member in members: |
|
string += pyi_file(member, indent) |
|
elif inspect.isclass(obj): |
|
indent += INDENT |
|
mro = inspect.getmro(obj) |
|
if len(mro) > 2: |
|
inherit = f'({mro[1].__name__})' |
|
else: |
|
inherit = '' |
|
string += f'class {obj.__name__}{inherit}:\n' |
|
body = '' |
|
if obj.__doc__: |
|
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n' |
|
fns = inspect.getmembers(obj, fn_predicate) |
|
if obj.__text_signature__: |
|
body += f'{indent}def __init__{obj.__text_signature__}:\n' |
|
body += f'{indent + INDENT}pass\n' |
|
body += '\n' |
|
for (name, fn) in fns: |
|
body += pyi_file(fn, indent=indent) |
|
if not body: |
|
body += f'{indent}pass\n' |
|
string += body |
|
string += '\n\n' |
|
elif inspect.isbuiltin(obj): |
|
string += f'{indent}@staticmethod\n' |
|
string += function(obj, indent) |
|
elif inspect.ismethoddescriptor(obj): |
|
string += function(obj, indent) |
|
elif inspect.isgetsetdescriptor(obj): |
|
string += f'{indent}@property\n' |
|
string += function(obj, indent, text_signature='(self)') |
|
else: |
|
raise Exception(f'Object {obj} is not supported') |
|
return string |
|
|
|
def py_file(module, origin): |
|
members = get_module_members(module) |
|
string = GENERATED_COMMENT |
|
string += f'from .. import {origin}\n' |
|
string += '\n' |
|
for member in members: |
|
name = member.__name__ |
|
string += f'{name} = {origin}.{name}\n' |
|
return string |
|
import subprocess |
|
from typing import List, Optional, Tuple |
|
|
|
def do_ruff(code, is_pyi: bool): |
|
command = ['ruff', 'format', '--config', 'pyproject.toml', '--silent', '-'] |
|
if is_pyi: |
|
command.extend(['--stdin-filename', 'test.pyi']) |
|
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) |
|
(stdout, _) = process.communicate(input=code.encode('utf-8')) |
|
return stdout.decode('utf-8') |
|
|
|
def write(module, directory, origin, check=False): |
|
submodules = [(name, member) for (name, member) in inspect.getmembers(module) if inspect.ismodule(member)] |
|
filename = os.path.join(directory, '__init__.pyi') |
|
pyi_content = pyi_file(module) |
|
pyi_content = do_ruff(pyi_content, is_pyi=True) |
|
os.makedirs(directory, exist_ok=True) |
|
if check: |
|
with open(filename, 'r') as f: |
|
data = f.read() |
|
assert data == pyi_content, f'The content of {filename} seems outdated, please run `python stub.py`' |
|
else: |
|
with open(filename, 'w') as f: |
|
f.write(pyi_content) |
|
filename = os.path.join(directory, '__init__.py') |
|
py_content = py_file(module, origin) |
|
py_content = do_ruff(py_content, is_pyi=False) |
|
os.makedirs(directory, exist_ok=True) |
|
is_auto = False |
|
if not os.path.exists(filename): |
|
is_auto = True |
|
else: |
|
with open(filename, 'r') as f: |
|
line = f.readline() |
|
if line == GENERATED_COMMENT: |
|
is_auto = True |
|
if is_auto: |
|
if check: |
|
with open(filename, 'r') as f: |
|
data = f.read() |
|
assert data == py_content, f'The content of {filename} seems outdated, please run `python stub.py`' |
|
else: |
|
with open(filename, 'w') as f: |
|
f.write(py_content) |
|
for (name, submodule) in submodules: |
|
write(submodule, os.path.join(directory, name), f'{name}', check=check) |
|
if __name__ == '__main__': |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('--check', action='store_true') |
|
args = parser.parse_args() |
|
import tokenizers |
|
write(tokenizers.tokenizers, 'py_src/tokenizers/', 'tokenizers', check=args.check) |
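# Illustrative note (not part of the original script): the generator is meant to
# be run from the bindings/python directory, e.g.
#
#   python stub.py          # regenerate the stubs and generated __init__ files
#   python stub.py --check  # only verify that the committed files are up to date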
|
|
|
# File: tokenizers-main/docs/source/_ext/entities.py |
|
from collections import defaultdict, abc |
|
from typing import cast |
|
from docutils import nodes |
|
from docutils.parsers.rst import Directive |
|
import sphinx |
|
from sphinx.locale import _ |
|
from sphinx.util.docutils import SphinxDirective |
|
from sphinx.errors import ExtensionError, NoUri
|
from conf import languages as LANGUAGES |
|
logger = sphinx.util.logging.getLogger(__name__) |
|
GLOBALNAME = '$GLOBAL$' |
|
|
|
def update(d, u): |
|
for (k, v) in u.items(): |
|
if isinstance(v, abc.Mapping): |
|
d[k] = update(d.get(k, {}), v) |
|
else: |
|
d[k] = v |
|
return d |
|
|
|
class EntityNode(nodes.General, nodes.Element): |
|
pass |
|
|
|
class EntitiesNode(nodes.General, nodes.Element): |
|
pass |
|
|
|
class AllEntities: |
|
|
|
def __init__(self): |
|
self.entities = defaultdict(dict) |
|
|
|
@classmethod |
|
def install(cls, env): |
|
if not hasattr(env, 'entity_all_entities'): |
|
entities = cls() |
|
env.entity_all_entities = entities |
|
return env.entity_all_entities |
|
|
|
def merge(self, other): |
|
self.entities.update(other.entities) |
|
|
|
def purge(self, docname): |
|
for env_docname in [GLOBALNAME, docname]: |
|
self.entities[env_docname] = dict([(name, entity) for (name, entity) in self.entities[env_docname].items() if entity['docname'] != docname]) |
|
|
|
def _extract_entities(self, nodes): |
|
pass |
|
|
|
def _extract_options(self, nodes): |
|
pass |
|
|
|
def _add_entities(self, entities, language, is_global, docname): |
|
scope = GLOBALNAME if is_global else docname |
|
for entity in entities: |
|
name = f"{language}-{entity['name']}" |
|
content = entity['content'] |
|
if name in self.entities[scope]: |
|
logger.warning(f'''Entity "{name}" has already been defined{(' globally' if is_global else '')}''', location=docname) |
|
self.entities[scope][name] = {'docname': docname, 'content': content} |
|
|
|
def _extract_global(self, nodes): |
|
for node in nodes: |
|
if node.tagname != 'field': |
|
raise Exception(f'Expected a field, found {node.tagname}') |
|
(name, _) = node.children |
|
if name.tagname != 'field_name': |
|
raise Exception(f'Expected a field name here, found {name.tagname}')
|
if str(name.children[0]) == 'global': |
|
return True |
|
|
|
def _extract_entities(self, nodes): |
|
entities = [] |
|
for node in nodes: |
|
if node.tagname != 'definition_list_item': |
|
raise Exception(f'Expected a list item here, found {node.tagname}') |
|
(name_node, content_node) = node.children |
|
if name_node.tagname != 'term': |
|
raise Exception(f'Expected a term here, found {name_node.tagname}') |
|
if content_node.tagname != 'definition': |
|
raise Exception(f'Expected a definition here, found {content_node.tagname}') |
|
name = str(name_node.children[0]) |
|
if len(content_node.children) == 1 and content_node.children[0].tagname == 'paragraph': |
|
content = content_node.children[0].children[0] |
|
else: |
|
content = content_node |
|
entities.append({'name': name, 'content': content}) |
|
return entities |
|
|
|
def extract(self, node, docname): |
|
is_global = False |
|
entities = [] |
|
language = None |
|
for node in node.children: |
|
if language is None and node.tagname != 'paragraph': |
|
raise Exception(f'Expected language name:\n.. entities:: <LANGUAGE>') |
|
elif language is None and node.tagname == 'paragraph': |
|
language = str(node.children[0]) |
|
if language not in LANGUAGES: |
|
raise Exception(f'Unknown language "{language}". Might be missing a newline after language')
|
elif node.tagname == 'field_list': |
|
is_global = self._extract_global(node.children) |
|
elif node.tagname == 'definition_list': |
|
entities.extend(self._extract_entities(node.children)) |
|
else: |
|
raise Exception(f'Expected a list of terms/options, found {node.tagname}') |
|
self._add_entities(entities, language, is_global, docname) |
|
|
|
def resolve_pendings(self, app): |
|
env = app.builder.env |
|
updates = defaultdict(dict) |
|
for env_docname in self.entities.keys(): |
|
for (name, entity) in self.entities[env_docname].items(): |
|
docname = entity['docname'] |
|
node = entity['content'] |
|
for node in node.traverse(sphinx.addnodes.pending_xref): |
|
contnode = cast(nodes.TextElement, node[0].deepcopy()) |
|
newnode = None |
|
typ = node['reftype'] |
|
target = node['reftarget'] |
|
refdoc = node.get('refdoc', docname) |
|
domain = None |
|
try: |
|
if 'refdomain' in node and node['refdomain']: |
|
try: |
|
domain = env.domains[node['refdomain']] |
|
except KeyError as exc: |
|
raise NoUri(target, typ) from exc |
|
newnode = domain.resolve_xref(env, refdoc, app.builder, typ, target, node, contnode) |
|
except NoUri: |
|
newnode = contnode |
|
updates[env_docname][name] = {'docname': docname, 'content': newnode or contnode} |
|
update(self.entities, updates) |
|
|
|
def get(self, language, name, docname): |
|
name = f'{language}-{name}' |
|
if name in self.entities[docname]: |
|
return self.entities[docname][name] |
|
elif name in self.entities[GLOBALNAME]: |
|
return self.entities[GLOBALNAME][name] |
|
else: |
|
return None |
|
|
|
class EntitiesDirective(SphinxDirective): |
|
has_content = True |
|
|
|
def run(self): |
|
content = nodes.definition_list() |
|
self.state.nested_parse(self.content, self.content_offset, content) |
|
try: |
|
entities = AllEntities.install(self.env) |
|
entities.extract(content, self.env.docname) |
|
except Exception as err: |
|
raise self.error(f'Malformed directive "entities": {err}') |
|
return [] |
|
|
|
def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]): |
|
node = EntityNode() |
|
node.entity = text |
|
return ([node], []) |
|
|
|
def process_entity_nodes(app, doctree, docname): |
|
env = app.builder.env |
|
entities = AllEntities.install(env) |
|
entities.resolve_pendings(app) |
|
language = None |
|
try: |
|
language = next((l for l in LANGUAGES if l in app.tags)) |
|
except Exception: |
|
logger.warning(f'No language tag specified, not resolving entities in {docname}') |
|
for node in doctree.traverse(EntityNode): |
|
if language is None: |
|
node.replace_self(nodes.Text(_(node.entity), _(node.entity))) |
|
else: |
|
entity = entities.get(language, node.entity, docname) |
|
if entity is None: |
|
node.replace_self(nodes.Text(_(node.entity), _(node.entity))) |
|
logger.warning(f'Entity "{node.entity}" has not been defined', location=node) |
|
else: |
|
node.replace_self(entity['content']) |
|
|
|
def purge_entities(app, env, docname): |
|
entities = AllEntities.install(env) |
|
entities.purge(docname) |
|
|
|
def merge_entities(app, env, docnames, other): |
|
entities = AllEntities.install(env) |
|
other_entities = AllEntities.install(other) |
|
entities.merge(other_entities) |
|
|
|
def setup(app): |
|
app.add_node(EntityNode) |
|
app.add_node(EntitiesNode) |
|
app.add_directive('entities', EntitiesDirective) |
|
app.add_role('entity', entity_role) |
|
app.connect('doctree-resolved', process_entity_nodes) |
|
app.connect('env-merge-info', merge_entities) |
|
app.connect('env-purge-doc', purge_entities) |
|
return {'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True} |
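# Illustrative sketch (not part of the original extension, exact wording
# hypothetical): in the documentation's reST sources, entities are declared per
# language with the directive and referenced with the role registered above:
#
#   .. entities:: python
#
#       :global:
#
#       Tokenizer
#           The :class:`~tokenizers.Tokenizer` that drives the whole pipeline.
#
#   ...and later, in any page built with the matching language tag:
#
#   A :entity:`Tokenizer` handles the full encoding process.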
|
|
|
# File: tokenizers-main/docs/source/_ext/rust_doc.py |
|
from docutils import nodes |
|
import sphinx |
|
from sphinx.locale import _ |
|
from conf import rust_version |
|
logger = sphinx.util.logging.getLogger(__name__) |
|
|
|
class RustRef: |
|
|
|
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]): |
|
doctype = name.split('_')[1] |
|
parts = text.split('::') |
|
if text.startswith('~'): |
|
title = parts[-1] |
|
parts[0] = parts[0][1:] |
|
else: |
|
title = text
|
link = self.base_link() |
|
if doctype == 'struct': |
|
(l, title) = self.make_struct_link(parts, title) |
|
if doctype == 'func': |
|
(l, title) = self.make_func_link(parts, title) |
|
if doctype == 'meth': |
|
(l, title) = self.make_meth_link(parts, title) |
|
if doctype == 'trait': |
|
(l, title) = self.make_trait_link(parts, title) |
|
link += l |
|
node = nodes.reference(internal=False, refuri=link, text=title) |
|
wrapper = nodes.literal(classes=['xref']) |
|
wrapper += node |
|
return ([wrapper], []) |
|
|
|
def base_link(self): |
|
return f'https://docs.rs/tokenizers/{rust_version}' |
|
|
|
def make_struct_link(self, parts, title): |
|
link = '' |
|
struct_name = parts[-1] |
|
path = parts[:-1] |
|
for p in path: |
|
link += f'/{p}' |
|
link += f'/struct.{struct_name}.html' |
|
return (link, title) |
|
|
|
def make_func_link(self, parts, title): |
|
link = '' |
|
fn_name = parts[-1] |
|
path = parts[:-1] |
|
for p in path: |
|
link += f'/{p}' |
|
link += f'/fn.{fn_name}.html' |
|
return (link, title) |
|
|
|
def make_meth_link(self, parts, title): |
|
meth_name = parts[-1] |
|
if meth_name.endswith('()'): |
|
meth_name = meth_name[:-2] |
|
(link, title) = self.make_struct_link(parts[:-1], title) |
|
link += f'#method.{meth_name}' |
|
if not title.endswith(')'): |
|
title += '()' |
|
return (link, title) |
|
|
|
def make_trait_link(self, parts, title): |
|
link = '' |
|
trait_name = parts[-1] |
|
path = parts[:-1] |
|
for p in path: |
|
link += f'/{p}' |
|
link += f'/trait.{trait_name}.html' |
|
return (link, title) |
|
|
|
def setup(app): |
|
app.add_role('rust_struct', RustRef()) |
|
app.add_role('rust_func', RustRef()) |
|
app.add_role('rust_meth', RustRef()) |
|
app.add_role('rust_trait', RustRef()) |
|
return {'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True} |
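# Illustrative sketch (not part of the original extension): the roles resolve to
# docs.rs URLs; a leading "~" keeps only the last path segment as the link title,
# and method references are rendered with trailing parentheses:
#
#   :rust_struct:`tokenizers::tokenizer::Tokenizer`
#   :rust_meth:`~tokenizers::tokenizer::Tokenizer::encode`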
|
|
|
# File: tokenizers-main/docs/source/_ext/toctree_tags.py |
|
import re |
|
from sphinx.directives.other import TocTree |
|
|
|
class TocTreeTags(TocTree): |
|
hasPat = re.compile('^\\s*:(.+):(.+)$') |
|
|
|
def filter_entries(self, entries): |
|
filtered = [] |
|
for e in entries: |
|
m = self.hasPat.match(e) |
|
if m is not None:
|
if self.env.app.tags.has(m.groups()[0]): |
|
filtered.append(m.groups()[1]) |
|
else: |
|
filtered.append(e) |
|
return filtered |
|
|
|
def run(self): |
|
self.content = self.filter_entries(self.content) |
|
return super().run() |
|
|
|
def setup(app): |
|
app.add_directive('toctree-tags', TocTreeTags) |
|
return {'version': '0.1'} |
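# Illustrative sketch (not part of the original extension): toctree entries
# prefixed with ``:tag:`` are kept only when that tag is active for the build:
#
#   .. toctree-tags::
#
#       quicktour
#       :python:api/reference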
|
|
|
# File: tokenizers-main/docs/source/conf.py |
|
import os |
|
import sys |
|
sys.path.insert(0, os.path.abspath('./_ext')) |
|
sys.path.insert(0, os.path.abspath('.')) |
|
project = 'tokenizers' |
|
copyright = '2020, huggingface' |
|
author = 'huggingface' |
|
release = '' |
|
languages = ['node', 'rust', 'python'] |
|
rust_version = 'latest' |
|
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'entities', 'rust_doc', 'toctree_tags'] |
|
templates_path = ['_templates'] |
|
exclude_patterns = [] |
|
html_theme = 'sphinx_rtd_theme' |
|
html_theme_options = {'analytics_id': 'UA-83738774-2'} |
|
html_static_path = ['_static'] |
|
|
|
def setup(app): |
|
for language in languages: |
|
if not tags.has(language): |
|
exclude_patterns.append(f'tutorials/{language}/*') |
|
app.add_css_file('css/huggingface.css') |
|
app.add_css_file('css/code-snippets.css') |
|
app.add_js_file('js/custom.js') |
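# Illustrative note (not part of the original configuration, paths hypothetical):
# a language-specific build selects one of the `languages` tags on the command
# line, e.g.
#
#   sphinx-build -t python -b html docs/source docs/build/python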
|
|
|
|