* version = "0.15.3-dev-0” Improve performances of meta space, but also just fix it. (transformers) ➜ transformers git:(refactor-default-llama) ✗ python ../scripts/gemma-dummy.py Token indices sequence length is longer than the specified maximum sequence length for this model (14999 > 2048). Running this sequence through the model will result in indexing errors ['<REPR_END>', '▁inform', '<s>', '.', '▁Hey', '<unk>', '.', '▁', '▁', '▁', '▁', '▁', '▁', '▁.'] ['▁inform', '<s>', '.', '▁Hey', '<unk>', '.', '▁', '▁', '▁', '▁', '▁', '▁', '▁.'] [0.0006330013275146484, 0.0014591217041015625, 0.015890836715698242, 0.18584918975830078, 2.1726326942443848] (transformers) ➜ transformers git:(refactor-default-llama) ✗ python ../scripts/gemma-dummy.py Token indices sequence length is longer than the specified maximum sequence length for this model (10000 > 2048). Running this sequence through the model will result in indexing errors ['<REPR_END>', 'in', 'form', '<s>', '.', '▁Hey', '<unk>', '.', '▁▁▁▁▁▁', '▁.'] ['in', 'form', '<s>', '.', '▁Hey', '<unk>', '.', '▁▁▁▁▁▁', '▁.'] [0.0008409023284912109, 0.0008909702301025391, 0.00882411003112793, 0.10214710235595703, 1.187899112701416] * well what do we have * nit * be BC with non legacy * unrelated change for clippy * fix test * splitting is a must for word_ids * fmt and lint * Fixing everything (hopefully better). * Fixing node. * Including yarn.lock * Lint. * Stubs. * revert to use split * fix merge issues * fix tests * finish fixing tests * ruff --------- Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
from typing import Dict, Iterator, List, Optional, Tuple, Union

from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC

from .base_tokenizer import BaseTokenizer


class SentencePieceBPETokenizer(BaseTokenizer):
    """SentencePiece BPE Tokenizer

    Represents the BPE algorithm, with the pre-tokenization used by SentencePiece
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
        unk_token: Union[str, AddedToken] = "<unk>",
        replacement: str = "▁",
        add_prefix_space: bool = True,
        dropout: Optional[float] = None,
        fuse_unk: Optional[bool] = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
        else:
            tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))

        # Register the unk token as a special token if it is already part of the vocabulary
        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])

        tokenizer.normalizer = NFKC()
        # Metaspace reproduces SentencePiece pre-tokenization: spaces become `replacement` (▁),
        # and a prefix space is optionally prepended, controlled by `add_prefix_space`
        prepend_scheme = "always" if add_prefix_space else "never"
        tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)

        parameters = {
            "model": "SentencePieceBPE",
            "unk_token": unk_token,
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
            "dropout": dropout,
        }

        super().__init__(tokenizer, parameters)
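
    # Minimal in-memory sketch (illustrative only; the toy vocab/merges below are made up
    # to show the expected argument shapes, not a realistic model):
    #
    #   vocab = {"<unk>": 0, "▁": 1, "h": 2, "i": 3, "▁h": 4, "▁hi": 5}
    #   merges = [("▁", "h"), ("▁h", "i")]
    #   tok = SentencePieceBPETokenizer(vocab, merges, unk_token="<unk>")
    #   tok.encode("hi").tokens  # ['▁hi'] with this toy vocab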

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return SentencePieceBPETokenizer(vocab, merges, **kwargs)
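
    # Illustrative sketch (hypothetical file names): load a previously saved vocab/merges
    # pair; extra keyword arguments are forwarded to the constructor.
    #
    #   tok = SentencePieceBPETokenizer.from_file("vocab.json", "merges.txt", dropout=0.1)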

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        show_progress: bool = True,
    ):
        """Train the model using the given files"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
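
    # Illustrative sketch (hypothetical corpus paths): train a fresh tokenizer from
    # plain-text files, then persist it with `save`, which BaseTokenizer forwards to
    # the underlying Tokenizer.
    #
    #   tok = SentencePieceBPETokenizer()
    #   tok.train(["corpus_a.txt", "corpus_b.txt"], vocab_size=8000, special_tokens=["<unk>"])
    #   tok.save("sp_bpe.json")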

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
        limit_alphabet: int = 1000,
        initial_alphabet: List[str] = [],
        show_progress: bool = True,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
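

# Illustrative end-to-end sketch (assumed in-memory corpus): `train_from_iterator`
# avoids writing the corpus to disk; `length` is optional and only used for progress
# reporting. Commented out so the module stays import-only.
#
#   texts = ["hello world", "hello tokenizers"]
#   tok = SentencePieceBPETokenizer()
#   tok.train_from_iterator(texts, vocab_size=500, length=len(texts))
#   tok.encode("hello world").tokens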