import pytest

from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer

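
# NOTE: "▁" in the expected tokens below is SentencePiece's word-boundary marker (U+2581), not an underscore.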
class TestSentencePieceBPE:
    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁sentence"]

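
# On this tiny corpus the Unigram model keeps mostly single characters and short pieces,
# which is why the expected tokens below are so fragmented.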
class TestSentencePieceUnigram:
    def test_train(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

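        # No unk_token was configured, so encoding text with unseen characters must fail.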
        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_with_unk_token(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
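        # "<unk>" is the first special token, so unknown pieces are expected to map to id 0.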
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

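        # As in test_train, no unk_token was set, so unseen characters should raise.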
        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_from_iterator_with_unk_token(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(
            text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
        )
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]