Python - Add train_from_iterator to implementations
@@ -4,7 +4,7 @@ from ..utils import data_dir, openai_files, multiprocessing_with_parallelism
 from tokenizers import CharBPETokenizer
 
 
-class TestBertWordPieceBPE:
+class TestCharBPETokenizer:
     def test_basic_encode(self, openai_files):
         tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
 
@@ -53,3 +53,11 @@ class TestBertWordPieceBPE:
         tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
+
+    def test_train_from_iterator(self):
+        text = ["A first sentence", "Another sentence", "And a last one"]
+        tokenizer = CharBPETokenizer()
+        tokenizer.train_from_iterator(text, show_progress=False)
+
+        output = tokenizer.encode("A sentence")
+        assert output.tokens == ["A</w>", "sentence</w>"]