from_files -> from_file everywhere

- read_files -> read_file
- from_file pure rust impl in python bindings
- Fix some typing in python bindings
- Added {BPE,WordLevel,WordPiece}.from_file tests.
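The last bullet refers to new model-level constructors that are not shown in the diff below. A minimal sketch of how they are meant to be called, assuming their signatures match the released `tokenizers` Python package; the file names and `unk_token` values here are placeholders, not the repo's test data:

    from tokenizers.models import BPE, WordLevel, WordPiece

    # BPE loads from a vocab file plus a merges file.
    bpe = BPE.from_file("vocab.json", "merges.txt")

    # WordLevel and WordPiece each load from a single vocab file.
    word_level = WordLevel.from_file("vocab.json", unk_token="[UNK]")
    word_piece = WordPiece.from_file("vocab.txt", unk_token="[UNK]")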
@@ -6,9 +6,7 @@ from tokenizers import ByteLevelBPETokenizer

 class TestByteLevelBPE:
     def test_basic_encode(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
-            roberta_files["vocab"], roberta_files["merges"]
-        )
+        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")

         assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
@@ -36,7 +34,7 @@ class TestByteLevelBPE:
         ]

     def test_add_prefix_space(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
+        tokenizer = ByteLevelBPETokenizer.from_file(
             roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
         )
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
@@ -66,7 +64,7 @@ class TestByteLevelBPE:
         ]

     def test_lowerspace(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
+        tokenizer = ByteLevelBPETokenizer.from_file(
             roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True,
         )
         output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
@@ -85,8 +83,6 @@ class TestByteLevelBPE:
         ]

     def test_multiprocessing_with_parallelism(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
-            roberta_files["vocab"], roberta_files["merges"]
-        )
+        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
@@ -6,7 +6,7 @@ from tokenizers import CharBPETokenizer

 class TestBertWordPieceBPE:
     def test_basic_encode(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])

         output = tokenizer.encode("My name is John", "pair")
         assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -33,7 +33,7 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]

     def test_lowercase(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(
+        tokenizer = CharBPETokenizer.from_file(
             openai_files["vocab"], openai_files["merges"], lowercase=True
         )
         output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
@@ -43,13 +43,13 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 1]

     def test_decoding(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(
+        tokenizer = CharBPETokenizer.from_file(
             openai_files["vocab"], openai_files["merges"], lowercase=True
         )
         decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
         assert decoded == "my name is john"

     def test_multiprocessing_with_parallelism(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
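The hunks above only touch the high-level tokenizer classes; the `{BPE,WordLevel,WordPiece}.from_file` tests the commit message mentions are not shown. A hypothetical sketch of their shape, reusing the `roberta_files` fixture from the diff; the test names and assertions are illustrative, not the repo's actual test code:

    import pytest
    from tokenizers.models import BPE, Model

    def test_bpe_from_file(roberta_files):
        # from_file reads both files and returns a fully built model.
        model = BPE.from_file(roberta_files["vocab"], roberta_files["merges"])
        assert isinstance(model, Model)

    def test_bpe_from_file_missing(tmp_path):
        # Loading from nonexistent paths should raise rather than fail silently.
        with pytest.raises(Exception):
            BPE.from_file(str(tmp_path / "vocab.json"), str(tmp_path / "merges.txt"))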