from_files -> from_file everywhere

- read_files -> read_file
- from_file pure rust impl in python bindings
- Fix some typing in python binding
- Added {BPE,WordLevel,WordPiece}.from_file tests.
This commit is contained in:
Nicolas Patry
2020-09-23 18:24:40 +02:00
parent 9672995a56
commit 36832bfa12
18 changed files with 130 additions and 71 deletions

View File

@@ -6,7 +6,7 @@ from tokenizers import CharBPETokenizer
class TestBertWordPieceBPE:
def test_basic_encode(self, openai_files):
- tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+ tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -33,7 +33,7 @@ class TestBertWordPieceBPE:
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]
def test_lowercase(self, openai_files):
- tokenizer = CharBPETokenizer.from_files(
+ tokenizer = CharBPETokenizer.from_file(
openai_files["vocab"], openai_files["merges"], lowercase=True
)
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
@@ -43,13 +43,13 @@ class TestBertWordPieceBPE:
assert output.type_ids == [0, 0, 0, 0, 1]
def test_decoding(self, openai_files):
- tokenizer = CharBPETokenizer.from_files(
+ tokenizer = CharBPETokenizer.from_file(
openai_files["vocab"], openai_files["merges"], lowercase=True
)
decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
assert decoded == "my name is john"
def test_multiprocessing_with_parallelism(self, openai_files):
- tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+ tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)