mirror of https://github.com/mii443/tokenizers.git, synced 2025-08-23 00:35:35 +00:00
Addressing first pass of comments.
@@ -67,7 +67,10 @@ class TestByteLevelBPE:
 
     def test_lowerspace(self, roberta_files):
         tokenizer = ByteLevelBPETokenizer.from_files(
-            roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True,
+            roberta_files["vocab"],
+            roberta_files["merges"],
+            add_prefix_space=True,
+            lowercase=True,
         )
         output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
 
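The hunk above is purely cosmetic: the from_files arguments move to one per line (black-style formatting) and the call behaves exactly as before. As a reference, here is a minimal standalone sketch of the same call, assuming an older tokenizers release whose implementation classes still expose from_files, with placeholder paths standing in for the roberta_files fixture:

    from tokenizers import ByteLevelBPETokenizer

    # Placeholder paths; the test suite obtains the real files from a fixture.
    tokenizer = ByteLevelBPETokenizer.from_files(
        "roberta-vocab.json",
        "roberta-merges.txt",
        add_prefix_space=True,
        lowercase=True,
    )

    # lowercase=True normalizes the input before BPE runs, so this mixed-case
    # pangram encodes the same way as its all-lowercase form.
    output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
    print(output.tokens)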
@@ -6,7 +6,9 @@ from tokenizers import CharBPETokenizer
 
 class TestBertWordPieceBPE:
     def test_basic_encode(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_files(
+            openai_files["vocab"], openai_files["merges"]
+        )
 
         output = tokenizer.encode("My name is John", "pair")
         assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -50,6 +52,8 @@ class TestBertWordPieceBPE:
         assert decoded == "my name is john"
 
     def test_multiprocessing_with_parallelism(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_files(
+            openai_files["vocab"], openai_files["merges"]
+        )
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
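Both CharBPETokenizer hunks apply the same reformatting to an identical call. For reference, a hedged usage sketch outside the test harness, again with placeholder paths standing in for the openai_files fixture (the second argument to encode supplies a pair sequence, as in the test above):

    from tokenizers import CharBPETokenizer

    # Placeholder paths; the real vocab/merges come from the openai_files fixture.
    tokenizer = CharBPETokenizer.from_files(
        "openai-gpt-vocab.json", "openai-gpt-merges.txt"
    )

    # encode() accepts an optional second sequence for sentence pairs;
    # the ids below are whatever the loaded vocabulary yields.
    output = tokenizer.encode("My name is John", "pair")
    print(output.ids)
    print(output.tokens)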