Mirror of https://github.com/mii443/tokenizers.git (synced 2025-12-16 17:18:43 +00:00)
Temp work to make the APIs uniform (build from memory by default).
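
Across all three test files below, the change is the same: each implementation class gains a from_file / from_files classmethod for loading vocabularies from disk, while the bare constructor is meant for building from in-memory data ("build from memory by default"). A minimal sketch of the two entry points, assuming the constructor accepts an in-memory vocab dict (that argument and the file paths here are illustrative placeholders, not taken from this diff):

    from tokenizers import BertWordPieceTokenizer, ByteLevelBPETokenizer

    # New classmethods, as exercised by the updated tests: build from files on disk.
    bert = BertWordPieceTokenizer.from_file("vocab.txt")                # placeholder path
    bpe = ByteLevelBPETokenizer.from_files("vocab.json", "merges.txt")  # placeholder paths

    # "Build from memory by default": the bare constructor presumably accepts
    # in-memory data rather than file paths (assumption based on the commit
    # message; this part is not shown in the diff).
    vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "my": 3, "name": 4}
    bert_in_memory = BertWordPieceTokenizer(vocab)
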
@@ -1,16 +1,36 @@
 import pytest

 from ..utils import data_dir, bert_files, multiprocessing_with_parallelism
 from tokenizers import BertWordPieceTokenizer


 class TestBertWordPieceBPE:
     def test_basic_encode(self, bert_files):
-        tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
+        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])

         # Encode with special tokens by default
         output = tokenizer.encode("My name is John", "pair")
         assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
-        assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
-        assert output.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 15), (0, 0), (0, 4), (0, 0)]
+        assert output.tokens == [
+            "[CLS]",
+            "my",
+            "name",
+            "is",
+            "john",
+            "[SEP]",
+            "pair",
+            "[SEP]",
+        ]
+        assert output.offsets == [
+            (0, 0),
+            (0, 2),
+            (3, 7),
+            (8, 10),
+            (11, 15),
+            (0, 0),
+            (0, 4),
+            (0, 0),
+        ]
         assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]

         # Can encode without the special tokens
@@ -21,6 +41,6 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 1]

     def test_multiprocessing_with_parallelism(self, bert_files):
-        tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
+        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)

@@ -1,10 +1,14 @@
 import pytest

 from ..utils import data_dir, roberta_files, multiprocessing_with_parallelism
 from tokenizers import ByteLevelBPETokenizer


 class TestByteLevelBPE:
     def test_basic_encode(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer(roberta_files["vocab"], roberta_files["merges"])
+        tokenizer = ByteLevelBPETokenizer.from_files(
+            roberta_files["vocab"], roberta_files["merges"]
+        )
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")

         assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
@@ -32,7 +36,7 @@ class TestByteLevelBPE:
         ]

     def test_add_prefix_space(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer(
+        tokenizer = ByteLevelBPETokenizer.from_files(
             roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
         )
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
@@ -62,8 +66,8 @@ class TestByteLevelBPE:
         ]

     def test_lowerspace(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer(
-            roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True
+        tokenizer = ByteLevelBPETokenizer.from_files(
+            roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True,
         )
         output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")

@@ -81,6 +85,8 @@ class TestByteLevelBPE:
         ]

     def test_multiprocessing_with_parallelism(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer(roberta_files["vocab"], roberta_files["merges"])
+        tokenizer = ByteLevelBPETokenizer.from_files(
+            roberta_files["vocab"], roberta_files["merges"]
+        )
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)

@@ -1,10 +1,12 @@
 import pytest

 from ..utils import data_dir, openai_files, multiprocessing_with_parallelism
 from tokenizers import CharBPETokenizer


 class TestBertWordPieceBPE:
     def test_basic_encode(self, openai_files):
-        tokenizer = CharBPETokenizer(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])

         output = tokenizer.encode("My name is John", "pair")
         assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -31,7 +33,9 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]

     def test_lowercase(self, openai_files):
-        tokenizer = CharBPETokenizer(openai_files["vocab"], openai_files["merges"], lowercase=True)
+        tokenizer = CharBPETokenizer.from_files(
+            openai_files["vocab"], openai_files["merges"], lowercase=True
+        )
         output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
         assert output.ids == [547, 1362, 544, 2476, 2688]
         assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"]
@@ -39,11 +43,13 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 1]

     def test_decoding(self, openai_files):
-        tokenizer = CharBPETokenizer(openai_files["vocab"], openai_files["merges"], lowercase=True)
+        tokenizer = CharBPETokenizer.from_files(
+            openai_files["vocab"], openai_files["merges"], lowercase=True
+        )
         decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
         assert decoded == "my name is john"

     def test_multiprocessing_with_parallelism(self, openai_files):
-        tokenizer = CharBPETokenizer(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
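
The multiprocessing_with_parallelism helper imported from ..utils is not part of this diff. As a rough, hypothetical sketch only (not the repository's actual implementation): such a check typically toggles the TOKENIZERS_PARALLELISM environment variable, uses the tokenizer once in the parent process, then encodes again in a forked child and observes whether the child finishes.

    import os
    from multiprocessing import Process

    def run_parallelism_check(tokenizer, enabled):
        # TOKENIZERS_PARALLELISM is the library's documented switch for Rust-side parallelism.
        os.environ["TOKENIZERS_PARALLELISM"] = "true" if enabled else "false"

        # Use the tokenizer in the parent first so its internal state gets initialized.
        tokenizer.encode("Hi")

        def worker():
            # Encoding again inside a forked child is where a deadlock would show up.
            tokenizer.encode("Hi there")

        child = Process(target=worker)  # assumes the "fork" start method (Linux default)
        child.start()
        child.join(timeout=2)
        finished = not child.is_alive()
        if not finished:
            child.terminate()
        return finished  # the real helper presumably asserts on an outcome like this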