Python - Update tests for new encode
@@ -7,14 +7,14 @@ class TestBertWordPieceBPE:
         tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
 
         # Encode with special tokens by default
-        output = tokenizer.encode("My name is John", "pair")
+        output = tokenizer.encode(("My name is John", "pair"))
         assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
         assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
         assert output.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 15), (0, 0), (0, 4), (0, 0)]
         assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
 
         # Can encode without the special tokens
-        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
+        output = tokenizer.encode(("My name is John", "pair"), add_special_tokens=False)
         assert output.ids == [2026, 2171, 2003, 2198, 3940]
         assert output.tokens == ["my", "name", "is", "john", "pair"]
         assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
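For readers tracking the API change itself rather than the test updates, here is a minimal sketch of the new calling convention the tests exercise: a sentence pair is now passed to encode() as a single tuple instead of two positional arguments. This sketch is not part of the commit; the vocab path is a placeholder for the bert_files["vocab"] pytest fixture used above.

    from tokenizers import BertWordPieceTokenizer

    # Placeholder path standing in for the bert_files["vocab"] fixture.
    tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt")

    # Old call style, removed by this commit:
    #   output = tokenizer.encode("My name is John", "pair")

    # New call style: the pair travels as one tuple argument.
    output = tokenizer.encode(("My name is John", "pair"))
    print(output.tokens)
    # ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]

    # Special tokens can still be disabled via the keyword argument.
    output = tokenizer.encode(("My name is John", "pair"), add_special_tokens=False)
    print(output.tokens)
    # ["my", "name", "is", "john", "pair"]

Note how the assertions above reflect this: special tokens carry (0, 0) offsets because they map to no span of the input, and type_ids switch from 0 to 1 at the second member of the pair.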