Refactor metaspace (#1476)

* version = "0.15.3-dev-0"

Improve the performance of Metaspace, but also just fix it. (A short sketch of the new API follows the commit metadata below.)

(transformers) ➜  transformers git:(refactor-default-llama) ✗ python ../scripts/gemma-dummy.py
Token indices sequence length is longer than the specified maximum sequence length for this model (14999 > 2048). Running this sequence through the model will result in indexing errors
['<REPR_END>', '▁inform', '<s>', '.', '▁Hey', '<unk>', '.', '▁', '▁', '▁', '▁', '▁', '▁', '▁.']
['▁inform', '<s>', '.', '▁Hey', '<unk>', '.', '▁', '▁', '▁', '▁', '▁', '▁', '▁.']
[0.0006330013275146484, 0.0014591217041015625, 0.015890836715698242, 0.18584918975830078, 2.1726326942443848]
(transformers) ➜  transformers git:(refactor-default-llama) ✗ python ../scripts/gemma-dummy.py
Token indices sequence length is longer than the specified maximum sequence length for this model (10000 > 2048). Running this sequence through the model will result in indexing errors
['<REPR_END>', 'in', 'form', '<s>', '.', '▁Hey', '<unk>', '.', '▁▁▁▁▁▁', '▁.']
['in', 'form', '<s>', '.', '▁Hey', '<unk>', '.', '▁▁▁▁▁▁', '▁.']
[0.0008409023284912109, 0.0008909702301025391, 0.00882411003112793, 0.10214710235595703, 1.187899112701416]

* well what do we have

* nit

* be BC with non legacy

* unrelated change for clippy

* fix test

* splitting is a must for word_ids

* fmt and lint

* Fixing everything (hopefully better).

* Fixing node.

* Including yarn.lock

* Lint.

* Stubs.

* revert to use split

* fix merge issues

* fix tests

* finish fixing tests

* ruff

---------

Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
Author: Arthur
Date: 2024-03-30 10:27:24 +01:00 (committed by GitHub)
Commit: 09069717e9 (parent 6153126b22)
21 changed files with 1672 additions and 1515 deletions
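The two timing runs in the description suggest the speedup (the second run's numbers are lower). For context, here is a minimal sketch of the API this refactor lands: `prepend_scheme` replaces the old boolean `add_prefix_space`, and a new `split` flag controls whether pieces are cut on the replacement character. Names come from the test diffs below; the printed output is illustrative.

    # Hedged sketch of the refactored Metaspace pre-tokenizer
    # (assumes the tokenizers Python bindings at or after this commit).
    from tokenizers.pre_tokenizers import Metaspace

    # "always": prepend the replacement char to the sequence (old add_prefix_space=True)
    # "never":  never prepend it (old add_prefix_space=False)
    # "first":  prepend only to the first piece
    pretok = Metaspace(replacement="▁", prepend_scheme="always", split=True)
    print(pretok.pre_tokenize_str("Hey friend!"))
    # e.g. [('▁Hey', (0, 3)), ('▁friend!', (3, 11))]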


@@ -126,7 +126,7 @@ class TestMetaspace:
         assert Metaspace(replacement="-") is not None
         with pytest.raises(ValueError, match="expected a string of length 1"):
             Metaspace(replacement="")
-        assert Metaspace(add_prefix_space=True) is not None
+        assert Metaspace(prepend_scheme="always") is not None
         assert isinstance(Metaspace(), Decoder)
         assert isinstance(Metaspace(), Metaspace)
         assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)
@@ -134,20 +134,20 @@ class TestMetaspace:
     def test_decoding(self):
         decoder = Metaspace()
         assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John"
-        decoder = Metaspace(replacement="-", add_prefix_space=False)
+        decoder = Metaspace(replacement="-", prepend_scheme="never")
         assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John"

     def test_can_modify(self):
-        decoder = Metaspace(replacement="*", add_prefix_space=False)
+        decoder = Metaspace(replacement="*", prepend_scheme="never")
         assert decoder.replacement == "*"
-        assert decoder.add_prefix_space == False
+        assert decoder.prepend_scheme == "never"

         # Modify these
         decoder.replacement = "&"
         assert decoder.replacement == "&"
-        decoder.add_prefix_space = True
-        assert decoder.add_prefix_space == True
+        decoder.prepend_scheme = "first"
+        assert decoder.prepend_scheme == "first"


 class TestBPEDecoder:
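Mirroring the assertions in the hunk above, a hedged sketch of how the updated decoder behaves under the new attribute:

    from tokenizers.decoders import Metaspace

    # prepend_scheme="never" keeps the leading replacement char, so decoding
    # yields a leading space, exactly as the test asserts.
    decoder = Metaspace(replacement="-", prepend_scheme="never")
    assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John"

    # The default scheme ("always") strips the prefix back off on decode.
    assert Metaspace().decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John"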


@@ -94,24 +94,27 @@ class TestMetaspace:
         assert Metaspace(replacement="-") is not None
         with pytest.raises(ValueError, match="expected a string of length 1"):
             Metaspace(replacement="")
-        assert Metaspace(add_prefix_space=True) is not None
+        assert Metaspace(prepend_scheme="always") is not None
         assert isinstance(Metaspace(), PreTokenizer)
         assert isinstance(Metaspace(), Metaspace)
         assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)

     def test_can_modify(self):
-        pretok = Metaspace(replacement="$", add_prefix_space=False)
+        pretok = Metaspace(replacement="$", prepend_scheme="never")
         assert pretok.replacement == "$"
-        assert pretok.add_prefix_space == False
+        assert pretok.prepend_scheme == "never"
+        assert pretok.split == True

         # Modify these
         pretok.replacement = "%"
         assert pretok.replacement == "%"
-        pretok.add_prefix_space = True
-        assert pretok.add_prefix_space == True
+        pretok.prepend_scheme = "never"
+        assert pretok.prepend_scheme == "never"
+        pretok.prepend_scheme = "first"
+        assert pretok.prepend_scheme == "first"
+        pretok.split = True
+        assert pretok.split == True


 class TestCharDelimiterSplit:
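As the test above shows, both `prepend_scheme` and `split` are plain mutable attributes on the pre-tokenizer. A short sketch (the exact output shape with `split=False` is an assumption, hedged in the comments):

    from tokenizers.pre_tokenizers import Metaspace

    pretok = Metaspace(replacement="$", prepend_scheme="never")
    pretok.prepend_scheme = "first"  # switch the scheme after construction
    pretok.split = False  # with split off, pieces are no longer cut on "$"
    print(pretok.pre_tokenize_str("Hey you"))
    # spaces are replaced by "$"; how the result is pieced depends on `split`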


@@ -487,3 +487,51 @@ class TestTokenizer:
         tokenizer.add_tokens(["of_text>"])
         output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False)
         assert output.tokens == ["▁Hey", "▁there", "<", "end", "_", "of_text>", "▁dear", "<eot>", "▁friend", "!"]
+
+    def test_splitting(self):
+        tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-new-metaspace")
+        tokenizer.pre_tokenizer.split = False
+        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)])
+        assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).tokens == [
+            "<REPR_END>",
+            "in",
+            "form",
+            "<s>",
+            ".",
+            "▁Hey",
+            ".",
+            "▁▁▁▁▁▁",
+            "▁.",
+        ]
+        assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).ids == [
+            32000,
+            262,
+            689,
+            1,
+            29889,
+            18637,
+            29889,
+            539,
+            869,
+        ]
+        assert tokenizer.encode("inform<s>. Hey. .").tokens == [
+            "<s>",
+            "▁inform",
+            "<s>",
+            ".",
+            "▁Hey",
+            ".",
+            "▁▁▁▁▁▁",
+            "▁.",
+        ]
+        assert tokenizer.encode("inform<s>. Hey. .", add_special_tokens=False).tokens == [
+            "▁inform",
+            "<s>",
+            ".",
+            "▁Hey",
+            ".",
+            "▁▁▁▁▁▁",
+            "▁.",
+        ]
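To reproduce the new splitting behavior outside the test suite, something like the following should work (the checkpoint name is taken from the test above; downloading it from the Hub is assumed to succeed):

    from tokenizers import Tokenizer, AddedToken

    tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-new-metaspace")
    tokenizer.pre_tokenizer.split = False  # keep the run of spaces as one "▁▁▁▁▁▁" piece
    tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)])
    enc = tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False)
    print(enc.tokens)
    # ['<REPR_END>', 'in', 'form', '<s>', '.', '▁Hey', '.', '▁▁▁▁▁▁', '▁.']
    # Note "inform" splits as "in" + "form": the lstrip/rstrip added token eats
    # the surrounding space, so no "▁" prefix is prepended after <REPR_END>.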