from_files -> from_file everywhere

- read_files -> read_file
- Pure-Rust from_file impl in the Python bindings
- Fix some type annotations in the Python bindings
- Add {BPE,WordLevel,WordPiece}.from_file tests
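For orientation, the renamed entry points look like this from the Python side. This is a minimal sketch, not part of the commit; the vocab/merges paths are hypothetical placeholders for real files on disk:

    from tokenizers import ByteLevelBPETokenizer
    from tokenizers.models import BPE, WordPiece, WordLevel

    # read_file parses the files; from_file parses and constructs the model in one call.
    vocab, merges = BPE.read_file("vocab.json", "merges.txt")
    bpe = BPE.from_file("vocab.json", "merges.txt")
    wordpiece = WordPiece.from_file("vocab.txt")
    wordlevel = WordLevel.from_file("vocab.json")

    # The high-level tokenizer wrappers follow the same rename.
    tokenizer = ByteLevelBPETokenizer.from_file("vocab.json", "merges.txt")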
@@ -78,8 +78,8 @@ class ByteLevelBPETokenizer(BaseTokenizer):
         super().__init__(tokenizer, parameters)
 
     @staticmethod
-    def from_files(vocab_filename: str, merges_filename: str, **kwargs):
-        vocab, merges = BPE.read_files(vocab_filename, merges_filename)
+    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
+        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
         return ByteLevelBPETokenizer(vocab, merges, **kwargs)
 
     def train(
@@ -95,8 +95,8 @@ class CharBPETokenizer(BaseTokenizer):
         super().__init__(tokenizer, parameters)
 
     @staticmethod
-    def from_files(vocab_filename: str, merges_filename: str, **kwargs):
-        vocab, merges = BPE.read_files(vocab_filename, merges_filename)
+    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
+        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
         return CharBPETokenizer(vocab, merges, **kwargs)
 
     def train(
@@ -48,7 +48,7 @@ class SentencePieceBPETokenizer(BaseTokenizer):
         super().__init__(tokenizer, parameters)
 
     @staticmethod
-    def from_files(vocab_filename: str, merges_filename: str, **kwargs):
+    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
         vocab, merges = BPE.read_files(vocab_filename, merges_filename)
         return SentencePieceBPETokenizer(vocab, merges, **kwargs)
 
@@ -29,7 +29,7 @@ class Model:
 class BPE(Model):
     """BytePairEncoding model class
 
-    Instantiate a BPE Model from the given vocab and merges files.
+    Instantiate a BPE Model from the given vocab and merges.
 
     Args:
         vocab: ('`optional`) Dict[str, int]:
@@ -76,12 +76,19 @@ class BPE(Model):
     ):
         pass
     @staticmethod
-    def read_files(vocab_filename: str, merges_filename: str) -> Tuple[Vocab, Merges]:
+    def read_file(vocab_filename: str, merges_filename: str) -> Tuple[Vocab, Merges]:
         pass
     @staticmethod
-    def from_files(vocab_filename: str, merges_filename: str, **kwargs) -> BPE:
-        vocab, merges = BPE.read_files(vocab_filename, merges_filename)
-        return BPE(vocab, merges, **kwargs)
+    def from_file(vocab_filename: str, merges_filename: str, **kwargs) -> BPE:
+        """
+        Convenient method to initialize a BPE from files.
+        Roughly equivalent to:
+
+        def from_file(vocab_filename, merges_filename, **kwargs):
+            vocab, merges = BPE.read_file(vocab_filename, merges_filename)
+            return BPE(vocab, merges, **kwargs)
+        """
+        pass
 
 class WordPiece(Model):
     """WordPiece model class
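The docstring above pitches from_file as sugar over read_file plus the constructor. Spelled out as standalone Python (a sketch mirroring the docstring; the helper name and file paths are hypothetical):

    from tokenizers.models import BPE

    def bpe_from_file(vocab_filename, merges_filename, **kwargs):
        # What BPE.from_file does, per the docstring: read the files, then construct.
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return BPE(vocab, merges, **kwargs)

    model = bpe_from_file("vocab.json", "merges.txt")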
@@ -107,12 +114,19 @@ class WordPiece(Model):
     ):
         pass
     @staticmethod
-    def read_file(vocab_filename: str) -> Tuple[Vocab]:
+    def read_file(vocab_filename: str) -> Vocab:
         pass
     @staticmethod
-    def from_files(vocab_filename: str, **kwargs) -> WordPiece:
-        vocab = WordPiece.read_files(vocab_filename)
-        return WordPiece(vocab, **kwargs)
+    def from_file(vocab_filename: str, **kwargs) -> WordPiece:
+        """
+        Convenient method to initialize a WordPiece from a file.
+        Roughly equivalent to:
+
+        def from_file(vocab_filename, **kwargs):
+            vocab = WordPiece.read_file(vocab_filename)
+            return WordPiece(vocab, **kwargs)
+        """
+        pass
 
 class WordLevel(Model):
     """
@@ -131,12 +145,19 @@ class WordLevel(Model):
     def __init__(self, vocab: Optional[Union[str, Dict[str, int]]], unk_token: Optional[str]):
         pass
     @staticmethod
-    def read_file(vocab_filename: str) -> Tuple[Vocab]:
+    def read_file(vocab_filename: str) -> Vocab:
         pass
     @staticmethod
-    def from_files(vocab_filename: str, **kwargs) -> WordLevel:
-        vocab = WordLevel.read_files(vocab_filename)
-        return WordLevel(vocab, **kwargs)
+    def from_file(vocab_filename: str, **kwargs) -> WordLevel:
+        """
+        Convenient method to initialize a WordLevel from a file.
+        Roughly equivalent to:
+
+        def from_file(vocab_filename, **kwargs):
+            vocab = WordLevel.read_file(vocab_filename)
+            return WordLevel(vocab, **kwargs)
+        """
+        pass
 
 class Unigram(Model):
     """UnigramEncoding model class
@@ -209,7 +209,7 @@ impl PyBPE {
             (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => {
                 deprecation_warning(
                     "0.9.0",
-                    "BPE.__init__ will not create from files anymore, try `BPE.from_files` instead",
+                    "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead",
                 )?;
                 builder =
                     builder.files(vocab_filename.to_string(), merges_filename.to_string());
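The hunk above keeps the old filename-based __init__ working but points the warning text at the new name. A hedged sketch of the two call styles, assuming deprecation_warning surfaces as a Python DeprecationWarning and that the (hypothetical) files exist locally:

    import warnings
    from tokenizers.models import BPE

    # Old style: filenames straight into __init__, deprecated as of 0.9.0.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_model = BPE("vocab.json", "merges.txt")
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # New style: the explicit from_file constructor, no warning.
    new_model = BPE.from_file("vocab.json", "merges.txt")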
@@ -226,14 +226,35 @@ impl PyBPE {
     }
 
     #[staticmethod]
-    fn read_files(vocab_filename: &str, merges_filename: &str) -> PyResult<(Vocab, Merges)> {
-        BPE::read_files(vocab_filename, merges_filename).map_err(|e| {
+    fn read_file(vocab_filename: &str, merges_filename: &str) -> PyResult<(Vocab, Merges)> {
+        BPE::read_file(vocab_filename, merges_filename).map_err(|e| {
             exceptions::PyValueError::new_err(format!(
                 "Error while reading vocab&merges files: {}",
                 e
             ))
         })
     }
 
+    #[staticmethod]
+    #[args(kwargs = "**")]
+    fn from_file(
+        py: Python,
+        vocab_filename: &str,
+        merges_filename: &str,
+        kwargs: Option<&PyDict>,
+    ) -> PyResult<Py<Self>> {
+        let (vocab, merges) = BPE::read_file(vocab_filename, merges_filename).map_err(|e| {
+            exceptions::PyValueError::new_err(format!("Error while reading BPE files: {}", e))
+        })?;
+        Py::new(
+            py,
+            PyBPE::new(
+                Some(PyVocab::Vocab(vocab)),
+                Some(PyMerges::Merges(merges)),
+                kwargs,
+            )?,
+        )
+    }
 }
 
 /// WordPiece Model
@@ -300,10 +321,19 @@ impl PyWordPiece {
 
     #[staticmethod]
     fn read_file(vocab_filename: &str) -> PyResult<Vocab> {
-        WordPiece::read_files(vocab_filename).map_err(|e| {
+        WordPiece::read_file(vocab_filename).map_err(|e| {
             exceptions::PyValueError::new_err(format!("Error while reading WordPiece file: {}", e))
         })
     }
 
+    #[staticmethod]
+    #[args(kwargs = "**")]
+    fn from_file(py: Python, vocab_filename: &str, kwargs: Option<&PyDict>) -> PyResult<Py<Self>> {
+        let vocab = WordPiece::read_file(vocab_filename).map_err(|e| {
+            exceptions::PyValueError::new_err(format!("Error while reading WordPiece file: {}", e))
+        })?;
+        Py::new(py, PyWordPiece::new(Some(PyVocab::Vocab(vocab)), kwargs)?)
+    }
 }
 
 #[pyclass(extends=PyModel, module = "tokenizers.models", name=WordLevel)]
@@ -344,7 +374,7 @@ impl PyWordLevel {
                     "0.9.0",
                     "WordLevel.__init__ will not create from files anymore, try `WordLevel.from_file` instead",
                 )?;
-                WordLevel::from_files(vocab_filename, unk_token).map_err(|e| {
+                WordLevel::from_file(vocab_filename, unk_token).map_err(|e| {
                     exceptions::PyException::new_err(format!(
                         "Error while loading WordLevel: {}",
                         e
@@ -364,10 +394,19 @@ impl PyWordLevel {
 
     #[staticmethod]
     fn read_file(vocab_filename: &str) -> PyResult<Vocab> {
-        WordLevel::read_files(vocab_filename).map_err(|e| {
+        WordLevel::read_file(vocab_filename).map_err(|e| {
             exceptions::PyValueError::new_err(format!("Error while reading WordLevel file: {}", e))
         })
     }
 
+    #[staticmethod]
+    #[args(kwargs = "**")]
+    fn from_file(py: Python, vocab_filename: &str, kwargs: Option<&PyDict>) -> PyResult<Py<Self>> {
+        let vocab = WordLevel::read_file(vocab_filename).map_err(|e| {
+            exceptions::PyValueError::new_err(format!("Error while reading WordLevel file: {}", e))
+        })?;
+        Py::new(py, PyWordLevel::new(Some(PyVocab::Vocab(vocab)), kwargs)?)
+    }
 }
 
 #[pyclass(extends=PyModel, module = "tokenizers.models", name=Unigram)]
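Note that the Rust from_file bindings forward **kwargs into the model constructors (PyBPE::new, PyWordPiece::new, PyWordLevel::new above), so model options can be set at load time. A sketch under the assumption that the usual constructor kwargs are accepted, with hypothetical files:

    from tokenizers.models import BPE, WordLevel

    # kwargs ride along to the underlying constructor.
    bpe = BPE.from_file("vocab.json", "merges.txt", unk_token="<unk>", dropout=0.1)
    wordlevel = WordLevel.from_file("vocab.json", unk_token="<unk>")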
@@ -14,6 +14,7 @@ class TestBPE:
         vocab = {"a": 0, "b": 1, "ab": 2}
         merges = {(0, 1): (0, 2)}
         assert isinstance(BPE(vocab, merges), Model)
+        assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE)
         with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
             BPE(vocab=vocab)
             BPE(merges=merges)
@@ -42,6 +43,7 @@ class TestWordPiece:
         vocab = {"a": 0, "b": 1, "ab": 2}
         assert isinstance(WordPiece(vocab), Model)
         assert isinstance(WordPiece(vocab), WordPiece)
+        assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece)
         assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece)
 
     # Deprecated calls in 0.9
@@ -59,6 +61,7 @@ class TestWordLevel:
         vocab = {"a": 0, "b": 1, "ab": 2}
         assert isinstance(WordLevel(vocab), Model)
         assert isinstance(WordLevel(vocab), WordLevel)
+        assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel)
 
         # The WordLevel model expects a vocab.json using the same format as roberta
         # so we can just try to load with this file
@@ -6,9 +6,7 @@ from tokenizers import ByteLevelBPETokenizer
 
 class TestByteLevelBPE:
     def test_basic_encode(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
-            roberta_files["vocab"], roberta_files["merges"]
-        )
+        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
 
         assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
@@ -36,7 +34,7 @@ class TestByteLevelBPE:
         ]
 
     def test_add_prefix_space(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
+        tokenizer = ByteLevelBPETokenizer.from_file(
             roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
         )
         output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
@@ -66,7 +64,7 @@ class TestByteLevelBPE:
         ]
 
     def test_lowerspace(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
+        tokenizer = ByteLevelBPETokenizer.from_file(
             roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True,
         )
        output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
@@ -85,8 +83,6 @@ class TestByteLevelBPE:
         ]
 
     def test_multiprocessing_with_parallelism(self, roberta_files):
-        tokenizer = ByteLevelBPETokenizer.from_files(
-            roberta_files["vocab"], roberta_files["merges"]
-        )
+        tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
@@ -6,7 +6,7 @@ from tokenizers import CharBPETokenizer
 
 class TestBertWordPieceBPE:
     def test_basic_encode(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
 
         output = tokenizer.encode("My name is John", "pair")
         assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
@@ -33,7 +33,7 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]
 
     def test_lowercase(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(
+        tokenizer = CharBPETokenizer.from_file(
             openai_files["vocab"], openai_files["merges"], lowercase=True
         )
         output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
@@ -43,13 +43,13 @@ class TestBertWordPieceBPE:
         assert output.type_ids == [0, 0, 0, 0, 1]
 
     def test_decoding(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(
+        tokenizer = CharBPETokenizer.from_file(
             openai_files["vocab"], openai_files["merges"], lowercase=True
         )
         decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
         assert decoded == "my name is john"
 
     def test_multiprocessing_with_parallelism(self, openai_files):
-        tokenizer = CharBPETokenizer.from_files(openai_files["vocab"], openai_files["merges"])
+        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
         multiprocessing_with_parallelism(tokenizer, False)
         multiprocessing_with_parallelism(tokenizer, True)
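The decoding test above exercises the full round trip; the same flow outside pytest looks like this (a sketch with hypothetical vocab/merges files standing in for the openai_files fixtures):

    from tokenizers import CharBPETokenizer

    tokenizer = CharBPETokenizer.from_file("vocab.json", "merges.txt", lowercase=True)
    ids = tokenizer.encode("my name is john").ids
    assert tokenizer.decode(ids) == "my name is john"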