mirror of https://github.com/mii443/tokenizers.git (synced 2025-12-03 11:18:29 +00:00)
Finish exposing the UnicodeScripts PreTokenizer
@@ -10,3 +10,4 @@ BertPreTokenizer = pre_tokenizers.BertPreTokenizer
 Metaspace = pre_tokenizers.Metaspace
 CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
 Digits = pre_tokenizers.Digits
+UnicodeScripts = pre_tokenizers.UnicodeScripts
@@ -148,3 +148,16 @@ class Digits(PreTokenizer):
 
         """
         pass
+
+class UnicodeScripts(PreTokenizer):
+    """UnicodeScripts PreTokenizer
+
+    This pre-tokenizer splits on characters that belong to different language families.
+    It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
+    Actually, Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
+    This mimics the SentencePiece Unigram implementation.
+    """
+
+    def __init__(self) -> None:
+        """ Instantiate a new UnicodeScripts """
+        pass
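For orientation, a minimal usage sketch of the class this stub documents (not part of the commit). It assumes a tokenizers build where PreTokenizer.pre_tokenize_str is available; the splits and offsets shown in the comment are only indicative.

from tokenizers.pre_tokenizers import UnicodeScripts

pre_tok = UnicodeScripts()

# Splits wherever the Unicode script changes, e.g. Latin -> Han.
splits = pre_tok.pre_tokenize_str("Hello 你好 world")
print(splits)
# Roughly: [('Hello ', (0, 6)), ('你好', (6, 8)), (' world', (8, 14))]
# (the exact grouping of spaces and the offsets may differ)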
@@ -72,6 +72,7 @@ fn pre_tokenizers(_py: Python, m: &PyModule) -> PyResult<()> {
     m.add_class::<pre_tokenizers::PyPunctuation>()?;
     m.add_class::<pre_tokenizers::PySequence>()?;
     m.add_class::<pre_tokenizers::PyDigits>()?;
+    m.add_class::<pre_tokenizers::PyUnicodeScripts>()?;
     Ok(())
 }
 
@@ -12,6 +12,7 @@ use tk::pre_tokenizers::delimiter::CharDelimiterSplit;
 use tk::pre_tokenizers::digits::Digits;
 use tk::pre_tokenizers::metaspace::Metaspace;
 use tk::pre_tokenizers::punctuation::Punctuation;
+use tk::pre_tokenizers::unicode_scripts::UnicodeScripts;
 use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
 use tk::pre_tokenizers::PreTokenizerWrapper;
 use tk::tokenizer::Offsets;
@@ -70,6 +71,9 @@ impl PyPreTokenizer {
                 Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py)
             }
             PreTokenizerWrapper::Digits(_) => Py::new(py, (PyDigits {}, base))?.into_py(py),
+            PreTokenizerWrapper::UnicodeScripts(_) => {
+                Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py)
+            }
         },
     },
 })
@@ -297,6 +301,16 @@ impl PyDigits {
     }
 }
 
+#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name=UnicodeScripts)]
+pub struct PyUnicodeScripts {}
+#[pymethods]
+impl PyUnicodeScripts {
+    #[new]
+    fn new() -> PyResult<(Self, PyPreTokenizer)> {
+        Ok((PyUnicodeScripts {}, UnicodeScripts::new().into()))
+    }
+}
+
 #[derive(Clone)]
 pub(crate) struct CustomPreTokenizer {
     inner: PyObject,
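Taken together, the module registration and the new PreTokenizerWrapper arm let a UnicodeScripts pre-tokenizer attached to a tokenizer come back to Python as the right subtype rather than the bare base class. A rough sketch of that behaviour (not part of the diff; the empty Unigram model is only a placeholder):

import pickle

from tokenizers import Tokenizer
from tokenizers.models import Unigram
from tokenizers.pre_tokenizers import UnicodeScripts

tokenizer = Tokenizer(Unigram())            # placeholder model
tokenizer.pre_tokenizer = UnicodeScripts()  # stored internally as the Rust wrapper

# Reading the property (or unpickling it) goes through the match arm added above,
# so the object comes back as a UnicodeScripts instance.
restored = pickle.loads(pickle.dumps(tokenizer.pre_tokenizer))
assert isinstance(restored, UnicodeScripts)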
@@ -12,6 +12,7 @@ from tokenizers.pre_tokenizers import (
     Punctuation,
     Sequence,
     Digits,
+    UnicodeScripts,
 )
 
 
@@ -121,6 +122,14 @@ class TestDigits:
         assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits)
 
 
+class TestUnicodeScripts:
+    def test_instantiate(self):
+        assert UnicodeScripts() is not None
+        assert isinstance(UnicodeScripts(), PreTokenizer)
+        assert isinstance(UnicodeScripts(), UnicodeScripts)
+        assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts)
+
+
 class TestCustomPreTokenizer:
     class BadCustomPretok:
         def pre_tokenize(self, pretok, wrong):
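As a follow-up to the instantiation and pickling checks, a behaviour-oriented sketch of combining the newly exposed pre-tokenizer with others already available in the bindings (not part of the commit; the sample sentence and the grouping in the comment are illustrative only):

from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import UnicodeScripts, Whitespace

# Split on script boundaries first, then on whitespace within each script run.
combined = pre_tokenizers.Sequence([UnicodeScripts(), Whitespace()])
pieces = combined.pre_tokenize_str("Wikipedia は百科事典です")
print([text for text, offsets in pieces])
# Something like: ['Wikipedia', 'は百科事典です'] (exact splits depend on the script table)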