Finish exposing the UnicodeScripts PreTokenizer
@@ -10,3 +10,4 @@ BertPreTokenizer = pre_tokenizers.BertPreTokenizer
 Metaspace = pre_tokenizers.Metaspace
 CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
 Digits = pre_tokenizers.Digits
+UnicodeScripts = pre_tokenizers.UnicodeScripts
@@ -148,3 +148,16 @@ class Digits(PreTokenizer):
 
     """
     pass
+
+class UnicodeScripts(PreTokenizer):
+    """UnicodeScripts PreTokenizer
+
+    This pre-tokenizer splits on characters that belong to different language families.
+    It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
+    In practice, Hiragana and Katakana are fused with Han, and 0x30FC is treated as Han as well.
+    This mimics the SentencePiece Unigram implementation.
+    """
+
+    def __init__(self) -> None:
+        """ Instantiate a new UnicodeScripts """
+        pass
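
For context, a minimal usage sketch of the binding this commit exposes. It is not part of the diff; it assumes the tokenizers Python package is installed and that pre-tokenizer objects expose the pre_tokenize_str helper, which returns (piece, offsets) pairs in recent releases:

    from tokenizers.pre_tokenizers import UnicodeScripts

    # UnicodeScripts takes no arguments, per the stub above.
    pre_tokenizer = UnicodeScripts()

    # Mixed Latin / Han / Hiragana input: splits should fall where the Unicode
    # script changes (with Hiragana and Katakana fused into Han, as the docstring notes).
    # The result is a list of (substring, (start, end)) tuples.
    print(pre_tokenizer.pre_tokenize_str("Hello world 你好こんにちは"))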