Finish exposing the UnicodeScripts PreTokenizer

This commit is contained in:
Anthony MOI
2020-10-20 16:38:28 -04:00
committed by Anthony MOI
parent 25e74b5400
commit a2289d49b4
8 changed files with 53 additions and 4 deletions


@@ -10,3 +10,4 @@ BertPreTokenizer = pre_tokenizers.BertPreTokenizer
Metaspace = pre_tokenizers.Metaspace
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
UnicodeScripts = pre_tokenizers.UnicodeScripts


@@ -148,3 +148,16 @@ class Digits(PreTokenizer):
"""
pass
class UnicodeScripts(PreTokenizer):
"""UnicodeScripts PreTokenizer
This pre-tokenizer splits on characters that belong to different language family
It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
This mimicks SentencePiece Unigram implementation.
"""
def __init__(self) -> None:
""" Instantiate a new UnicodeScripts """
pass
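
For context, a minimal usage sketch (not part of this diff) of the newly exposed pre-tokenizer. It assumes the bindings' pre_tokenize_str helper and an illustrative input string; the exact splits depend on where the script boundaries fall in the input.

from tokenizers.pre_tokenizers import UnicodeScripts

pre_tok = UnicodeScripts()

# Pieces are cut wherever the Unicode script changes, e.g. between a Latin
# run and a Han/kana run (Hiragana and Katakana are fused with Han above).
for piece, offsets in pre_tok.pre_tokenize_str("Hello どこで生れ"):
    print(piece, offsets)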