Mirror of https://github.com/mii443/tokenizers.git
Formatting after rebase.
@@ -8,8 +8,7 @@ TextInputSequence = str
 PreTokenizedInputSequence = Union[List[str], Tuple[str]]
 TextEncodeInput = Union[TextInputSequence, Tuple[TextInputSequence, TextInputSequence]]
 PreTokenizedEncodeInput = Union[
-    PreTokenizedInputSequence,
-    Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
+    PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
 ]

 InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
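This hunk is pure reformatting: the multi-line Union is collapsed onto a single line, with no behavioral change. For context, these aliases describe the input shapes a tokenizer's encode methods accept. A minimal sketch of values satisfying each alias (the variable names and example strings are illustrative, not from the repository):

from typing import List, Tuple, Union

TextInputSequence = str
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
TextEncodeInput = Union[TextInputSequence, Tuple[TextInputSequence, TextInputSequence]]
PreTokenizedEncodeInput = Union[
    PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
]

# A single sentence is a valid TextEncodeInput ...
text_input: TextEncodeInput = "Hello, world!"
# ... and so is a sentence pair (e.g. for sequence-pair tasks).
pair_input: TextEncodeInput = ("Hello, world!", "Bonjour le monde !")
# Words that have already been split are a valid PreTokenizedEncodeInput.
pre_tokenized_input: PreTokenizedEncodeInput = ["Hello", ",", "world", "!"]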
@@ -13,10 +13,7 @@ class SentencePieceUnigramTokenizer(BaseTokenizer):
     """

     def __init__(
-        self,
-        vocab: Optional[str] = None,
-        replacement: str = "▁",
-        add_prefix_space: bool = True,
+        self, vocab: Optional[str] = None, replacement: str = "▁", add_prefix_space: bool = True,
     ):
         if vocab is not None:
             tokenizer = Tokenizer(Unigram(vocab))
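The second hunk likewise only joins the __init__ parameters onto one line; defaults and behavior are unchanged. A hedged usage sketch of this version of the class, assuming SentencePieceUnigramTokenizer is importable from the tokenizers package as in this repository, and with "unigram.json" standing in as a hypothetical value for the vocab parameter (typed Optional[str] in this version):

from tokenizers import SentencePieceUnigramTokenizer

# With vocab=None (the default in the diff) an untrained Unigram model is
# built; otherwise the class wraps Tokenizer(Unigram(vocab)) as shown above.
tokenizer = SentencePieceUnigramTokenizer(
    replacement="▁",        # default from the signature in the diff
    add_prefix_space=True,  # default from the signature in the diff
)

# Hypothetical vocab path, illustrating the vocab is not None branch.
tokenizer_from_vocab = SentencePieceUnigramTokenizer(vocab="unigram.json")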