Fix hashlink ids

Mishig Davaadorj
2022-04-18 12:13:40 +02:00
parent f6ba840e3e
commit 5c97125d22
10 changed files with 48 additions and 48 deletions
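Each hunk below changes an API heading between the plain form (`## AddedToken`) and the form carrying an explicit hashlink id (`## AddedToken[[tokenizers.AddedToken]]`). For context, a minimal sketch of the hf doc-builder mdx conventions involved, assuming its standard `## Heading[[id]]` and `[[autodoc]]` syntax; the anchors named in the comments are illustrative, not taken from the rendered docs:

```mdx
<!-- Without a trailing [[...]], the heading's hashlink id is derived from the heading text -->
## AddedToken

<!-- A trailing [[...]] sets the hashlink id explicitly, so this heading anchors at #tokenizers.AddedToken -->
## AddedToken[[tokenizers.AddedToken]]

<!-- [[autodoc]] expands into the generated API reference for the named object;
     the indented list selects which members to document -->
[[autodoc]] tokenizers.AddedToken
    - content
```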

View File

@@ -2,7 +2,7 @@
<tokenizerslangcontent>
<python>
## AddedToken[[tokenizers.AddedToken]]
## AddedToken
[[autodoc]] tokenizers.AddedToken
    - content

View File

@@ -2,23 +2,23 @@
<tokenizerslangcontent>
<python>
## BPEDecoder[[tokenizers.decoders.BPEDecoder]]
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel[[tokenizers.decoders.ByteLevel]]
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC[[tokenizers.decoders.CTC]]
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace[[tokenizers.decoders.Metaspace]]
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece[[tokenizers.decoders.WordPiece]]
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>

View File

@@ -2,7 +2,7 @@
<tokenizerslangcontent>
<python>
## Encoding[[tokenizers.Encoding]]
## Encoding
[[autodoc]] tokenizers.Encoding
    - all

View File

@@ -2,23 +2,23 @@
<tokenizerslangcontent>
<python>
## BPE[[tokenizers.models.BPE]]
## BPE
[[autodoc]] tokenizers.models.BPE
## Model[[tokenizers.models.Model]]
## Model
[[autodoc]] tokenizers.models.Model
## Unigram[[tokenizers.models.Unigram]]
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel[[tokenizers.models.WordLevel]]
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece[[tokenizers.models.WordPiece]]
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
</python>

View File

@@ -2,55 +2,55 @@
<tokenizerslangcontent>
<python>
## BertNormalizer[[tokenizers.normalizers.BertNormalizer]]
## BertNormalizer
[[autodoc]] tokenizers.normalizers.BertNormalizer
## Lowercase[[tokenizers.normalizers.Lowercase]]
## Lowercase
[[autodoc]] tokenizers.normalizers.Lowercase
## NFC[[tokenizers.normalizers.NFC]]
## NFC
[[autodoc]] tokenizers.normalizers.NFC
## NFD[[tokenizers.normalizers.NFD]]
## NFD
[[autodoc]] tokenizers.normalizers.NFD
## NFKC[[tokenizers.normalizers.NFKC]]
## NFKC
[[autodoc]] tokenizers.normalizers.NFKC
## NFKD[[tokenizers.normalizers.NFKD]]
## NFKD
[[autodoc]] tokenizers.normalizers.NFKD
## Nmt[[tokenizers.normalizers.Nmt]]
## Nmt
[[autodoc]] tokenizers.normalizers.Nmt
## Normalizer[[tokenizers.normalizers.Normalizer]]
## Normalizer
[[autodoc]] tokenizers.normalizers.Normalizer
## Precompiled[[tokenizers.normalizers.Precompiled]]
## Precompiled
[[autodoc]] tokenizers.normalizers.Precompiled
## Replace[[tokenizers.normalizers.Replace]]
## Replace
[[autodoc]] tokenizers.normalizers.Replace
## Sequence[[tokenizers.normalizers.Sequence]]
## Sequence
[[autodoc]] tokenizers.normalizers.Sequence
## Strip[[tokenizers.normalizers.Strip]]
## Strip
[[autodoc]] tokenizers.normalizers.Strip
## StripAccents[[tokenizers.normalizers.StripAccents]]
## StripAccents
[[autodoc]] tokenizers.normalizers.StripAccents
</python>

View File

@@ -2,19 +2,19 @@
<tokenizerslangcontent>
<python>
## BertProcessing[[tokenizers.processors.BertProcessing]]
## BertProcessing
[[autodoc]] tokenizers.processors.BertProcessing
## ByteLevel[[tokenizers.processors.ByteLevel]]
## ByteLevel
[[autodoc]] tokenizers.processors.ByteLevel
## RobertaProcessing[[tokenizers.processors.RobertaProcessing]]
## RobertaProcessing
[[autodoc]] tokenizers.processors.RobertaProcessing
## TemplateProcessing[[tokenizers.processors.TemplateProcessing]]
## TemplateProcessing
[[autodoc]] tokenizers.processors.TemplateProcessing
</python>

View File

@@ -2,51 +2,51 @@
<tokenizerslangcontent>
<python>
## BertPreTokenizer[[tokenizers.pre_tokenizers.BertPreTokenizer]]
## BertPreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer
## ByteLevel[[tokenizers.pre_tokenizers.ByteLevel]]
## ByteLevel
[[autodoc]] tokenizers.pre_tokenizers.ByteLevel
## CharDelimiterSplit[[tokenizers.pre_tokenizers.CharDelimiterSplit]]
## CharDelimiterSplit
[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit
## Digits[[tokenizers.pre_tokenizers.Digits]]
## Digits
[[autodoc]] tokenizers.pre_tokenizers.Digits
## Metaspace[[tokenizers.pre_tokenizers.Metaspace]]
## Metaspace
[[autodoc]] tokenizers.pre_tokenizers.Metaspace
## PreTokenizer[[tokenizers.pre_tokenizers.PreTokenizer]]
## PreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer
## Punctuation[[tokenizers.pre_tokenizers.Punctuation]]
## Punctuation
[[autodoc]] tokenizers.pre_tokenizers.Punctuation
## Sequence[[tokenizers.pre_tokenizers.Sequence]]
## Sequence
[[autodoc]] tokenizers.pre_tokenizers.Sequence
## Split[[tokenizers.pre_tokenizers.Split]]
## Split
[[autodoc]] tokenizers.pre_tokenizers.Split
## UnicodeScripts[[tokenizers.pre_tokenizers.UnicodeScripts]]
## UnicodeScripts
[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts
## Whitespace[[tokenizers.pre_tokenizers.Whitespace]]
## Whitespace
[[autodoc]] tokenizers.pre_tokenizers.Whitespace
## WhitespaceSplit[[tokenizers.pre_tokenizers.WhitespaceSplit]]
## WhitespaceSplit
[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>

View File

@@ -2,7 +2,7 @@
<tokenizerslangcontent>
<python>
## Tokenizer[[tokenizers.Tokenizer]]
## Tokenizer
[[autodoc]] tokenizers.Tokenizer
    - all

View File

@@ -2,19 +2,19 @@
<tokenizerslangcontent>
<python>
## BpeTrainer[[tokenizers.trainers.BpeTrainer]]
## BpeTrainer
[[autodoc]] tokenizers.trainers.BpeTrainer
## UnigramTrainer[[tokenizers.trainers.UnigramTrainer]]
## UnigramTrainer
[[autodoc]] tokenizers.trainers.UnigramTrainer
## WordLevelTrainer[[tokenizers.trainers.WordLevelTrainer]]
## WordLevelTrainer
[[autodoc]] tokenizers.trainers.WordLevelTrainer
## WordPieceTrainer[[tokenizers.trainers.WordPieceTrainer]]
## WordPieceTrainer
[[autodoc]] tokenizers.trainers.WordPieceTrainer
</python>

View File

@@ -2,11 +2,11 @@
<tokenizerslangcontent>
<python>
## Annotation[[tokenizers.tools.Annotation]]
## Annotation
[[autodoc]] tokenizers.tools.Annotation
## EncodingVisualizer[[tokenizers.tools.EncodingVisualizer]]
## EncodingVisualizer
[[autodoc]] tokenizers.tools.EncodingVisualizer
    - __call__