From bdfc38b78dff24827554f1525730615ff6e3eb9b Mon Sep 17 00:00:00 2001 From: tinyboxvk <13696594+tinyboxvk@users.noreply.github.com> Date: Thu, 9 Jan 2025 06:53:20 -0400 Subject: [PATCH] Fix typos (#1715) * Fix typos Signed-off-by: tinyboxvk <13696594+tinyboxvk@users.noreply.github.com> * Update docs/source/quicktour.rst * Update docs/source-doc-builder/quicktour.mdx --------- Signed-off-by: tinyboxvk <13696594+tinyboxvk@users.noreply.github.com> Co-authored-by: Nicolas Patry --- bindings/python/examples/custom_components.py | 2 +- .../py_src/tokenizers/decoders/__init__.pyi | 2 +- .../tokenizers/normalizers/__init__.pyi | 2 +- .../tokenizers/pre_tokenizers/__init__.pyi | 2 +- .../py_src/tokenizers/tools/visualizer.py | 2 +- bindings/python/src/decoders.rs | 2 +- bindings/python/src/lib.rs | 2 +- bindings/python/src/normalizers.rs | 2 +- bindings/python/src/pre_tokenizers.rs | 2 +- bindings/python/stub.py | 2 +- .../python/tests/bindings/test_trainers.py | 2 +- docs/source-doc-builder/components.mdx | 38 +++++++++---------- docs/source-doc-builder/installation.mdx | 2 +- docs/source-doc-builder/pipeline.mdx | 2 +- docs/source/components.rst | 14 +++---- docs/source/installation/python.inc | 2 +- docs/source/pipeline.rst | 2 +- tokenizers/CHANGELOG.md | 2 +- tokenizers/src/decoders/byte_fallback.rs | 2 +- tokenizers/src/decoders/ctc.rs | 2 +- tokenizers/src/decoders/strip.rs | 2 +- tokenizers/src/processors/template.rs | 2 +- tokenizers/src/tokenizer/normalizer.rs | 4 +- tokenizers/src/utils/from_pretrained.rs | 2 +- tokenizers/src/utils/truncation.rs | 2 +- 25 files changed, 50 insertions(+), 50 deletions(-) diff --git a/bindings/python/examples/custom_components.py b/bindings/python/examples/custom_components.py index cdb97309..10875cde 100644 --- a/bindings/python/examples/custom_components.py +++ b/bindings/python/examples/custom_components.py @@ -49,7 +49,7 @@ class CustomNormalizer: def normalize(self, normalized: NormalizedString): # Most of these can be replaced by a `Sequence` combining some provided Normalizer, # (ie Sequence([ NFKC(), Replace(Regex("\s+"), " "), Lowercase() ]) - # and it should be the prefered way. That being said, here is an example of the kind + # and it should be the preferred way. That being said, here is an example of the kind # of things that can be done here: normalized.nfkc() normalized.filter(lambda char: not char.isnumeric()) diff --git a/bindings/python/py_src/tokenizers/decoders/__init__.pyi b/bindings/python/py_src/tokenizers/decoders/__init__.pyi index adad6f53..672aebb8 100644 --- a/bindings/python/py_src/tokenizers/decoders/__init__.pyi +++ b/bindings/python/py_src/tokenizers/decoders/__init__.pyi @@ -57,7 +57,7 @@ class ByteFallback(Decoder): ByteFallback Decoder ByteFallback is a simple trick which converts tokens looking like `<0x61>` to pure bytes, and attempts to make them into a string. If the tokens - cannot be decoded you will get � instead for each inconvertable byte token + cannot be decoded you will get � instead for each inconvertible byte token """ def __init__(self): diff --git a/bindings/python/py_src/tokenizers/normalizers/__init__.pyi b/bindings/python/py_src/tokenizers/normalizers/__init__.pyi index 8c4e744d..1f555510 100644 --- a/bindings/python/py_src/tokenizers/normalizers/__init__.pyi +++ b/bindings/python/py_src/tokenizers/normalizers/__init__.pyi @@ -389,7 +389,7 @@ class Nmt(Normalizer): class Precompiled(Normalizer): """ Precompiled normalizer - Don't use manually it is used for compatiblity for SentencePiece. 
+ Don't use manually it is used for compatibility for SentencePiece. """ def __init__(self, precompiled_charsmap): pass diff --git a/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi b/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi index 8049daec..6f31ff3a 100644 --- a/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi +++ b/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi @@ -48,7 +48,7 @@ class BertPreTokenizer(PreTokenizer): BertPreTokenizer This pre-tokenizer splits tokens on spaces, and also on punctuation. - Each occurence of a punctuation character will be treated separately. + Each occurrence of a punctuation character will be treated separately. """ def __init__(self): pass diff --git a/bindings/python/py_src/tokenizers/tools/visualizer.py b/bindings/python/py_src/tokenizers/tools/visualizer.py index c988a648..b7abb701 100644 --- a/bindings/python/py_src/tokenizers/tools/visualizer.py +++ b/bindings/python/py_src/tokenizers/tools/visualizer.py @@ -325,7 +325,7 @@ class EncodingVisualizer: Returns: A list of length len(text) whose entry at index i is None if there is no annotation on - charachter i or k, the index of the annotation that covers index i where k is with + character i or k, the index of the annotation that covers index i where k is with respect to the list of annotations """ annotation_map = [None] * len(text) diff --git a/bindings/python/src/decoders.rs b/bindings/python/src/decoders.rs index 44f33326..98b7d6b7 100644 --- a/bindings/python/src/decoders.rs +++ b/bindings/python/src/decoders.rs @@ -263,7 +263,7 @@ impl PyWordPieceDec { /// ByteFallback Decoder /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens -/// cannot be decoded you will get � instead for each inconvertable byte token +/// cannot be decoded you will get � instead for each inconvertible byte token /// #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteFallback")] pub struct PyByteFallbackDec {} diff --git a/bindings/python/src/lib.rs b/bindings/python/src/lib.rs index 3f1e7136..d28067f1 100644 --- a/bindings/python/src/lib.rs +++ b/bindings/python/src/lib.rs @@ -23,7 +23,7 @@ use pyo3::wrap_pymodule; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); // For users using multiprocessing in python, it is quite easy to fork the process running -// tokenizers, ending up with a deadlock because we internaly make use of multithreading. So +// tokenizers, ending up with a deadlock because we internally make use of multithreading. So // we register a callback to be called in the event of a fork so that we can warn the user. #[cfg(target_family = "unix")] static mut REGISTERED_FORK_CALLBACK: bool = false; diff --git a/bindings/python/src/normalizers.rs b/bindings/python/src/normalizers.rs index d8159637..0d35cb1b 100644 --- a/bindings/python/src/normalizers.rs +++ b/bindings/python/src/normalizers.rs @@ -534,7 +534,7 @@ impl PyNmt { } /// Precompiled normalizer -/// Don't use manually it is used for compatiblity for SentencePiece. +/// Don't use manually it is used for compatibility for SentencePiece. 
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Precompiled")] pub struct PyPrecompiled {} #[pymethods] diff --git a/bindings/python/src/pre_tokenizers.rs b/bindings/python/src/pre_tokenizers.rs index 02034560..367dd143 100644 --- a/bindings/python/src/pre_tokenizers.rs +++ b/bindings/python/src/pre_tokenizers.rs @@ -430,7 +430,7 @@ impl PyCharDelimiterSplit { /// BertPreTokenizer /// /// This pre-tokenizer splits tokens on spaces, and also on punctuation. -/// Each occurence of a punctuation character will be treated separately. +/// Each occurrence of a punctuation character will be treated separately. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")] pub struct PyBertPreTokenizer {} #[pymethods] diff --git a/bindings/python/stub.py b/bindings/python/stub.py index 41ef2d6e..ea8f43f9 100644 --- a/bindings/python/stub.py +++ b/bindings/python/stub.py @@ -100,7 +100,7 @@ def pyi_file(obj, indent=""): string += function(obj, indent) elif inspect.isgetsetdescriptor(obj): - # TODO it would be interesing to add the setter maybe ? + # TODO it would be interesting to add the setter maybe ? string += f"{indent}@property\n" string += function(obj, indent, text_signature="(self)") else: diff --git a/bindings/python/tests/bindings/test_trainers.py b/bindings/python/tests/bindings/test_trainers.py index 87021533..2e33b274 100644 --- a/bindings/python/tests/bindings/test_trainers.py +++ b/bindings/python/tests/bindings/test_trainers.py @@ -287,7 +287,7 @@ class TestUnigram: trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] - def test_continuing_prefix_trainer_mistmatch(self): + def test_continuing_prefix_trainer_mismatch(self): UNK = "[UNK]" special_tokens = [UNK] tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##")) diff --git a/docs/source-doc-builder/components.mdx b/docs/source-doc-builder/components.mdx index d9c71580..0ca0325e 100644 --- a/docs/source-doc-builder/components.mdx +++ b/docs/source-doc-builder/components.mdx @@ -25,8 +25,8 @@ The `Normalizer` is optional. | NFKC | NFKC unicode normalization | | | Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` | | Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`<br>
Output: `"hi"` | -| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Ouput: `e` | -| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Ouput: `"benene"` | +| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` | +| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Output: `"benene"` | | BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: | | | Sequence | Composes multiple normalizers that will run in the provided order | `Sequence([NFKC(), Lowercase()])` | @@ -39,8 +39,8 @@ The `Normalizer` is optional. | NFKC | NFKC unicode normalization | | | Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` | | Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`<br>
Output: `"hi"` | -| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Ouput: `e` | -| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Ouput: `"benene"` | +| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` | +| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Output: `"benene"` | | BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: | | | Sequence | Composes multiple normalizers that will run in the provided order | `Sequence::new(vec![NFKC, Lowercase])` | @@ -53,8 +53,8 @@ The `Normalizer` is optional. | NFKC | NFKC unicode normalization | | | Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` | | Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`<br>
Output: `"hi"` | -| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Ouput: `e` | -| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Ouput: `"benene"` | +| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` | +| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this:
Input: `"banana"`
Output: `"benene"` | | BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: | | | Sequence | Composes multiple normalizers that will run in the provided order | | @@ -78,12 +78,12 @@ the ByteLevel) | Name | Description | Example | | :--- | :--- | :--- | -| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties: | Input: `"Hello my friend, how are you?"`
Ouput: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | +| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties: | Input: `"Hello my friend, how are you?"`
Output: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | | Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+` | Input: `"Hello there!"`
Output: `"Hello", "there", "!"` | | WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"`
Output: `"Hello", "there!"` | -| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Ouput: `"Hello", "?"` | -| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Ouput: `"Hello", "▁there"` | -| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Ouput: `"Hello", "there"` | +| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Output: `"Hello", "?"` | +| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Output: `"Hello", "▁there"` | +| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Output: `"Hello", "there"` | | Digits | Splits the numbers from any other characters. | Input: `"Hello123there"`
Output: ``"Hello", "123", "there"`` | | Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`:
Input: `"Hello, how are you?"`
Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` | | Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence([Punctuation(), WhitespaceSplit()])` | @@ -91,12 +91,12 @@ the ByteLevel) | Name | Description | Example | | :--- | :--- | :--- | -| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `"Hello my friend, how are you?"`
Ouput: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | +| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `"Hello my friend, how are you?"`
Output: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | | Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+` | Input: `"Hello there!"`
Output: `"Hello", "there", "!"` | | WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"`
Output: `"Hello", "there!"` | -| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Ouput: `"Hello", "?"` | -| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Ouput: `"Hello", "▁there"` | -| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Ouput: `"Hello", "there"` | +| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Output: `"Hello", "?"` | +| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Output: `"Hello", "▁there"` | +| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Output: `"Hello", "there"` | | Digits | Splits the numbers from any other characters. | Input: `"Hello123there"`
Output: ``"Hello", "123", "there"`` | | Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary.
  • pattern should be either a custom string or regexp.
  • behavior should be one of:
    • Removed
    • Isolated
    • MergedWithPrevious
    • MergedWithNext
    • Contiguous
  • invert should be a boolean flag.
| Example with pattern = ` `, behavior = `"isolated"`, invert = `False`:
Input: `"Hello, how are you?"`
Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` | | Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence::new(vec![Punctuation, WhitespaceSplit])` | @@ -104,12 +104,12 @@ the ByteLevel) | Name | Description | Example | | :--- | :--- | :--- | -| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `"Hello my friend, how are you?"`
Ouput: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | +| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `"Hello my friend, how are you?"`
Output: `"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` | | Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+` | Input: `"Hello there!"`
Output: `"Hello", "there", "!"` | | WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"`
Output: `"Hello", "there!"` | -| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Ouput: `"Hello", "?"` | -| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Ouput: `"Hello", "▁there"` | -| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Ouput: `"Hello", "there"` | +| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"`
Output: `"Hello", "?"` | +| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"`
Output: `"Hello", "▁there"` | +| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `"Helloxthere"`
Output: `"Hello", "there"` | | Digits | Splits the numbers from any other characters. | Input: `"Hello123there"`
Output: ``"Hello", "123", "there"`` | | Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary.
  • pattern should be either a custom string or regexp.
  • behavior should be one of:
    • removed
    • isolated
    • mergedWithPrevious
    • mergedWithNext
    • contiguous
  • invert should be a boolean flag.
| Example with pattern = ` `, behavior = `"isolated"`, invert = `False`:
Input: `"Hello, how are you?"`
Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` | | Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | | @@ -148,5 +148,5 @@ special characters or identifiers that need to be reverted for example. | Name | Description | | :--- | :--- | | ByteLevel | Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. | -| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifer `▁` to identify whitespaces, and so this Decoder helps with decoding these. | +| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier `▁` to identify whitespaces, and so this Decoder helps with decoding these. | | WordPiece | Reverts the WordPiece Model. This model uses a special identifier `##` for continuing subwords, and so this Decoder helps with decoding these. | diff --git a/docs/source-doc-builder/installation.mdx b/docs/source-doc-builder/installation.mdx index a16f39dd..d49adeb3 100644 --- a/docs/source-doc-builder/installation.mdx +++ b/docs/source-doc-builder/installation.mdx @@ -32,7 +32,7 @@ as running: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ``` -Or you can easiy update it with the following command: +Or you can easily update it with the following command: ```bash rustup update diff --git a/docs/source-doc-builder/pipeline.mdx b/docs/source-doc-builder/pipeline.mdx index 8c359a29..67b9ca53 100644 --- a/docs/source-doc-builder/pipeline.mdx +++ b/docs/source-doc-builder/pipeline.mdx @@ -290,7 +290,7 @@ The role of the model is to split your "words" into tokens, using the rules it has learned. It's also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model. -This model is passed along when intializing the +This model is passed along when initializing the `Tokenizer` so you already know how to customize this part. Currently, the 🤗 Tokenizers library supports: diff --git a/docs/source/components.rst b/docs/source/components.rst index 7ba1bb09..1b017308 100644 --- a/docs/source/components.rst +++ b/docs/source/components.rst @@ -132,14 +132,14 @@ The ``Normalizer`` is optional. - Removes all accent symbols in unicode (to be used with NFD for consistency) - Input: ``é`` - Ouput: ``e`` + Output: ``e`` * - Replace - Replaces a custom string or regexp and changes it with given content - ``Replace("a", "e")`` will behave like this: Input: ``"banana"`` - Ouput: ``"benene"`` + Output: ``"benene"`` * - BertNormalizer - Provides an implementation of the Normalizer used in the original BERT. 
Options @@ -193,7 +193,7 @@ the ByteLevel) - Input: ``"Hello my friend, how are you?"`` - Ouput: ``"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"`` + Output: ``"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"`` * - Whitespace - Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+`` @@ -211,13 +211,13 @@ the ByteLevel) - Will isolate all punctuation characters - Input: ``"Hello?"`` - Ouput: ``"Hello", "?"`` + Output: ``"Hello", "?"`` * - Metaspace - Splits on whitespaces and replaces them with a special char "▁" (U+2581) - Input: ``"Hello there"`` - Ouput: ``"Hello", "▁there"`` + Output: ``"Hello", "▁there"`` * - CharDelimiterSplit - Splits on a given character @@ -225,7 +225,7 @@ the ByteLevel) Input: ``"Helloxthere"`` - Ouput: ``"Hello", "there"`` + Output: ``"Hello", "there"`` * - Digits - Splits the numbers from any other characters. @@ -361,7 +361,7 @@ reverted for example. a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. * - Metaspace - - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifer ``▁`` to + - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to identify whitespaces, and so this Decoder helps with decoding these. * - WordPiece - Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing diff --git a/docs/source/installation/python.inc b/docs/source/installation/python.inc index 6e7a8b08..2a3f67e2 100644 --- a/docs/source/installation/python.inc +++ b/docs/source/installation/python.inc @@ -24,7 +24,7 @@ If you are using a unix based OS, the installation should be as simple as runnin curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -Or you can easiy update it with the following command:: +Or you can easily update it with the following command:: rustup update diff --git a/docs/source/pipeline.rst b/docs/source/pipeline.rst index 82da6a53..f2da7d74 100644 --- a/docs/source/pipeline.rst +++ b/docs/source/pipeline.rst @@ -253,7 +253,7 @@ been trained if you are using a pretrained tokenizer). The role of the model is to split your "words" into tokens, using the rules it has learned. It's also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model. -This model is passed along when intializing the :entity:`Tokenizer` so you already know +This model is passed along when initializing the :entity:`Tokenizer` so you already know how to customize this part. Currently, the 🤗 Tokenizers library supports: - :entity:`models.BPE` diff --git a/tokenizers/CHANGELOG.md b/tokenizers/CHANGELOG.md index a9329bc9..a0fce504 100644 --- a/tokenizers/CHANGELOG.md +++ b/tokenizers/CHANGELOG.md @@ -62,7 +62,7 @@ special tokens and/or added tokens in the sequence). - [#363]: Fix panic from unwrapping `File::open` in `count_words` ### Changed -- [#234]: Completely changed the alignement mappings available on `Encoding`. Previous mappings +- [#234]: Completely changed the alignment mappings available on `Encoding`. Previous mappings were misleading and only providing offsets. 
New ones provide methods to easily convert between `char` or `word` (input space) and `token` (output space) - [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces diff --git a/tokenizers/src/decoders/byte_fallback.rs b/tokenizers/src/decoders/byte_fallback.rs index 16788dec..b04b3db6 100644 --- a/tokenizers/src/decoders/byte_fallback.rs +++ b/tokenizers/src/decoders/byte_fallback.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens -/// cannot be decoded you will get � instead for each inconvertable byte token +/// cannot be decoded you will get � instead for each inconvertible byte token #[non_exhaustive] pub struct ByteFallback { #[serde(rename = "type")] diff --git a/tokenizers/src/decoders/ctc.rs b/tokenizers/src/decoders/ctc.rs index 2798638d..9d5a5718 100644 --- a/tokenizers/src/decoders/ctc.rs +++ b/tokenizers/src/decoders/ctc.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Serialize, Deserialize)] /// The CTC (Connectionist Temporal Classification) decoder takes care /// of sanitizing a list of inputs token. -/// Due to some alignement problem the output of some models can come +/// Due to some alignment problem the output of some models can come /// with duplicated token. #[serde(tag = "type")] #[non_exhaustive] diff --git a/tokenizers/src/decoders/strip.rs b/tokenizers/src/decoders/strip.rs index b095fc37..9aeffec6 100644 --- a/tokenizers/src/decoders/strip.rs +++ b/tokenizers/src/decoders/strip.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// Strip is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. 
If the tokens -/// cannot be decoded you will get � instead for each inconvertable byte token +/// cannot be decoded you will get � instead for each inconvertible byte token #[serde(tag = "type")] #[non_exhaustive] pub struct Strip { diff --git a/tokenizers/src/processors/template.rs b/tokenizers/src/processors/template.rs index 7f1fed54..01394e47 100644 --- a/tokenizers/src/processors/template.rs +++ b/tokenizers/src/processors/template.rs @@ -508,7 +508,7 @@ impl TemplateProcessing { } Piece::SpecialToken { id, type_id } => { if add_special_tokens { - let tok = &self.special_tokens.0[id]; // We already checked existance above + let tok = &self.special_tokens.0[id]; // We already checked existence above let len = tok.ids.len(); let encoding = Encoding::new( diff --git a/tokenizers/src/tokenizer/normalizer.rs b/tokenizers/src/tokenizer/normalizer.rs index 94b56887..823f46a6 100644 --- a/tokenizers/src/tokenizer/normalizer.rs +++ b/tokenizers/src/tokenizer/normalizer.rs @@ -195,9 +195,9 @@ impl NormalizedString { }); match (start, end) { - // Targetting inexistant beginning + // Targeting inexistant beginning (Some(s), None) => Some(s..s), - // Targetting inexistant end + // Targeting inexistant end (None, Some(e)) => Some(e..e), // Found the range (Some(s), Some(e)) => Some(s..e), diff --git a/tokenizers/src/utils/from_pretrained.rs b/tokenizers/src/utils/from_pretrained.rs index dac5d826..223fbbeb 100644 --- a/tokenizers/src/utils/from_pretrained.rs +++ b/tokenizers/src/utils/from_pretrained.rs @@ -3,7 +3,7 @@ use hf_hub::{api::sync::ApiBuilder, Repo, RepoType}; use std::collections::HashMap; use std::path::PathBuf; -/// Defines the aditional parameters available for the `from_pretrained` function +/// Defines the additional parameters available for the `from_pretrained` function #[derive(Debug, Clone)] pub struct FromPretrainedParameters { pub revision: String, diff --git a/tokenizers/src/utils/truncation.rs b/tokenizers/src/utils/truncation.rs index a8ad2a61..9acc297b 100644 --- a/tokenizers/src/utils/truncation.rs +++ b/tokenizers/src/utils/truncation.rs @@ -136,7 +136,7 @@ pub fn truncate_encodings( n2 = n1 + params.max_length % 2; } - // Swap lengths if we swapped previosuly + // Swap lengths if we swapped previously if swap { mem::swap(&mut n1, &mut n2); }