diff --git a/tokenizers/src/tokenizer/added_vocabulary.rs b/tokenizers/src/tokenizer/added_vocabulary.rs
index 92235d55..5ed77c6d 100644
--- a/tokenizers/src/tokenizer/added_vocabulary.rs
+++ b/tokenizers/src/tokenizer/added_vocabulary.rs
@@ -93,7 +93,12 @@ impl std::hash::Hash for AddedToken {
 }
 impl std::cmp::PartialEq for AddedToken {
     fn eq(&self, other: &Self) -> bool {
-        self.content == other.content && self.special == other.special && self.lstrip == other.lstrip && self.rstrip == other.rstrip && self.normalized == other.normalized && self.single_word == other.single_word
+        self.content == other.content
+            && self.special == other.special
+            && self.lstrip == other.lstrip
+            && self.rstrip == other.rstrip
+            && self.normalized == other.normalized
+            && self.single_word == other.single_word
     }
 }
 impl std::cmp::Eq for AddedToken {}
@@ -673,7 +678,10 @@ mod tests {
         assert_eq!(vocab.get_vocab()["another_two"], 4); // New token was added, but the index is not the length of the vocab
 
         // Let's add an already added token again
-        assert_eq!(vocab.add_special_tokens(&[AddedToken::from("another_two", true)], &model, normalizer), 1);
+        assert_eq!(
+            vocab.add_special_tokens(&[AddedToken::from("another_two", true)], &model, normalizer),
+            1
+        );
         assert_eq!(vocab.len(), 5); // Token was already there
         assert_eq!(vocab.get_vocab()["another_two"], 4); // Token idx not changed
     }
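
For context, the `eq` change above is a pure line-wrapping reformat: `AddedToken` equality still compares all six fields (`content`, `special`, `lstrip`, `rstrip`, `normalized`, `single_word`). A minimal sketch of what that implies, using the `AddedToken::from(content, special)` constructor visible in the test hunk (and assuming, as in the crate, that `AddedToken` also implements `Debug` so the `assert` macros compile):

```rust
use tokenizers::AddedToken;

fn main() {
    // Two tokens built identically compare equal: every compared field matches.
    let a = AddedToken::from("another_two", true);
    let b = AddedToken::from("another_two", true);
    assert_eq!(a, b);

    // Flipping any one of the compared fields breaks equality,
    // e.g. constructing the token as non-special:
    let c = AddedToken::from("another_two", false);
    assert_ne!(a, c);
}
```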