diff --git a/tokenizers/src/tokenizer/added_vocabulary.rs b/tokenizers/src/tokenizer/added_vocabulary.rs
index bfbb4e0f..561f1adf 100644
--- a/tokenizers/src/tokenizer/added_vocabulary.rs
+++ b/tokenizers/src/tokenizer/added_vocabulary.rs
@@ -322,11 +322,7 @@ impl AddedVocabulary {
     /// This method returns a list "splits", each of them being a pair of Offsets
     /// and an optional ID if it is an AddedToken.
     /// The list of splits cover the entire input string.
-    fn find_matches<'a>(
-        &self,
-        sentence: &str,
-        split_re: &'a MatchingSet,
-    ) -> Vec<(Option<u32>, Offsets)> {
+    fn find_matches(&self, sentence: &str, split_re: &MatchingSet) -> Vec<(Option<u32>, Offsets)> {
         if sentence.is_empty() {
             return vec![(None, (0, 0))];
         }