diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
index e5d95cbdb..96156adb4 100644
--- a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
+++ b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
@@ -56,7 +56,7 @@ pub fn extract_docid_word_positions(
     let mut value_buffer = Vec::new();
 
     // initialize tokenizer.
-    let mut builder = tokenizer_builder(stop_words, dictionary, allowed_separators, None);
+    let mut builder = tokenizer_builder(stop_words, allowed_separators, dictionary, None);
     let tokenizer = builder.build();
 
     // iterate over documents.
@@ -247,8 +247,8 @@ fn lang_safe_tokens_from_document<'a>(
        // build a new temporary tokenizer including the allow list.
        let mut builder = tokenizer_builder(
            stop_words,
-            dictionary,
            allowed_separators,
+            dictionary,
            Some(&script_language),
        );
        let tokenizer = builder.build();
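
The diff reorders the `allowed_separators` and `dictionary` arguments at both call sites so they match the parameter order expected by `tokenizer_builder`. A swap like this can slip past the compiler when both arguments have the same type, so it only surfaces at runtime (separators being treated as dictionary entries and vice versa). The sketch below is not the milli code; it is a minimal, self-contained illustration (the `Separators` and `Dictionary` newtypes and `build_tokenizer_settings` are hypothetical) of how same-typed parameters invite this class of bug and how newtypes turn it into a compile-time error.

```rust
// Minimal sketch, assuming nothing about milli's real API: `Separators` and
// `Dictionary` are hypothetical newtypes showing how an argument swap like the
// one fixed in this diff can be rejected by the compiler instead of silently
// producing a mis-configured tokenizer.

/// Hypothetical wrapper for the user-defined separator list.
struct Separators<'a>(Option<&'a [&'a str]>);

/// Hypothetical wrapper for the user-defined dictionary (words that must not be split).
struct Dictionary<'a>(Option<&'a [&'a str]>);

/// Stand-in for a builder entry point: with two plain `Option<&[&str]>`
/// parameters, swapping them would still type-check; with the newtypes it cannot.
fn build_tokenizer_settings<'a>(
    separators: Separators<'a>,
    dictionary: Dictionary<'a>,
) -> String {
    format!("separators: {:?}, dictionary: {:?}", separators.0, dictionary.0)
}

fn main() {
    let allowed_separators = ["-", "_"];
    let dictionary = ["J. R. R.", "e.g."];

    // Correct order; reversing the two arguments is a compile error here,
    // whereas with identically typed `Option<&[&str]>` parameters it would not be.
    let settings = build_tokenizer_settings(
        Separators(Some(&allowed_separators)),
        Dictionary(Some(&dictionary)),
    );
    println!("{settings}");
}
```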