diff --git a/.github/workflows/publish-binaries.yml b/.github/workflows/publish-binaries.yml
index 13555cbac..76dde74d1 100644
--- a/.github/workflows/publish-binaries.yml
+++ b/.github/workflows/publish-binaries.yml
@@ -96,14 +96,12 @@ jobs:
 
   publish-macos-apple-silicon:
     name: Publish binary for macOS silicon
-    runs-on: ${{ matrix.os }}
+    runs-on: macos-12
     needs: check-version
     strategy:
-      fail-fast: false
       matrix:
         include:
-          - os: macos-12
-            target: aarch64-apple-darwin
+          - target: aarch64-apple-darwin
             asset_name: meilisearch-macos-apple-silicon
     steps:
       - name: Checkout repository
@@ -132,21 +130,29 @@ jobs:
 
   publish-aarch64:
     name: Publish binary for aarch64
-    runs-on: ${{ matrix.os }}
+    runs-on: ubuntu-latest
     needs: check-version
+    container:
+      # Use ubuntu-18.04 to compile with glibc 2.27
+      image: ubuntu:18.04
     strategy:
-      fail-fast: false
       matrix:
         include:
-          - build: aarch64
-            os: ubuntu-18.04
-            target: aarch64-unknown-linux-gnu
-            linker: gcc-aarch64-linux-gnu
-            use-cross: true
+          - target: aarch64-unknown-linux-gnu
             asset_name: meilisearch-linux-aarch64
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update -y && apt upgrade -y
+          apt-get install -y curl build-essential gcc-aarch64-linux-gnu
+      - name: Set up Docker for cross compilation
+        run: |
+          apt-get install -y curl apt-transport-https ca-certificates software-properties-common
+          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+          add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+          apt-get update -y && apt-get install -y docker-ce
       - name: Installing Rust toolchain
         uses: actions-rs/toolchain@v1
         with:
@@ -154,15 +160,7 @@ jobs:
           profile: minimal
           target: ${{ matrix.target }}
           override: true
-      - name: APT update
-        run: |
-          sudo apt update
-      - name: Install target specific tools
-        if: matrix.use-cross
-        run: |
-          sudo apt-get install -y ${{ matrix.linker }}
       - name: Configure target aarch64 GNU
-        if: matrix.target == 'aarch64-unknown-linux-gnu'
         ## Environment variable is not passed using env:
         ## LD gold won't work with MUSL
         # env:
@@ -176,8 +174,10 @@
         uses: actions-rs/cargo@v1
         with:
           command: build
-          use-cross: ${{ matrix.use-cross }}
+          use-cross: true
           args: --release --target ${{ matrix.target }}
+        env:
+          CROSS_DOCKER_IN_DOCKER: true
       - name: List target output files
         run: ls -lR ./target
       - name: Upload the binary to release
diff --git a/meilisearch/src/search.rs b/meilisearch/src/search.rs
index c287f1ba0..7e4a7da6a 100644
--- a/meilisearch/src/search.rs
+++ b/meilisearch/src/search.rs
@@ -375,10 +375,15 @@ pub fn perform_search(
         &displayed_ids,
     );
 
-    let mut tokenizer_buidler = TokenizerBuilder::default();
-    tokenizer_buidler.create_char_map(true);
+    let mut tokenizer_builder = TokenizerBuilder::default();
+    tokenizer_builder.create_char_map(true);
 
-    let mut formatter_builder = MatcherBuilder::new(matching_words, tokenizer_buidler.build());
+    let script_lang_map = index.script_language(&rtxn)?;
+    if !script_lang_map.is_empty() {
+        tokenizer_builder.allow_list(&script_lang_map);
+    }
+
+    let mut formatter_builder = MatcherBuilder::new(matching_words, tokenizer_builder.build());
     formatter_builder.crop_marker(query.crop_marker);
     formatter_builder.highlight_prefix(query.highlight_pre_tag);
     formatter_builder.highlight_suffix(query.highlight_post_tag);
diff --git a/milli/src/index.rs b/milli/src/index.rs
index a4048dfb0..20e64f984 100644
--- a/milli/src/index.rs
+++ b/milli/src/index.rs
@@ -1211,11 +1211,22 @@ impl Index {
         let soft_deleted_documents = self.soft_deleted_documents_ids(rtxn)?;
 
         let mut script_language: HashMap<Script, Vec<Language>> = HashMap::new();
+        let mut script_language_doc_count: Vec<(Script, Language, u64)> = Vec::new();
+        let mut total = 0;
         for sl in self.script_language_docids.iter(rtxn)? {
             let ((script, language), docids) = sl?;
 
             // keep only Languages that contains at least 1 document.
-            if !soft_deleted_documents.is_superset(&docids) {
+            let remaining_documents_count = (docids - &soft_deleted_documents).len();
+            total += remaining_documents_count;
+            if remaining_documents_count > 0 {
+                script_language_doc_count.push((script, language, remaining_documents_count));
+            }
+        }
+
+        let threshold = total / 20; // 5% (arbitrary)
+        for (script, language, count) in script_language_doc_count {
+            if count > threshold {
                 if let Some(languages) = script_language.get_mut(&script) {
                     (*languages).push(language);
                 } else {
diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
index 2d51fcc1a..131b78df9 100644
--- a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
+++ b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs
@@ -3,12 +3,14 @@ use std::convert::TryInto;
 use std::fs::File;
 use std::{io, mem, str};
 
-use charabia::{Language, Script, SeparatorKind, Token, TokenKind, TokenizerBuilder};
+use charabia::{Language, Script, SeparatorKind, Token, TokenKind, Tokenizer, TokenizerBuilder};
+use obkv::KvReader;
 use roaring::RoaringBitmap;
 use serde_json::Value;
 
 use super::helpers::{concat_u32s_array, create_sorter, sorter_into_reader, GrenadParameters};
 use crate::error::{InternalError, SerializationError};
+use crate::update::index_documents::MergeFn;
 use crate::{
     absolute_from_relative_position, FieldId, Result, MAX_POSITION_PER_ATTRIBUTE, MAX_WORD_LENGTH,
 };
@@ -33,7 +35,7 @@ pub fn extract_docid_word_positions(
     let max_memory = indexer.max_memory_by_thread();
 
     let mut documents_ids = RoaringBitmap::new();
-    let mut script_language_pair = HashMap::new();
+    let mut script_language_docids = HashMap::new();
     let mut docid_word_positions_sorter = create_sorter(
         grenad::SortAlgorithm::Stable,
         concat_u32s_array,
@@ -43,13 +45,12 @@
         max_memory,
     );
 
-    let mut key_buffer = Vec::new();
-    let mut field_buffer = String::new();
-    let mut builder = TokenizerBuilder::new();
+    let mut buffers = Buffers::default();
+    let mut tokenizer_builder = TokenizerBuilder::new();
     if let Some(stop_words) = stop_words {
-        builder.stop_words(stop_words);
+        tokenizer_builder.stop_words(stop_words);
     }
-    let tokenizer = builder.build();
+    let tokenizer = tokenizer_builder.build();
 
     let mut cursor = obkv_documents.into_cursor()?;
     while let Some((key, value)) = cursor.move_on_next()? {
@@ -57,49 +58,122 @@ pub fn extract_docid_word_positions(
             .try_into()
             .map(u32::from_be_bytes)
             .map_err(|_| SerializationError::InvalidNumberSerialization)?;
-        let obkv = obkv::KvReader::<FieldId>::new(value);
+        let obkv = KvReader::<FieldId>::new(value);
 
         documents_ids.push(document_id);
-        key_buffer.clear();
-        key_buffer.extend_from_slice(&document_id.to_be_bytes());
+        buffers.key_buffer.clear();
+        buffers.key_buffer.extend_from_slice(&document_id.to_be_bytes());
 
-        for (field_id, field_bytes) in obkv.iter() {
-            if searchable_fields.as_ref().map_or(true, |sf| sf.contains(&field_id)) {
-                let value =
-                    serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
-                field_buffer.clear();
-                if let Some(field) = json_to_string(&value, &mut field_buffer) {
-                    let tokens = process_tokens(tokenizer.tokenize(field))
-                        .take_while(|(p, _)| (*p as u32) < max_positions_per_attributes);
+        let mut script_language_word_count = HashMap::new();
 
-                    for (index, token) in tokens {
-                        if let Some(language) = token.language {
-                            let script = token.script;
-                            let entry = script_language_pair
-                                .entry((script, language))
-                                .or_insert_with(RoaringBitmap::new);
-                            entry.push(document_id);
+        extract_tokens_from_document(
+            &obkv,
+            searchable_fields,
+            &tokenizer,
+            max_positions_per_attributes,
+            &mut buffers,
+            &mut script_language_word_count,
+            &mut docid_word_positions_sorter,
+        )?;
+
+        // if we detect a potential mistake in the language detection,
+        // we rerun the extraction forcing the tokenizer to use the most frequently detected Languages.
+        // context: https://github.com/meilisearch/meilisearch/issues/3565
+        if script_language_word_count
+            .values()
+            .map(Vec::as_slice)
+            .any(potential_language_detection_error)
+        {
+            // build an allow list with the most frequent detected languages in the document.
+            let script_language: HashMap<_, _> =
+                script_language_word_count.iter().filter_map(most_frequent_languages).collect();
+
+            // if the allow list is empty, meaning that no Language is considered frequent,
+            // then we don't rerun the extraction.
+            if !script_language.is_empty() {
+                // build a new temporary tokenizer including the allow list.
+                let mut tokenizer_builder = TokenizerBuilder::new();
+                if let Some(stop_words) = stop_words {
+                    tokenizer_builder.stop_words(stop_words);
+                }
+                tokenizer_builder.allow_list(&script_language);
+                let tokenizer = tokenizer_builder.build();
+
+                script_language_word_count.clear();
+
+                // rerun the extraction.
+                extract_tokens_from_document(
+                    &obkv,
+                    searchable_fields,
+                    &tokenizer,
+                    max_positions_per_attributes,
+                    &mut buffers,
+                    &mut script_language_word_count,
+                    &mut docid_word_positions_sorter,
+                )?;
+            }
+        }
+
+        for (script, languages_frequency) in script_language_word_count {
+            for (language, _) in languages_frequency {
+                let entry = script_language_docids
+                    .entry((script, language))
+                    .or_insert_with(RoaringBitmap::new);
+                entry.push(document_id);
+            }
+        }
+    }
+
+    sorter_into_reader(docid_word_positions_sorter, indexer)
+        .map(|reader| (documents_ids, reader, script_language_docids))
+}
+
+fn extract_tokens_from_document<T: AsRef<[u8]>>(
+    obkv: &KvReader<FieldId>,
+    searchable_fields: &Option<HashSet<FieldId>>,
+    tokenizer: &Tokenizer<T>,
+    max_positions_per_attributes: u32,
+    buffers: &mut Buffers,
+    script_language_word_count: &mut HashMap<Script, Vec<(Language, usize)>>,
+    docid_word_positions_sorter: &mut grenad::Sorter<MergeFn>,
+) -> Result<()> {
+    for (field_id, field_bytes) in obkv.iter() {
+        if searchable_fields.as_ref().map_or(true, |sf| sf.contains(&field_id)) {
+            let value = serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?;
+            buffers.field_buffer.clear();
+            if let Some(field) = json_to_string(&value, &mut buffers.field_buffer) {
+                let tokens = process_tokens(tokenizer.tokenize(field))
+                    .take_while(|(p, _)| (*p as u32) < max_positions_per_attributes);
+
+                for (index, token) in tokens {
+                    // if a language has been detected for the token, we update the counter.
+                    if let Some(language) = token.language {
+                        let script = token.script;
+                        let entry =
+                            script_language_word_count.entry(script).or_insert_with(Vec::new);
+                        match entry.iter_mut().find(|(l, _)| *l == language) {
+                            Some((_, n)) => *n += 1,
+                            None => entry.push((language, 1)),
                         }
-                        let token = token.lemma().trim();
-                        if !token.is_empty() && token.len() <= MAX_WORD_LENGTH {
-                            key_buffer.truncate(mem::size_of::<u32>());
-                            key_buffer.extend_from_slice(token.as_bytes());
+                    }
+                    let token = token.lemma().trim();
+                    if !token.is_empty() && token.len() <= MAX_WORD_LENGTH {
+                        buffers.key_buffer.truncate(mem::size_of::<u32>());
+                        buffers.key_buffer.extend_from_slice(token.as_bytes());
 
-                            let position: u16 = index
-                                .try_into()
-                                .map_err(|_| SerializationError::InvalidNumberSerialization)?;
-                            let position = absolute_from_relative_position(field_id, position);
-                            docid_word_positions_sorter
-                                .insert(&key_buffer, position.to_ne_bytes())?;
-                        }
+                        let position: u16 = index
+                            .try_into()
+                            .map_err(|_| SerializationError::InvalidNumberSerialization)?;
+                        let position = absolute_from_relative_position(field_id, position);
+                        docid_word_positions_sorter
+                            .insert(&buffers.key_buffer, position.to_ne_bytes())?;
                     }
                 }
             }
         }
     }
 
-    sorter_into_reader(docid_word_positions_sorter, indexer)
-        .map(|reader| (documents_ids, reader, script_language_pair))
+    Ok(())
 }
 
 /// Transform a JSON value into a string that can be indexed.
@@ -183,3 +257,46 @@ fn process_tokens<'a>(
         })
         .filter(|(_, t)| t.is_word())
 }
+
+fn potential_language_detection_error(languages_frequency: &[(Language, usize)]) -> bool {
+    if languages_frequency.len() > 1 {
+        let threshold = compute_language_frequency_threshold(languages_frequency);
+        languages_frequency.iter().any(|(_, c)| *c <= threshold)
+    } else {
+        false
+    }
+}
+
+fn most_frequent_languages(
+    (script, languages_frequency): (&Script, &Vec<(Language, usize)>),
+) -> Option<(Script, Vec<Language>)> {
+    if languages_frequency.len() > 1 {
+        let threshold = compute_language_frequency_threshold(languages_frequency);
+
+        let languages: Vec<_> =
+            languages_frequency.iter().filter(|(_, c)| *c > threshold).map(|(l, _)| *l).collect();
+
+        if languages.is_empty() {
+            None
+        } else {
+            Some((*script, languages))
+        }
+    } else {
+        None
+    }
+}
+
+fn compute_language_frequency_threshold(languages_frequency: &[(Language, usize)]) -> usize {
+    let total: usize = languages_frequency.iter().map(|(_, c)| c).sum();
+    total / 10 // 10% is a completely arbitrary value.
+}
+
+#[derive(Default)]
+struct Buffers {
+    // the key buffer is the concatenation of the internal document id with the field id.
+    // The buffer has to be completely cleared between documents,
+    // and the field id part must be cleared between each field.
+    key_buffer: Vec<u8>,
+    // the field buffer used for each field's deserialization; it must be cleared between each field.
+    field_buffer: String,
+}
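
A note on the two thresholds introduced above: Index::script_language now only reports a (Script, Language) pair when it covers more than 5% of the remaining documents (total / 20), and most_frequent_languages only keeps a Language in the per-document allow list when it accounts for more than 10% of the tokens counted for its Script (total / 10). The snippet below is a minimal standalone sketch of that 10% rule with invented token counts; plain strings stand in for charabia's Language type and the threshold helper is local to the example, so it illustrates the arithmetic rather than the exact code in the diff.

// Standalone sketch of the 10% rule used by compute_language_frequency_threshold
// and most_frequent_languages; language names and counts are invented for illustration.
fn threshold(languages_frequency: &[(&str, usize)]) -> usize {
    let total: usize = languages_frequency.iter().map(|(_, c)| c).sum();
    total / 10 // keep only languages above 10% of the detected tokens
}

fn main() {
    // First extraction pass over a French document where a few tokens were
    // misdetected as English and Italian.
    let detected = [("Fra", 90usize), ("Eng", 6), ("Ita", 4)];

    let t = threshold(&detected); // (90 + 6 + 4) / 10 = 10
    let allow_list: Vec<&str> =
        detected.iter().filter(|(_, c)| *c > t).map(|(l, _)| *l).collect();

    // Only "Fra" exceeds the threshold, so the first pass is flagged as a
    // potential detection error and the extraction is rerun with an allow
    // list restricted to French for this script.
    assert_eq!(allow_list, ["Fra"]);
}

With these counts, Eng and Ita fall at or under the threshold, which is exactly the condition (potential_language_detection_error) that triggers the re-extraction in extract_docid_word_positions.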