From 8fb96b8274d0b60439dd8c87746e9a77239e65bc Mon Sep 17 00:00:00 2001 From: ManyTheFish Date: Thu, 21 Sep 2023 10:02:08 +0200 Subject: [PATCH] Add buffer to the obkv writter --- .../extract/extract_docid_word_positions.rs | 5 +++- .../extract/extract_word_position_docids.rs | 30 ++++++++++++++++--- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs index f99fbbc4e..ff25caa1a 100644 --- a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs +++ b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs @@ -155,7 +155,8 @@ fn extract_tokens_from_document( let tokens = process_tokens(tokenizer.tokenize(field)) .take_while(|(p, _)| (*p as u32) < max_positions_per_attributes); - let mut writer = KvWriterU16::memory(); + buffers.obkv_buffer.clear(); + let mut writer = KvWriterU16::new(&mut buffers.obkv_buffer); for (index, token) in tokens { // if a language has been detected for the token, we update the counter. if let Some(language) = token.language { @@ -293,4 +294,6 @@ struct Buffers { key_buffer: Vec<u8>, // the field buffer for each fields desserialization, and must be cleared between each field. field_buffer: String, + // buffer used to store the value data containing an obkv. 
+ obkv_buffer: Vec<u8>, } diff --git a/milli/src/update/index_documents/extract/extract_word_position_docids.rs b/milli/src/update/index_documents/extract/extract_word_position_docids.rs index 0e2aa2021..638052b37 100644 --- a/milli/src/update/index_documents/extract/extract_word_position_docids.rs +++ b/milli/src/update/index_documents/extract/extract_word_position_docids.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::fs::File; use std::io; @@ -33,18 +34,39 @@ pub fn extract_word_position_docids( max_memory, ); + let mut word_positions: HashSet<(u16, Vec<u8>)> = HashSet::new(); + let mut current_document_id = None; let mut key_buffer = Vec::new(); let mut cursor = docid_word_positions.into_cursor()?; while let Some((key, value)) = cursor.move_on_next()? { - let (document_id_bytes, fid_bytes) = try_split_array_at(key) + let (document_id_bytes, _fid_bytes) = try_split_array_at(key) .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?; let document_id = DocumentId::from_be_bytes(document_id_bytes); + if current_document_id.map_or(false, |id| document_id != id) { + for (position, word_bytes) in word_positions.iter() { + key_buffer.clear(); + key_buffer.extend_from_slice(word_bytes); + key_buffer.push(0); + key_buffer.extend_from_slice(&position.to_be_bytes()); + word_position_docids_sorter.insert(&key_buffer, current_document_id.unwrap().to_ne_bytes())?; + } + word_positions.clear(); + } + + current_document_id = Some(document_id); + for (position, word_bytes) in KvReaderU16::new(&value).iter() { - key_buffer.clear(); - key_buffer.extend_from_slice(word_bytes); - key_buffer.push(0); let position = bucketed_position(position); + word_positions.insert((position, word_bytes.to_vec())); + } + } + + if let Some(document_id) = current_document_id { + for (position, word_bytes) in word_positions { + key_buffer.clear(); + key_buffer.extend_from_slice(&word_bytes); + key_buffer.push(0); key_buffer.extend_from_slice(&position.to_be_bytes()); 
word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?; }