mirror of https://github.com/meilisearch/MeiliSearch, synced 2024-11-30 08:44:27 +01:00
Remove a useless grenad file merging
parent 21898ffc60
commit 8d26f3040c
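Every hunk below makes the same change: the indexing pipeline only ever produces a single grenad file per database, so funnelling that one reader through a MergerBuilder was a no-op merge. A minimal before/after sketch of the pattern, assuming the grenad API exactly as it is used in this diff; the `MergeFn` alias, the `anyhow::Result` error type, and the `process` comments are placeholders for the sketch, not repository code:

// Before: N readers pushed into a MergerBuilder and stream-merged,
// even though N is always 1 here.
fn iterate_merged(
    readers: Vec<grenad::Reader<CursorClonableMmap>>,
    merge: MergeFn, // assumed merge-function alias, not shown in this diff
) -> anyhow::Result<()> {
    let mut builder = grenad::MergerBuilder::new(merge);
    for reader in readers {
        builder.push(reader.into_cursor()?);
    }
    let mut iter = builder.build().into_stream_merger_iter()?;
    while let Some((key, data)) = iter.next()? {
        // … process (key, data) here …
    }
    Ok(())
}

// After: a plain cursor walk over the single reader; same iteration
// order, no merge step.
fn iterate_single(reader: grenad::Reader<CursorClonableMmap>) -> anyhow::Result<()> {
    let mut cursor = reader.into_cursor()?;
    while let Some((key, data)) = cursor.move_on_next()? {
        // … process (key, data) here …
    }
    Ok(())
}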
@@ -279,9 +279,9 @@ where
        let index_documents_ids = self.index.documents_ids(self.wtxn)?;
        let index_is_empty = index_documents_ids.len() == 0;
        let mut final_documents_ids = RoaringBitmap::new();
-        let mut word_pair_proximity_docids = Vec::new();
-        let mut word_position_docids = Vec::new();
-        let mut word_docids = Vec::new();
+        let mut word_pair_proximity_docids = None;
+        let mut word_position_docids = None;
+        let mut word_docids = None;

        let mut databases_seen = 0;
        (self.progress)(UpdateIndexingStep::MergeDataIntoFinalDatabase {
@@ -293,17 +293,17 @@ where
            let typed_chunk = match result? {
                TypedChunk::WordDocids(chunk) => {
                    let cloneable_chunk = unsafe { as_cloneable_grenad(&chunk)? };
-                    word_docids.push(cloneable_chunk);
+                    word_docids = Some(cloneable_chunk);
                    TypedChunk::WordDocids(chunk)
                }
                TypedChunk::WordPairProximityDocids(chunk) => {
                    let cloneable_chunk = unsafe { as_cloneable_grenad(&chunk)? };
-                    word_pair_proximity_docids.push(cloneable_chunk);
+                    word_pair_proximity_docids = Some(cloneable_chunk);
                    TypedChunk::WordPairProximityDocids(chunk)
                }
                TypedChunk::WordPositionDocids(chunk) => {
                    let cloneable_chunk = unsafe { as_cloneable_grenad(&chunk)? };
-                    word_position_docids.push(cloneable_chunk);
+                    word_position_docids = Some(cloneable_chunk);
                    TypedChunk::WordPositionDocids(chunk)
                }
                otherwise => otherwise,
@@ -345,9 +345,9 @@ where
        self.index.put_documents_ids(self.wtxn, &all_documents_ids)?;

        self.execute_prefix_databases(
-            word_docids,
-            word_pair_proximity_docids,
-            word_position_docids,
+            word_docids.unwrap(),
+            word_pair_proximity_docids.unwrap(),
+            word_position_docids.unwrap(),
        )?;

        Ok(all_documents_ids.len())
@@ -356,9 +356,9 @@ where
    #[logging_timer::time("IndexDocuments::{}")]
    pub fn execute_prefix_databases(
        self,
-        word_docids: Vec<grenad::Reader<CursorClonableMmap>>,
-        word_pair_proximity_docids: Vec<grenad::Reader<CursorClonableMmap>>,
-        word_position_docids: Vec<grenad::Reader<CursorClonableMmap>>,
+        word_docids: grenad::Reader<CursorClonableMmap>,
+        word_pair_proximity_docids: grenad::Reader<CursorClonableMmap>,
+        word_position_docids: grenad::Reader<CursorClonableMmap>,
    ) -> Result<()>
    where
        F: Fn(UpdateIndexingStep) + Sync,

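Taken together, the four hunks above turn the per-database accumulators in `execute` from `Vec`s of readers into `Option`s, and `execute_prefix_databases` now takes each reader directly. A compressed sketch of that pattern (names from the diff, control flow simplified):

// was: let mut word_docids = Vec::new();
let mut word_docids = None;
for chunk in typed_chunks {
    // each chunk kind arrives exactly once, so `Some` overwrites nothing
    word_docids = Some(chunk); // was: word_docids.push(chunk);
}
// unwrap() encodes that once-per-kind invariant: if the extraction
// pipeline ever stopped emitting a WordDocids chunk, indexing would now
// panic here rather than iterate an empty merger.
self.execute_prefix_databases(word_docids.unwrap(), /* … */)?;
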
@@ -1,6 +1,6 @@
 use std::collections::{HashMap, HashSet};

-use grenad::{CompressionType, MergerBuilder};
+use grenad::CompressionType;
 use heed::types::ByteSlice;

 use crate::update::index_documents::{
@@ -35,7 +35,7 @@ impl<'t, 'u, 'i> WordPrefixDocids<'t, 'u, 'i> {
    #[logging_timer::time("WordPrefixDocids::{}")]
    pub fn execute(
        self,
-        new_word_docids: Vec<grenad::Reader<CursorClonableMmap>>,
+        new_word_docids: grenad::Reader<CursorClonableMmap>,
        new_prefix_fst_words: &[String],
        common_prefix_fst_words: &[&[String]],
        del_prefix_fst_words: &HashSet<Vec<u8>>,
@@ -50,15 +50,10 @@ impl<'t, 'u, 'i> WordPrefixDocids<'t, 'u, 'i> {
            self.max_memory,
        );

-        let mut word_docids_merger = MergerBuilder::new(merge_roaring_bitmaps);
-        for reader in new_word_docids {
-            word_docids_merger.push(reader.into_cursor()?);
-        }
-        let mut word_docids_iter = word_docids_merger.build().into_stream_merger_iter()?;
-
+        let mut new_word_docids_iter = new_word_docids.into_cursor()?;
        let mut current_prefixes: Option<&&[String]> = None;
        let mut prefixes_cache = HashMap::new();
-        while let Some((word, data)) = word_docids_iter.next()? {
+        while let Some((word, data)) = new_word_docids_iter.move_on_next()? {
            current_prefixes = match current_prefixes.take() {
                Some(prefixes) if word.starts_with(&prefixes[0].as_bytes()) => Some(prefixes),
                _otherwise => {

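The loop that replaces the merger above relies on both sides being sorted: words come out of the grenad cursor in byte order, and common_prefix_fst_words groups prefixes by first letter, so the current group only ever advances. A sketch of that walk; the `_otherwise` arm is cut off in the hunk, so the `find` below is a plausible reconstruction, not the repository's exact code:

let mut current_prefixes: Option<&&[String]> = None;
while let Some((word, _data)) = new_word_docids_iter.move_on_next()? {
    current_prefixes = match current_prefixes.take() {
        // same first letter as before: reuse the group without searching
        Some(prefixes) if word.starts_with(prefixes[0].as_bytes()) => Some(prefixes),
        // new first letter: scan to the next group whose prefix matches
        _otherwise => common_prefix_fst_words
            .iter()
            .find(|prefixes| word.starts_with(prefixes[0].as_bytes())),
    };
    // … accumulate `data` under every matching prefix in the group …
}
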
@@ -1,6 +1,6 @@
 use std::collections::{HashMap, HashSet};

-use grenad::{CompressionType, MergerBuilder};
+use grenad::CompressionType;
 use heed::types::ByteSlice;
 use heed::BytesDecode;
 use log::debug;
@@ -64,7 +64,7 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
    #[logging_timer::time("WordPrefixPairProximityDocids::{}")]
    pub fn execute(
        self,
-        new_word_pair_proximity_docids: Vec<grenad::Reader<CursorClonableMmap>>,
+        new_word_pair_proximity_docids: grenad::Reader<CursorClonableMmap>,
        new_prefix_fst_words: &[String],
        common_prefix_fst_words: &[&[String]],
        del_prefix_fst_words: &HashSet<Vec<u8>>,
@@ -74,14 +74,7 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
        let new_prefix_fst_words: Vec<_> =
            new_prefix_fst_words.linear_group_by_key(|x| x.chars().nth(0).unwrap()).collect();

-        // We retrieve and merge the created word pair proximities docids entries
-        // for the newly added documents.
-        let mut wppd_merger = MergerBuilder::new(merge_cbo_roaring_bitmaps);
-        for reader in new_word_pair_proximity_docids {
-            wppd_merger.push(reader.into_cursor()?);
-        }
-        let mut wppd_iter = wppd_merger.build().into_stream_merger_iter()?;
-
+        let mut new_wppd_iter = new_word_pair_proximity_docids.into_cursor()?;
        let mut word_prefix_pair_proximity_docids_sorter = create_sorter(
            merge_cbo_roaring_bitmaps,
            self.chunk_compression_type,
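The unchanged context above shows how the prefix groups are built: linear_group_by_key splits a slice into contiguous runs sharing a key, and in milli it comes from the slice-group-by crate. A small usage sketch, assuming that crate; the data is illustrative:

use slice_group_by::GroupBy;

fn main() {
    let words = ["and", "ant", "bee", "bear", "cat"];
    // contiguous runs sharing a first character:
    // [["and", "ant"], ["bee", "bear"], ["cat"]]
    let groups: Vec<&[&str]> =
        words.linear_group_by_key(|w| w.chars().next().unwrap()).collect();
    assert_eq!(groups.len(), 3);
}
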
@@ -95,7 +88,7 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
        let mut buffer = Vec::new();
        let mut current_prefixes: Option<&&[String]> = None;
        let mut prefixes_cache = HashMap::new();
-        while let Some((key, data)) = wppd_iter.next()? {
+        while let Some((key, data)) = new_wppd_iter.move_on_next()? {
            let (w1, w2, prox) = StrStrU8Codec::bytes_decode(key).ok_or(heed::Error::Decoding)?;
            if prox > self.max_proximity {
                continue;

@@ -2,7 +2,7 @@ use std::collections::{HashMap, HashSet};
 use std::num::NonZeroU32;
 use std::{cmp, str};

-use grenad::{CompressionType, MergerBuilder};
+use grenad::CompressionType;
 use heed::types::ByteSlice;
 use heed::{BytesDecode, BytesEncode};
 use log::debug;
@@ -57,7 +57,7 @@ impl<'t, 'u, 'i> WordPrefixPositionDocids<'t, 'u, 'i> {
    #[logging_timer::time("WordPrefixPositionDocids::{}")]
    pub fn execute(
        self,
-        new_word_position_docids: Vec<grenad::Reader<CursorClonableMmap>>,
+        new_word_position_docids: grenad::Reader<CursorClonableMmap>,
        new_prefix_fst_words: &[String],
        common_prefix_fst_words: &[&[String]],
        del_prefix_fst_words: &HashSet<Vec<u8>>,
@@ -72,18 +72,13 @@ impl<'t, 'u, 'i> WordPrefixPositionDocids<'t, 'u, 'i> {
            self.max_memory,
        );

-        let mut word_position_docids_merger = MergerBuilder::new(merge_cbo_roaring_bitmaps);
-        for reader in new_word_position_docids {
-            word_position_docids_merger.push(reader.into_cursor()?);
-        }
-        let mut word_position_docids_iter =
-            word_position_docids_merger.build().into_stream_merger_iter()?;
+        let mut new_word_position_docids_iter = new_word_position_docids.into_cursor()?;

        // We fetch all the new common prefixes between the previous and new prefix fst.
        let mut buffer = Vec::new();
        let mut current_prefixes: Option<&&[String]> = None;
        let mut prefixes_cache = HashMap::new();
-        while let Some((key, data)) = word_position_docids_iter.next()? {
+        while let Some((key, data)) = new_word_position_docids_iter.move_on_next()? {
            let (word, pos) = StrBEU32Codec::bytes_decode(key).ok_or(heed::Error::Decoding)?;

            current_prefixes = match current_prefixes.take() {