use std::collections::HashMap;

use fst::IntoStreamer;
use grenad::{CompressionType, MergerBuilder};
use slice_group_by::GroupBy;

use crate::update::index_documents::{
    create_sorter, fst_stream_into_hashset, merge_roaring_bitmaps, sorter_into_lmdb_database,
    CursorClonableMmap, MergeFn, WriteMethod,
};
use crate::{Index, Result};

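/// A builder that computes the word prefix docids database, i.e. for every
/// prefix of the words prefixes FST, the docids of the documents containing
/// a word that starts with that prefix.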
pub struct WordPrefixDocids<'t, 'u, 'i> {
    wtxn: &'t mut heed::RwTxn<'i, 'u>,
    index: &'i Index,
    pub(crate) chunk_compression_type: CompressionType,
    pub(crate) chunk_compression_level: Option<u32>,
    pub(crate) max_nb_chunks: Option<usize>,
    pub(crate) max_memory: Option<usize>,
}

impl<'t, 'u, 'i> WordPrefixDocids<'t, 'u, 'i> {
    pub fn new(
        wtxn: &'t mut heed::RwTxn<'i, 'u>,
        index: &'i Index,
    ) -> WordPrefixDocids<'t, 'u, 'i> {
        WordPrefixDocids {
            wtxn,
            index,
            chunk_compression_type: CompressionType::None,
            chunk_compression_level: None,
            max_nb_chunks: None,
            max_memory: None,
        }
    }

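    /// Merge the readers of newly indexed word docids, accumulate the docids
    /// under every matching prefix of the current prefix FST, delete the
    /// prefixes that are no longer part of the FST, and finally write the
    /// result into the `word_prefix_docids` LMDB database.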
    #[logging_timer::time("WordPrefixDocids::{}")]
    pub fn execute<A: AsRef<[u8]>>(
        self,
        new_word_docids: Vec<grenad::Reader<CursorClonableMmap>>,
        old_prefix_fst: &fst::Set<A>,
    ) -> Result<()> {
        let prefix_fst = self.index.words_prefixes_fst(self.wtxn)?;
        let prefix_fst_keys = prefix_fst.into_stream().into_strs()?;
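        // Group the prefixes by their first character: the merged word docids
        // are iterated in lexicographic order, so only the group of prefixes
        // sharing the current word's first character can possibly match it.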
        let prefix_fst_keys: Vec<_> =
            prefix_fst_keys.as_slice().linear_group_by_key(|x| x.chars().nth(0).unwrap()).collect();

        // We compute the set of prefixes that are no longer part of the prefix FST.
        let suppr_pw = fst_stream_into_hashset(old_prefix_fst.op().add(&prefix_fst).difference());

        // It is forbidden to keep a mutable reference into the database
        // and write into it at the same time, therefore we write into another file.
        let mut prefix_docids_sorter = create_sorter(
            merge_roaring_bitmaps,
            self.chunk_compression_type,
            self.chunk_compression_level,
            self.max_nb_chunks,
            self.max_memory,
        );

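        // Merge all the readers of newly indexed word docids into a single,
        // lexicographically ordered stream of (word, docids bitmap) entries.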
        let mut word_docids_merger = MergerBuilder::new(merge_roaring_bitmaps);
        word_docids_merger.extend(new_word_docids);
        let mut word_docids_iter = word_docids_merger.build().into_merger_iter()?;

        let mut current_prefixes: Option<&&[String]> = None;
        let mut prefixes_cache = HashMap::new();
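        // For every word, stick to the group of prefixes sharing its first
        // character as long as the word still matches it; once the word moves
        // past that group, flush the cached bitmaps into the sorter and look
        // up the next matching group.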
        while let Some((word, data)) = word_docids_iter.next()? {
            current_prefixes = match current_prefixes.take() {
                Some(prefixes) if word.starts_with(&prefixes[0].as_bytes()) => Some(prefixes),
                _otherwise => {
                    write_prefixes_in_sorter(&mut prefixes_cache, &mut prefix_docids_sorter)?;
                    prefix_fst_keys
                        .iter()
                        .find(|prefixes| word.starts_with(&prefixes[0].as_bytes()))
                }
            };

            if let Some(prefixes) = current_prefixes {
                for prefix in prefixes.iter() {
                    if word.starts_with(prefix.as_bytes()) {
                        match prefixes_cache.get_mut(prefix.as_bytes()) {
                            Some(value) => value.push(data.to_owned()),
                            None => {
                                prefixes_cache.insert(prefix.clone().into(), vec![data.to_owned()]);
                            }
                        }
                    }
                }
            }
        }

        // Flush the bitmaps cached for the last group of prefixes, otherwise
        // they would never reach the sorter.
        write_prefixes_in_sorter(&mut prefixes_cache, &mut prefix_docids_sorter)?;

        // We remove all the entries that are no longer required in this word prefix docids database.
        let mut iter = self.index.word_prefix_docids.iter_mut(self.wtxn)?.lazily_decode_data();
        while let Some((prefix, _)) = iter.next().transpose()? {
            if suppr_pw.contains(prefix.as_bytes()) {
                unsafe { iter.del_current()? };
            }
        }

        // The iterator borrows the write transaction, so drop it before
        // writing into the database again below.
        drop(iter);

        // We finally write the word prefix docids into the LMDB database.
        sorter_into_lmdb_database(
            self.wtxn,
            *self.index.word_prefix_docids.as_polymorph(),
            prefix_docids_sorter,
            merge_roaring_bitmaps,
            WriteMethod::GetMergePut,
        )?;

        Ok(())
    }
}

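/// Drain the in-memory prefix cache and insert every buffered bitmap under its
/// prefix key into the sorter; entries sharing a key are merged later by the
/// sorter's merge function (`merge_roaring_bitmaps`).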
fn write_prefixes_in_sorter(
    prefixes: &mut HashMap<Vec<u8>, Vec<Vec<u8>>>,
    sorter: &mut grenad::Sorter<MergeFn>,
) -> Result<()> {
    for (key, data_slices) in prefixes.drain() {
        for data in data_slices {
            sorter.insert(&key, data)?;
        }
    }

    Ok(())
}
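
// A minimal usage sketch (not part of the original file; variable names such
// as `new_word_docids_readers` are illustrative assumptions): given an open
// write transaction `wtxn`, an `Index`, the prefix FST as it was *before* the
// prefixes were rebuilt, and the grenad readers produced for the newly indexed
// word docids, a caller inside this crate would typically do something like:
//
//     let old_prefix_fst = index.words_prefixes_fst(&wtxn)?;
//     // ... rebuild the words prefixes FST and collect `new_word_docids_readers` ...
//     let mut builder = WordPrefixDocids::new(&mut wtxn, &index);
//     builder.max_memory = Some(100 * 1024 * 1024); // optional: cap the sorter memory
//     builder.execute(new_word_docids_readers, &old_prefix_fst)?;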