/*!
This module implements two different algorithms for updating the `facet_id_string_docids`
and `facet_id_f64_docids` databases. The first algorithm is a "bulk" algorithm, meaning that
it recreates the database from scratch when new elements are added to it. The second algorithm
is incremental: it modifies the database as little as possible.

The databases must be able to return results for queries such as:
1. Filter       : find all the document ids that have a facet value greater than X and/or smaller than Y
2. Min/Max      : find the minimum/maximum facet value among these document ids
3. Sort         : sort these document ids by increasing/decreasing facet values
4. Distribution : given some document ids, make a list of each facet value
   found in these documents along with the number of documents that contain it

The algorithms that implement these queries are found in the `src/search/facet` folder.

To make these queries fast to compute, the database adopts a tree structure:
```ignore
            ┌───────────────────────────────┬───────────────────────────────┬───────────────┐
┌───────┐   │           "ab" (2)            │           "gaf" (2)           │   "woz" (1)   │
│Level 2│   │                               │                               │               │
└───────┘   │        [a, b, d, f, z]        │        [c, d, e, f, g]        │     [u, y]    │
            ├───────────────┬───────────────┼───────────────┬───────────────┼───────────────┤
┌───────┐   │   "ab" (2)    │   "ba" (2)    │   "gaf" (2)   │  "form" (2)   │   "woz" (2)   │
│Level 1│   │               │               │               │               │               │
└───────┘   │ [a, b, d, z]  │   [a, b, f]   │   [c, d, g]   │     [e, f]    │     [u, y]    │
            ├───────┬───────┼───────┬───────┼───────┬───────┼───────┬───────┼───────┬───────┤
┌───────┐   │ "ab"  │ "ac"  │ "ba"  │ "bac" │ "gaf" │ "gal" │ "form"│ "wow" │ "woz" │ "zz"  │
│Level 0│   │       │       │       │       │       │       │       │       │       │       │
└───────┘   │ [a, b]│ [d, z]│ [b, f]│ [a, f]│ [c, d]│  [g]  │  [e]  │ [e, f]│  [y]  │  [u]  │
            └───────┴───────┴───────┴───────┴───────┴───────┴───────┴───────┴───────┴───────┘
```

In the diagram above, each cell corresponds to a node in the tree. The first line of the cell
contains the left bound of the range of facet values as well as the number of children of the node.
The second line contains the document ids which have a facet value within the range of the node.
The nodes at level 0 are the leaf nodes. They have 0 children and a single facet value in their range.

In the diagram above, the first cell of level 2 is `ab (2)`. Its range is `ab .. gaf` (because
`gaf` is the left bound of the next node) and it has two children. Its document ids are `[a,b,d,f,z]`.
These documents all contain a facet value that is contained within `ab .. gaf`.

In the database, each node is represented by a key/value pair encoded as a [`FacetGroupKey`] and a
[`FacetGroupValue`], which have the following format:

```ignore
FacetGroupKey:
- field id  : u16
- level     : u8
- left bound: [u8] // the facet value encoded using either OrderedF64Codec or Str

FacetGroupValue:
- #children : u8
- docids    : RoaringBitmap
```
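
For instance, the level-1 node `"gaf" (2)` from the diagram above would be stored roughly as
follows (illustrative: the field id of `0` is arbitrary, and the left bound is really the
`Str`-encoded bytes of `"gaf"`):

```ignore
FacetGroupKey   { field id: 0, level: 1, left bound: "gaf" }
FacetGroupValue { #children: 2, docids: [c, d, g] }
```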

When the database is first created using the "bulk" method, each node has a fixed number of children
(except for possibly the last one) given by the `group_size` parameter (defaulting to `FACET_GROUP_SIZE`).
The tree is also built such that the highest level has more than `min_level_size`
(defaulting to `FACET_MIN_LEVEL_SIZE`) elements in it.
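
As a rough illustration of the bulk construction with the default parameters (`group_size = 4`,
`min_level_size = 5`), indexing 1000 distinct facet values produces the following levels
(the last group of each level may be smaller):

```ignore
level 0: 1000 leaf nodes
level 1:  250 nodes (one per group of 4 level-0 nodes)
level 2:   63 nodes
level 3:   16 nodes  // highest level: a level 4 would only contain 4 nodes, below min_level_size
```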

When the database is incrementally updated, the number of children of a node can vary between
1 and `max_group_size`. This is done so that most incremental operations do not need to change
the structure of the tree. When the number of children of a node reaches `max_group_size`,
we split the node in two and update the number of children of its parent.

When adding documents to the databases, it is important to determine which method to use to
minimise indexing time. The incremental method is faster when adding few new facet values, but the
bulk method is faster when a large part of the database is modified. Empirically, it seems that
it takes 50x more time to incrementally add N facet values to an existing database than to
construct a new database of N facet values. This is the heuristic that is used to choose between the
two methods.
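
In code, this choice boils down to a simple threshold check (see `FacetsUpdate::execute` below):

```ignore
// `delta_data` holds the new facet entries; `database` is the existing facet database.
if delta_data.len() >= database.len() / 50 {
    // rebuild all the levels from scratch with the bulk algorithm
} else {
    // patch the existing tree with the incremental algorithm
}
```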

Related PR: https://github.com/meilisearch/milli/pull/619
*/

pub const FACET_MAX_GROUP_SIZE: u8 = 8;
pub const FACET_GROUP_SIZE: u8 = 4;
pub const FACET_MIN_LEVEL_SIZE: u8 = 5;

use std::collections::BTreeSet;
use std::fs::File;
use std::io::BufReader;
use std::iter::FromIterator;

use charabia::normalizer::{Normalize, NormalizerOption};
use grenad::{CompressionType, SortAlgorithm};
use heed::types::{ByteSlice, DecodeIgnore, SerdeJson};
use heed::BytesEncode;
use log::debug;
use time::OffsetDateTime;

use self::incremental::FacetsUpdateIncremental;
use super::FacetsUpdateBulk;
use crate::facet::FacetType;
use crate::heed_codec::facet::{FacetGroupKey, FacetGroupKeyCodec, FacetGroupValueCodec};
use crate::heed_codec::ByteSliceRefCodec;
use crate::update::index_documents::create_sorter;
use crate::update::merge_btreeset_string;
use crate::{BEU16StrCodec, Index, Result, BEU16, MAX_FACET_VALUE_LENGTH};

pub mod bulk;
pub mod delete;
pub mod incremental;

/// A builder used to add new elements to the `facet_id_string_docids` or `facet_id_f64_docids` databases.
///
/// Depending on the number of new elements and the existing size of the database, we use either
/// a bulk update method or an incremental update method.
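///
/// Illustrative usage (the real call sites live in the document indexing pipeline):
///
/// ```ignore
/// let update = FacetsUpdate::new(&index, FacetType::Number, delta_data);
/// update.execute(&mut wtxn)?;
/// ```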
pub struct FacetsUpdate<'i> {
    index: &'i Index,
    database: heed::Database<FacetGroupKeyCodec<ByteSliceRefCodec>, FacetGroupValueCodec>,
    facet_type: FacetType,
    delta_data: grenad::Reader<BufReader<File>>,
    group_size: u8,
    max_group_size: u8,
    min_level_size: u8,
}

impl<'i> FacetsUpdate<'i> {
    // TODO grenad::Reader<Key, Obkv<DelAdd, RoaringBitmap>>
    pub fn new(
        index: &'i Index,
        facet_type: FacetType,
        delta_data: grenad::Reader<BufReader<File>>,
    ) -> Self {
        let database = match facet_type {
            FacetType::String => index
                .facet_id_string_docids
                .remap_key_type::<FacetGroupKeyCodec<ByteSliceRefCodec>>(),
            FacetType::Number => {
                index.facet_id_f64_docids.remap_key_type::<FacetGroupKeyCodec<ByteSliceRefCodec>>()
            }
        };
        Self {
            index,
            database,
            group_size: FACET_GROUP_SIZE,
            max_group_size: FACET_MAX_GROUP_SIZE,
            min_level_size: FACET_MIN_LEVEL_SIZE,
            facet_type,
            delta_data,
        }
    }

    pub fn execute(self, wtxn: &mut heed::RwTxn) -> Result<()> {
        if self.delta_data.is_empty() {
            return Ok(());
        }
        debug!("Computing and writing the facet values levels docids into LMDB on disk...");
        self.index.set_updated_at(wtxn, &OffsetDateTime::now_utc())?;

        // See self::comparison_bench::benchmark_facet_indexing
        if self.delta_data.len() >= (self.database.len(wtxn)? as u64 / 50) {
            let field_ids =
                self.index.faceted_fields_ids(wtxn)?.iter().copied().collect::<Vec<_>>();
            let bulk_update = FacetsUpdateBulk::new(
                self.index,
                field_ids,
                self.facet_type,
                self.delta_data,
                self.group_size,
                self.min_level_size,
            );
            bulk_update.execute(wtxn)?;
        } else {
            let incremental_update = FacetsUpdateIncremental::new(
                self.index,
                self.facet_type,
                self.delta_data,
                self.group_size,
                self.min_level_size,
                self.max_group_size,
            );
            incremental_update.execute(wtxn)?;
        }

        // We clear the list of normalized-for-search facets
        // and the previous FSTs to compute everything from scratch
        self.index.facet_id_normalized_string_strings.clear(wtxn)?;
        self.index.facet_id_string_fst.clear(wtxn)?;

        // As we can't use the same write transaction to read and write in two different databases
        // we must create a temporary sorter that we will write into LMDB afterward.
        // As multiple unnormalized facet values can become the same normalized facet value
        // we must merge them together.
        let mut sorter = create_sorter(
            SortAlgorithm::Unstable,
            merge_btreeset_string,
            CompressionType::None,
            None,
            None,
            None,
        );

        // We iterate on the list of original, semi-normalized, facet values
        // and normalize them for search, inserting them in LMDB in any given order.
        let options = NormalizerOption { lossy: true, ..Default::default() };
        let database = self.index.facet_id_string_docids.remap_data_type::<DecodeIgnore>();
        for result in database.iter(wtxn)? {
            let (facet_group_key, ()) = result?;
            if let FacetGroupKey { field_id, level: 0, left_bound } = facet_group_key {
                let mut normalized_facet = left_bound.normalize(&options);
                let normalized_truncated_facet: String;
                if normalized_facet.len() > MAX_FACET_VALUE_LENGTH {
                    normalized_truncated_facet = normalized_facet
                        .char_indices()
                        .take_while(|(idx, _)| *idx < MAX_FACET_VALUE_LENGTH)
                        .map(|(_, c)| c)
                        .collect();
                    normalized_facet = normalized_truncated_facet.into();
                }
                let set = BTreeSet::from_iter(std::iter::once(left_bound));
                let key = (field_id, normalized_facet.as_ref());
                let key = BEU16StrCodec::bytes_encode(&key).ok_or(heed::Error::Encoding)?;
                let val = SerdeJson::bytes_encode(&set).ok_or(heed::Error::Encoding)?;
                sorter.insert(key, val)?;
            }
        }

        // In this loop we don't need to take care of merging bitmaps
        // as the grenad sorter already merged them for us.
        let mut merger_iter = sorter.into_stream_merger_iter()?;
        while let Some((key_bytes, btreeset_bytes)) = merger_iter.next()? {
            self.index
                .facet_id_normalized_string_strings
                .remap_types::<ByteSlice, ByteSlice>()
                .put(wtxn, key_bytes, btreeset_bytes)?;
        }

        // We compute one FST per string facet field
        let mut text_fsts = vec![];
        let mut current_fst: Option<(u16, fst::SetBuilder<Vec<u8>>)> = None;
        let database =
            self.index.facet_id_normalized_string_strings.remap_data_type::<DecodeIgnore>();
        for result in database.iter(wtxn)? {
            let ((field_id, normalized_facet), _) = result?;
            current_fst = match current_fst.take() {
                Some((fid, fst_builder)) if fid != field_id => {
                    let fst = fst_builder.into_set();
                    text_fsts.push((fid, fst));
                    Some((field_id, fst::SetBuilder::memory()))
                }
                Some((field_id, fst_builder)) => Some((field_id, fst_builder)),
                None => Some((field_id, fst::SetBuilder::memory())),
            };

            if let Some((_, fst_builder)) = current_fst.as_mut() {
                fst_builder.insert(normalized_facet)?;
            }
        }

        if let Some((field_id, fst_builder)) = current_fst {
            let fst = fst_builder.into_set();
            text_fsts.push((field_id, fst));
        }

        // We write those FSTs in LMDB now
        for (field_id, fst) in text_fsts {
            self.index.facet_id_string_fst.put(wtxn, &BEU16::new(field_id), &fst)?;
        }

        Ok(())
    }
}

#[cfg(test)]
pub(crate) mod test_helpers {
    use std::cell::Cell;
    use std::fmt::Display;
    use std::iter::FromIterator;
    use std::marker::PhantomData;
    use std::rc::Rc;

    use heed::types::ByteSlice;
    use heed::{BytesDecode, BytesEncode, Env, RoTxn, RwTxn};
    use roaring::RoaringBitmap;

    use super::bulk::FacetsUpdateBulkInner;
    use crate::heed_codec::facet::{
        FacetGroupKey, FacetGroupKeyCodec, FacetGroupValue, FacetGroupValueCodec,
    };
    use crate::heed_codec::ByteSliceRefCodec;
    use crate::search::facet::get_highest_level;
    use crate::snapshot_tests::display_bitmap;
    use crate::update::FacetsUpdateIncrementalInner;
    use crate::CboRoaringBitmapCodec;

    /// Utility function to generate a string whose position in a lexicographically
    /// ordered list is `i`.
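    ///
    /// For example (illustrative): `ordered_string(0) == ""`, `ordered_string(1) == "aaaab"`
    /// and `ordered_string(2) == "aaaac"`, so the generated strings sort in the same order
    /// as their indices.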
    pub fn ordered_string(mut i: usize) -> String {
        // The first string is empty
        if i == 0 {
            return String::new();
        }
        // The others are 5 chars long, each between 'a' and 'z'
        let mut s = String::new();
        for _ in 0..5 {
            let (digit, next) = (i % 26, i / 26);
            s.insert(0, char::from_u32('a' as u32 + digit as u32).unwrap());
            i = next;
        }
        s
    }

    /// A dummy index that only contains the facet database, used for testing
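    ///
    /// Illustrative usage (mirroring the `comparison_bench` module at the bottom of this file):
    ///
    /// ```ignore
    /// let index = FacetIndex::<OrderedF64Codec>::new(4, 8, 5);
    /// let mut wtxn = index.env.write_txn().unwrap();
    /// index.insert(&mut wtxn, 0, &1.0, &std::iter::once(1).collect());
    /// wtxn.commit().unwrap();
    /// ```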
    pub struct FacetIndex<BoundCodec>
    where
        for<'a> BoundCodec:
            BytesEncode<'a> + BytesDecode<'a, DItem = <BoundCodec as BytesEncode<'a>>::EItem>,
    {
        pub env: Env,
        pub content: heed::Database<FacetGroupKeyCodec<ByteSliceRefCodec>, FacetGroupValueCodec>,
        pub group_size: Cell<u8>,
        pub min_level_size: Cell<u8>,
        pub max_group_size: Cell<u8>,
        _tempdir: Rc<tempfile::TempDir>,
        _phantom: PhantomData<BoundCodec>,
    }

    impl<BoundCodec> FacetIndex<BoundCodec>
    where
        for<'a> BoundCodec:
            BytesEncode<'a> + BytesDecode<'a, DItem = <BoundCodec as BytesEncode<'a>>::EItem>,
    {
        #[cfg(all(test, fuzzing))]
        pub fn open_from_tempdir(
            tempdir: Rc<tempfile::TempDir>,
            group_size: u8,
            max_group_size: u8,
            min_level_size: u8,
        ) -> FacetIndex<BoundCodec> {
            let group_size = std::cmp::min(16, std::cmp::max(group_size, 2)); // 2 <= x <= 16
            let max_group_size = std::cmp::min(16, std::cmp::max(group_size * 2, max_group_size)); // 2*group_size <= x <= 16
            let min_level_size = std::cmp::min(17, std::cmp::max(1, min_level_size)); // 1 <= x <= 17

            let mut options = heed::EnvOpenOptions::new();
            let options = options.map_size(4096 * 4 * 10 * 1000);
            unsafe {
                options.flag(heed::flags::Flags::MdbAlwaysFreePages);
            }
            let env = options.open(tempdir.path()).unwrap();
            let content = env.open_database(None).unwrap().unwrap();

            FacetIndex {
                content,
                group_size: Cell::new(group_size),
                max_group_size: Cell::new(max_group_size),
                min_level_size: Cell::new(min_level_size),
                _tempdir: tempdir,
                env,
                _phantom: PhantomData,
            }
        }
        pub fn new(
            group_size: u8,
            max_group_size: u8,
            min_level_size: u8,
        ) -> FacetIndex<BoundCodec> {
            let group_size = group_size.clamp(2, 127);
            let max_group_size = std::cmp::min(127, std::cmp::max(group_size * 2, max_group_size)); // 2*group_size <= x <= 127
            let min_level_size = std::cmp::max(1, min_level_size); // 1 <= x <= inf
            let mut options = heed::EnvOpenOptions::new();
            let options = options.map_size(4096 * 4 * 1000 * 100);
            let tempdir = tempfile::TempDir::new().unwrap();
            let env = options.open(tempdir.path()).unwrap();
            let mut wtxn = env.write_txn().unwrap();
            let content = env.create_database(&mut wtxn, None).unwrap();
            wtxn.commit().unwrap();

            FacetIndex {
                content,
                group_size: Cell::new(group_size),
                max_group_size: Cell::new(max_group_size),
                min_level_size: Cell::new(min_level_size),
                _tempdir: Rc::new(tempdir),
                env,
                _phantom: PhantomData,
            }
        }

        #[cfg(all(test, fuzzing))]
        pub fn set_group_size(&self, group_size: u8) {
            // 2 <= x <= 64
            self.group_size.set(std::cmp::min(64, std::cmp::max(group_size, 2)));
        }
        #[cfg(all(test, fuzzing))]
        pub fn set_max_group_size(&self, max_group_size: u8) {
            // 2*group_size <= x <= 128
            let max_group_size = std::cmp::max(4, std::cmp::min(128, max_group_size));
            self.max_group_size.set(max_group_size);
            if self.group_size.get() < max_group_size / 2 {
                self.group_size.set(max_group_size / 2);
            }
        }
        #[cfg(all(test, fuzzing))]
        pub fn set_min_level_size(&self, min_level_size: u8) {
            // 1 <= x <= inf
            self.min_level_size.set(std::cmp::max(1, min_level_size));
        }

        pub fn insert<'a>(
            &self,
            wtxn: &'a mut RwTxn,
            field_id: u16,
            key: &'a <BoundCodec as BytesEncode<'a>>::EItem,
            docids: &RoaringBitmap,
        ) {
            let update = FacetsUpdateIncrementalInner {
                db: self.content,
                group_size: self.group_size.get(),
                min_level_size: self.min_level_size.get(),
                max_group_size: self.max_group_size.get(),
            };
            let key_bytes = BoundCodec::bytes_encode(key).unwrap();
            update.insert(wtxn, field_id, &key_bytes, docids).unwrap();
        }
        pub fn delete_single_docid<'a>(
            &self,
            wtxn: &'a mut RwTxn,
            field_id: u16,
            key: &'a <BoundCodec as BytesEncode<'a>>::EItem,
            docid: u32,
        ) {
            self.delete(wtxn, field_id, key, &RoaringBitmap::from_iter(std::iter::once(docid)))
        }

        pub fn delete<'a>(
            &self,
            wtxn: &'a mut RwTxn,
            field_id: u16,
            key: &'a <BoundCodec as BytesEncode<'a>>::EItem,
            docids: &RoaringBitmap,
        ) {
            let update = FacetsUpdateIncrementalInner {
                db: self.content,
                group_size: self.group_size.get(),
                min_level_size: self.min_level_size.get(),
                max_group_size: self.max_group_size.get(),
            };
            let key_bytes = BoundCodec::bytes_encode(key).unwrap();
            update.delete(wtxn, field_id, &key_bytes, docids).unwrap();
        }

        pub fn bulk_insert<'a, 'b>(
            &self,
            wtxn: &'a mut RwTxn,
            field_ids: &[u16],
            els: impl IntoIterator<
                Item = &'a ((u16, <BoundCodec as BytesEncode<'a>>::EItem), RoaringBitmap),
            >,
        ) where
            for<'c> <BoundCodec as BytesEncode<'c>>::EItem: Sized,
        {
            let mut new_data = vec![];
            let mut writer = grenad::Writer::new(&mut new_data);
            for ((field_id, left_bound), docids) in els {
                let left_bound_bytes = BoundCodec::bytes_encode(left_bound).unwrap().into_owned();
                let key: FacetGroupKey<&[u8]> =
                    FacetGroupKey { field_id: *field_id, level: 0, left_bound: &left_bound_bytes };
                let key = FacetGroupKeyCodec::<ByteSliceRefCodec>::bytes_encode(&key).unwrap();
                let value = CboRoaringBitmapCodec::bytes_encode(docids).unwrap();
                writer.insert(&key, &value).unwrap();
            }
            writer.finish().unwrap();
            let reader = grenad::Reader::new(std::io::Cursor::new(new_data)).unwrap();

            let update = FacetsUpdateBulkInner {
                db: self.content,
                delta_data: Some(reader),
                group_size: self.group_size.get(),
                min_level_size: self.min_level_size.get(),
            };

            update.update(wtxn, field_ids, |_, _, _| Ok(())).unwrap();
        }

        /// Check the internal consistency of the tree for `field_id`: every node above level 0
        /// must have a `size` equal to the number of children found below it, and its bitmap
        /// must be the union of its children's bitmaps.
        pub fn verify_structure_validity(&self, txn: &RoTxn, field_id: u16) {
            let mut field_id_prefix = vec![];
            field_id_prefix.extend_from_slice(&field_id.to_be_bytes());

            let highest_level = get_highest_level(txn, self.content, field_id).unwrap();

            for level_no in (1..=highest_level).rev() {
                let mut level_no_prefix = vec![];
                level_no_prefix.extend_from_slice(&field_id.to_be_bytes());
                level_no_prefix.push(level_no);

                let iter = self
                    .content
                    .as_polymorph()
                    .prefix_iter::<_, ByteSlice, FacetGroupValueCodec>(txn, &level_no_prefix)
                    .unwrap();
                for el in iter {
                    let (key, value) = el.unwrap();
                    let key = FacetGroupKeyCodec::<ByteSliceRefCodec>::bytes_decode(key).unwrap();

                    let mut prefix_start_below = vec![];
                    prefix_start_below.extend_from_slice(&field_id.to_be_bytes());
                    prefix_start_below.push(level_no - 1);
                    prefix_start_below.extend_from_slice(key.left_bound);

                    let start_below = {
                        let mut start_below_iter = self
                            .content
                            .as_polymorph()
                            .prefix_iter::<_, ByteSlice, FacetGroupValueCodec>(
                                txn,
                                &prefix_start_below,
                            )
                            .unwrap();
                        let (key_bytes, _) = start_below_iter.next().unwrap().unwrap();
                        FacetGroupKeyCodec::<ByteSliceRefCodec>::bytes_decode(key_bytes).unwrap()
                    };

                    assert!(value.size > 0);

                    let mut actual_size = 0;
                    let mut values_below = RoaringBitmap::new();
                    let iter_below = self
                        .content
                        .range(txn, &(start_below..))
                        .unwrap()
                        .take(value.size as usize);
                    for el in iter_below {
                        let (_, value) = el.unwrap();
                        actual_size += 1;
                        values_below |= value.bitmap;
                    }
                    assert_eq!(actual_size, value.size, "{key:?} start_below: {start_below:?}");

                    assert_eq!(value.bitmap, values_below);
                }
            }
        }
    }

    impl<BoundCodec> Display for FacetIndex<BoundCodec>
    where
        for<'a> <BoundCodec as BytesEncode<'a>>::EItem: Sized + Display,
        for<'a> BoundCodec:
            BytesEncode<'a> + BytesDecode<'a, DItem = <BoundCodec as BytesEncode<'a>>::EItem>,
    {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let txn = self.env.read_txn().unwrap();
            let iter = self.content.iter(&txn).unwrap();
            for el in iter {
                let (key, value) = el.unwrap();
                let FacetGroupKey { field_id, level, left_bound: bound } = key;
                let bound = BoundCodec::bytes_decode(bound).unwrap();
                let FacetGroupValue { size, bitmap } = value;
                writeln!(
                    f,
                    "{field_id:<2} {level:<2} k{bound:<8} {size:<4} {values:?}",
                    values = display_bitmap(&bitmap)
                )?;
            }
            Ok(())
        }
    }
}

#[cfg(test)]
mod tests {
    use big_s::S;
    use maplit::hashset;

    use crate::db_snap;
    use crate::documents::documents_batch_reader_from_objects;
    use crate::index::tests::TempIndex;
    use crate::update::DeletionStrategy;

    #[test]
    fn replace_all_identical_soft_deletion_then_hard_deletion() {
        let mut index = TempIndex::new_with_map_size(4096 * 1000 * 100);

        index.index_documents_config.deletion_strategy = DeletionStrategy::AlwaysSoft;

        index
            .update_settings(|settings| {
                settings.set_primary_key("id".to_owned());
                settings.set_filterable_fields(hashset! { S("size") });
            })
            .unwrap();

        let mut documents = vec![];
        for i in 0..1000 {
            documents.push(
                serde_json::json! {
                    {
                        "id": i,
                        "size": i % 250,
                    }
                }
                .as_object()
                .unwrap()
                .clone(),
            );
        }

        let documents = documents_batch_reader_from_objects(documents);
        index.add_documents(documents).unwrap();

        db_snap!(index, facet_id_f64_docids, "initial", @"777e0e221d778764b472c512617eeb3b");
        db_snap!(index, soft_deleted_documents_ids, "initial", @"[]");

        let mut documents = vec![];
        for i in 0..999 {
            documents.push(
                serde_json::json! {
                    {
                        "id": i,
                        "size": i % 250,
                        "other": 0,
                    }
                }
                .as_object()
                .unwrap()
                .clone(),
            );
        }

        let documents = documents_batch_reader_from_objects(documents);
        index.add_documents(documents).unwrap();

        db_snap!(index, facet_id_f64_docids, "replaced_1_soft", @"abba175d7bed727d0efadaef85a4388f");
        db_snap!(index, soft_deleted_documents_ids, "replaced_1_soft", @"6c975deb900f286d2f6456d2d5c3a123");

        // Then replace the last document while disabling soft_deletion
        index.index_documents_config.deletion_strategy = DeletionStrategy::AlwaysHard;
        let mut documents = vec![];
        for i in 999..1000 {
            documents.push(
                serde_json::json! {
                    {
                        "id": i,
                        "size": i % 250,
                        "other": 0,
                    }
                }
                .as_object()
                .unwrap()
                .clone(),
            );
        }

        let documents = documents_batch_reader_from_objects(documents);
        index.add_documents(documents).unwrap();

        db_snap!(index, facet_id_f64_docids, "replaced_2_hard", @"029e27a46d09c574ae949aa4289b45e6");
        db_snap!(index, soft_deleted_documents_ids, "replaced_2_hard", @"[]");
    }
}

#[allow(unused)]
#[cfg(test)]
mod comparison_bench {
    use std::iter::once;

    use rand::Rng;
    use roaring::RoaringBitmap;

    use super::test_helpers::FacetIndex;
    use crate::heed_codec::facet::OrderedF64Codec;

    // This is a simple test to get an intuition on the relative speed
    // of the incremental vs. bulk indexer.
    //
    // The benchmark shows the worst-case scenario for the incremental indexer, since
    // each facet value contains only one document ID.
    //
    // In that scenario, it appears that the incremental indexer is about 50 times slower than the
    // bulk indexer.
    // #[test]
    fn benchmark_facet_indexing() {
        let mut facet_value = 0;

        let mut r = rand::thread_rng();

        for i in 1..=20 {
            let size = 50_000 * i;
            let index = FacetIndex::<OrderedF64Codec>::new(4, 8, 5);

            let mut txn = index.env.write_txn().unwrap();
            let mut elements = Vec::<((u16, f64), RoaringBitmap)>::new();
            for i in 0..size {
                // field id = 0, left_bound = i, docids = [i]
                elements.push(((0, facet_value as f64), once(i).collect()));
                facet_value += 1;
            }
            let timer = std::time::Instant::now();
            index.bulk_insert(&mut txn, &[0], elements.iter());
            let time_spent = timer.elapsed().as_millis();
            println!("bulk {size} : {time_spent}ms");

            txn.commit().unwrap();

            for nbr_doc in [1, 100, 1000, 10_000] {
                let mut txn = index.env.write_txn().unwrap();
                let timer = std::time::Instant::now();
                // incrementally insert `nbr_doc` new facet values, one docid each
                for _ in 0..nbr_doc {
                    index.insert(&mut txn, 0, &r.gen(), &once(1).collect());
                }
                let time_spent = timer.elapsed().as_millis();
                println!("  add {nbr_doc} : {time_spent}ms");
                txn.abort().unwrap();
            }
        }
    }
}