Mirror of https://github.com/meilisearch/MeiliSearch, synced 2025-07-04 20:37:15 +02:00
Introduce the use of RocksDB instead of sled (RAM)
This commit is contained in:
parent 91ba938953
commit a81f201fad
5 changed files with 288 additions and 116 deletions
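The change swaps sled's in-memory trees for an on-disk RocksDB instance: the "main", "postings-ids" and "documents" trees become column families, sled's per-tree merge operators become RocksDB merge operators registered on the database options, and sled's generate_id is replaced by a process-wide AtomicUsize. Below is a minimal, self-contained sketch of that setup, assuming the rust-rocksdb API visible in the diff (the three-argument set_merge_operator and &mut MergeOperands); the concat_merge operator, the example.db path and the keys are illustrative only and not part of the commit.

use std::sync::Arc;

// Illustrative merge operator: RocksDB hands it the current value (if any)
// plus every operand queued by `merge_cf`, and it must fold them into a
// single new value, which is exactly the contract the union_* functions
// in this commit follow.
fn concat_merge(
    _key: &[u8],
    old_value: Option<&[u8]>,
    operands: &mut rocksdb::MergeOperands,
) -> Option<Vec<u8>> {
    let mut result = old_value.map(Vec::from).unwrap_or_default();
    for operand in operands {
        result.extend_from_slice(operand);
    }
    Some(result)
}

fn main() -> Result<(), rocksdb::Error> {
    let mut opts = rocksdb::Options::default();
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);
    opts.set_merge_operator("concat", concat_merge, Some(concat_merge));

    // Column families play the role of sled's trees; like the commit, open
    // the database first and then create the column families with the same options.
    let mut db = rocksdb::DB::open(&opts, "example.db")?;
    for cf in ["main", "postings-ids", "documents"].iter() {
        db.create_cf(cf, &opts)?;
    }
    let db = Arc::new(db);

    // `merge_cf` only queues an operand; the merge operator runs lazily on
    // reads and compactions, so several indexing threads can merge cheaply.
    let postings_ids = db.cf_handle("postings-ids").unwrap();
    db.merge_cf(&postings_ids, b"hello", b"1,2,3")?;
    db.merge_cf(&postings_ids, b"hello", b"4,5")?;
    assert!(db.get_cf(&postings_ids, b"hello")?.is_some());
    Ok(())
}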
@@ -37,8 +37,12 @@ impl BpVec {
         uncompressed
     }

-    pub fn capacity(&self) -> usize {
-        self.compressed.capacity() + self.uncompressed.capacity()
+    pub fn compressed_capacity(&self) -> usize {
+        self.compressed.capacity()
+    }
+
+    pub fn uncompressed_capacity(&self) -> usize {
+        self.uncompressed.capacity()
     }
 }

@@ -58,6 +58,12 @@ impl CodecBitPacker4xSorted {
         let mut initial_value = 0;

         while let Some(num_bits) = bytes.get(0) {
+            if *num_bits == 0 {
+                decompressed.resize(decompressed.len() + BitPacker4x::BLOCK_LEN, initial_value);
+                bytes = &bytes[1..];
+                continue;
+            }
+
             let block_size = BitPacker4x::compressed_block_size(*num_bits);

             let new_len = decompressed.len() + BitPacker4x::BLOCK_LEN;
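The hunk above adds a fast path for blocks whose num_bits byte is 0. Under the framing this decode loop implies (one num_bits byte followed by compressed_block_size(num_bits) bytes of payload per block of BLOCK_LEN integers), a zero-bit block carries no payload and simply stands for BLOCK_LEN values equal to initial_value. The following is a minimal sketch of such a loop written directly against the bitpacking crate; decode_blocks is a hypothetical helper, not the codec's actual bytes_decode method.

use bitpacking::{BitPacker, BitPacker4x};

// Decode a stream of [num_bits][compressed block] frames into u32 values.
fn decode_blocks(mut bytes: &[u8]) -> Vec<u32> {
    let bitpacker = BitPacker4x::new();
    let mut decompressed = Vec::new();
    let mut initial_value = 0u32;

    while let Some(&num_bits) = bytes.first() {
        bytes = &bytes[1..];

        if num_bits == 0 {
            // Zero-bit block: no payload, every value equals `initial_value`.
            decompressed.resize(decompressed.len() + BitPacker4x::BLOCK_LEN, initial_value);
            continue;
        }

        let block_size = BitPacker4x::compressed_block_size(num_bits);
        let new_len = decompressed.len() + BitPacker4x::BLOCK_LEN;
        decompressed.resize(new_len, 0);

        let block = &mut decompressed[new_len - BitPacker4x::BLOCK_LEN..];
        bitpacker.decompress_sorted(initial_value, &bytes[..block_size], block, num_bits);

        // In this sketch the delta-coded blocks chain on the last value of the previous one.
        initial_value = *block.last().unwrap();
        bytes = &bytes[block_size..];
    }

    decompressed
}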
src/main.rs (115 changes)
@@ -9,6 +9,8 @@ use std::convert::TryFrom;
 use std::fs::File;
 use std::hash::BuildHasherDefault;
 use std::path::PathBuf;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, Ordering};

 use anyhow::{ensure, Context};
 use fst::IntoStreamer;
@@ -28,6 +30,8 @@ pub type SmallString32 = smallstr::SmallString<[u8; 32]>;
 #[global_allocator]
 static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

+static ID_GENERATOR: AtomicUsize = AtomicUsize::new(0);
+
 #[derive(Debug, StructOpt)]
 #[structopt(name = "mm-indexer", about = "The server side of the daugt project.")]
 struct Opt {
@@ -40,40 +44,58 @@ struct Opt {
     files_to_index: Vec<PathBuf>,
 }

-fn union_bitpacked_postings_ids(_key: &[u8], old_value: Option<&[u8]>, new_value: &[u8]) -> Option<Vec<u8>> {
-    if old_value.is_none() {
-        return Some(new_value.to_vec())
+fn union_bitpacked_postings_ids(
+    _key: &[u8],
+    old_value: Option<&[u8]>,
+    operands: &mut rocksdb::MergeOperands,
+) -> Option<Vec<u8>>
+{
+    let mut sets_bufs = Vec::new();
+
+    if let Some(old_value) = old_value {
+        let old_value = CodecBitPacker4xSorted::bytes_decode(old_value).unwrap();
+        sets_bufs.push(SetBuf::new(old_value).unwrap());
     }

-    let old_value = old_value.unwrap_or_default();
-    let old_value = CodecBitPacker4xSorted::bytes_decode(&old_value).unwrap();
-    let new_value = CodecBitPacker4xSorted::bytes_decode(&new_value).unwrap();
-
-    let old_set = SetBuf::new(old_value).unwrap();
-    let new_set = SetBuf::new(new_value).unwrap();
-
-    let result = sdset::duo::Union::new(&old_set, &new_set).into_set_buf();
+    for operand in operands {
+        let new_value = CodecBitPacker4xSorted::bytes_decode(operand).unwrap();
+        sets_bufs.push(SetBuf::new(new_value).unwrap());
+    }
+
+    let sets = sets_bufs.iter().map(|s| s.as_set()).collect();
+    let result = sdset::multi::Union::new(sets).into_set_buf();
     let compressed = CodecBitPacker4xSorted::bytes_encode(&result).unwrap();

     Some(compressed)
 }

-fn union_words_fst(key: &[u8], old_value: Option<&[u8]>, new_value: &[u8]) -> Option<Vec<u8>> {
+fn union_words_fst(
+    key: &[u8],
+    old_value: Option<&[u8]>,
+    operands: &mut rocksdb::MergeOperands,
+) -> Option<Vec<u8>>
+{
     if key != b"words-fst" { unimplemented!() }

-    let old_value = match old_value {
-        Some(old_value) => old_value,
-        None => return Some(new_value.to_vec()),
-    };
-
-    eprintln!("old_words size: {}", old_value.len());
-    eprintln!("new_words size: {}", new_value.len());
-
-    let old_words = fst::Set::new(old_value).unwrap();
-    let new_words = fst::Set::new(new_value).unwrap();
+    let mut fst_operands = Vec::new();
+    for operand in operands {
+        fst_operands.push(fst::Set::new(operand).unwrap());
+    }

     // Do an union of the old and the new set of words.
-    let op = old_words.op().add(new_words.into_stream()).r#union();
+    let mut builder = fst::set::OpBuilder::new();
+
+    let old_words = old_value.map(|v| fst::Set::new(v).unwrap());
+    let old_words = old_words.as_ref().map(|v| v.into_stream());
+    if let Some(old_words) = old_words {
+        builder.push(old_words);
+    }
+
+    for new_words in &fst_operands {
+        builder.push(new_words.into_stream());
+    }
+
+    let op = builder.r#union();

     let mut build = fst::SetBuilder::memory();
     build.extend_stream(op.into_stream()).unwrap();
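Because a RocksDB merge operator can be handed several queued operands at once, the words FST is now built as an n-way union of the old set plus every operand, instead of the previous two-way old/new union. Below is a small standalone sketch of that n-way union, assuming the fst 0.4-style API the diff already uses (Set::new over raw bytes, set::OpBuilder, SetBuilder::memory); the union_sets helper and the example words are illustrative, not part of the commit.

use fst::{IntoStreamer, Set};

// Merge any number of fst sets into one, the way union_words_fst folds the
// old value and the merge operands together.
fn union_sets(sets: &[Set<Vec<u8>>]) -> Set<Vec<u8>> {
    let mut builder = fst::set::OpBuilder::new();
    for set in sets {
        builder.push(set.into_stream());
    }
    let op = builder.r#union();

    let mut build = fst::SetBuilder::memory();
    build.extend_stream(op.into_stream()).unwrap();
    Set::new(build.into_inner().unwrap()).unwrap()
}

fn main() {
    // from_iter requires the words to be given in lexicographic order.
    let a = Set::from_iter(vec!["hello", "world"]).unwrap();
    let b = Set::from_iter(vec!["hello", "rocksdb"]).unwrap();
    let merged = union_sets(&[a, b]);
    assert_eq!(merged.len(), 3); // "hello", "rocksdb", "world"
}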
@@ -85,13 +107,18 @@ fn alphanumeric_tokens(string: &str) -> impl Iterator<Item = &str> {
     string.linear_group_by_key(|c| c.is_alphanumeric()).filter(is_alphanumeric)
 }

-fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {
+fn index_csv(
+    tid: usize,
+    db: Arc<rocksdb::DB>,
+    mut rdr: csv::Reader<File>,
+) -> anyhow::Result<usize>
+{
     const MAX_POSITION: usize = 1000;
     const MAX_ATTRIBUTES: usize = u32::max_value() as usize / MAX_POSITION;

-    let main = &*db;
-    let postings_ids = db.open_tree("postings-ids")?;
-    let documents = db.open_tree("documents")?;
+    let main = db.cf_handle("main").context("cf \"main\" not found")?;
+    let postings_ids = db.cf_handle("postings-ids").context("cf \"postings-ids\" not found")?;
+    let documents = db.cf_handle("documents").context("cf \"documents\" not found")?;

     let mut document = csv::StringRecord::new();
     let mut new_postings_ids = FastMap4::default();
@@ -104,12 +131,13 @@ fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {
     writer.write_byte_record(headers.as_byte_record())?;
     let headers = writer.into_inner()?;

-    if let Some(old_headers) = main.insert("headers", headers.as_slice())? {
+    if let Some(old_headers) = db.get_cf(&main, "headers")? {
         ensure!(old_headers == headers, "headers differs from the previous ones");
     }
+    db.put_cf(&main, "headers", headers.as_slice())?;

     while rdr.read_record(&mut document)? {
-        let document_id = db.generate_id()?;
+        let document_id = ID_GENERATOR.fetch_add(1, Ordering::SeqCst);
         let document_id = u32::try_from(document_id).context("Generated id is too big")?;

         for (_attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
@@ -122,11 +150,13 @@ fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {
         let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(Vec::new());
         writer.write_byte_record(document.as_byte_record())?;
         let document = writer.into_inner()?;
-        documents.insert(document_id.to_be_bytes(), document)?;
+        db.put_cf(&documents, document_id.to_be_bytes(), document)?;

         number_of_documents += 1;
         if number_of_documents % 100000 == 0 {
-            let postings_ids_size = new_postings_ids.iter().map(|(_, v)| v.capacity() * 4).sum::<usize>();
+            let postings_ids_size = new_postings_ids.iter().map(|(_, v)| {
+                v.compressed_capacity() + v.uncompressed_capacity() * 4
+            }).sum::<usize>();
             eprintln!("{}, documents seen {}, postings size {}",
                 tid, number_of_documents, postings_ids_size);
         }
@@ -140,7 +170,7 @@ fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {
         let compressed = CodecBitPacker4xSorted::bytes_encode(&new_ids)
             .context("error while compressing using CodecBitPacker4xSorted")?;

-        postings_ids.merge(word.as_bytes(), compressed)?;
+        db.merge_cf(&postings_ids, word.as_bytes(), compressed)?;

         new_words.insert(word);
     }
@@ -151,7 +181,7 @@ fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {

     let new_words_fst = fst::Set::from_iter(new_words.iter().map(|s| s.as_str()))?;
     drop(new_words);
-    main.merge("words-fst", new_words_fst.as_fst().as_bytes())?;
+    db.merge_cf(&main, "words-fst", new_words_fst.as_fst().as_bytes())?;

     eprintln!("Finished merging the words-fst");

@@ -161,16 +191,21 @@ fn index_csv(tid: usize, db: sled::Db, mut rdr: csv::Reader<File>) -> anyhow::Result<usize> {
 fn main() -> anyhow::Result<()> {
     let opt = Opt::from_args();

-    let db = sled::open(opt.database)?;
-    let main = &*db;
-
+    let mut opts = rocksdb::Options::default();
+    opts.create_if_missing(true);
+    opts.create_missing_column_families(true);
     // Setup the merge operators
-    main.set_merge_operator(union_words_fst);
-    let postings_ids = db.open_tree("postings-ids")?;
-    postings_ids.set_merge_operator(union_bitpacked_postings_ids);
-    // ...
-    let _documents = db.open_tree("documents")?;
+    opts.set_merge_operator("main", union_words_fst, Some(union_words_fst));
+    opts.set_merge_operator("postings-ids", union_bitpacked_postings_ids, Some(union_bitpacked_postings_ids));
+
+    let mut db = rocksdb::DB::open(&opts, &opt.database)?;
+
+    let cfs = &["main", "postings-ids", "documents"];
+    for cf in cfs.into_iter() {
+        db.create_cf(cf, &opts).unwrap();
+    }

+    let db = Arc::new(db);
     let res = opt.files_to_index
         .into_par_iter()
         .enumerate()