bump milli version and fix a performance issue for large dumps
parent 956012da95
commit 26dcb9e66d
3 changed files with 7 additions and 9 deletions
@@ -333,16 +333,12 @@ impl<S: IndexStore + Sync + Send> IndexActor<S> {
         // Get write txn to wait for ongoing write transaction before dump.
         let txn = index.write_txn()?;
-        let documents_ids = index.documents_ids(&txn)?;
-        // TODO: TAMO: calling this function here can consume **a lot** of RAM, we should
-        // use some kind of iterators -> waiting for a milli release
-        let documents = index.documents(&txn, documents_ids)?;
-
         let fields_ids_map = index.fields_ids_map(&txn)?;
         // we want to save **all** the fields in the dump.
         let fields_to_dump: Vec<u8> = fields_ids_map.iter().map(|(id, _)| id).collect();
 
-        for (_doc_id, document) in documents {
+        for document in index.all_documents(&txn)? {
+            let (_doc_id, document) = document?;
             let json = milli::obkv_to_json(&fields_to_dump, &fields_ids_map, document)?;
             file.write_all(serde_json::to_string(&json)?.as_bytes())?;
             file.write_all(b"\n")?;
         }
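The performance issue the commit title refers to is visible in the removed TODO: the old code called index.documents(&txn, documents_ids)?, which materialized every document of the index in memory before the dump loop even started, while the new code iterates over index.all_documents(&txn)? and serializes each document as it is yielded. The standalone Rust sketch below contrasts the two shapes. It is only an illustration of the streaming idea; Doc, the iterator source, and both function names are hypothetical stand-ins, not milli's actual API.

use std::io::{self, BufWriter, Write};

// Hypothetical stand-in for a decoded document; milli's real loop works on
// obkv key-value buffers instead.
type Doc = serde_json::Value;

// Old shape: collect every document into a Vec before writing. Peak memory
// grows with the size of the index, which is what hurt large dumps.
fn dump_collected<W: Write>(
    docs: impl Iterator<Item = io::Result<Doc>>,
    out: W,
) -> io::Result<()> {
    let all: Vec<Doc> = docs.collect::<io::Result<_>>()?;
    let mut out = BufWriter::new(out);
    for doc in all {
        serde_json::to_writer(&mut out, &doc)?;
        out.write_all(b"\n")?;
    }
    out.flush()
}

// New shape: serialize each document as soon as the iterator yields it, so
// the dump runs in roughly constant memory regardless of index size.
fn dump_streamed<W: Write>(
    docs: impl Iterator<Item = io::Result<Doc>>,
    out: W,
) -> io::Result<()> {
    let mut out = BufWriter::new(out);
    for doc in docs {
        serde_json::to_writer(&mut out, &doc?)?;
        out.write_all(b"\n")?;
    }
    out.flush()
}

Both produce the same newline-delimited JSON dump; only the second keeps at most one document alive at a time, which matches what the iterator returned by all_documents makes possible on the milli side.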