//! MeiliSearch/index-scheduler/src/batch.rs

use crate::{
autobatcher::BatchKind,
task::{Kind, KindWithContent, Status, Task},
Error, IndexScheduler, Result,
};
use index::{Settings, Unchecked};
use milli::{
heed::{RoTxn, RwTxn},
update::IndexDocumentsMethod,
};
use uuid::Uuid;
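
/// A set of tasks that can be processed together by [`IndexScheduler::process_batch`].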
pub(crate) enum Batch {
Cancel(Task),
Snapshot(Vec<Task>),
Dump(Vec<Task>),
// IndexSpecific { index_uid: String, kind: BatchKind },
DocumentAddition {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
tasks: Vec<Task>,
},
SettingsAndDocumentAddition {
index_uid: String,
primary_key: Option<String>,
content_files: Vec<Uuid>,
document_addition_tasks: Vec<Task>,
settings: Vec<Settings<Unchecked>>,
settings_tasks: Vec<Task>,
},
}

impl IndexScheduler {
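/// Convert an index-specific [`BatchKind`], produced by the autobatcher, into an
/// executable [`Batch`] by fetching the tasks it refers to from the store.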
pub(crate) fn create_next_batch_index(
&self,
rtxn: &RoTxn,
index_uid: String,
batch: BatchKind,
) -> Result<Option<Batch>> {
match batch {
BatchKind::DocumentClear { ids: _ } => todo!(),
BatchKind::DocumentAddition { addition_ids: _ } => todo!(),
BatchKind::DocumentUpdate { update_ids: _ } => todo!(),
BatchKind::DocumentDeletion { deletion_ids: _ } => todo!(),
BatchKind::ClearAndSettings {
other: _,
settings_ids: _,
} => todo!(),
BatchKind::SettingsAndDocumentAddition {
addition_ids,
settings_ids,
} => {
// You're not supposed to create an empty `BatchKind`.
assert!(!addition_ids.is_empty());
assert!(!settings_ids.is_empty());
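// Fetch the actual tasks behind these ids; a missing task means the task
// queue is corrupted.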
let document_addition_tasks = addition_ids
.iter()
.map(|tid| {
self.get_task(rtxn, *tid)
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
})
.collect::<Result<Vec<_>>>()?;
let settings_tasks = settings_ids
.iter()
.map(|tid| {
self.get_task(rtxn, *tid)
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
})
.collect::<Result<Vec<_>>>()?;
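// All the tasks of a `BatchKind` target the same index, so the primary key of
// the first document-addition task is used for the whole batch.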
let primary_key = match &document_addition_tasks[0].kind {
KindWithContent::DocumentAddition { primary_key, .. } => primary_key.clone(),
_ => unreachable!(),
};
let content_files = document_addition_tasks
.iter()
.map(|task| match task.kind {
KindWithContent::DocumentAddition { content_file, .. } => content_file,
_ => unreachable!(),
})
.collect();
let settings = settings_tasks
.iter()
.map(|task| match &task.kind {
KindWithContent::Settings { new_settings, .. } => new_settings.clone(),
_ => unreachable!(),
})
.collect();
Ok(Some(Batch::SettingsAndDocumentAddition {
index_uid,
primary_key,
content_files,
document_addition_tasks,
settings,
settings_tasks,
}))
}
BatchKind::SettingsAndDocumentUpdate {
update_ids: _,
settings_ids: _,
} => todo!(),
BatchKind::Settings { settings_ids: _ } => todo!(),
BatchKind::IndexCreation { id: _ } => todo!(),
BatchKind::IndexDeletion { ids: _ } => todo!(),
BatchKind::IndexUpdate { id: _ } => todo!(),
BatchKind::IndexSwap { id: _ } => todo!(),
BatchKind::IndexRename { id: _ } => todo!(),
}
}

/// Create the next batch to be processed:
/// 1. We get the *last* task to cancel.
/// 2. We get the *next* snapshot to process.
/// 3. We get the *next* dump to process.
/// 4. We get the *next* tasks to process for a specific index.
pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Option<Batch>> {
let enqueued = &self.get_status(rtxn, Status::Enqueued)?;
let to_cancel = self.get_kind(rtxn, Kind::CancelTask)? & enqueued;
// 1. we get the last task to cancel.
if let Some(task_id) = to_cancel.max() {
return Ok(Some(Batch::Cancel(
self.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?,
)));
}
// 2. we batch the snapshot.
let to_snapshot = self.get_kind(rtxn, Kind::Snapshot)? & enqueued;
if !to_snapshot.is_empty() {
return Ok(Some(Batch::Snapshot(
self.get_existing_tasks(rtxn, to_snapshot)?,
)));
}
// 3. we batch the dumps.
let to_dump = self.get_kind(rtxn, Kind::DumpExport)? & enqueued;
if !to_dump.is_empty() {
return Ok(Some(Batch::Dump(self.get_existing_tasks(rtxn, to_dump)?)));
}
// 4. We take the next task and try to batch all the tasks associated with this index.
if let Some(task_id) = enqueued.min() {
let task = self
.get_task(rtxn, task_id)?
.ok_or(Error::CorruptedTaskQueue)?;
// This is safe because all the remaining tasks are associated with
// AT LEAST one index. We can use either the right or the left one, it
// doesn't matter.
let index_name = task.indexes().unwrap()[0];
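// The tasks associated with this index, restricted to the enqueued ones;
// not used yet, hence the underscore.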
let _index = self.get_index(rtxn, &index_name)? & enqueued;
let enqueued = enqueued
.into_iter()
.map(|task_id| {
self.get_task(rtxn, task_id)
.and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
.map(|task| (task.uid, task.kind.as_kind()))
})
.collect::<Result<Vec<_>>>()?;
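// Ask the autobatcher which of the enqueued tasks can be grouped into a
// single batch.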
if let Some(batchkind) = crate::autobatcher::autobatch(enqueued) {
return self.create_next_batch_index(rtxn, index_name.to_string(), batchkind);
}
}
// If we found no tasks, then we were notified for something that already got
// autobatched somehow, and there is nothing to do.
Ok(None)
}
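
/// Process the given [`Batch`], returning the tasks it contained once they
/// have been updated.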
pub(crate) fn process_batch(&self, wtxn: &mut RwTxn, batch: Batch) -> Result<Vec<Task>> {
match batch {
Batch::Cancel(_) => todo!(),
Batch::Snapshot(_) => todo!(),
Batch::Dump(_) => todo!(),
Batch::DocumentAddition {
index_uid: _,
primary_key: _,
content_files: _,
tasks: _,
} => todo!(),
Batch::SettingsAndDocumentAddition {
index_uid,
primary_key,
content_files,
document_addition_tasks,
settings: _,
settings_tasks: _,
} => {
let index = self.index_mapper.create_index(wtxn, &index_uid)?;
let mut updated_tasks = Vec::new();
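// The settings tasks are meant to be applied before the document additions;
// the commented-out code below sketches that step but is not wired up yet.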
/*
let ret = index.update_settings(settings)?;
for (ret, task) in ret.iter().zip(settings_tasks) {
match ret {
Ok(ret) => task.status = Some(ret),
Err(err) => task.error = Some(err),
}
}
*/
/*
for (ret, task) in ret.iter().zip(settings_tasks) {
match ret {
Ok(ret) => task.status = Some(ret),
Err(err) => task.error = Some(err),
}
updated_tasks.push(task);
}
*/
let ret = index.update_documents(
IndexDocumentsMethod::ReplaceDocuments,
primary_key,
self.file_store.clone(),
content_files.into_iter(),
)?;
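// Report the outcome of the addition on each task; the exact bookkeeping is
// still a `todo!()` below.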
for (ret, mut task) in ret.iter().zip(document_addition_tasks.into_iter()) {
match ret {
Ok(ret) => todo!(), // task.info = Some(format!("{:?}", ret)),
Err(err) => todo!(), // task.error = Some(err.to_string()),
}
updated_tasks.push(task);
}
Ok(updated_tasks)
}
}
}
}
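
// A hypothetical driver loop for the two methods above (sketch only: `env`,
// `scheduler`, and the error handling are assumed here and are not this
// crate's actual run loop):
//
//     let rtxn = env.read_txn()?;
//     if let Some(batch) = scheduler.create_next_batch(&rtxn)? {
//         drop(rtxn);
//         let mut wtxn = env.write_txn()?;
//         let _updated_tasks = scheduler.process_batch(&mut wtxn, batch)?;
//         wtxn.commit()?;
//     }
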
/*
impl Batch {
pub fn task_ids(&self) -> impl IntoIterator<Item = TaskId> + '_ {
match self {
Batch::Cancel(task) | Batch::One(task) => {
Box::new(std::iter::once(task.uid)) as Box<dyn Iterator<Item = TaskId>>
}
Batch::Snapshot(tasks) | Batch::Dump(tasks) | Batch::Contiguous { tasks, .. } => {
Box::new(tasks.iter().map(|task| task.uid)) as Box<dyn Iterator<Item = TaskId>>
}
Batch::Empty => Box::new(std::iter::empty()) as Box<dyn Iterator<Item = TaskId>>,
}
}
}
*/