start integrating the index-scheduler in the meilisearch codebase

Tamo 2022-09-14 16:16:53 +02:00 committed by Clément Renault
parent 9882b7fa57
commit f84ced7e38
30 changed files with 682 additions and 3622 deletions


@@ -1,75 +0,0 @@
use time::OffsetDateTime;
use crate::snapshot::SnapshotJob;
use super::task::{Task, TaskEvent};
pub type BatchId = u32;
#[derive(Debug)]
pub enum BatchContent {
DocumentsAdditionBatch(Vec<Task>),
IndexUpdate(Task),
Dump(Task),
Snapshot(SnapshotJob),
// Represents an empty batch. This can occur when we were woken but there wasn't any work to do.
Empty,
}
impl BatchContent {
pub fn first(&self) -> Option<&Task> {
match self {
BatchContent::DocumentsAdditionBatch(ts) => ts.first(),
BatchContent::Dump(t) | BatchContent::IndexUpdate(t) => Some(t),
BatchContent::Snapshot(_) | BatchContent::Empty => None,
}
}
pub fn push_event(&mut self, event: TaskEvent) {
match self {
BatchContent::DocumentsAdditionBatch(ts) => {
ts.iter_mut().for_each(|t| t.events.push(event.clone()))
}
BatchContent::IndexUpdate(t) | BatchContent::Dump(t) => t.events.push(event),
BatchContent::Snapshot(_) | BatchContent::Empty => (),
}
}
}
#[derive(Debug)]
pub struct Batch {
// Only batches that contain persistent tasks are given an id; snapshot batches
// don't have one.
pub id: Option<BatchId>,
pub created_at: OffsetDateTime,
pub content: BatchContent,
}
impl Batch {
pub fn new(id: Option<BatchId>, content: BatchContent) -> Self {
Self {
id,
created_at: OffsetDateTime::now_utc(),
content,
}
}
pub fn len(&self) -> usize {
match self.content {
BatchContent::DocumentsAdditionBatch(ref ts) => ts.len(),
BatchContent::IndexUpdate(_) | BatchContent::Dump(_) | BatchContent::Snapshot(_) => 1,
BatchContent::Empty => 0,
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn empty() -> Self {
Self {
id: None,
created_at: OffsetDateTime::now_utc(),
content: BatchContent::Empty,
}
}
}
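A quick sketch of how these types compose, assuming a `Task` value is at hand (see `task.rs` later in this diff); the function name is illustrative. `push_event` fans the event out to every task the batch contains:

fn annotate_batch(task: Task) {
    let mut batch = Batch::new(Some(0), BatchContent::DocumentsAdditionBatch(vec![task]));
    // Every task in a DocumentsAdditionBatch receives a copy of the event.
    batch.content.push_event(TaskEvent::Created(OffsetDateTime::now_utc()));
    assert_eq!(batch.len(), 1);
    assert!(!batch.is_empty());
}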


@@ -1,34 +0,0 @@
use meilisearch_types::error::{Code, ErrorCode};
use meilisearch_types::internal_error;
use tokio::task::JoinError;
use crate::update_file_store::UpdateFileStoreError;
use super::task::TaskId;
pub type Result<T> = std::result::Result<T, TaskError>;
#[derive(Debug, thiserror::Error)]
pub enum TaskError {
#[error("Task `{0}` not found.")]
UnexistingTask(TaskId),
#[error("Internal error: {0}")]
Internal(Box<dyn std::error::Error + Send + Sync + 'static>),
}
internal_error!(
TaskError: milli::heed::Error,
JoinError,
std::io::Error,
serde_json::Error,
UpdateFileStoreError
);
impl ErrorCode for TaskError {
fn error_code(&self) -> Code {
match self {
TaskError::UnexistingTask(_) => Code::TaskNotFound,
TaskError::Internal(_) => Code::Internal,
}
}
}
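The `internal_error!` macro comes from `meilisearch_types`; it presumably expands to one `From` impl per listed type, boxing the source error into `TaskError::Internal`. A hand-written sketch of one such expansion (an assumption about the macro, not its literal output):

impl From<std::io::Error> for TaskError {
    fn from(error: std::io::Error) -> Self {
        // Boxing erases the concrete error type so every internal failure
        // maps onto the single Internal variant.
        TaskError::Internal(Box::new(error))
    }
}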


@@ -1,132 +0,0 @@
use crate::dump::DumpHandler;
use crate::index_resolver::index_store::IndexStore;
use crate::index_resolver::meta_store::IndexMetaStore;
use crate::tasks::batch::{Batch, BatchContent};
use crate::tasks::task::{Task, TaskContent, TaskEvent, TaskResult};
use crate::tasks::BatchHandler;
#[async_trait::async_trait]
impl<U, I> BatchHandler for DumpHandler<U, I>
where
U: IndexMetaStore + Sync + Send + 'static,
I: IndexStore + Sync + Send + 'static,
{
fn accept(&self, batch: &Batch) -> bool {
matches!(batch.content, BatchContent::Dump { .. })
}
async fn process_batch(&self, mut batch: Batch) -> Batch {
match &batch.content {
BatchContent::Dump(Task {
content: TaskContent::Dump { uid },
..
}) => {
match self.run(uid.clone()).await {
Ok(_) => {
batch
.content
.push_event(TaskEvent::succeeded(TaskResult::Other));
}
Err(e) => batch.content.push_event(TaskEvent::failed(e)),
}
batch
}
_ => unreachable!("invalid batch content for dump"),
}
}
async fn finish(&self, _: &Batch) {}
}
#[cfg(test)]
mod test {
use crate::dump::error::{DumpError, Result as DumpResult};
use crate::index_resolver::{index_store::MockIndexStore, meta_store::MockIndexMetaStore};
use crate::tasks::handlers::test::task_to_batch;
use super::*;
use nelson::Mocker;
use proptest::prelude::*;
proptest! {
#[test]
fn finish_does_nothing(
task in any::<Task>(),
) {
let rt = tokio::runtime::Runtime::new().unwrap();
let handle = rt.spawn(async {
let batch = task_to_batch(task);
let mocker = Mocker::default();
let dump_handler = DumpHandler::<MockIndexMetaStore, MockIndexStore>::mock(mocker);
dump_handler.finish(&batch).await;
});
rt.block_on(handle).unwrap();
}
#[test]
fn test_handle_dump_success(
task in any::<Task>(),
) {
let rt = tokio::runtime::Runtime::new().unwrap();
let handle = rt.spawn(async {
let batch = task_to_batch(task);
let should_accept = matches!(batch.content, BatchContent::Dump { .. });
let mocker = Mocker::default();
if should_accept {
mocker.when::<String, DumpResult<()>>("run")
.once()
.then(|_| Ok(()));
}
let dump_handler = DumpHandler::<MockIndexMetaStore, MockIndexStore>::mock(mocker);
let accept = dump_handler.accept(&batch);
assert_eq!(accept, should_accept);
if accept {
let batch = dump_handler.process_batch(batch).await;
let last_event = batch.content.first().unwrap().events.last().unwrap();
assert!(matches!(last_event, TaskEvent::Succeeded { .. }));
}
});
rt.block_on(handle).unwrap();
}
#[test]
fn test_handle_dump_error(
task in any::<Task>(),
) {
let rt = tokio::runtime::Runtime::new().unwrap();
let handle = rt.spawn(async {
let batch = task_to_batch(task);
let should_accept = matches!(batch.content, BatchContent::Dump { .. });
let mocker = Mocker::default();
if should_accept {
mocker.when::<String, DumpResult<()>>("run")
.once()
.then(|_| Err(DumpError::Internal("error".into())));
}
let dump_handler = DumpHandler::<MockIndexMetaStore, MockIndexStore>::mock(mocker);
let accept = dump_handler.accept(&batch);
assert_eq!(accept, should_accept);
if accept {
let batch = dump_handler.process_batch(batch).await;
let last_event = batch.content.first().unwrap().events.last().unwrap();
assert!(matches!(last_event, TaskEvent::Failed { .. }));
}
});
rt.block_on(handle).unwrap();
}
}
}


@@ -1,18 +0,0 @@
use crate::tasks::batch::{Batch, BatchContent};
use crate::tasks::BatchHandler;
/// A sink handler for empty batches.
pub struct EmptyBatchHandler;
#[async_trait::async_trait]
impl BatchHandler for EmptyBatchHandler {
fn accept(&self, batch: &Batch) -> bool {
matches!(batch.content, BatchContent::Empty)
}
async fn process_batch(&self, batch: Batch) -> Batch {
batch
}
async fn finish(&self, _: &Batch) {}
}


@@ -1,199 +0,0 @@
use crate::index_resolver::IndexResolver;
use crate::index_resolver::{index_store::IndexStore, meta_store::IndexMetaStore};
use crate::tasks::batch::{Batch, BatchContent};
use crate::tasks::BatchHandler;
#[async_trait::async_trait]
impl<U, I> BatchHandler for IndexResolver<U, I>
where
U: IndexMetaStore + Send + Sync + 'static,
I: IndexStore + Send + Sync + 'static,
{
fn accept(&self, batch: &Batch) -> bool {
matches!(
batch.content,
BatchContent::DocumentsAdditionBatch(_) | BatchContent::IndexUpdate(_)
)
}
async fn process_batch(&self, mut batch: Batch) -> Batch {
match batch.content {
BatchContent::DocumentsAdditionBatch(ref mut tasks) => {
self.process_document_addition_batch(tasks).await;
}
BatchContent::IndexUpdate(ref mut task) => {
self.process_task(task).await;
}
_ => unreachable!(),
}
batch
}
async fn finish(&self, batch: &Batch) {
if let BatchContent::DocumentsAdditionBatch(ref tasks) = batch.content {
for task in tasks {
if let Some(content_uuid) = task.get_content_uuid() {
if let Err(e) = self.delete_content_file(content_uuid).await {
log::error!("error deleting update file: {}", e);
}
}
}
}
}
}
#[cfg(test)]
mod test {
use crate::index_resolver::index_store::MapIndexStore;
use crate::index_resolver::meta_store::HeedMetaStore;
use crate::index_resolver::{
error::Result as IndexResult, index_store::MockIndexStore, meta_store::MockIndexMetaStore,
};
use crate::tasks::{
handlers::test::task_to_batch,
task::{Task, TaskContent},
};
use crate::update_file_store::{Result as FileStoreResult, UpdateFileStore};
use super::*;
use meilisearch_types::index_uid::IndexUid;
use milli::update::IndexDocumentsMethod;
use nelson::Mocker;
use proptest::prelude::*;
use uuid::Uuid;
proptest! {
#[test]
fn test_accept_task(
task in any::<Task>(),
) {
let batch = task_to_batch(task);
let index_store = MockIndexStore::new();
let meta_store = MockIndexMetaStore::new();
let mocker = Mocker::default();
let update_file_store = UpdateFileStore::mock(mocker);
let index_resolver = IndexResolver::new(meta_store, index_store, update_file_store);
match batch.content {
BatchContent::DocumentsAdditionBatch(_)
| BatchContent::IndexUpdate(_) => assert!(index_resolver.accept(&batch)),
BatchContent::Dump(_)
| BatchContent::Snapshot(_)
| BatchContent::Empty => assert!(!index_resolver.accept(&batch)),
}
}
}
#[actix_rt::test]
async fn finisher_called_on_document_update() {
let index_store = MockIndexStore::new();
let meta_store = MockIndexMetaStore::new();
let mocker = Mocker::default();
let content_uuid = Uuid::new_v4();
mocker
.when::<Uuid, FileStoreResult<()>>("delete")
.once()
.then(move |uuid| {
assert_eq!(uuid, content_uuid);
Ok(())
});
let update_file_store = UpdateFileStore::mock(mocker);
let index_resolver = IndexResolver::new(meta_store, index_store, update_file_store);
let task = Task {
id: 1,
content: TaskContent::DocumentAddition {
content_uuid,
merge_strategy: IndexDocumentsMethod::ReplaceDocuments,
primary_key: None,
documents_count: 100,
allow_index_creation: true,
index_uid: IndexUid::new_unchecked("test"),
},
events: Vec::new(),
};
let batch = task_to_batch(task);
index_resolver.finish(&batch).await;
}
#[actix_rt::test]
#[should_panic]
async fn panic_when_passed_unsupported_batch() {
let index_store = MockIndexStore::new();
let meta_store = MockIndexMetaStore::new();
let mocker = Mocker::default();
let update_file_store = UpdateFileStore::mock(mocker);
let index_resolver = IndexResolver::new(meta_store, index_store, update_file_store);
let task = Task {
id: 1,
content: TaskContent::Dump {
uid: String::from("hello"),
},
events: Vec::new(),
};
let batch = task_to_batch(task);
index_resolver.process_batch(batch).await;
}
proptest! {
#[test]
fn index_document_task_deletes_update_file(
task in any::<Task>(),
) {
let rt = tokio::runtime::Runtime::new().unwrap();
let handle = rt.spawn(async {
let mocker = Mocker::default();
if let TaskContent::DocumentAddition{ .. } = task.content {
mocker.when::<Uuid, IndexResult<()>>("delete_content_file").then(|_| Ok(()));
}
let index_resolver: IndexResolver<HeedMetaStore, MapIndexStore> = IndexResolver::mock(mocker);
let batch = task_to_batch(task);
index_resolver.finish(&batch).await;
});
rt.block_on(handle).unwrap();
}
#[test]
fn test_handle_batch(task in any::<Task>()) {
let rt = tokio::runtime::Runtime::new().unwrap();
let handle = rt.spawn(async {
let mocker = Mocker::default();
match task.content {
TaskContent::DocumentAddition { .. } => {
mocker.when::<&mut [Task], ()>("process_document_addition_batch").then(|_| ());
}
TaskContent::Dump { .. } => (),
_ => {
mocker.when::<&mut Task, ()>("process_task").then(|_| ());
}
}
let index_resolver: IndexResolver<HeedMetaStore, MapIndexStore> = IndexResolver::mock(mocker);
let batch = task_to_batch(task);
if index_resolver.accept(&batch) {
index_resolver.process_batch(batch).await;
}
});
if let Err(e) = rt.block_on(handle) {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
}
}
}
}
}


@@ -1,34 +0,0 @@
pub mod dump_handler;
pub mod empty_handler;
mod index_resolver_handler;
pub mod snapshot_handler;
#[cfg(test)]
mod test {
use time::OffsetDateTime;
use crate::tasks::{
batch::{Batch, BatchContent},
task::{Task, TaskContent},
};
pub fn task_to_batch(task: Task) -> Batch {
let content = match task.content {
TaskContent::DocumentAddition { .. } => {
BatchContent::DocumentsAdditionBatch(vec![task])
}
TaskContent::DocumentDeletion { .. }
| TaskContent::SettingsUpdate { .. }
| TaskContent::IndexDeletion { .. }
| TaskContent::IndexCreation { .. }
| TaskContent::IndexUpdate { .. } => BatchContent::IndexUpdate(task),
TaskContent::Dump { .. } => BatchContent::Dump(task),
};
Batch {
id: Some(1),
created_at: OffsetDateTime::now_utc(),
content,
}
}
}


@@ -1,26 +0,0 @@
use crate::tasks::batch::{Batch, BatchContent};
use crate::tasks::BatchHandler;
pub struct SnapshotHandler;
#[async_trait::async_trait]
impl BatchHandler for SnapshotHandler {
fn accept(&self, batch: &Batch) -> bool {
matches!(batch.content, BatchContent::Snapshot(_))
}
async fn process_batch(&self, batch: Batch) -> Batch {
match batch.content {
BatchContent::Snapshot(job) => {
if let Err(e) = job.run().await {
log::error!("snapshot error: {e}");
}
}
_ => unreachable!(),
}
Batch::empty()
}
async fn finish(&self, _: &Batch) {}
}


@@ -1,56 +0,0 @@
use async_trait::async_trait;
pub use handlers::empty_handler::EmptyBatchHandler;
pub use handlers::snapshot_handler::SnapshotHandler;
pub use scheduler::Scheduler;
pub use task_store::TaskFilter;
#[cfg(test)]
pub use task_store::test::MockTaskStore as TaskStore;
#[cfg(not(test))]
pub use task_store::TaskStore;
use batch::Batch;
use error::Result;
pub mod batch;
pub mod error;
mod handlers;
mod scheduler;
pub mod task;
mod task_store;
pub mod update_loop;
#[cfg_attr(test, mockall::automock(type Error=test::DebugError;))]
#[async_trait]
pub trait BatchHandler: Sync + Send + 'static {
/// Returns whether this handler can accept this batch.
fn accept(&self, batch: &Batch) -> bool;
/// Processes the `Task` batch, returning the batch with its tasks updated.
///
/// It is OK for this function to panic if it is handed a batch that hasn't been
/// verified by `accept` beforehand.
async fn process_batch(&self, batch: Batch) -> Batch;
/// `finish` is called when the result of `process_batch` has been committed to the task
/// store. This method can be used, for example, to perform cleanup after the update has
/// been completed.
async fn finish(&self, batch: &Batch);
}
#[cfg(test)]
mod test {
use serde::{Deserialize, Serialize};
use std::fmt::Display;
#[derive(Debug, Serialize, Deserialize)]
pub struct DebugError;
impl Display for DebugError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("an error")
}
}
impl std::error::Error for DebugError {}
}
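For reference, a minimal handler following the same contract as `EmptyBatchHandler` above; the name `LoggingSnapshotHandler` is hypothetical and the body is a sketch only:

pub struct LoggingSnapshotHandler;

#[async_trait]
impl BatchHandler for LoggingSnapshotHandler {
    // Claim snapshot batches; the update loop hands each batch to the first
    // handler whose `accept` returns true.
    fn accept(&self, batch: &Batch) -> bool {
        matches!(batch.content, batch::BatchContent::Snapshot(_))
    }

    async fn process_batch(&self, batch: Batch) -> Batch {
        log::info!("processing a batch of {} task(s)", batch.len());
        batch
    }

    async fn finish(&self, _: &Batch) {}
}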


@@ -1,609 +0,0 @@
use std::cmp::Ordering;
use std::collections::{hash_map::Entry, BinaryHeap, HashMap, VecDeque};
use std::ops::{Deref, DerefMut};
use std::slice;
use std::sync::Arc;
use atomic_refcell::AtomicRefCell;
use milli::update::IndexDocumentsMethod;
use time::OffsetDateTime;
use tokio::sync::{watch, RwLock};
use crate::options::SchedulerConfig;
use crate::snapshot::SnapshotJob;
use super::batch::{Batch, BatchContent};
use super::error::Result;
use super::task::{Task, TaskContent, TaskEvent, TaskId};
use super::update_loop::UpdateLoop;
use super::{BatchHandler, TaskFilter, TaskStore};
#[derive(Eq, Debug, Clone, Copy)]
enum TaskType {
DocumentAddition { number: usize },
DocumentUpdate { number: usize },
IndexUpdate,
Dump,
}
/// Two task types are equal only when both are document additions or both are document
/// updates: these are the only kinds of tasks that can be batched together.
impl PartialEq for TaskType {
fn eq(&self, other: &Self) -> bool {
matches!(
(self, other),
(Self::DocumentAddition { .. }, Self::DocumentAddition { .. })
| (Self::DocumentUpdate { .. }, Self::DocumentUpdate { .. })
)
}
}
#[derive(Eq, Debug, Clone, Copy)]
struct PendingTask {
kind: TaskType,
id: TaskId,
}
impl PartialEq for PendingTask {
fn eq(&self, other: &Self) -> bool {
self.id.eq(&other.id)
}
}
impl PartialOrd for PendingTask {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for PendingTask {
fn cmp(&self, other: &Self) -> Ordering {
self.id.cmp(&other.id).reverse()
}
}
#[derive(Debug)]
struct TaskList {
id: TaskListIdentifier,
tasks: BinaryHeap<PendingTask>,
}
impl Deref for TaskList {
type Target = BinaryHeap<PendingTask>;
fn deref(&self) -> &Self::Target {
&self.tasks
}
}
impl DerefMut for TaskList {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.tasks
}
}
impl TaskList {
fn new(id: TaskListIdentifier) -> Self {
Self {
id,
tasks: Default::default(),
}
}
}
impl PartialEq for TaskList {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
impl Eq for TaskList {}
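// Ordering used by the scheduler's max-heap: `Dump` lists sort above index
// lists so a pending dump is always popped first; among index lists, the one
// whose head task has the lowest id wins, thanks to the reversed `Ord` on
// `PendingTask`.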
impl Ord for TaskList {
fn cmp(&self, other: &Self) -> Ordering {
match (&self.id, &other.id) {
(TaskListIdentifier::Index(_), TaskListIdentifier::Index(_)) => {
match (self.peek(), other.peek()) {
(None, None) => Ordering::Equal,
(None, Some(_)) => Ordering::Less,
(Some(_), None) => Ordering::Greater,
(Some(lhs), Some(rhs)) => lhs.cmp(rhs),
}
}
(TaskListIdentifier::Index(_), TaskListIdentifier::Dump) => Ordering::Less,
(TaskListIdentifier::Dump, TaskListIdentifier::Index(_)) => Ordering::Greater,
(TaskListIdentifier::Dump, TaskListIdentifier::Dump) => {
unreachable!("There should be only one Dump task list")
}
}
}
}
impl PartialOrd for TaskList {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
enum TaskListIdentifier {
Index(String),
Dump,
}
impl From<&Task> for TaskListIdentifier {
fn from(task: &Task) -> Self {
match &task.content {
TaskContent::DocumentAddition { index_uid, .. }
| TaskContent::DocumentDeletion { index_uid, .. }
| TaskContent::SettingsUpdate { index_uid, .. }
| TaskContent::IndexDeletion { index_uid }
| TaskContent::IndexCreation { index_uid, .. }
| TaskContent::IndexUpdate { index_uid, .. } => {
TaskListIdentifier::Index(index_uid.as_str().to_string())
}
TaskContent::Dump { .. } => TaskListIdentifier::Dump,
}
}
}
#[derive(Default)]
struct TaskQueue {
/// Maps index uids to their TaskList, for quick access
index_tasks: HashMap<TaskListIdentifier, Arc<AtomicRefCell<TaskList>>>,
/// A queue that orders `TaskList`s by the priority of their first update
queue: BinaryHeap<Arc<AtomicRefCell<TaskList>>>,
}
impl TaskQueue {
fn insert(&mut self, task: Task) {
let id = task.id;
let uid = TaskListIdentifier::from(&task);
let kind = match task.content {
TaskContent::DocumentAddition {
documents_count,
merge_strategy: IndexDocumentsMethod::ReplaceDocuments,
..
} => TaskType::DocumentAddition {
number: documents_count,
},
TaskContent::DocumentAddition {
documents_count,
merge_strategy: IndexDocumentsMethod::UpdateDocuments,
..
} => TaskType::DocumentUpdate {
number: documents_count,
},
TaskContent::Dump { .. } => TaskType::Dump,
TaskContent::DocumentDeletion { .. }
| TaskContent::SettingsUpdate { .. }
| TaskContent::IndexDeletion { .. }
| TaskContent::IndexCreation { .. }
| TaskContent::IndexUpdate { .. } => TaskType::IndexUpdate,
_ => unreachable!("unhandled task type"),
};
let task = PendingTask { kind, id };
match self.index_tasks.entry(uid) {
Entry::Occupied(entry) => {
// A task list already exists for this index; all we have to do is push the new
// update to the end of the list. This won't change the order, since ids are
// monotonically increasing.
let mut list = entry.get().borrow_mut();
// We only need the first element to be lower than the one we want to
// insert to preserve the order in the queue.
assert!(list.peek().map(|old_id| id >= old_id.id).unwrap_or(true));
list.push(task);
}
Entry::Vacant(entry) => {
let mut task_list = TaskList::new(entry.key().clone());
task_list.push(task);
let task_list = Arc::new(AtomicRefCell::new(task_list));
entry.insert(task_list.clone());
self.queue.push(task_list);
}
}
}
/// Gives the closure `f` mutable access to the task list of the next index to schedule. It is
/// guaranteed that the first id in that task list is the lowest pending task id.
fn head_mut<R>(&mut self, mut f: impl FnMut(&mut TaskList) -> R) -> Option<R> {
let head = self.queue.pop()?;
let result = {
let mut ref_head = head.borrow_mut();
f(&mut *ref_head)
};
if !head.borrow().tasks.is_empty() {
// After being mutated, the head is reinserted to the correct position.
self.queue.push(head);
} else {
self.index_tasks.remove(&head.borrow().id);
}
Some(result)
}
pub fn is_empty(&self) -> bool {
self.queue.is_empty() && self.index_tasks.is_empty()
}
}
pub struct Scheduler {
// TODO: currently snapshots are non-persistent tasks and are treated differently.
snapshots: VecDeque<SnapshotJob>,
tasks: TaskQueue,
store: TaskStore,
processing: Processing,
next_fetched_task_id: TaskId,
config: SchedulerConfig,
/// Notifies the update loop that a new task was received
notifier: watch::Sender<()>,
}
impl Scheduler {
pub fn new(
store: TaskStore,
performers: Vec<Arc<dyn BatchHandler + Sync + Send + 'static>>,
config: SchedulerConfig,
) -> Result<Arc<RwLock<Self>>> {
let (notifier, rcv) = watch::channel(());
let this = Self {
snapshots: VecDeque::new(),
tasks: TaskQueue::default(),
store,
processing: Processing::Nothing,
next_fetched_task_id: 0,
config,
notifier,
};
// Notify update loop to start processing pending updates immediately after startup.
this.notify();
let this = Arc::new(RwLock::new(this));
let update_loop = UpdateLoop::new(this.clone(), performers, rcv);
tokio::task::spawn_local(update_loop.run());
Ok(this)
}
fn register_task(&mut self, task: Task) {
assert!(!task.is_finished());
self.tasks.insert(task);
}
/// Clears the processing list; this method should be called when the processing of a batch has finished.
pub fn finish(&mut self) {
self.processing = Processing::Nothing;
}
pub fn notify(&self) {
let _ = self.notifier.send(());
}
fn notify_if_not_empty(&self) {
if !self.snapshots.is_empty() || !self.tasks.is_empty() {
self.notify();
}
}
pub async fn update_tasks(&self, content: BatchContent) -> Result<BatchContent> {
match content {
BatchContent::DocumentsAdditionBatch(tasks) => {
let tasks = self.store.update_tasks(tasks).await?;
Ok(BatchContent::DocumentsAdditionBatch(tasks))
}
BatchContent::IndexUpdate(t) => {
let mut tasks = self.store.update_tasks(vec![t]).await?;
Ok(BatchContent::IndexUpdate(tasks.remove(0)))
}
BatchContent::Dump(t) => {
let mut tasks = self.store.update_tasks(vec![t]).await?;
Ok(BatchContent::Dump(tasks.remove(0)))
}
other => Ok(other),
}
}
pub async fn get_task(&self, id: TaskId, filter: Option<TaskFilter>) -> Result<Task> {
self.store.get_task(id, filter).await
}
pub async fn list_tasks(
&self,
offset: Option<TaskId>,
filter: Option<TaskFilter>,
limit: Option<usize>,
) -> Result<Vec<Task>> {
self.store.list_tasks(offset, filter, limit).await
}
pub async fn get_processing_tasks(&self) -> Result<Vec<Task>> {
let mut tasks = Vec::new();
for id in self.processing.ids() {
let task = self.store.get_task(id, None).await?;
tasks.push(task);
}
Ok(tasks)
}
pub fn schedule_snapshot(&mut self, job: SnapshotJob) {
self.snapshots.push_back(job);
self.notify();
}
async fn fetch_pending_tasks(&mut self) -> Result<()> {
self.store
.fetch_unfinished_tasks(Some(self.next_fetched_task_id))
.await?
.into_iter()
.for_each(|t| {
self.next_fetched_task_id = t.id + 1;
self.register_task(t);
});
Ok(())
}
/// Prepare the next batch, and set `processing` to the ids in that batch.
pub async fn prepare(&mut self) -> Result<Batch> {
// If there is a job to process, do it first.
if let Some(job) = self.snapshots.pop_front() {
// If there is more work to do, notify the update loop
self.notify_if_not_empty();
let batch = Batch::new(None, BatchContent::Snapshot(job));
return Ok(batch);
}
// Try to fill the queue with pending tasks.
self.fetch_pending_tasks().await?;
self.processing = make_batch(&mut self.tasks, &self.config);
log::debug!("prepared batch with {} tasks", self.processing.len());
if !self.processing.is_nothing() {
let (processing, mut content) = self
.store
.get_processing_tasks(std::mem::take(&mut self.processing))
.await?;
// The batch id is the id of the first update it contains. At this point we must have a
// valid batch that contains at least 1 task.
let id = match content.first() {
Some(Task { id, .. }) => *id,
_ => panic!("invalid batch"),
};
content.push_event(TaskEvent::Batched {
batch_id: id,
timestamp: OffsetDateTime::now_utc(),
});
self.processing = processing;
let batch = Batch::new(Some(id), content);
// If there is more work to do, notify the update loop
self.notify_if_not_empty();
Ok(batch)
} else {
Ok(Batch::empty())
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum Processing {
DocumentAdditions(Vec<TaskId>),
IndexUpdate(TaskId),
Dump(TaskId),
/// Variant used when there is nothing to process.
Nothing,
}
impl Default for Processing {
fn default() -> Self {
Self::Nothing
}
}
enum ProcessingIter<'a> {
Many(slice::Iter<'a, TaskId>),
Single(Option<TaskId>),
}
impl<'a> Iterator for ProcessingIter<'a> {
type Item = TaskId;
fn next(&mut self) -> Option<Self::Item> {
match self {
ProcessingIter::Many(iter) => iter.next().copied(),
ProcessingIter::Single(val) => val.take(),
}
}
}
impl Processing {
fn is_nothing(&self) -> bool {
matches!(self, Processing::Nothing)
}
pub fn ids(&self) -> impl Iterator<Item = TaskId> + '_ {
match self {
Processing::DocumentAdditions(v) => ProcessingIter::Many(v.iter()),
Processing::IndexUpdate(id) | Processing::Dump(id) => ProcessingIter::Single(Some(*id)),
Processing::Nothing => ProcessingIter::Single(None),
}
}
pub fn len(&self) -> usize {
match self {
Processing::DocumentAdditions(v) => v.len(),
Processing::IndexUpdate(_) | Processing::Dump(_) => 1,
Processing::Nothing => 0,
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
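/// Builds the next `Processing` unit from the highest-priority task list: index
/// updates and dumps are scheduled one at a time, while consecutive document
/// additions (or updates) targeting the same index are drained into a single
/// batch, unless auto-batching is disabled.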
fn make_batch(tasks: &mut TaskQueue, config: &SchedulerConfig) -> Processing {
let mut doc_count = 0;
tasks
.head_mut(|list| match list.peek().copied() {
Some(PendingTask {
kind: TaskType::IndexUpdate,
id,
}) => {
list.pop();
Processing::IndexUpdate(id)
}
Some(PendingTask {
kind: TaskType::Dump,
id,
}) => {
list.pop();
Processing::Dump(id)
}
Some(PendingTask { kind, .. }) => {
let mut task_list = Vec::new();
loop {
match list.peek() {
Some(pending) if pending.kind == kind => {
// We always need to process at least one task for the scheduler to make progress.
if config.disable_auto_batching && !task_list.is_empty() {
break;
}
let pending = list.pop().unwrap();
task_list.push(pending.id);
// We add the number of documents to the count if we are scheduling document additions.
match pending.kind {
TaskType::DocumentUpdate { number }
| TaskType::DocumentAddition { number } => {
doc_count += number;
}
_ => (),
}
}
_ => break,
}
}
Processing::DocumentAdditions(task_list)
}
None => Processing::Nothing,
})
.unwrap_or(Processing::Nothing)
}
#[cfg(test)]
mod test {
use meilisearch_types::index_uid::IndexUid;
use milli::update::IndexDocumentsMethod;
use uuid::Uuid;
use crate::tasks::task::TaskContent;
use super::*;
fn gen_task(id: TaskId, content: TaskContent) -> Task {
Task {
id,
content,
events: vec![],
}
}
#[test]
#[rustfmt::skip]
fn register_updates_multiples_indexes() {
let mut queue = TaskQueue::default();
queue.insert(gen_task(0, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test1") }));
queue.insert(gen_task(1, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test2") }));
queue.insert(gen_task(2, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test2") }));
queue.insert(gen_task(3, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test2") }));
queue.insert(gen_task(4, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test1") }));
queue.insert(gen_task(5, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test1") }));
queue.insert(gen_task(6, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test2") }));
let test1_tasks = queue
.head_mut(|tasks| tasks.drain().map(|t| t.id).collect::<Vec<_>>())
.unwrap();
assert_eq!(test1_tasks, &[0, 4, 5]);
let test2_tasks = queue
.head_mut(|tasks| tasks.drain().map(|t| t.id).collect::<Vec<_>>())
.unwrap();
assert_eq!(test2_tasks, &[1, 2, 3, 6]);
assert!(queue.index_tasks.is_empty());
assert!(queue.queue.is_empty());
}
fn gen_doc_addition_task_content(index_uid: &str) -> TaskContent {
TaskContent::DocumentAddition {
content_uuid: Uuid::new_v4(),
merge_strategy: IndexDocumentsMethod::ReplaceDocuments,
primary_key: Some("test".to_string()),
documents_count: 0,
allow_index_creation: true,
index_uid: IndexUid::new_unchecked(index_uid),
}
}
#[test]
#[rustfmt::skip]
fn test_make_batch() {
let mut queue = TaskQueue::default();
queue.insert(gen_task(0, gen_doc_addition_task_content("test1")));
queue.insert(gen_task(1, gen_doc_addition_task_content("test2")));
queue.insert(gen_task(2, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test2")}));
queue.insert(gen_task(3, gen_doc_addition_task_content("test2")));
queue.insert(gen_task(4, gen_doc_addition_task_content("test1")));
queue.insert(gen_task(5, TaskContent::IndexDeletion { index_uid: IndexUid::new_unchecked("test1")}));
queue.insert(gen_task(6, gen_doc_addition_task_content("test2")));
queue.insert(gen_task(7, gen_doc_addition_task_content("test1")));
queue.insert(gen_task(8, TaskContent::Dump { uid: "adump".to_owned() }));
let config = SchedulerConfig::default();
// Make sure that the dump is processed before everything else.
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::Dump(8));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::DocumentAdditions(vec![0, 4]));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::DocumentAdditions(vec![1]));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::IndexUpdate(2));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::DocumentAdditions(vec![3, 6]));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::IndexUpdate(5));
let batch = make_batch(&mut queue, &config);
assert_eq!(batch, Processing::DocumentAdditions(vec![7]));
assert!(queue.is_empty());
}
}


@@ -1,195 +0,0 @@
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use milli::update::{DocumentAdditionResult, IndexDocumentsMethod};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
use uuid::Uuid;
use super::batch::BatchId;
use crate::index::{Settings, Unchecked};
pub type TaskId = u32;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
pub enum TaskResult {
DocumentAddition { indexed_documents: u64 },
DocumentDeletion { deleted_documents: u64 },
ClearAll { deleted_documents: u64 },
Other,
}
impl From<DocumentAdditionResult> for TaskResult {
fn from(other: DocumentAdditionResult) -> Self {
Self::DocumentAddition {
indexed_documents: other.indexed_documents,
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
pub enum TaskEvent {
Created(
#[cfg_attr(test, proptest(strategy = "test::datetime_strategy()"))]
#[serde(with = "time::serde::rfc3339")]
OffsetDateTime,
),
Batched {
#[cfg_attr(test, proptest(strategy = "test::datetime_strategy()"))]
#[serde(with = "time::serde::rfc3339")]
timestamp: OffsetDateTime,
batch_id: BatchId,
},
Processing(
#[cfg_attr(test, proptest(strategy = "test::datetime_strategy()"))]
#[serde(with = "time::serde::rfc3339")]
OffsetDateTime,
),
Succeeded {
result: TaskResult,
#[cfg_attr(test, proptest(strategy = "test::datetime_strategy()"))]
#[serde(with = "time::serde::rfc3339")]
timestamp: OffsetDateTime,
},
Failed {
error: ResponseError,
#[cfg_attr(test, proptest(strategy = "test::datetime_strategy()"))]
#[serde(with = "time::serde::rfc3339")]
timestamp: OffsetDateTime,
},
}
impl TaskEvent {
pub fn succeeded(result: TaskResult) -> Self {
Self::Succeeded {
result,
timestamp: OffsetDateTime::now_utc(),
}
}
pub fn failed(error: impl Into<ResponseError>) -> Self {
Self::Failed {
error: error.into(),
timestamp: OffsetDateTime::now_utc(),
}
}
}
/// A task represents an operation that Meilisearch must do.
/// It is stored on disk, and tasks are executed in order, from the lowest to the highest Task id.
/// Every time a new task is created it has a higher Task id than the previous one.
/// See also `Job`.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
pub struct Task {
pub id: TaskId,
/// The name of the index the task is targeting. If it isn't targeting any index (i.e. a
/// Dump task), then this is `None`.
// TODO: when we next break the dump format, it would be a good idea to move this field
// inside of the TaskContent.
pub content: TaskContent,
pub events: Vec<TaskEvent>,
}
impl Task {
/// Returns true when a task is finished.
/// A task is finished when its last state is either `Succeeded` or `Failed`.
pub fn is_finished(&self) -> bool {
self.events.last().map_or(false, |event| {
matches!(
event,
TaskEvent::Succeeded { .. } | TaskEvent::Failed { .. }
)
})
}
/// Returns the content_uuid of the `Task` if there is one.
pub fn get_content_uuid(&self) -> Option<Uuid> {
match self {
Task {
content: TaskContent::DocumentAddition { content_uuid, .. },
..
} => Some(*content_uuid),
_ => None,
}
}
pub fn index_uid(&self) -> Option<&str> {
match &self.content {
TaskContent::DocumentAddition { index_uid, .. }
| TaskContent::DocumentDeletion { index_uid, .. }
| TaskContent::SettingsUpdate { index_uid, .. }
| TaskContent::IndexDeletion { index_uid }
| TaskContent::IndexCreation { index_uid, .. }
| TaskContent::IndexUpdate { index_uid, .. } => Some(index_uid.as_str()),
TaskContent::Dump { .. } => None,
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
pub enum DocumentDeletion {
Clear,
Ids(Vec<String>),
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[cfg_attr(test, derive(proptest_derive::Arbitrary))]
#[allow(clippy::large_enum_variant)]
pub enum TaskContent {
DocumentAddition {
index_uid: IndexUid,
#[cfg_attr(test, proptest(value = "Uuid::new_v4()"))]
content_uuid: Uuid,
#[cfg_attr(test, proptest(strategy = "test::index_document_method_strategy()"))]
merge_strategy: IndexDocumentsMethod,
primary_key: Option<String>,
documents_count: usize,
allow_index_creation: bool,
},
DocumentDeletion {
index_uid: IndexUid,
deletion: DocumentDeletion,
},
SettingsUpdate {
index_uid: IndexUid,
settings: Settings<Unchecked>,
/// Indicates whether the task was a deletion
is_deletion: bool,
allow_index_creation: bool,
},
IndexDeletion {
index_uid: IndexUid,
},
IndexCreation {
index_uid: IndexUid,
primary_key: Option<String>,
},
IndexUpdate {
index_uid: IndexUid,
primary_key: Option<String>,
},
Dump {
uid: String,
},
}
#[cfg(test)]
mod test {
use proptest::prelude::*;
use super::*;
pub(super) fn index_document_method_strategy() -> impl Strategy<Value = IndexDocumentsMethod> {
prop_oneof![
Just(IndexDocumentsMethod::ReplaceDocuments),
Just(IndexDocumentsMethod::UpdateDocuments),
]
}
pub(super) fn datetime_strategy() -> impl Strategy<Value = OffsetDateTime> {
Just(OffsetDateTime::now_utc())
}
}
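A short sketch of the lifecycle these types encode; the function name and uid string are illustrative:

fn lifecycle_example() {
    let mut task = Task {
        id: 0,
        content: TaskContent::Dump { uid: "example".to_string() },
        events: vec![TaskEvent::Created(OffsetDateTime::now_utc())],
    };
    assert!(!task.is_finished());
    // A task is finished once its last event is either Succeeded or Failed.
    task.events.push(TaskEvent::succeeded(TaskResult::Other));
    assert!(task.is_finished());
}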


@@ -1,93 +0,0 @@
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::sync::{watch, RwLock};
use super::batch::Batch;
use super::error::Result;
use super::{BatchHandler, Scheduler};
use crate::tasks::task::TaskEvent;
/// The update loop sequentially performs batches of updates by asking the scheduler for a batch
/// and handing it to a matching `BatchHandler`.
pub struct UpdateLoop {
scheduler: Arc<RwLock<Scheduler>>,
performers: Vec<Arc<dyn BatchHandler + Send + Sync + 'static>>,
notifier: Option<watch::Receiver<()>>,
}
impl UpdateLoop {
pub fn new(
scheduler: Arc<RwLock<Scheduler>>,
performers: Vec<Arc<dyn BatchHandler + Send + Sync + 'static>>,
notifier: watch::Receiver<()>,
) -> Self {
Self {
scheduler,
performers,
notifier: Some(notifier),
}
}
pub async fn run(mut self) {
let mut notifier = self.notifier.take().unwrap();
loop {
if notifier.changed().await.is_err() {
break;
}
if let Err(e) = self.process_next_batch().await {
log::error!("an error occurred while processing an update batch: {}", e);
}
}
}
async fn process_next_batch(&self) -> Result<()> {
let mut batch = { self.scheduler.write().await.prepare().await? };
let performer = self
.performers
.iter()
.find(|p| p.accept(&batch))
.expect("No performer found for batch")
.clone();
batch
.content
.push_event(TaskEvent::Processing(OffsetDateTime::now_utc()));
batch.content = {
self.scheduler
.read()
.await
.update_tasks(batch.content)
.await?
};
let batch = performer.process_batch(batch).await;
self.handle_batch_result(batch, performer).await?;
Ok(())
}
/// Handles the result from a processed batch.
///
/// When a task is processed, the result of the processing is pushed to its event list. The
/// `handle_batch_result` method makes sure that the new state is saved to the store.
/// The tasks are then removed from the processing queue.
async fn handle_batch_result(
&self,
mut batch: Batch,
performer: Arc<dyn BatchHandler + Sync + Send + 'static>,
) -> Result<()> {
let mut scheduler = self.scheduler.write().await;
let content = scheduler.update_tasks(batch.content).await?;
scheduler.finish();
drop(scheduler);
batch.content = content;
performer.finish(&batch).await;
Ok(())
}
}
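Putting it together, a sketch of how a scheduler and its update loop might be wired up; the function is hypothetical, and since `Scheduler::new` spawns the `UpdateLoop` via `tokio::task::spawn_local`, it must run inside a tokio `LocalSet`:

use std::sync::Arc;

use crate::options::SchedulerConfig;
use crate::tasks::error::Result;
use crate::tasks::{BatchHandler, EmptyBatchHandler, Scheduler, SnapshotHandler, TaskStore};

async fn start_scheduler(store: TaskStore, config: SchedulerConfig) -> Result<()> {
    let performers: Vec<Arc<dyn BatchHandler + Send + Sync + 'static>> = vec![
        Arc::new(EmptyBatchHandler),
        Arc::new(SnapshotHandler),
    ];
    // `Scheduler::new` notifies the update loop right away, which starts
    // draining pending tasks from the store.
    let _scheduler = Scheduler::new(store, performers, config)?;
    Ok(())
}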