Mirror of https://github.com/meilisearch/MeiliSearch, synced 2025-07-04 20:37:15 +02:00
feat(lib): auto-batching
This commit is contained in:
parent 622c15e825
commit c9a236b0af
28 changed files with 1181 additions and 777 deletions
meilisearch-lib/src/tasks/batch.rs
@@ -1,14 +1,14 @@
 use chrono::{DateTime, Utc};
 
-use super::{task::Task, task_store::Pending};
+use super::task::Task;
 
-pub type BatchId = u32;
+pub type BatchId = u64;
 
 #[derive(Debug)]
 pub struct Batch {
     pub id: BatchId,
     pub created_at: DateTime<Utc>,
-    pub tasks: Vec<Pending<Task>>,
+    pub tasks: Vec<Task>,
 }
 
 impl Batch {
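With the `Pending` wrapper gone, a batch is now a homogeneous list of plain tasks (jobs are routed separately by the scheduler, see the scheduler changes below), and `BatchId` is widened to `u64` to match `TaskId`. A minimal sketch of how the new shape composes; `batch_from_tasks` is a hypothetical helper for illustration, not part of the commit:

    use chrono::Utc;

    // A batch takes its id from the first task it contains (see
    // Scheduler::prepare below); an empty batch has no id.
    fn batch_from_tasks(tasks: Vec<Task>) -> Option<Batch> {
        let id = tasks.first()?.id; // BatchId and TaskId are both u64 now
        Some(Batch {
            id,
            created_at: Utc::now(),
            tasks,
        })
    }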
meilisearch-lib/src/tasks/mod.rs
@@ -1,47 +1,38 @@
 use std::sync::Arc;
-use std::time::Duration;
 
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 
+pub use scheduler::Scheduler;
+pub use task_store::TaskFilter;
+
 #[cfg(test)]
 pub use task_store::test::MockTaskStore as TaskStore;
 #[cfg(not(test))]
 pub use task_store::TaskStore;
-pub use task_store::{Pending, TaskFilter};
 
 use batch::Batch;
 use error::Result;
-use scheduler::Scheduler;
+
+use self::task::Job;
 
 pub mod batch;
 pub mod error;
-pub mod scheduler;
+mod scheduler;
 pub mod task;
 mod task_store;
+pub mod update_loop;
 
 #[cfg_attr(test, mockall::automock(type Error=test::DebugError;))]
 #[async_trait]
 pub trait TaskPerformer: Sync + Send + 'static {
     type Error: Serialize + for<'de> Deserialize<'de> + std::error::Error + Sync + Send + 'static;
+
     /// Processes the `Task` batch returning the batch with the `Task` updated.
-    async fn process(&self, batch: Batch) -> Batch;
+    async fn process_batch(&self, batch: Batch) -> Batch;
+
+    async fn process_job(&self, job: Job);
 
     /// `finish` is called when the result of `process_batch` has been committed to the task
     /// store. This method can be used, for example, to perform cleanup after the update has
     /// been completed.
     async fn finish(&self, batch: &Batch);
 }
 
-pub fn create_task_store<P>(env: Arc<heed::Env>, performer: Arc<P>) -> Result<TaskStore>
-where
-    P: TaskPerformer,
-{
-    let task_store = TaskStore::new(env)?;
-    let scheduler = Scheduler::new(task_store.clone(), performer, Duration::from_millis(1));
-    tokio::task::spawn_local(scheduler.run());
-    Ok(task_store)
-}
-
 #[cfg(test)]
 mod test {
     use serde::{Deserialize, Serialize};
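The trait now separates batched index tasks (`process_batch`) from volatile jobs such as dumps and snapshots (`process_job`). A minimal sketch of an implementor, under the stated assumptions that `NoopPerformer`/`NoopError` are illustrative names and that `thiserror` is available for the error derive:

    use async_trait::async_trait;
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize, thiserror::Error)]
    #[error("noop failure")]
    struct NoopError;

    struct NoopPerformer;

    #[async_trait]
    impl TaskPerformer for NoopPerformer {
        type Error = NoopError;

        // Receives a whole batch, updates each task's events as it sees fit,
        // and hands the batch back to the update loop.
        async fn process_batch(&self, batch: Batch) -> Batch {
            batch
        }

        // Dumps and snapshots bypass batching and arrive here instead.
        async fn process_job(&self, _job: Job) {}

        // Called once the processed batch has been committed to the task store.
        async fn finish(&self, _batch: &Batch) {}
    }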
meilisearch-lib/src/tasks/scheduler.rs
@@ -1,253 +1,526 @@
+use std::cmp::Ordering;
+use std::collections::{hash_map::Entry, BinaryHeap, HashMap, VecDeque};
+use std::ops::{Deref, DerefMut};
+use std::path::Path;
 use std::sync::Arc;
 use std::time::Duration;
 
+use atomic_refcell::AtomicRefCell;
 use chrono::Utc;
-use serde::{Deserialize, Serialize};
+use milli::update::IndexDocumentsMethod;
+use tokio::sync::{watch, RwLock};
+
+use crate::options::SchedulerConfig;
+use crate::update_file_store::UpdateFileStore;
 
 use super::batch::Batch;
 use super::error::Result;
-#[cfg(test)]
-use super::task_store::test::MockTaskStore as TaskStore;
-use super::task_store::Pending;
-#[cfg(not(test))]
-use super::task_store::TaskStore;
-use super::TaskPerformer;
-use crate::tasks::task::TaskEvent;
+use super::task::{Job, Task, TaskContent, TaskEvent, TaskId};
+use super::update_loop::UpdateLoop;
+use super::{TaskFilter, TaskPerformer, TaskStore};
 
-/// The scheduler roles is to perform batches of tasks one at a time. It will monitor the TaskStore
-/// for new tasks, put them in a batch, and process the batch as soon as possible.
-///
-/// When a batch is currently processing, the scheduler is just waiting.
-pub struct Scheduler<P: TaskPerformer> {
-    store: TaskStore,
-    performer: Arc<P>,
-
-    /// The interval at which the the `TaskStore` should be checked for new updates
-    task_store_check_interval: Duration,
-}
-
-impl<P> Scheduler<P>
-where
-    P: TaskPerformer + Send + Sync + 'static,
-    P::Error: Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static,
-{
-    pub fn new(store: TaskStore, performer: Arc<P>, task_store_check_interval: Duration) -> Self {
-        Self {
-            store,
-            performer,
-            task_store_check_interval,
-        }
-    }
-
-    pub async fn run(self) {
-        loop {
-            if let Err(e) = self.process_next_batch().await {
-                log::error!("an error occured while processing an update batch: {}", e);
-            }
-        }
-    }
-
-    async fn process_next_batch(&self) -> Result<()> {
-        match self.prepare_batch().await? {
-            Some(mut batch) => {
-                for task in &mut batch.tasks {
-                    match task {
-                        Pending::Task(task) => task.events.push(TaskEvent::Processing(Utc::now())),
-                        Pending::Job(_) => (),
-                    }
-                }
-
-                // the jobs are ignored
-                batch.tasks = self.store.update_tasks(batch.tasks).await?;
-
-                let performer = self.performer.clone();
-                let batch_result = performer.process(batch).await;
-                self.handle_batch_result(batch_result).await?;
-            }
-            None => {
-                // No update found to create a batch we wait a bit before we retry.
-                tokio::time::sleep(self.task_store_check_interval).await;
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Checks for pending tasks and groups them in a batch. If there are no pending update,
-    /// return Ok(None)
-    ///
-    /// Until batching is properly implemented, the batches contain only one task.
-    async fn prepare_batch(&self) -> Result<Option<Batch>> {
-        match self.store.peek_pending_task().await {
-            Some(Pending::Task(next_task_id)) => {
-                let mut task = self.store.get_task(next_task_id, None).await?;
-
-                task.events.push(TaskEvent::Batched {
-                    timestamp: Utc::now(),
-                    batch_id: 0,
-                });
-
-                let batch = Batch {
-                    id: 0,
-                    // index_uid: task.index_uid.clone(),
-                    created_at: Utc::now(),
-                    tasks: vec![Pending::Task(task)],
-                };
-                Ok(Some(batch))
-            }
-            Some(Pending::Job(job)) => Ok(Some(Batch {
-                id: 0,
-                created_at: Utc::now(),
-                tasks: vec![Pending::Job(job)],
-            })),
-            None => Ok(None),
-        }
-    }
-
-    /// Handles the result from a batch processing.
-    ///
-    /// When a task is processed, the result of the processing is pushed to its event list. The
-    /// handle batch result make sure that the new state is save into its store.
-    /// The tasks are then removed from the processing queue.
-    async fn handle_batch_result(&self, mut batch: Batch) -> Result<()> {
-        let tasks = self.store.update_tasks(batch.tasks).await?;
-        batch.tasks = tasks;
-        self.store.delete_pending(&batch.tasks[0]).await;
-        self.performer.finish(&batch).await;
-        Ok(())
-    }
-}
+#[derive(Eq, Debug, Clone, Copy)]
+enum TaskType {
+    DocumentAddition { number: usize },
+    DocumentUpdate { number: usize },
+    Other,
+}
+
+/// Two tasks are equal if they have the same type.
+impl PartialEq for TaskType {
+    fn eq(&self, other: &Self) -> bool {
+        matches!(
+            (self, other),
+            (Self::DocumentAddition { .. }, Self::DocumentAddition { .. })
+                | (Self::DocumentUpdate { .. }, Self::DocumentUpdate { .. })
+        )
+    }
+}
+
+#[derive(Eq, Debug, Clone, Copy)]
+struct PendingTask {
+    kind: TaskType,
+    id: TaskId,
+}
+
+impl PartialEq for PendingTask {
+    fn eq(&self, other: &Self) -> bool {
+        self.id.eq(&other.id)
+    }
+}
+
+impl PartialOrd for PendingTask {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for PendingTask {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.id.cmp(&other.id).reverse()
+    }
+}
+
+#[derive(Debug)]
+struct TaskList {
+    index: String,
+    tasks: BinaryHeap<PendingTask>,
+}
+
+impl Deref for TaskList {
+    type Target = BinaryHeap<PendingTask>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.tasks
+    }
+}
+
+impl DerefMut for TaskList {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.tasks
+    }
+}
+
+impl TaskList {
+    fn new(index: String) -> Self {
+        Self {
+            index,
+            tasks: Default::default(),
+        }
+    }
+}
+
+impl PartialEq for TaskList {
+    fn eq(&self, other: &Self) -> bool {
+        self.index == other.index
+    }
+}
+
+impl Eq for TaskList {}
+
+impl Ord for TaskList {
+    fn cmp(&self, other: &Self) -> Ordering {
+        match (self.peek(), other.peek()) {
+            (None, None) => Ordering::Equal,
+            (None, Some(_)) => Ordering::Less,
+            (Some(_), None) => Ordering::Greater,
+            (Some(lhs), Some(rhs)) => lhs.cmp(rhs),
+        }
+    }
+}
+
+impl PartialOrd for TaskList {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+#[derive(Default)]
+struct TaskQueue {
+    /// Maps index uids to their TaskList, for quick access
+    index_tasks: HashMap<String, Arc<AtomicRefCell<TaskList>>>,
+    /// A queue that orders TaskList by the priority of their first update
+    queue: BinaryHeap<Arc<AtomicRefCell<TaskList>>>,
+}
+
+impl TaskQueue {
+    fn insert(&mut self, task: Task) {
+        let uid = task.index_uid.into_inner();
+        let id = task.id;
+        let kind = match task.content {
+            TaskContent::DocumentAddition {
+                documents_count,
+                merge_strategy: IndexDocumentsMethod::ReplaceDocuments,
+                ..
+            } => TaskType::DocumentAddition {
+                number: documents_count,
+            },
+            TaskContent::DocumentAddition {
+                documents_count,
+                merge_strategy: IndexDocumentsMethod::UpdateDocuments,
+                ..
+            } => TaskType::DocumentUpdate {
+                number: documents_count,
+            },
+            _ => TaskType::Other,
+        };
+        let task = PendingTask { kind, id };
+
+        match self.index_tasks.entry(uid) {
+            Entry::Occupied(entry) => {
+                // A task list already exists for this index, all we have to do is to push the new
+                // update to the end of the list. This won't change the order since ids are
+                // monotonically increasing.
+                let mut list = entry.get().borrow_mut();
+
+                // We only need the first element to be lower than the one we want to
+                // insert to preserve the order in the queue.
+                assert!(list.peek().map(|old_id| id >= old_id.id).unwrap_or(true));
+
+                list.push(task);
+            }
+            Entry::Vacant(entry) => {
+                let mut task_list = TaskList::new(entry.key().to_owned());
+                task_list.push(task);
+                let task_list = Arc::new(AtomicRefCell::new(task_list));
+                entry.insert(task_list.clone());
+                self.queue.push(task_list);
+            }
+        }
+    }
+
+    /// Passes a context with a view to the task list of the next index to schedule. It is
+    /// guaranteed that the first id from task list will be the lowest pending task id.
+    fn head_mut<R>(&mut self, mut f: impl FnMut(&mut TaskList) -> R) -> Option<R> {
+        let head = self.queue.pop()?;
+        let result = {
+            let mut ref_head = head.borrow_mut();
+            f(&mut *ref_head)
+        };
+        if !head.borrow().tasks.is_empty() {
+            // After being mutated, the head is reinserted to the correct position.
+            self.queue.push(head);
+        } else {
+            self.index_tasks.remove(&head.borrow().index);
+        }
+
+        Some(result)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.queue.is_empty() && self.index_tasks.is_empty()
+    }
+}
+
+pub struct Scheduler {
+    jobs: VecDeque<Job>,
+    tasks: TaskQueue,
+
+    store: TaskStore,
+    processing: Vec<TaskId>,
+    next_fetched_task_id: TaskId,
+    config: SchedulerConfig,
+    /// Notifies the update loop that a new task was received
+    notifier: watch::Sender<()>,
+}
+
+impl Scheduler {
+    pub fn new<P>(
+        store: TaskStore,
+        performer: Arc<P>,
+        mut config: SchedulerConfig,
+    ) -> Result<Arc<RwLock<Self>>>
+    where
+        P: TaskPerformer,
+    {
+        let (notifier, rcv) = watch::channel(());
+
+        let debounce_time = config.debounce_duration_sec;
+
+        // Disable autobatching
+        if !config.enable_autobatching {
+            config.max_batch_size = Some(1);
+        }
+
+        let this = Self {
+            jobs: VecDeque::new(),
+            tasks: TaskQueue::default(),
+
+            store,
+            processing: Vec::new(),
+            next_fetched_task_id: 0,
+            config,
+            notifier,
+        };
+
+        // Notify update loop to start processing pending updates immediately after startup.
+        this.notify();
+
+        let this = Arc::new(RwLock::new(this));
+
+        let update_loop = UpdateLoop::new(
+            this.clone(),
+            performer,
+            debounce_time.filter(|&v| v > 0).map(Duration::from_secs),
+            rcv,
+        );
+
+        tokio::task::spawn_local(update_loop.run());
+
+        Ok(this)
+    }
+
+    pub async fn dump(&self, path: &Path, file_store: UpdateFileStore) -> Result<()> {
+        self.store.dump(path, file_store).await
+    }
+
+    fn register_task(&mut self, task: Task) {
+        assert!(!task.is_finished());
+        self.tasks.insert(task);
+    }
+
+    /// Clears the processing list, this method should be called when the processing of a batch is finished.
+    pub fn finish(&mut self) {
+        self.processing.clear();
+    }
+
+    pub fn notify(&self) {
+        let _ = self.notifier.send(());
+    }
+
+    fn notify_if_not_empty(&self) {
+        if !self.jobs.is_empty() || !self.tasks.is_empty() {
+            self.notify();
+        }
+    }
+
+    pub async fn update_tasks(&self, tasks: Vec<Task>) -> Result<Vec<Task>> {
+        self.store.update_tasks(tasks).await
+    }
+
+    pub async fn get_task(&self, id: TaskId, filter: Option<TaskFilter>) -> Result<Task> {
+        self.store.get_task(id, filter).await
+    }
+
+    pub async fn list_tasks(
+        &self,
+        offset: Option<TaskId>,
+        filter: Option<TaskFilter>,
+        limit: Option<usize>,
+    ) -> Result<Vec<Task>> {
+        self.store.list_tasks(offset, filter, limit).await
+    }
+
+    pub async fn get_processing_tasks(&self) -> Result<Vec<Task>> {
+        let mut tasks = Vec::new();
+
+        for id in self.processing.iter() {
+            let task = self.store.get_task(*id, None).await?;
+            tasks.push(task);
+        }
+
+        Ok(tasks)
+    }
+
+    pub async fn schedule_job(&mut self, job: Job) {
+        self.jobs.push_back(job);
+        self.notify();
+    }
+
+    async fn fetch_pending_tasks(&mut self) -> Result<()> {
+        // We must NEVER re-enqueue an already processed task! Its content uuid would point to
+        // a nonexistent file.
+        //
+        // TODO(marin): This may create some latency when the first batch lazy loads the pending updates.
+        let mut filter = TaskFilter::default();
+        filter.filter_fn(|task| !task.is_finished());
+
+        self.store
+            .list_tasks(Some(self.next_fetched_task_id), Some(filter), None)
+            .await?
+            .into_iter()
+            // The tasks arrive in reverse order, and we need to insert them in order.
+            .rev()
+            .for_each(|t| {
+                self.next_fetched_task_id = t.id + 1;
+                self.register_task(t);
+            });
+
+        Ok(())
+    }
+
+    /// Prepare the next batch, and set `processing` to the ids in that batch.
+    pub async fn prepare(&mut self) -> Result<Pending> {
+        // If there is a job to process, do it first.
+        if let Some(job) = self.jobs.pop_front() {
+            // There is more work to do, notify the update loop
+            self.notify_if_not_empty();
+            return Ok(Pending::Job(job));
+        }
+        // Try to fill the queue with pending tasks.
+        self.fetch_pending_tasks().await?;
+
+        make_batch(&mut self.tasks, &mut self.processing, &self.config);
+
+        log::debug!("prepared batch with {} tasks", self.processing.len());
+
+        if !self.processing.is_empty() {
+            let ids = std::mem::take(&mut self.processing);
+
+            let (ids, mut tasks) = self.store.get_pending_tasks(ids).await?;
+
+            // The batch id is the id of the first update it contains
+            let id = match tasks.first() {
+                Some(Task { id, .. }) => *id,
+                _ => panic!("invalid batch"),
+            };
+
+            tasks.iter_mut().for_each(|t| {
+                t.events.push(TaskEvent::Batched {
+                    batch_id: id,
+                    timestamp: Utc::now(),
+                })
+            });
+
+            self.processing = ids;
+
+            let batch = Batch {
+                id,
+                created_at: Utc::now(),
+                tasks,
+            };
+
+            // There is more work to do, notify the update loop
+            self.notify_if_not_empty();
+
+            Ok(Pending::Batch(batch))
+        } else {
+            Ok(Pending::Nothing)
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum Pending {
+    Batch(Batch),
+    Job(Job),
+    Nothing,
+}
+
+fn make_batch(tasks: &mut TaskQueue, processing: &mut Vec<TaskId>, config: &SchedulerConfig) {
+    processing.clear();
+
+    let mut doc_count = 0;
+    tasks.head_mut(|list| match list.peek().copied() {
+        Some(PendingTask {
+            kind: TaskType::Other,
+            id,
+        }) => {
+            processing.push(id);
+            list.pop();
+        }
+        Some(PendingTask { kind, .. }) => loop {
+            match list.peek() {
+                Some(pending) if pending.kind == kind => {
+                    // We always need to process at least one task for the scheduler to make progress.
+                    if processing.len() >= config.max_batch_size.unwrap_or(usize::MAX).max(1) {
+                        break;
+                    }
+                    let pending = list.pop().unwrap();
+                    processing.push(pending.id);
+
+                    // We add the number of documents to the count if we are scheduling document additions and
+                    // stop adding if we already have enough.
+                    //
+                    // We check that bound only after adding the current task to the batch, so that a batch contains at least one task.
+                    match pending.kind {
+                        TaskType::DocumentUpdate { number }
+                        | TaskType::DocumentAddition { number } => {
+                            doc_count += number;
+
+                            if doc_count >= config.max_documents_per_batch.unwrap_or(usize::MAX) {
+                                break;
+                            }
+                        }
+                        _ => (),
+                    }
+                }
+                _ => break,
+            }
+        },
+        None => (),
+    });
+}
 
 #[cfg(test)]
 mod test {
-    use nelson::Mocker;
+    use milli::update::IndexDocumentsMethod;
+    use uuid::Uuid;
 
-    use crate::index_resolver::IndexUid;
-    use crate::tasks::task::Task;
-    use crate::tasks::task_store::TaskFilter;
+    use crate::{index_resolver::IndexUid, tasks::task::TaskContent};
 
-    use super::super::task::{TaskContent, TaskEvent, TaskId, TaskResult};
-    use super::super::MockTaskPerformer;
     use super::*;
 
-    #[tokio::test]
-    async fn test_prepare_batch_full() {
-        let mocker = Mocker::default();
-
-        mocker
-            .when::<(TaskId, Option<TaskFilter>), Result<Option<Task>>>("get_task")
-            .once()
-            .then(|(id, _filter)| {
-                let task = Task {
-                    id,
-                    index_uid: IndexUid::new("Test".to_string()).unwrap(),
-                    content: TaskContent::IndexDeletion,
-                    events: vec![TaskEvent::Created(Utc::now())],
-                };
-                Ok(Some(task))
-            });
-
-        mocker
-            .when::<(), Option<Pending<TaskId>>>("peek_pending_task")
-            .then(|()| Some(Pending::Task(1)));
-
-        let store = TaskStore::mock(mocker);
-        let performer = Arc::new(MockTaskPerformer::new());
-
-        let scheduler = Scheduler {
-            store,
-            performer,
-            task_store_check_interval: Duration::from_millis(1),
-        };
-
-        let batch = scheduler.prepare_batch().await.unwrap().unwrap();
-
-        assert_eq!(batch.tasks.len(), 1);
-        assert!(
-            matches!(batch.tasks[0], Pending::Task(Task { id: 1, .. })),
-            "{:?}",
-            batch.tasks[0]
-        );
-    }
-
-    #[tokio::test]
-    async fn test_prepare_batch_empty() {
-        let mocker = Mocker::default();
-        mocker
-            .when::<(), Option<Pending<TaskId>>>("peek_pending_task")
-            .then(|()| None);
-
-        let store = TaskStore::mock(mocker);
-        let performer = Arc::new(MockTaskPerformer::new());
-
-        let scheduler = Scheduler {
-            store,
-            performer,
-            task_store_check_interval: Duration::from_millis(1),
-        };
-
-        assert!(scheduler.prepare_batch().await.unwrap().is_none());
-    }
-
-    #[tokio::test]
-    async fn test_loop_run_normal() {
-        let mocker = Mocker::default();
-        let mut id = Some(1);
-        mocker
-            .when::<(), Option<Pending<TaskId>>>("peek_pending_task")
-            .then(move |()| id.take().map(Pending::Task));
-        mocker
-            .when::<(TaskId, Option<TaskFilter>), Result<Task>>("get_task")
-            .once()
-            .then(|(id, _)| {
-                let task = Task {
-                    id,
-                    index_uid: IndexUid::new("Test".to_string()).unwrap(),
-                    content: TaskContent::IndexDeletion,
-                    events: vec![TaskEvent::Created(Utc::now())],
-                };
-                Ok(task)
-            });
-
-        mocker
-            .when::<Vec<Pending<Task>>, Result<Vec<Pending<Task>>>>("update_tasks")
-            .times(2)
-            .then(|tasks| {
-                assert_eq!(tasks.len(), 1);
-                Ok(tasks)
-            });
-
-        mocker.when::<(), ()>("delete_pending").once().then(|_| ());
-
-        let store = TaskStore::mock(mocker);
-
-        let mut performer = MockTaskPerformer::new();
-        performer.expect_process().once().returning(|mut batch| {
-            batch.tasks.iter_mut().for_each(|t| match t {
-                Pending::Task(Task { ref mut events, .. }) => events.push(TaskEvent::Succeded {
-                    result: TaskResult::Other,
-                    timestamp: Utc::now(),
-                }),
-                _ => panic!("expected a task, found a job"),
-            });
-
-            batch
-        });
-
-        performer.expect_finish().once().returning(|_| ());
-
-        let performer = Arc::new(performer);
-
-        let scheduler = Scheduler {
-            store,
-            performer,
-            task_store_check_interval: Duration::from_millis(1),
-        };
-
-        let handle = tokio::spawn(scheduler.run());
-
-        if let Ok(r) = tokio::time::timeout(Duration::from_millis(100), handle).await {
-            r.unwrap();
-        }
-    }
+    fn gen_task(id: TaskId, index_uid: &str, content: TaskContent) -> Task {
+        Task {
+            id,
+            index_uid: IndexUid::new_unchecked(index_uid.to_owned()),
+            content,
+            events: vec![],
+        }
+    }
+
+    #[test]
+    fn register_updates_multiples_indexes() {
+        let mut queue = TaskQueue::default();
+        queue.insert(gen_task(0, "test1", TaskContent::IndexDeletion));
+        queue.insert(gen_task(1, "test2", TaskContent::IndexDeletion));
+        queue.insert(gen_task(2, "test2", TaskContent::IndexDeletion));
+        queue.insert(gen_task(3, "test2", TaskContent::IndexDeletion));
+        queue.insert(gen_task(4, "test1", TaskContent::IndexDeletion));
+        queue.insert(gen_task(5, "test1", TaskContent::IndexDeletion));
+        queue.insert(gen_task(6, "test2", TaskContent::IndexDeletion));
+
+        let test1_tasks = queue
+            .head_mut(|tasks| tasks.drain().map(|t| t.id).collect::<Vec<_>>())
+            .unwrap();
+
+        assert_eq!(test1_tasks, &[0, 4, 5]);
+
+        let test2_tasks = queue
+            .head_mut(|tasks| tasks.drain().map(|t| t.id).collect::<Vec<_>>())
+            .unwrap();
+
+        assert_eq!(test2_tasks, &[1, 2, 3, 6]);
+
+        assert!(queue.index_tasks.is_empty());
+        assert!(queue.queue.is_empty());
+    }
+
+    #[test]
+    fn test_make_batch() {
+        let mut queue = TaskQueue::default();
+        let content = TaskContent::DocumentAddition {
+            content_uuid: Uuid::new_v4(),
+            merge_strategy: IndexDocumentsMethod::ReplaceDocuments,
+            primary_key: Some("test".to_string()),
+            documents_count: 0,
+            allow_index_creation: true,
+        };
+        queue.insert(gen_task(0, "test1", content.clone()));
+        queue.insert(gen_task(1, "test2", content.clone()));
+        queue.insert(gen_task(2, "test2", TaskContent::IndexDeletion));
+        queue.insert(gen_task(3, "test2", content.clone()));
+        queue.insert(gen_task(4, "test1", content.clone()));
+        queue.insert(gen_task(5, "test1", TaskContent::IndexDeletion));
+        queue.insert(gen_task(6, "test2", content.clone()));
+        queue.insert(gen_task(7, "test1", content));
+
+        let mut batch = Vec::new();
+
+        let config = SchedulerConfig::default();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[0, 4]);
+
+        batch.clear();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[1]);
+
+        batch.clear();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[2]);
+
+        batch.clear();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[3, 6]);
+
+        batch.clear();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[5]);
+
+        batch.clear();
+        make_batch(&mut queue, &mut batch, &config);
+        assert_eq!(batch, &[7]);
+
+        assert!(queue.is_empty());
+    }
 }
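Both `PendingTask::cmp` (note the `.reverse()`) and `TaskList::cmp` (which compares the peeked heads) exist because `std::collections::BinaryHeap` is a max-heap: reversing the id comparison makes `pop` yield the smallest `TaskId` first, so tasks leave the queue in registration order and the index whose oldest task is oldest overall is scheduled next. A self-contained sketch of the same trick using `std::cmp::Reverse`, which is equivalent to the hand-written reversed `Ord` above:

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    fn main() {
        // Same effect as `self.id.cmp(&other.id).reverse()` in PendingTask::cmp:
        // wrapping ids in `Reverse` turns the max-heap into a min-heap.
        let mut heap = BinaryHeap::new();
        for id in [3u64, 1, 2] {
            heap.push(Reverse(id));
        }

        // Tasks come out in submission order: 1, 2, 3.
        let order: Vec<u64> = std::iter::from_fn(|| heap.pop().map(|Reverse(id)| id)).collect();
        assert_eq!(order, vec![1, 2, 3]);
    }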
meilisearch-lib/src/tasks/task.rs
@@ -97,7 +97,7 @@ impl Task {
 pub enum Job {
     Dump {
         #[derivative(PartialEq = "ignore")]
-        ret: oneshot::Sender<Result<(), IndexResolverError>>,
+        ret: oneshot::Sender<Result<oneshot::Sender<()>, IndexResolverError>>,
         path: PathBuf,
     },
     Snapshot(#[derivative(PartialEq = "ignore")] SnapshotJob),
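The dump job's reply channel now carries a second `oneshot::Sender<()>` instead of a final `()`, turning the reply into a two-step handshake: one message says the job was accepted and hands over a completion channel, a later message on that inner channel signals the end. A minimal, self-contained sketch of the nested-channel pattern; which side performs which step in the actual dump flow is not shown in this hunk, so the roles below are illustrative:

    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        // Outer channel: delivers an inner sender instead of a plain value.
        let (ret, ret_rx) = oneshot::channel::<oneshot::Sender<()>>();

        // One side (here: the worker) answers with a fresh completion channel.
        let worker = tokio::spawn(async move {
            let (done_tx, done_rx) = oneshot::channel();
            let _ = ret.send(done_tx); // "accepted, here is the completion channel"
            // ... long-running work would happen around here ...
            done_rx.await.expect("peer dropped"); // wait for the final signal
        });

        // The other side receives the inner sender and fires it when ready.
        let done_tx = ret_rx.await.expect("sender dropped");
        done_tx.send(()).expect("worker dropped");

        worker.await.unwrap();
    }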
meilisearch-lib/src/tasks/task_store/mod.rs
@@ -1,7 +1,6 @@
 mod store;
 
-use std::cmp::Ordering;
-use std::collections::{BinaryHeap, HashSet};
+use std::collections::HashSet;
 use std::io::{BufWriter, Write};
 use std::path::Path;
 use std::sync::Arc;
@@ -9,11 +8,9 @@ use std::sync::Arc;
 use chrono::Utc;
 use heed::{Env, RwTxn};
 use log::debug;
-use tokio::sync::RwLock;
 use uuid::Uuid;
 
 use super::error::TaskError;
-use super::task::{Job, Task, TaskContent, TaskId};
+use super::task::{Task, TaskContent, TaskId};
 use super::Result;
 use crate::index_resolver::IndexUid;
-use crate::tasks::task::TaskEvent;
@@ -25,9 +22,10 @@ pub use store::test::MockStore as Store;
 pub use store::Store;
 
 /// Defines constraints to be applied when querying for Tasks from the store.
-#[derive(Default, Debug)]
+#[derive(Default)]
 pub struct TaskFilter {
     indexes: Option<HashSet<String>>,
+    filter_fn: Option<Box<dyn Fn(&Task) -> bool + Sync + Send + 'static>>,
 }
 
 impl TaskFilter {
@@ -44,85 +42,28 @@ impl TaskFilter {
             .get_or_insert_with(Default::default)
             .insert(index);
     }
+
+    pub fn filter_fn(&mut self, f: impl Fn(&Task) -> bool + Sync + Send + 'static) {
+        self.filter_fn.replace(Box::new(f));
+    }
 }
 
-/// You can't clone a job because of its volatile nature.
-/// If you need to take the `Job` with you though. You can call the method
-/// `Pending::take`. It'll return the `Pending` as-is but `Empty` the original.
-#[derive(Debug, PartialEq)]
-pub enum Pending<T> {
-    /// A task stored on disk that must be processed.
-    Task(T),
-    /// Job always have a higher priority over normal tasks and are not stored on disk.
-    /// It can be refered as `Volatile job`.
-    Job(Job),
-}
-
-impl Pending<TaskId> {
-    /// Makes a copy of the task or take the content of the volatile job.
-    pub(crate) fn take(&mut self) -> Self {
-        match self {
-            Self::Task(id) => Self::Task(*id),
-            Self::Job(job) => Self::Job(job.take()),
-        }
-    }
-}
-
-impl Eq for Pending<TaskId> {}
-
-impl PartialOrd for Pending<TaskId> {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        match (self, other) {
-            // in case of two tasks we want to return the lowest taskId first.
-            (Pending::Task(lhs), Pending::Task(rhs)) => Some(lhs.cmp(rhs).reverse()),
-            // A job is always better than a task.
-            (Pending::Task(_), Pending::Job(_)) => Some(Ordering::Less),
-            (Pending::Job(_), Pending::Task(_)) => Some(Ordering::Greater),
-            // When there is two jobs we consider them equals.
-            (Pending::Job(_), Pending::Job(_)) => Some(Ordering::Equal),
-        }
-    }
-}
-
-impl Pending<Task> {
-    pub fn get_content_uuid(&self) -> Option<Uuid> {
-        match self {
-            Pending::Task(task) => task.get_content_uuid(),
-            _ => None,
-        }
-    }
-}
-
-impl Ord for Pending<TaskId> {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.partial_cmp(other).unwrap()
-    }
-}
-
 pub struct TaskStore {
     store: Arc<Store>,
-    pending_queue: Arc<RwLock<BinaryHeap<Pending<TaskId>>>>,
-}
-
-impl Clone for TaskStore {
-    fn clone(&self) -> Self {
-        Self {
-            store: self.store.clone(),
-            pending_queue: self.pending_queue.clone(),
-        }
-    }
 }
 
 impl TaskStore {
     pub fn new(env: Arc<heed::Env>) -> Result<Self> {
-        let mut store = Store::new(env)?;
-        let unfinished_tasks = store.reset_and_return_unfinished_tasks()?;
-        let store = Arc::new(store);
-
-        Ok(Self {
-            store,
-            pending_queue: Arc::new(RwLock::new(unfinished_tasks)),
-        })
+        let store = Arc::new(Store::new(env)?);
+        Ok(Self { store })
    }
 
     pub async fn register(&self, index_uid: IndexUid, content: TaskContent) -> Result<Task> {
@@ -146,11 +87,6 @@ impl TaskStore {
         })
         .await??;
 
-        self.pending_queue
-            .write()
-            .await
-            .push(Pending::Task(task.id));
-
         Ok(task)
     }
 
@@ -159,35 +95,6 @@ impl TaskStore {
         Ok(())
     }
 
-    /// Register an update that applies on multiple indexes.
-    /// Currently the update is considered as a priority.
-    pub async fn register_job(&self, content: Job) {
-        debug!("registering a job: {:?}", content);
-        self.pending_queue.write().await.push(Pending::Job(content));
-    }
-
-    /// Returns the next task to process.
-    pub async fn peek_pending_task(&self) -> Option<Pending<TaskId>> {
-        let mut pending_queue = self.pending_queue.write().await;
-        loop {
-            match pending_queue.peek()? {
-                Pending::Job(Job::Empty) => drop(pending_queue.pop()),
-                _ => return Some(pending_queue.peek_mut()?.take()),
-            }
-        }
-    }
-
-    /// Returns the next task to process if there is one.
-    pub async fn get_processing_task(&self) -> Result<Option<Task>> {
-        match self.peek_pending_task().await {
-            Some(Pending::Task(tid)) => {
-                let task = self.get_task(tid, None).await?;
-                Ok(matches!(task.events.last(), Some(TaskEvent::Processing(_))).then(|| task))
-            }
-            _ => Ok(None),
-        }
-    }
-
     pub async fn get_task(&self, id: TaskId, filter: Option<TaskFilter>) -> Result<Task> {
         let store = self.store.clone();
         let task = tokio::task::spawn_blocking(move || -> Result<_> {
@@ -207,17 +114,33 @@ impl TaskStore {
         }
     }
 
-    pub async fn update_tasks(&self, tasks: Vec<Pending<Task>>) -> Result<Vec<Pending<Task>>> {
+    pub async fn get_pending_tasks(&self, ids: Vec<TaskId>) -> Result<(Vec<TaskId>, Vec<Task>)> {
+        let store = self.store.clone();
+        let tasks = tokio::task::spawn_blocking(move || -> Result<_> {
+            let mut tasks = Vec::new();
+            let txn = store.rtxn()?;
+
+            for id in ids.iter() {
+                let task = store
+                    .get(&txn, *id)?
+                    .ok_or(TaskError::UnexistingTask(*id))?;
+                tasks.push(task);
+            }
+            Ok((ids, tasks))
+        })
+        .await??;
+
+        Ok(tasks)
+    }
+
+    pub async fn update_tasks(&self, tasks: Vec<Task>) -> Result<Vec<Task>> {
         let store = self.store.clone();
 
         let tasks = tokio::task::spawn_blocking(move || -> Result<_> {
             let mut txn = store.wtxn()?;
 
             for task in &tasks {
-                match task {
-                    Pending::Task(task) => store.put(&mut txn, task)?,
-                    Pending::Job(_) => (),
-                }
+                store.put(&mut txn, task)?;
             }
 
             txn.commit()?;
@@ -229,21 +152,6 @@ impl TaskStore {
         Ok(tasks)
     }
 
-    /// Delete one task from the queue and remove all `Empty` job.
-    pub async fn delete_pending(&self, to_delete: &Pending<Task>) {
-        if let Pending::Task(Task { id: pending_id, .. }) = to_delete {
-            let mut pending_queue = self.pending_queue.write().await;
-            *pending_queue = std::mem::take(&mut *pending_queue)
-                .into_iter()
-                .filter(|pending| match pending {
-                    Pending::Job(Job::Empty) => false,
-                    Pending::Task(id) => pending_id != id,
-                    _ => true,
-                })
-                .collect::<BinaryHeap<Pending<TaskId>>>();
-        }
-    }
-
     pub async fn list_tasks(
         &self,
         offset: Option<TaskId>,
@@ -348,23 +256,15 @@ pub mod test {
         Self::Mock(Arc::new(mocker))
     }
 
-    pub async fn update_tasks(&self, tasks: Vec<Pending<Task>>) -> Result<Vec<Pending<Task>>> {
+    pub async fn update_tasks(&self, tasks: Vec<Task>) -> Result<Vec<Task>> {
         match self {
             Self::Real(s) => s.update_tasks(tasks).await,
             Self::Mock(m) => unsafe {
-                m.get::<_, Result<Vec<Pending<Task>>>>("update_tasks")
-                    .call(tasks)
+                m.get::<_, Result<Vec<Task>>>("update_tasks").call(tasks)
             },
         }
     }
 
-    pub async fn delete_pending(&self, to_delete: &Pending<Task>) {
-        match self {
-            Self::Real(s) => s.delete_pending(to_delete).await,
-            Self::Mock(m) => unsafe { m.get("delete_pending").call(to_delete) },
-        }
-    }
-
     pub async fn get_task(&self, id: TaskId, filter: Option<TaskFilter>) -> Result<Task> {
         match self {
             Self::Real(s) => s.get_task(id, filter).await,
@@ -372,23 +272,13 @@ pub mod test {
         }
     }
 
-    pub async fn get_processing_task(&self) -> Result<Option<Task>> {
+    pub async fn get_pending_tasks(
+        &self,
+        tasks: Vec<TaskId>,
+    ) -> Result<(Vec<TaskId>, Vec<Task>)> {
         match self {
-            Self::Real(s) => s.get_processing_task().await,
-            Self::Mock(m) => unsafe {
-                m.get::<_, Result<Option<Task>>>("get_pending_task")
-                    .call(())
-            },
-        }
-    }
-
-    pub async fn peek_pending_task(&self) -> Option<Pending<TaskId>> {
-        match self {
-            Self::Real(s) => s.peek_pending_task().await,
-            Self::Mock(m) => unsafe {
-                m.get::<_, Option<Pending<TaskId>>>("peek_pending_task")
-                    .call(())
-            },
+            Self::Real(s) => s.get_pending_tasks(tasks).await,
+            Self::Mock(m) => unsafe { m.get("get_pending_task").call(tasks) },
         }
     }
 
@@ -400,14 +290,18 @@ pub mod test {
     ) -> Result<Vec<Task>> {
         match self {
             Self::Real(s) => s.list_tasks(from, filter, limit).await,
-            Self::Mock(_m) => todo!(),
+            Self::Mock(m) => unsafe { m.get("list_tasks").call((from, filter, limit)) },
         }
     }
 
-    pub async fn dump(&self, path: &Path, update_file_store: UpdateFileStore) -> Result<()> {
+    pub async fn dump(
+        &self,
+        path: impl AsRef<Path>,
+        update_file_store: UpdateFileStore,
+    ) -> Result<()> {
         match self {
             Self::Real(s) => s.dump(path, update_file_store).await,
-            Self::Mock(_m) => todo!(),
+            Self::Mock(m) => unsafe { m.get("dump").call((path, update_file_store)) },
         }
     }
 
@@ -425,13 +319,6 @@ pub mod test {
         }
     }
 
-    pub async fn register_job(&self, content: Job) {
-        match self {
-            Self::Real(s) => s.register_job(content).await,
-            Self::Mock(_m) => todo!(),
-        }
-    }
-
     pub fn load_dump(path: impl AsRef<Path>, env: Arc<Env>) -> anyhow::Result<()> {
         TaskStore::load_dump(path, env)
     }
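The new `filter_fn` predicate composes with the existing index filter and is what lets the scheduler lazily re-fetch only unfinished tasks instead of keeping an in-memory pending queue. A sketch of the call pattern, mirroring `Scheduler::fetch_pending_tasks` above; the helper name is assumed for illustration, and `TaskStore`, `Task`, `TaskId` and the tasks `Result` alias are taken to be in scope inside meilisearch-lib:

    async fn unfinished_tasks(store: &TaskStore, from: TaskId) -> Result<Vec<Task>> {
        let mut filter = TaskFilter::default();
        filter.filter_fn(|task| !task.is_finished());
        // offset = Some(from), no limit: everything from `from` onward
        // that the predicate keeps.
        store.list_tasks(Some(from), Some(filter), None).await
    }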
meilisearch-lib/src/tasks/task_store/store.rs
@@ -19,7 +19,7 @@ use crate::tasks::task::{Task, TaskId};
 
 use super::super::Result;
 
-use super::{Pending, TaskFilter};
+use super::TaskFilter;
 
 enum IndexUidTaskIdCodec {}
 
@@ -84,41 +84,6 @@ impl Store {
         })
     }
 
-    /// This function should be called *right after* creating the store.
-    /// It put back all unfinished update in the `Created` state. This
-    /// allow us to re-enqueue an update that didn't had the time to finish
-    /// when Meilisearch closed.
-    pub fn reset_and_return_unfinished_tasks(&mut self) -> Result<BinaryHeap<Pending<TaskId>>> {
-        let mut unfinished_tasks: BinaryHeap<Pending<TaskId>> = BinaryHeap::new();
-
-        let mut wtxn = self.wtxn()?;
-        let mut iter = self.tasks.rev_iter_mut(&mut wtxn)?;
-
-        while let Some(entry) = iter.next() {
-            let entry = entry?;
-            let (id, mut task): (BEU64, Task) = entry;
-
-            // Since all tasks are ordered, we can stop iterating when we encounter our first non-finished task.
-            if task.is_finished() {
-                break;
-            }
-
-            // we only keep the first state. It's supposed to be a `Created` state.
-            task.events.drain(1..);
-            unfinished_tasks.push(Pending::Task(id.get()));
-
-            // Since we own the id and the task this is a safe operation.
-            unsafe {
-                iter.put_current(&id, &task)?;
-            }
-        }
-
-        drop(iter);
-        wtxn.commit()?;
-
-        Ok(unfinished_tasks)
-    }
-
     pub fn wtxn(&self) -> Result<RwTxn> {
         Ok(self.env.write_txn()?)
     }
@@ -166,7 +131,11 @@ impl Store {
             .map(|limit| (limit as u64).saturating_add(from))
             .unwrap_or(u64::MAX);
         let iter: Box<dyn Iterator<Item = StdResult<_, heed::Error>>> = match filter {
-            Some(filter) => {
+            Some(
+                ref filter @ TaskFilter {
+                    indexes: Some(_), ..
+                },
+            ) => {
                 let iter = self
                     .compute_candidates(txn, filter, range)?
                     .into_iter()
@@ -174,15 +143,24 @@ impl Store {
 
                 Box::new(iter)
             }
-            None => Box::new(
+            _ => Box::new(
                 self.tasks
                     .rev_range(txn, &(BEU64::new(range.start)..BEU64::new(range.end)))?
                     .map(|r| r.map(|(_, t)| t)),
             ),
         };
 
+        let apply_filter = |task: &StdResult<_, heed::Error>| match task {
+            Ok(ref t) => filter
+                .as_ref()
+                .and_then(|filter| filter.filter_fn.as_ref())
+                .map(|f| f(t))
+                .unwrap_or(true),
+            Err(_) => true,
+        };
+        // Collect `limit` tasks if a limit was set, or all of them.
         let tasks = iter
+            .filter(apply_filter)
             .take(limit.unwrap_or(usize::MAX))
            .try_fold::<_, _, StdResult<_, heed::Error>>(Vec::new(), |mut v, task| {
                v.push(task?);
@@ -195,11 +173,11 @@ impl Store {
     fn compute_candidates(
         &self,
         txn: &heed::RoTxn,
-        filter: TaskFilter,
+        filter: &TaskFilter,
         range: Range<TaskId>,
     ) -> Result<BinaryHeap<TaskId>> {
         let mut candidates = BinaryHeap::new();
-        if let Some(indexes) = filter.indexes {
+        if let Some(ref indexes) = filter.indexes {
             for index in indexes {
                 // We need to prefix search the null terminated string to make sure that we only
                 // get exact matches for the index, and not other uids that would share the same
@@ -290,13 +268,6 @@ pub mod test {
         Ok(Self::Real(Store::new(env)?))
     }
 
-    pub fn reset_and_return_unfinished_tasks(&mut self) -> Result<BinaryHeap<Pending<TaskId>>> {
-        match self {
-            MockStore::Real(index) => index.reset_and_return_unfinished_tasks(),
-            MockStore::Fake(_) => todo!(),
-        }
-    }
-
     pub fn wtxn(&self) -> Result<RwTxn> {
         match self {
             MockStore::Real(index) => index.wtxn(),
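The comment in `compute_candidates` about prefix-searching the null-terminated uid is worth spelling out: the store packs index uid and task id into one key, so scanning by the bare uid prefix would also match longer uids that share it. A sketch of the failure the terminator prevents; the exact byte layout is an assumption based on the codec name `IndexUidTaskIdCodec`:

    fn main() {
        // Keys are "<uid><NUL><task id>". Without a terminator, the uid
        // "test" is a prefix of "test1", so a prefix scan for "test" would
        // leak tasks of "test1" into the candidate set.
        let keys = ["test\u{0}1", "test\u{0}42", "test1\u{0}7"];

        let naive: Vec<_> = keys.iter().filter(|k| k.starts_with("test")).collect();
        assert_eq!(naive.len(), 3); // wrong: includes "test1"

        let exact: Vec<_> = keys.iter().filter(|k| k.starts_with("test\u{0}")).collect();
        assert_eq!(exact.len(), 2); // right: the NUL byte ends the uid
    }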
meilisearch-lib/src/tasks/update_loop.rs (new file, 107 lines)
@@ -0,0 +1,107 @@
+use std::sync::Arc;
+use std::time::Duration;
+
+use chrono::Utc;
+use tokio::sync::{watch, RwLock};
+use tokio::time::interval_at;
+
+use super::batch::Batch;
+use super::error::Result;
+use super::scheduler::Pending;
+use super::{Scheduler, TaskPerformer};
+use crate::tasks::task::TaskEvent;
+
+/// The update loop sequentially performs batches of updates by asking the scheduler for a batch,
+/// and handing it to the `TaskPerformer`.
+pub struct UpdateLoop<P: TaskPerformer> {
+    scheduler: Arc<RwLock<Scheduler>>,
+    performer: Arc<P>,
+
+    notifier: Option<watch::Receiver<()>>,
+    debounce_duration: Option<Duration>,
+}
+
+impl<P> UpdateLoop<P>
+where
+    P: TaskPerformer + Send + Sync + 'static,
+{
+    pub fn new(
+        scheduler: Arc<RwLock<Scheduler>>,
+        performer: Arc<P>,
+        debounce_duration: Option<Duration>,
+        notifier: watch::Receiver<()>,
+    ) -> Self {
+        Self {
+            scheduler,
+            performer,
+            debounce_duration,
+            notifier: Some(notifier),
+        }
+    }
+
+    pub async fn run(mut self) {
+        let mut notifier = self.notifier.take().unwrap();
+
+        loop {
+            if notifier.changed().await.is_err() {
+                break;
+            }
+
+            if let Some(t) = self.debounce_duration {
+                let mut interval = interval_at(tokio::time::Instant::now() + t, t);
+                interval.tick().await;
+            };
+
+            if let Err(e) = self.process_next_batch().await {
+                log::error!("an error occurred while processing an update batch: {}", e);
+            }
+        }
+    }
+
+    async fn process_next_batch(&self) -> Result<()> {
+        let pending = { self.scheduler.write().await.prepare().await? };
+        match pending {
+            Pending::Batch(mut batch) => {
+                for task in &mut batch.tasks {
+                    task.events.push(TaskEvent::Processing(Utc::now()));
+                }
+
+                batch.tasks = {
+                    self.scheduler
+                        .read()
+                        .await
+                        .update_tasks(batch.tasks)
+                        .await?
+                };
+
+                let performer = self.performer.clone();
+
+                let batch = performer.process_batch(batch).await;
+
+                self.handle_batch_result(batch).await?;
+            }
+            Pending::Job(job) => {
+                let performer = self.performer.clone();
+                performer.process_job(job).await;
+            }
+            Pending::Nothing => (),
+        }
+
+        Ok(())
+    }
+
+    /// Handles the result from a processed batch.
+    ///
+    /// When a task is processed, the result of the process is pushed to its event list. The
+    /// `handle_batch_result` method makes sure that the new state is saved to the store.
+    /// The tasks are then removed from the processing queue.
+    async fn handle_batch_result(&self, mut batch: Batch) -> Result<()> {
+        let mut scheduler = self.scheduler.write().await;
+        let tasks = scheduler.update_tasks(batch.tasks).await?;
+        scheduler.finish();
+        drop(scheduler);
+        batch.tasks = tasks;
+        self.performer.finish(&batch).await;
+        Ok(())
+    }
+}