/*!
This module handles the creation and processing of batch operations.

A batch is a combination of multiple tasks that can be processed at once.
Executing a batch operation should always be functionally equivalent to
executing each of its tasks' operations individually and in order.

For example, if the user sends two tasks:
1. import documents X
2. import documents Y

We can combine the two tasks in a single batch:
1. import documents X and Y

Processing this batch is functionally equivalent to processing the two
tasks individually, but should be much faster since we are only performing
one indexing operation.
*/
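
// Note (descriptive, not enforced by the type system): the equivalence
// documented above means that for any batch `b` combining tasks `t1..tn`,
// processing `b` must leave the indexes in the same state as processing
// `t1`, then `t2`, ..., then `tn` individually, in that order.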

use std::collections::{BTreeSet, HashSet};
use std::ffi::OsStr;
use std::fmt;
use std::fs::{self, File};
use std::io::BufWriter;
use std::sync::atomic::{self, AtomicU64};
use std::time::Duration;

use bumpalo::collections::CollectIn;
use bumpalo::Bump;
use dump::IndexMetadata;
use meilisearch_types::error::Code;
use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader, PrimaryKey};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::update::new::indexer::{self, UpdateByFunction};
use meilisearch_types::milli::update::{IndexDocumentsMethod, Settings as MilliSettings};
use meilisearch_types::milli::vector::parsed_vectors::{
    ExplicitVectors, VectorOrArrayOfVectors, RESERVED_VECTORS_FIELD_NAME,
};
use meilisearch_types::milli::{self, Filter, ThreadPoolNoAbortBuilder};
use meilisearch_types::settings::{apply_settings_to_builder, Settings, Unchecked};
use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
use meilisearch_types::{compression, Index, VERSION_FILE_NAME};
use roaring::RoaringBitmap;
use time::macros::format_description;
use time::OffsetDateTime;
use uuid::Uuid;

use crate::autobatcher::{self, BatchKind};
use crate::utils::{self, swap_index_uid_in_task};
use crate::{Error, IndexScheduler, ProcessingTasks, Result, TaskId};
/// Represents a combination of tasks that can all be processed at the same time.
///
/// A batch contains the set of tasks that it represents (accessible through
/// [`self.ids()`](Batch::ids)), as well as additional information on how it
/// should be processed.
#[derive(Debug)]
pub(crate) enum Batch {
    TaskCancelation {
        /// The task cancelation itself.
        task: Task,
        /// The date and time at which the previously processing tasks started.
        previous_started_at: OffsetDateTime,
        /// The list of tasks that were processing when this task cancelation appeared.
        previous_processing_tasks: RoaringBitmap,
    },
    TaskDeletions(Vec<Task>),
    SnapshotCreation(Vec<Task>),
    Dump(Task),
    IndexOperation {
        op: IndexOperation,
        must_create_index: bool,
    },
    IndexCreation {
        index_uid: String,
        primary_key: Option<String>,
        task: Task,
    },
    IndexUpdate {
        index_uid: String,
        primary_key: Option<String>,
        task: Task,
    },
    IndexDeletion {
        index_uid: String,
        tasks: Vec<Task>,
        index_has_been_created: bool,
    },
    IndexSwap {
        task: Task,
    },
}

#[derive(Debug)]
pub(crate) enum DocumentOperation {
    Add(Uuid),
    Delete(Vec<String>),
}
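
// Descriptive note: `Add` carries the UUID of the update file that contains
// the documents to index, while `Delete` carries the external ids of the
// documents to remove (see how `create_next_batch_index` builds these
// operations below).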

/// A [batch](Batch) that combines multiple tasks operating on an index.
#[derive(Debug)]
pub(crate) enum IndexOperation {
    DocumentOperation {
        index_uid: String,
        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        documents_counts: Vec<u64>,
        operations: Vec<DocumentOperation>,
        tasks: Vec<Task>,
    },
    DocumentEdition {
        index_uid: String,
        task: Task,
    },
    DocumentDeletion {
        index_uid: String,
        tasks: Vec<Task>,
    },
    DocumentClear {
        index_uid: String,
        tasks: Vec<Task>,
    },
    Settings {
        index_uid: String,
        // The boolean indicates if it's a settings deletion or creation.
        settings: Vec<(bool, Settings<Unchecked>)>,
        tasks: Vec<Task>,
    },
    DocumentClearAndSetting {
        index_uid: String,
        cleared_tasks: Vec<Task>,

        // The boolean indicates if it's a settings deletion or creation.
        settings: Vec<(bool, Settings<Unchecked>)>,
        settings_tasks: Vec<Task>,
    },
    SettingsAndDocumentOperation {
        index_uid: String,

        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        documents_counts: Vec<u64>,
        operations: Vec<DocumentOperation>,
        document_import_tasks: Vec<Task>,

        // The boolean indicates if it's a settings deletion or creation.
        settings: Vec<(bool, Settings<Unchecked>)>,
        settings_tasks: Vec<Task>,
    },
}

impl Batch {
    /// Return the task ids associated with this batch.
    pub fn ids(&self) -> RoaringBitmap {
        match self {
            Batch::TaskCancelation { task, .. }
            | Batch::Dump(task)
            | Batch::IndexCreation { task, .. }
            | Batch::IndexUpdate { task, .. } => {
                RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
            }
            Batch::SnapshotCreation(tasks)
            | Batch::TaskDeletions(tasks)
            | Batch::IndexDeletion { tasks, .. } => {
                RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
            }
            Batch::IndexOperation { op, .. } => match op {
                IndexOperation::DocumentOperation { tasks, .. }
                | IndexOperation::Settings { tasks, .. }
                | IndexOperation::DocumentDeletion { tasks, .. }
                | IndexOperation::DocumentClear { tasks, .. } => {
                    RoaringBitmap::from_iter(tasks.iter().map(|task| task.uid))
                }
                IndexOperation::DocumentEdition { task, .. } => {
                    RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
                }
                IndexOperation::SettingsAndDocumentOperation {
                    document_import_tasks: tasks,
                    settings_tasks: other,
                    ..
                }
                | IndexOperation::DocumentClearAndSetting {
                    cleared_tasks: tasks,
                    settings_tasks: other,
                    ..
                } => RoaringBitmap::from_iter(tasks.iter().chain(other).map(|task| task.uid)),
            },
            Batch::IndexSwap { task } => {
                RoaringBitmap::from_sorted_iter(std::iter::once(task.uid)).unwrap()
            }
        }
    }

    /// Return the index UID associated with this batch.
    pub fn index_uid(&self) -> Option<&str> {
        use Batch::*;
        match self {
            TaskCancelation { .. }
            | TaskDeletions(_)
            | SnapshotCreation(_)
            | Dump(_)
            | IndexSwap { .. } => None,
            IndexOperation { op, .. } => Some(op.index_uid()),
            IndexCreation { index_uid, .. }
            | IndexUpdate { index_uid, .. }
            | IndexDeletion { index_uid, .. } => Some(index_uid),
        }
    }
}
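
// Illustrative usage (hypothetical caller, not part of this module):
//
//     if let Some(uid) = batch.index_uid() {
//         tracing::debug!("processing tasks {:?} on index {uid}", batch.ids());
//     }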

impl fmt::Display for Batch {
    /// The text used when we debug the profiling reports.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let index_uid = self.index_uid();
        let tasks = self.ids();
        match self {
            Batch::TaskCancelation { .. } => f.write_str("TaskCancelation")?,
            Batch::TaskDeletions(_) => f.write_str("TaskDeletion")?,
            Batch::SnapshotCreation(_) => f.write_str("SnapshotCreation")?,
            Batch::Dump(_) => f.write_str("Dump")?,
            Batch::IndexOperation { op, .. } => write!(f, "{op}")?,
            Batch::IndexCreation { .. } => f.write_str("IndexCreation")?,
            Batch::IndexUpdate { .. } => f.write_str("IndexUpdate")?,
            Batch::IndexDeletion { .. } => f.write_str("IndexDeletion")?,
            Batch::IndexSwap { .. } => f.write_str("IndexSwap")?,
        };
        match index_uid {
            Some(name) => f.write_fmt(format_args!(" on {name:?} from tasks: {tasks:?}")),
            None => f.write_fmt(format_args!(" from tasks: {tasks:?}")),
        }
    }
}

impl IndexOperation {
    pub fn index_uid(&self) -> &str {
        match self {
            IndexOperation::DocumentOperation { index_uid, .. }
            | IndexOperation::DocumentEdition { index_uid, .. }
            | IndexOperation::DocumentDeletion { index_uid, .. }
            | IndexOperation::DocumentClear { index_uid, .. }
            | IndexOperation::Settings { index_uid, .. }
            | IndexOperation::DocumentClearAndSetting { index_uid, .. }
            | IndexOperation::SettingsAndDocumentOperation { index_uid, .. } => index_uid,
        }
    }
}

impl fmt::Display for IndexOperation {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            IndexOperation::DocumentOperation { .. } => {
                f.write_str("IndexOperation::DocumentOperation")
            }
            IndexOperation::DocumentEdition { .. } => {
                f.write_str("IndexOperation::DocumentEdition")
            }
            IndexOperation::DocumentDeletion { .. } => {
                f.write_str("IndexOperation::DocumentDeletion")
            }
            IndexOperation::DocumentClear { .. } => f.write_str("IndexOperation::DocumentClear"),
            IndexOperation::Settings { .. } => f.write_str("IndexOperation::Settings"),
            IndexOperation::DocumentClearAndSetting { .. } => {
                f.write_str("IndexOperation::DocumentClearAndSetting")
            }
            IndexOperation::SettingsAndDocumentOperation { .. } => {
                f.write_str("IndexOperation::SettingsAndDocumentOperation")
            }
        }
    }
}

impl IndexScheduler {
    /// Convert a [`BatchKind`](crate::autobatcher::BatchKind) into a [`Batch`].
    ///
    /// ## Arguments
    /// - `rtxn`: read transaction
    /// - `index_uid`: name of the index affected by the operations of the autobatch
    /// - `batch`: the result of the autobatcher
    pub(crate) fn create_next_batch_index(
        &self,
        rtxn: &RoTxn,
        index_uid: String,
        batch: BatchKind,
        must_create_index: bool,
    ) -> Result<Option<Batch>> {
        match batch {
            BatchKind::DocumentClear { ids } => Ok(Some(Batch::IndexOperation {
                op: IndexOperation::DocumentClear {
                    tasks: self.get_existing_tasks(rtxn, ids)?,
                    index_uid,
                },
                must_create_index,
            })),
            BatchKind::DocumentEdition { id } => {
                let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
                match &task.kind {
                    KindWithContent::DocumentEdition { index_uid, .. } => {
                        Ok(Some(Batch::IndexOperation {
                            op: IndexOperation::DocumentEdition {
                                index_uid: index_uid.clone(),
                                task,
                            },
                            must_create_index: false,
                        }))
                    }
                    _ => unreachable!(),
                }
            }
            BatchKind::DocumentOperation { method, operation_ids, .. } => {
                let tasks = self.get_existing_tasks(rtxn, operation_ids)?;
                let primary_key = tasks
                    .iter()
                    .find_map(|task| match task.kind {
                        KindWithContent::DocumentAdditionOrUpdate { ref primary_key, .. } => {
                            // we want to stop on the first document addition
                            Some(primary_key.clone())
                        }
                        KindWithContent::DocumentDeletion { .. } => None,
                        _ => unreachable!(),
                    })
                    .flatten();

                let mut documents_counts = Vec::new();
                let mut operations = Vec::new();

                for task in tasks.iter() {
                    match task.kind {
                        KindWithContent::DocumentAdditionOrUpdate {
                            content_file,
                            documents_count,
                            ..
                        } => {
                            documents_counts.push(documents_count);
                            operations.push(DocumentOperation::Add(content_file));
                        }
                        KindWithContent::DocumentDeletion { ref documents_ids, .. } => {
                            documents_counts.push(documents_ids.len() as u64);
                            operations.push(DocumentOperation::Delete(documents_ids.clone()));
                        }
                        _ => unreachable!(),
                    }
                }

                Ok(Some(Batch::IndexOperation {
                    op: IndexOperation::DocumentOperation {
                        index_uid,
                        primary_key,
                        method,
                        documents_counts,
                        operations,
                        tasks,
                    },
                    must_create_index,
                }))
            }
            BatchKind::DocumentDeletion { deletion_ids, includes_by_filter: _ } => {
                let tasks = self.get_existing_tasks(rtxn, deletion_ids)?;

                Ok(Some(Batch::IndexOperation {
                    op: IndexOperation::DocumentDeletion { index_uid, tasks },
                    must_create_index,
                }))
            }
            BatchKind::Settings { settings_ids, .. } => {
                let tasks = self.get_existing_tasks(rtxn, settings_ids)?;

                let mut settings = Vec::new();
                for task in &tasks {
                    match task.kind {
                        KindWithContent::SettingsUpdate {
                            ref new_settings, is_deletion, ..
                        } => settings.push((is_deletion, *new_settings.clone())),
                        _ => unreachable!(),
                    }
                }

                Ok(Some(Batch::IndexOperation {
                    op: IndexOperation::Settings { index_uid, settings, tasks },
                    must_create_index,
                }))
            }
            BatchKind::ClearAndSettings { other, settings_ids, allow_index_creation } => {
                let (index_uid, settings, settings_tasks) = match self
                    .create_next_batch_index(
                        rtxn,
                        index_uid,
                        BatchKind::Settings { settings_ids, allow_index_creation },
                        must_create_index,
                    )?
                    .unwrap()
                {
                    Batch::IndexOperation {
                        op: IndexOperation::Settings { index_uid, settings, tasks, .. },
                        ..
                    } => (index_uid, settings, tasks),
                    _ => unreachable!(),
                };
                let (index_uid, cleared_tasks) = match self
                    .create_next_batch_index(
                        rtxn,
                        index_uid,
                        BatchKind::DocumentClear { ids: other },
                        must_create_index,
                    )?
                    .unwrap()
                {
                    Batch::IndexOperation {
                        op: IndexOperation::DocumentClear { index_uid, tasks },
                        ..
                    } => (index_uid, tasks),
                    _ => unreachable!(),
                };

                Ok(Some(Batch::IndexOperation {
                    op: IndexOperation::DocumentClearAndSetting {
                        index_uid,
                        cleared_tasks,
                        settings,
                        settings_tasks,
                    },
                    must_create_index,
                }))
            }
            BatchKind::SettingsAndDocumentOperation {
                settings_ids,
                method,
                allow_index_creation,
                primary_key,
                operation_ids,
            } => {
                let settings = self.create_next_batch_index(
                    rtxn,
                    index_uid.clone(),
                    BatchKind::Settings { settings_ids, allow_index_creation },
                    must_create_index,
                )?;

                let document_import = self.create_next_batch_index(
                    rtxn,
                    index_uid.clone(),
                    BatchKind::DocumentOperation {
                        method,
                        allow_index_creation,
                        primary_key,
                        operation_ids,
                    },
                    must_create_index,
                )?;

                match (document_import, settings) {
                    (
                        Some(Batch::IndexOperation {
                            op:
                                IndexOperation::DocumentOperation {
                                    primary_key,
                                    documents_counts,
                                    operations,
                                    tasks: document_import_tasks,
                                    ..
                                },
                            ..
                        }),
                        Some(Batch::IndexOperation {
                            op: IndexOperation::Settings { settings, tasks: settings_tasks, .. },
                            ..
                        }),
                    ) => Ok(Some(Batch::IndexOperation {
                        op: IndexOperation::SettingsAndDocumentOperation {
                            index_uid,
                            primary_key,
                            method,
                            documents_counts,
                            operations,
                            document_import_tasks,
                            settings,
                            settings_tasks,
                        },
                        must_create_index,
                    })),
                    _ => unreachable!(),
                }
            }
            BatchKind::IndexCreation { id } => {
                let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
                let (index_uid, primary_key) = match &task.kind {
                    KindWithContent::IndexCreation { index_uid, primary_key } => {
                        (index_uid.clone(), primary_key.clone())
                    }
                    _ => unreachable!(),
                };
                Ok(Some(Batch::IndexCreation { index_uid, primary_key, task }))
            }
            BatchKind::IndexUpdate { id } => {
                let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
                let primary_key = match &task.kind {
                    KindWithContent::IndexUpdate { primary_key, .. } => primary_key.clone(),
                    _ => unreachable!(),
                };
                Ok(Some(Batch::IndexUpdate { index_uid, primary_key, task }))
            }
            BatchKind::IndexDeletion { ids } => Ok(Some(Batch::IndexDeletion {
                index_uid,
                index_has_been_created: must_create_index,
                tasks: self.get_existing_tasks(rtxn, ids)?,
            })),
            BatchKind::IndexSwap { id } => {
                let task = self.get_task(rtxn, id)?.ok_or(Error::CorruptedTaskQueue)?;
                Ok(Some(Batch::IndexSwap { task }))
            }
        }
    }
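
    // Illustrative example (descriptive comment): a `BatchKind::ClearAndSettings`
    // coming from the autobatcher is turned into a single `Batch::IndexOperation`
    // holding an `IndexOperation::DocumentClearAndSetting`, built by recursively
    // creating the `Settings` and `DocumentClear` sub-batches above and merging
    // their tasks.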

    /// Create the next batch to be processed:
    /// 1. We get the *last* task to cancel.
    /// 2. We get the *next* task to delete.
    /// 3. We get the *next* snapshot to process.
    /// 4. We get the *next* dump to process.
    /// 5. We get the *next* tasks to process for a specific index.
    #[tracing::instrument(level = "trace", skip(self, rtxn), target = "indexing::scheduler")]
    pub(crate) fn create_next_batch(&self, rtxn: &RoTxn) -> Result<Option<Batch>> {
        #[cfg(test)]
        self.maybe_fail(crate::tests::FailureLocation::InsideCreateBatch)?;

        let enqueued = &self.get_status(rtxn, Status::Enqueued)?;
        let to_cancel = self.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;

        // 1. we get the last task to cancel.
        if let Some(task_id) = to_cancel.max() {
            // We retrieve the tasks that were processing before this task cancelation started.
            // We must *not* reset the processing tasks before calling this method.
            let ProcessingTasks { started_at, processing, progress: _ } =
                &*self.processing_tasks.read().unwrap();
            return Ok(Some(Batch::TaskCancelation {
                task: self.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?,
                previous_started_at: *started_at,
                previous_processing_tasks: processing.clone(),
            }));
        }

        // 2. we get the next task to delete
        let to_delete = self.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
        if !to_delete.is_empty() {
            let tasks = self.get_existing_tasks(rtxn, to_delete)?;
            return Ok(Some(Batch::TaskDeletions(tasks)));
        }

        // 3. we batch the snapshot.
        let to_snapshot = self.get_kind(rtxn, Kind::SnapshotCreation)? & enqueued;
        if !to_snapshot.is_empty() {
            return Ok(Some(Batch::SnapshotCreation(self.get_existing_tasks(rtxn, to_snapshot)?)));
        }

        // 4. we batch the dumps.
        let to_dump = self.get_kind(rtxn, Kind::DumpCreation)? & enqueued;
        if let Some(to_dump) = to_dump.min() {
            return Ok(Some(Batch::Dump(
                self.get_task(rtxn, to_dump)?.ok_or(Error::CorruptedTaskQueue)?,
            )));
        }

        // 5. We make a batch from the unprioritised tasks. Start by taking the next enqueued task.
        let task_id = if let Some(task_id) = enqueued.min() { task_id } else { return Ok(None) };
        let task = self.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;

        // If the task is not associated with any index, verify that it is an index swap and
        // create the batch directly. Otherwise, get the index name associated with the task
        // and use the autobatcher to batch the enqueued tasks associated with it.

        let index_name = if let Some(&index_name) = task.indexes().first() {
            index_name
        } else {
            assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
            return Ok(Some(Batch::IndexSwap { task }));
        };

        let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
        let mut primary_key = None;
        if index_already_exists {
            let index = self.index_mapper.index(rtxn, index_name)?;
            let rtxn = index.read_txn()?;
            primary_key = index.primary_key(&rtxn)?.map(|pk| pk.to_string());
        }

        let index_tasks = self.index_tasks(rtxn, index_name)? & enqueued;

        // If autobatching is disabled we only take one task at a time.
        // Otherwise, we take at most `max_number_of_batched_tasks` tasks to create the batch.
        let tasks_limit =
            if self.autobatching_enabled { self.max_number_of_batched_tasks } else { 1 };

        let enqueued = index_tasks
            .into_iter()
            .take(tasks_limit)
            .map(|task_id| {
                self.get_task(rtxn, task_id)
                    .and_then(|task| task.ok_or(Error::CorruptedTaskQueue))
                    .map(|task| (task.uid, task.kind))
            })
            .collect::<Result<Vec<_>>>()?;

        if let Some((batchkind, create_index)) =
            autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
        {
            return self.create_next_batch_index(
                rtxn,
                index_name.to_string(),
                batchkind,
                create_index,
            );
        }

        // If we found no tasks then we were notified for something that got autobatched
        // somehow and there is nothing to do.
        Ok(None)
    }
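
    // Priority example (illustrative): if a task cancelation, a dump creation,
    // and several document additions are all enqueued at once, the next batch
    // created here contains the cancelation alone; the dump and the additions
    // are picked up by later calls, in the order of the steps above.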

    /// Apply the operation associated with the given batch.
    ///
    /// ## Return
    /// The list of tasks that were processed. The metadata of each task in the returned
    /// list is updated accordingly, with the exception of its date fields
    /// [`finished_at`](meilisearch_types::tasks::Task::finished_at) and [`started_at`](meilisearch_types::tasks::Task::started_at).
    #[tracing::instrument(level = "trace", skip(self, batch), target = "indexing::scheduler", fields(batch=batch.to_string()))]
    pub(crate) fn process_batch(&self, batch: Batch) -> Result<Vec<Task>> {
        #[cfg(test)]
        {
            self.maybe_fail(crate::tests::FailureLocation::InsideProcessBatch)?;
            self.maybe_fail(crate::tests::FailureLocation::PanicInsideProcessBatch)?;
            self.breakpoint(crate::Breakpoint::InsideProcessBatch);
        }

        match batch {
            Batch::TaskCancelation { mut task, previous_started_at, previous_processing_tasks } => {
                // 1. Retrieve the tasks that matched the query at enqueue-time.
                let matched_tasks =
                    if let KindWithContent::TaskCancelation { tasks, query: _ } = &task.kind {
                        tasks
                    } else {
                        unreachable!()
                    };

                let mut wtxn = self.env.write_txn()?;
                let canceled_tasks_content_uuids = self.cancel_matched_tasks(
                    &mut wtxn,
                    task.uid,
                    matched_tasks,
                    previous_started_at,
                    &previous_processing_tasks,
                )?;

                task.status = Status::Succeeded;
                match &mut task.details {
                    Some(Details::TaskCancelation {
                        matched_tasks: _,
                        canceled_tasks,
                        original_filter: _,
                    }) => {
                        *canceled_tasks = Some(canceled_tasks_content_uuids.len() as u64);
                    }
                    _ => unreachable!(),
                }

                // We must only remove the content files if the transaction is successfully committed,
                // and if errors occur while we are deleting files we must do our best to delete
                // everything. We do not return the encountered errors when deleting the content
                // files as it is not a breaking operation and we can safely continue our job.
                match wtxn.commit() {
                    Ok(()) => {
                        for content_uuid in canceled_tasks_content_uuids {
                            if let Err(error) = self.delete_update_file(content_uuid) {
                                tracing::error!(
                                    file_content_uuid = %content_uuid,
                                    %error,
                                    "Failed deleting content file"
                                )
                            }
                        }
                    }
                    Err(e) => return Err(e.into()),
                }

                Ok(vec![task])
            }
            Batch::TaskDeletions(mut tasks) => {
                // 1. Retrieve the tasks that matched the query at enqueue-time.
                let mut matched_tasks = RoaringBitmap::new();

                for task in tasks.iter() {
                    if let KindWithContent::TaskDeletion { tasks, query: _ } = &task.kind {
                        matched_tasks |= tasks;
                    } else {
                        unreachable!()
                    }
                }

                let mut wtxn = self.env.write_txn()?;
                let mut deleted_tasks = self.delete_matched_tasks(&mut wtxn, &matched_tasks)?;
                wtxn.commit()?;

                for task in tasks.iter_mut() {
                    task.status = Status::Succeeded;
                    let KindWithContent::TaskDeletion { tasks, query: _ } = &task.kind else {
                        unreachable!()
                    };

                    let deleted_tasks_count = deleted_tasks.intersection_len(tasks);
                    deleted_tasks -= tasks;

                    match &mut task.details {
                        Some(Details::TaskDeletion {
                            matched_tasks: _,
                            deleted_tasks,
                            original_filter: _,
                        }) => {
                            *deleted_tasks = Some(deleted_tasks_count);
                        }
                        _ => unreachable!(),
                    }
                }
                Ok(tasks)
            }
            Batch::SnapshotCreation(mut tasks) => {
                fs::create_dir_all(&self.snapshots_path)?;
                let temp_snapshot_dir = tempfile::tempdir()?;

                // 1. Snapshot the version file.
                let dst = temp_snapshot_dir.path().join(VERSION_FILE_NAME);
                fs::copy(&self.version_file_path, dst)?;

                // 2. Snapshot the index-scheduler LMDB env
                //
                // When we call copy_to_file, LMDB opens a read transaction by itself,
                // we can't provide our own. It is an issue as we would like to know
                // the update files to copy but new ones can be enqueued between the copy
                // of the env and the new transaction we open to retrieve the enqueued tasks.
                // So we prefer opening a new transaction after copying the env and copying more
                // update files than not enough.
                //
                // Note that there cannot be any update files deleted between those
                // two read operations as the task processing is synchronous.

                // 2.1 First copy the LMDB env of the index-scheduler
                let dst = temp_snapshot_dir.path().join("tasks");
                fs::create_dir_all(&dst)?;
                self.env.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

                // 2.2 Create a read transaction on the index-scheduler
                let rtxn = self.env.read_txn()?;

                // 2.3 Create the update files directory
                let update_files_dir = temp_snapshot_dir.path().join("update_files");
                fs::create_dir_all(&update_files_dir)?;

                // 2.4 Only copy the update files of the enqueued tasks
                for task_id in self.get_status(&rtxn, Status::Enqueued)? {
                    let task = self.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
                    if let Some(content_uuid) = task.content_uuid() {
                        let src = self.file_store.get_update_path(content_uuid);
                        let dst = update_files_dir.join(content_uuid.to_string());
                        fs::copy(src, dst)?;
                    }
                }

                // 3. Snapshot every index
                for result in self.index_mapper.index_mapping.iter(&rtxn)? {
                    let (name, uuid) = result?;
                    let index = self.index_mapper.index(&rtxn, name)?;
                    let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
                    fs::create_dir_all(&dst)?;
                    index.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;
                }

                drop(rtxn);

                // 4. Snapshot the auth LMDB env
                let dst = temp_snapshot_dir.path().join("auth");
                fs::create_dir_all(&dst)?;
                // TODO We can't use the open_auth_store_env function here but we should
                let auth = unsafe {
                    milli::heed::EnvOpenOptions::new()
                        .map_size(1024 * 1024 * 1024) // 1 GiB
                        .max_dbs(2)
                        .open(&self.auth_path)
                }?;
                auth.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

                // 5. Copy and tarball the flat snapshot
                // 5.1 Find the original name of the database
                // TODO find a better way to get this path
                let mut base_path = self.env.path().to_owned();
                base_path.pop();
                let db_name = base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms");

                // 5.2 Tarball the content of the snapshot in a tempfile with a .snapshot extension
                let snapshot_path = self.snapshots_path.join(format!("{}.snapshot", db_name));
                let temp_snapshot_file = tempfile::NamedTempFile::new_in(&self.snapshots_path)?;
                compression::to_tar_gz(temp_snapshot_dir.path(), temp_snapshot_file.path())?;
                let file = temp_snapshot_file.persist(snapshot_path)?;

                // 5.3 Change the permission to make the snapshot readonly
                let mut permissions = file.metadata()?.permissions();
                permissions.set_readonly(true);
                #[cfg(unix)]
                {
                    use std::os::unix::fs::PermissionsExt;
                    #[allow(clippy::non_octal_unix_permissions)]
                    // r--r--r-- (0o444): the snapshot is read-only for everyone
                    permissions.set_mode(0b100100100);
                }

                file.set_permissions(permissions)?;

                for task in &mut tasks {
                    task.status = Status::Succeeded;
                }

                Ok(tasks)
            }
            Batch::Dump(mut task) => {
                let started_at = OffsetDateTime::now_utc();
                let (keys, instance_uid) =
                    if let KindWithContent::DumpCreation { keys, instance_uid } = &task.kind {
                        (keys, instance_uid)
                    } else {
                        unreachable!();
                    };
                let dump = dump::DumpWriter::new(*instance_uid)?;

                // 1. dump the keys
                let mut dump_keys = dump.create_keys()?;
                for key in keys {
                    dump_keys.push_key(key)?;
                }
                dump_keys.flush()?;

                let rtxn = self.env.read_txn()?;

                // 2. dump the tasks
                let mut dump_tasks = dump.create_tasks_queue()?;
                for ret in self.all_tasks.iter(&rtxn)? {
                    if self.must_stop_processing.get() {
                        return Err(Error::AbortedTask);
                    }

                    let (_, mut t) = ret?;
                    let status = t.status;
                    let content_file = t.content_uuid();

                    // In the case where we're dumping ourselves, we want to be marked as finished
                    // so that we don't loop over ourselves indefinitely.
                    if t.uid == task.uid {
                        let finished_at = OffsetDateTime::now_utc();

                        // We're going to fake the date because we don't know if everything is going to go well.
                        // But we need to dump the task as finished and successful.
                        // If something fails, everything will be set appropriately in the end.
                        t.status = Status::Succeeded;
                        t.started_at = Some(started_at);
                        t.finished_at = Some(finished_at);
                    }
                    let mut dump_content_file = dump_tasks.push_task(&t.into())?;

                    // 2.1. Dump the `content_file` associated with the task if there is one and the task is not finished yet.
                    if let Some(content_file) = content_file {
                        if self.must_stop_processing.get() {
                            return Err(Error::AbortedTask);
                        }
                        if status == Status::Enqueued {
                            let content_file = self.file_store.get_update(content_file)?;

                            let reader = DocumentsBatchReader::from_reader(content_file)
                                .map_err(milli::Error::from)?;

                            let (mut cursor, documents_batch_index) =
                                reader.into_cursor_and_fields_index();

                            while let Some(doc) =
                                cursor.next_document().map_err(milli::Error::from)?
                            {
                                dump_content_file
                                    .push_document(&obkv_to_object(doc, &documents_batch_index)?)?;
                            }
                            dump_content_file.flush()?;
                        }
                    }
                }
                dump_tasks.flush()?;

                // 3. Dump the indexes
                self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
                    let rtxn = index.read_txn()?;
                    let metadata = IndexMetadata {
                        uid: uid.to_owned(),
                        primary_key: index.primary_key(&rtxn)?.map(String::from),
                        created_at: index.created_at(&rtxn)?,
                        updated_at: index.updated_at(&rtxn)?,
                    };
                    let mut index_dumper = dump.create_index(uid, &metadata)?;

                    let fields_ids_map = index.fields_ids_map(&rtxn)?;
                    let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
                    let embedding_configs = index.embedding_configs(&rtxn)?;

                    // 3.1. Dump the documents
                    for ret in index.all_documents(&rtxn)? {
                        if self.must_stop_processing.get() {
                            return Err(Error::AbortedTask);
                        }

                        let (id, doc) = ret?;

                        let mut document = milli::obkv_to_json(&all_fields, &fields_ids_map, doc)?;

                        'inject_vectors: {
                            let embeddings = index.embeddings(&rtxn, id)?;

                            if embeddings.is_empty() {
                                break 'inject_vectors;
                            }

                            let vectors = document
                                .entry(RESERVED_VECTORS_FIELD_NAME.to_owned())
                                .or_insert(serde_json::Value::Object(Default::default()));

                            let serde_json::Value::Object(vectors) = vectors else {
                                return Err(milli::Error::UserError(
                                    milli::UserError::InvalidVectorsMapType {
                                        document_id: {
                                            if let Ok(Some(Ok(index))) = index
                                                .external_id_of(&rtxn, std::iter::once(id))
                                                .map(|it| it.into_iter().next())
                                            {
                                                index
                                            } else {
                                                format!("internal docid={id}")
                                            }
                                        },
                                        value: vectors.clone(),
                                    },
                                )
                                .into());
                            };

                            for (embedder_name, embeddings) in embeddings {
                                let user_provided = embedding_configs
                                    .iter()
                                    .find(|conf| conf.name == embedder_name)
                                    .is_some_and(|conf| conf.user_provided.contains(id));

                                let embeddings = ExplicitVectors {
                                    embeddings: Some(
                                        VectorOrArrayOfVectors::from_array_of_vectors(embeddings),
                                    ),
                                    regenerate: !user_provided,
                                };
                                vectors.insert(
                                    embedder_name,
                                    serde_json::to_value(embeddings).unwrap(),
                                );
                            }
                        }
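
                        // Descriptive note: at this point `document` carries the
                        // reserved vectors field (`RESERVED_VECTORS_FIELD_NAME`)
                        // mapping each embedder name to its embeddings plus a
                        // `regenerate` flag, which is false when the vectors were
                        // user-provided.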

                        index_dumper.push_document(&document)?;
                    }

                    // 3.2. Dump the settings
                    let settings = meilisearch_types::settings::settings(
                        index,
                        &rtxn,
                        meilisearch_types::settings::SecretPolicy::RevealSecrets,
                    )?;
                    index_dumper.settings(&settings)?;
                    Ok(())
                })?;

                // 4. Dump experimental feature settings
                let features = self.features().runtime_features();
                dump.create_experimental_features(features)?;

                let dump_uid = started_at.format(format_description!(
                    "[year repr:full][month repr:numerical][day padding:zero]-[hour padding:zero][minute padding:zero][second padding:zero][subsecond digits:3]"
                )).unwrap();
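
                // For example (illustrative value): a dump started at
                // 2022-10-13 15:02:59.826 UTC gets the uid "20221013-150259826".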

                if self.must_stop_processing.get() {
                    return Err(Error::AbortedTask);
                }
                let path = self.dumps_path.join(format!("{}.dump", dump_uid));
                let file = File::create(path)?;
                dump.persist_to(BufWriter::new(file))?;

                // if we reached this step we can tell the scheduler we succeeded in dumping ourselves.
                task.status = Status::Succeeded;
                task.details = Some(Details::Dump { dump_uid: Some(dump_uid) });
                Ok(vec![task])
            }
            Batch::IndexOperation { op, must_create_index } => {
                let index_uid = op.index_uid().to_string();
                let index = if must_create_index {
                    // create the index if it doesn't already exist
                    let wtxn = self.env.write_txn()?;
                    self.index_mapper.create_index(wtxn, &index_uid, None)?
                } else {
                    let rtxn = self.env.read_txn()?;
                    self.index_mapper.index(&rtxn, &index_uid)?
                };

                // the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
                self.index_mapper
                    .set_currently_updating_index(Some((index_uid.clone(), index.clone())));

                let mut index_wtxn = index.write_txn()?;
                let tasks = self.apply_index_operation(&mut index_wtxn, &index, op)?;
                index_wtxn.commit()?;

                // if the update processed successfully, we're going to store the new
                // stats of the index. Since the tasks have already been processed,
                // this is a non-critical operation: if it fails, we should not fail
                // the entire batch.
                let res = || -> Result<()> {
                    let index_rtxn = index.read_txn()?;
                    let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
                    let mut wtxn = self.env.write_txn()?;
                    self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
                    wtxn.commit()?;
                    Ok(())
                }();

                match res {
                    Ok(_) => (),
                    Err(e) => tracing::error!(
                        error = &e as &dyn std::error::Error,
                        "Could not write the stats of the index"
                    ),
                }

                Ok(tasks)
            }
            Batch::IndexCreation { index_uid, primary_key, task } => {
                let wtxn = self.env.write_txn()?;
                if self.index_mapper.exists(&wtxn, &index_uid)? {
                    return Err(Error::IndexAlreadyExists(index_uid));
                }
                self.index_mapper.create_index(wtxn, &index_uid, None)?;

                self.process_batch(Batch::IndexUpdate { index_uid, primary_key, task })
            }
            Batch::IndexUpdate { index_uid, primary_key, mut task } => {
                let rtxn = self.env.read_txn()?;
                let index = self.index_mapper.index(&rtxn, &index_uid)?;

                if let Some(primary_key) = primary_key.clone() {
                    let mut index_wtxn = index.write_txn()?;
                    let mut builder = MilliSettings::new(
                        &mut index_wtxn,
                        &index,
                        self.index_mapper.indexer_config(),
                    );
                    builder.set_primary_key(primary_key);
                    let must_stop_processing = self.must_stop_processing.clone();
                    builder.execute(
                        |indexing_step| tracing::debug!(update = ?indexing_step),
                        || must_stop_processing.get(),
                    )?;
                    index_wtxn.commit()?;
                }

                // drop rtxn before starting a new wtxn on the same db
                rtxn.commit()?;

                task.status = Status::Succeeded;
                task.details = Some(Details::IndexInfo { primary_key });

                // if the update processed successfully, we're going to store the new
                // stats of the index. Since the tasks have already been processed,
                // this is a non-critical operation: if it fails, we should not fail
                // the entire batch.
                let res = || -> Result<()> {
                    let mut wtxn = self.env.write_txn()?;
                    let index_rtxn = index.read_txn()?;
                    let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
                    self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
                    wtxn.commit()?;
                    Ok(())
                }();

                match res {
                    Ok(_) => (),
                    Err(e) => tracing::error!(
                        error = &e as &dyn std::error::Error,
                        "Could not write the stats of the index"
                    ),
                }

                Ok(vec![task])
            }
            Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {
                let wtxn = self.env.write_txn()?;

                // it's possible that the index doesn't exist
                let number_of_documents = || -> Result<u64> {
                    let index = self.index_mapper.index(&wtxn, &index_uid)?;
                    let index_rtxn = index.read_txn()?;
                    Ok(index.number_of_documents(&index_rtxn)?)
                }()
                .unwrap_or_default();

                // The write transaction is directly owned and committed inside.
                match self.index_mapper.delete_index(wtxn, &index_uid) {
                    Ok(()) => (),
                    Err(Error::IndexNotFound(_)) if index_has_been_created => (),
                    Err(e) => return Err(e),
                }

                // We set all the tasks' details to the default value.
                for task in &mut tasks {
                    task.status = Status::Succeeded;
                    task.details = match &task.kind {
                        KindWithContent::IndexDeletion { .. } => {
                            Some(Details::ClearAll { deleted_documents: Some(number_of_documents) })
                        }
                        otherwise => otherwise.default_finished_details(),
                    };
                }

                Ok(tasks)
            }
            Batch::IndexSwap { mut task } => {
                let mut wtxn = self.env.write_txn()?;
                let swaps = if let KindWithContent::IndexSwap { swaps } = &task.kind {
                    swaps
                } else {
                    unreachable!()
                };
                let mut not_found_indexes = BTreeSet::new();
                for IndexSwap { indexes: (lhs, rhs) } in swaps {
                    for index in [lhs, rhs] {
                        let index_exists = self.index_mapper.index_exists(&wtxn, index)?;
                        if !index_exists {
                            not_found_indexes.insert(index);
                        }
                    }
                }
                if !not_found_indexes.is_empty() {
                    if not_found_indexes.len() == 1 {
                        return Err(Error::SwapIndexNotFound(
                            not_found_indexes.into_iter().next().unwrap().clone(),
                        ));
                    } else {
                        return Err(Error::SwapIndexesNotFound(
                            not_found_indexes.into_iter().cloned().collect(),
                        ));
                    }
                }
                for swap in swaps {
                    self.apply_index_swap(&mut wtxn, task.uid, &swap.indexes.0, &swap.indexes.1)?;
                }
                wtxn.commit()?;
                task.status = Status::Succeeded;
                Ok(vec![task])
            }
        }
    }

    /// Swap the index `lhs` with the index `rhs`.
    fn apply_index_swap(&self, wtxn: &mut RwTxn, task_id: u32, lhs: &str, rhs: &str) -> Result<()> {
        // 1. Verify that both lhs and rhs are existing indexes
        let index_lhs_exists = self.index_mapper.index_exists(wtxn, lhs)?;
        if !index_lhs_exists {
            return Err(Error::IndexNotFound(lhs.to_owned()));
        }
        let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
        if !index_rhs_exists {
            return Err(Error::IndexNotFound(rhs.to_owned()));
        }

        // 2. Get the task set for index = name that appeared before the index swap task
        let mut index_lhs_task_ids = self.index_tasks(wtxn, lhs)?;
        index_lhs_task_ids.remove_range(task_id..);
        let mut index_rhs_task_ids = self.index_tasks(wtxn, rhs)?;
        index_rhs_task_ids.remove_range(task_id..);

        // 3. before_name -> new_name in the task's KindWithContent
        for task_id in &index_lhs_task_ids | &index_rhs_task_ids {
            let mut task = self.get_task(wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
            swap_index_uid_in_task(&mut task, (lhs, rhs));
            self.all_tasks.put(wtxn, &task_id, &task)?;
        }

        // 4. remove the task from indexuid = before_name
        // 5. add the task to indexuid = after_name
        self.update_index(wtxn, lhs, |lhs_tasks| {
            *lhs_tasks -= &index_lhs_task_ids;
            *lhs_tasks |= &index_rhs_task_ids;
        })?;
        self.update_index(wtxn, rhs, |rhs_tasks| {
            *rhs_tasks -= &index_rhs_task_ids;
            *rhs_tasks |= &index_lhs_task_ids;
        })?;

        // 6. Swap in the index mapper
        self.index_mapper.swap(wtxn, lhs, rhs)?;

        Ok(())
    }
|
|
|
|
|
2022-10-20 10:25:34 +02:00
|
|
|
/// Process the index operation on the given index.
|
|
|
|
///
|
|
|
|
/// ## Return
|
|
|
|
/// The list of processed tasks.
|
2024-01-23 09:41:59 +01:00
|
|
|
#[tracing::instrument(
|
|
|
|
level = "trace",
|
|
|
|
skip(self, index_wtxn, index),
|
|
|
|
target = "indexing::scheduler"
|
|
|
|
)]
|
2023-01-31 10:14:43 +01:00
|
|
|
fn apply_index_operation<'i>(
|
2022-09-29 18:15:50 +02:00
|
|
|
&self,
|
2023-11-22 18:21:19 +01:00
|
|
|
index_wtxn: &mut RwTxn<'i>,
|
2022-10-11 17:42:43 +02:00
|
|
|
index: &'i Index,
|
2022-09-29 18:15:50 +02:00
|
|
|
operation: IndexOperation,
|
|
|
|
) -> Result<Vec<Task>> {
|
2024-10-03 18:08:09 +02:00
|
|
|
let indexer_alloc = Bump::new();
|
|
|
|
|
2024-11-05 16:23:02 +01:00
|
|
|
let started_processing_at = std::time::Instant::now();
|
|
|
|
let secs_since_started_processing_at = AtomicU64::new(0);
|
|
|
|
const PRINT_SECS_DELTA: u64 = 1;
|
2024-11-04 15:10:40 +01:00
|
|
|
|
2024-11-05 16:23:02 +01:00
|
|
|
let processing_tasks = self.processing_tasks.clone();
|
|
|
|
let must_stop_processing = self.must_stop_processing.clone();
|
|
|
|
let send_progress = |progress| {
|
|
|
|
let now = std::time::Instant::now();
|
|
|
|
let elapsed = secs_since_started_processing_at.load(atomic::Ordering::Relaxed);
|
|
|
|
let previous = started_processing_at + Duration::from_secs(elapsed);
|
|
|
|
let elapsed = now - previous;
|
2024-11-04 15:10:40 +01:00
|
|
|
|
2024-11-05 16:23:02 +01:00
|
|
|
if elapsed.as_secs() < PRINT_SECS_DELTA {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
secs_since_started_processing_at
|
|
|
|
.store((now - started_processing_at).as_secs(), atomic::Ordering::Relaxed);
|
|
|
|
|
|
|
|
processing_tasks.write().unwrap().update_progress(progress);
|
|
|
|
};
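        // The closure above throttles progress reporting: it writes to
        // `processing_tasks` at most once per `PRINT_SECS_DELTA` second, so
        // frequent callbacks from the indexer do not contend on the lock.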

        match operation {
            IndexOperation::DocumentClear { mut tasks, .. } => {
                let count = milli::update::ClearDocuments::new(index_wtxn, index).execute()?;

                let mut first_clear_found = false;
                for task in &mut tasks {
                    task.status = Status::Succeeded;
                    // The first document clear will effectively delete every document
                    // in the database, but the following ones will clear 0 documents.
                    task.details = match &task.kind {
                        KindWithContent::DocumentClear { .. } => {
                            let count = if first_clear_found { 0 } else { count };
                            first_clear_found = true;
                            Some(Details::ClearAll { deleted_documents: Some(count) })
                        }
                        otherwise => otherwise.default_details(),
                    };
                }

                Ok(tasks)
            }
            IndexOperation::DocumentOperation {
                index_uid: _,
                primary_key,
                method,
                documents_counts: _,
                operations,
                mut tasks,
            } => {
                // TODO: at some point, for better efficiency, we might want to reuse the bumpalo for successive batches.
                // This is made difficult by the fact we're doing private clones of the index scheduler and sending it
                // to a fresh thread.
                let mut content_files = Vec::new();
                for operation in &operations {
                    if let DocumentOperation::Add(content_uuid) = operation {
                        let content_file = self.file_store.get_update(*content_uuid)?;
                        let mmap = unsafe { memmap2::Mmap::map(&content_file)? };
                        if !mmap.is_empty() {
                            content_files.push(mmap);
                        }
                    }
                }

                let rtxn = index.read_txn()?;
                let db_fields_ids_map = index.fields_ids_map(&rtxn)?;
                let mut new_fields_ids_map = db_fields_ids_map.clone();

                let mut content_files_iter = content_files.iter();
                let mut indexer = indexer::DocumentOperation::new(method);
                let embedders = index.embedding_configs(index_wtxn)?;
                let embedders = self.embedders(embedders)?;
                for operation in operations {
                    match operation {
                        DocumentOperation::Add(_content_uuid) => {
                            let mmap = content_files_iter.next().unwrap();
                            indexer.add_documents(mmap)?;
                            // builder = builder.with_embedders(embedders.clone());
                        }
                        DocumentOperation::Delete(document_ids) => {
                            let document_ids: bumpalo::collections::vec::Vec<_> = document_ids
                                .iter()
                                .map(|s| &*indexer_alloc.alloc_str(s))
                                .collect_in(&indexer_alloc);
                            indexer.delete_documents(document_ids.into_bump_slice());
                        }
                    }
                }

                let local_pool;
                let indexer_config = self.index_mapper.indexer_config();
                let pool = match &indexer_config.thread_pool {
                    Some(pool) => pool,
                    None => {
                        local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
                        &local_pool
                    }
                };
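                // All the CPU-heavy work below runs inside this pool through
                // `pool.install`: either the one from the indexer configuration
                // or a fallback pool built locally for this operation only.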

                let (document_changes, operation_stats, primary_key) = indexer.into_changes(
                    &indexer_alloc,
                    index,
                    &rtxn,
                    primary_key.as_deref(),
                    &mut new_fields_ids_map,
                )?;
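                // `document_changes` is the stream of changes to apply,
                // `operation_stats` holds one entry per payload (consumed just
                // below to fill in the task details), and `primary_key` is the
                // key the indexing step will use.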

                let mut addition = 0;
                for (stats, task) in operation_stats.into_iter().zip(&mut tasks) {
                    addition += stats.document_count;
                    match stats.error {
                        Some(error) => {
                            task.status = Status::Failed;
                            task.error = Some(milli::Error::UserError(error).into());
                        }
                        None => task.status = Status::Succeeded,
                    }

                    task.details = match task.details {
                        Some(Details::DocumentAdditionOrUpdate { received_documents, .. }) => {
                            Some(Details::DocumentAdditionOrUpdate {
                                received_documents,
                                indexed_documents: Some(stats.document_count),
                            })
                        }
                        Some(Details::DocumentDeletion { provided_ids, .. }) => {
                            Some(Details::DocumentDeletion {
                                provided_ids,
                                deleted_documents: Some(stats.document_count),
                            })
                        }
                        _ => {
                            // In the case of a `documentAdditionOrUpdate` or `documentDeletion`
                            // the details MUST be set to either addition or deletion
                            unreachable!();
                        }
                    };
                }
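
                // Run the actual indexing only if at least one task made it
                // through without error; if every payload failed there is
                // nothing to apply to the index.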
                if tasks.iter().any(|res| res.error.is_none()) {
                    pool.install(|| {
                        indexer::index(
                            index_wtxn,
                            index,
                            indexer_config.grenad_parameters(),
                            &db_fields_ids_map,
                            new_fields_ids_map,
                            primary_key,
                            &document_changes,
                            embedders,
                            &|| must_stop_processing.get(),
                            &send_progress,
                        )
                    })
                    .unwrap()?;
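                    // The outer `unwrap` covers the `ThreadPoolNoAbort` wrapper,
                    // which catches panics instead of aborting the process, while
                    // the inner `?` propagates errors from the indexing itself.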

                    tracing::info!(indexing_result = ?addition, processed_in = ?started_processing_at.elapsed(), "document indexing done");
                }
                // else if primary_key_has_been_set {
                //     // Everything failed but we've set a primary key.
                //     // We need to remove it.
                //     let mut builder =
                //         milli::update::Settings::new(index_wtxn, index, indexer_config);
                //     builder.reset_primary_key();
                //     builder.execute(
                //         |indexing_step| tracing::trace!(update = ?indexing_step),
                //         || must_stop_processing.clone().get(),
                //     )?;
                // }

                Ok(tasks)
            }
            IndexOperation::DocumentEdition { mut task, .. } => {
                let (filter, context, code) =
                    if let KindWithContent::DocumentEdition {
                        filter_expr, context, function, ..
                    } = &task.kind
                    {
                        (filter_expr, context, function)
                    } else {
                        unreachable!()
                    };

                let candidates = match filter.as_ref().map(Filter::from_json) {
                    Some(Ok(Some(filter))) => {
                        filter.evaluate(index_wtxn, index).map_err(|err| match err {
                            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
                                Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
                            }
                            e => e.into(),
                        })?
                    }
                    None | Some(Ok(None)) => index.documents_ids(index_wtxn)?,
                    Some(Err(e)) => return Err(e.into()),
                };
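
                // `candidates` is the set of internal document ids the edition
                // function will run on: the documents matching the filter, or
                // the whole index when no filter was provided.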

                let (original_filter, context, function) = if let Some(Details::DocumentEdition {
                    original_filter,
                    context,
                    function,
                    ..
                }) = task.details
                {
                    (original_filter, context, function)
                } else {
                    // In the case of a `documentEdition` the details MUST be set
                    unreachable!();
                };

                if candidates.is_empty() {
                    task.status = Status::Succeeded;
                    task.details = Some(Details::DocumentEdition {
                        original_filter,
                        context,
                        function,
                        deleted_documents: Some(0),
                        edited_documents: Some(0),
                    });

                    return Ok(vec![task]);
                }

                let rtxn = index.read_txn()?;
                let db_fields_ids_map = index.fields_ids_map(&rtxn)?;
                let mut new_fields_ids_map = db_fields_ids_map.clone();
                // candidates not empty => index not empty => a primary key is set
                let primary_key = index.primary_key(&rtxn)?.unwrap();

                let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
                    .map_err(milli::Error::from)?;

                let result_count: Result<_> = Ok((candidates.len(), candidates.len()));
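                // Both counts default to the number of candidates: each selected
                // document is rewritten, replacing its previous version.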

                if task.error.is_none() {
                    let local_pool;
                    let indexer_config = self.index_mapper.indexer_config();
                    let pool = match &indexer_config.thread_pool {
                        Some(pool) => pool,
                        None => {
                            local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
                            &local_pool
                        }
                    };

                    pool.install(|| {
                        let indexer =
                            UpdateByFunction::new(candidates, context.clone(), code.clone());
                        let document_changes = indexer.into_changes(&primary_key)?;
                        let embedders = index.embedding_configs(index_wtxn)?;
                        let embedders = self.embedders(embedders)?;

                        indexer::index(
                            index_wtxn,
                            index,
                            indexer_config.grenad_parameters(),
                            &db_fields_ids_map,
                            new_fields_ids_map,
                            None, // cannot change primary key in DocumentEdition
                            &document_changes,
                            embedders,
                            &|| must_stop_processing.get(),
                            &send_progress,
                        )?;

                        Result::Ok(())
                    })
                    .unwrap()?;

                    // tracing::info!(indexing_result = ?addition, processed_in = ?started_processing_at.elapsed(), "document indexing done");
                }

                match result_count {
                    Ok((deleted_documents, edited_documents)) => {
                        task.status = Status::Succeeded;
                        task.details = Some(Details::DocumentEdition {
                            original_filter,
                            context,
                            function,
                            deleted_documents: Some(deleted_documents),
                            edited_documents: Some(edited_documents),
                        });
                    }
                    Err(e) => {
                        task.status = Status::Failed;
                        task.details = Some(Details::DocumentEdition {
                            original_filter,
                            context,
                            function,
                            deleted_documents: Some(0),
                            edited_documents: Some(0),
                        });
                        task.error = Some(e.into());
                    }
                }

                Ok(vec![task])
            }
            IndexOperation::DocumentDeletion { mut tasks, index_uid: _ } => {
                let mut to_delete = RoaringBitmap::new();
                let external_documents_ids = index.external_documents_ids();

                for task in tasks.iter_mut() {
                    let before = to_delete.len();
                    task.status = Status::Succeeded;

                    match &task.kind {
                        KindWithContent::DocumentDeletion { index_uid: _, documents_ids } => {
                            for id in documents_ids {
                                if let Some(id) = external_documents_ids.get(index_wtxn, id)? {
                                    to_delete.insert(id);
                                }
                            }
                            let will_be_removed = to_delete.len() - before;
                            task.details = Some(Details::DocumentDeletion {
                                provided_ids: documents_ids.len(),
                                deleted_documents: Some(will_be_removed),
                            });
                        }
                        KindWithContent::DocumentDeletionByFilter { index_uid: _, filter_expr } => {
                            let before = to_delete.len();
                            let filter = match Filter::from_json(filter_expr) {
                                Ok(filter) => filter,
                                Err(err) => {
                                    // Theoretically, this should be caught by deserr before
                                    // reaching the index scheduler and cannot happen.
                                    task.status = Status::Failed;
                                    task.error = match err {
                                        milli::Error::UserError(
                                            milli::UserError::InvalidFilterExpression { .. },
                                        ) => Some(
                                            Error::from(err)
                                                .with_custom_error_code(Code::InvalidDocumentFilter)
                                                .into(),
                                        ),
                                        e => Some(e.into()),
                                    };
                                    None
                                }
                            };
                            if let Some(filter) = filter {
                                let candidates =
                                    filter.evaluate(index_wtxn, index).map_err(|err| match err {
                                        milli::Error::UserError(
                                            milli::UserError::InvalidFilter(_),
                                        ) => Error::from(err)
                                            .with_custom_error_code(Code::InvalidDocumentFilter),
                                        e => e.into(),
                                    });
                                match candidates {
                                    Ok(candidates) => to_delete |= candidates,
                                    Err(err) => {
                                        task.status = Status::Failed;
                                        task.error = Some(err.into());
                                    }
                                };
                            }
                            let will_be_removed = to_delete.len() - before;
                            if let Some(Details::DocumentDeletionByFilter {
                                original_filter: _,
                                deleted_documents,
                            }) = &mut task.details
                            {
                                *deleted_documents = Some(will_be_removed);
                            } else {
                                // In the case of a `documentDeletionByFilter` the details MUST be set
                                unreachable!()
                            }
                        }
                        _ => unreachable!(),
                    }
                }
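
                // At this point `to_delete` holds the union of the internal ids
                // targeted by every deletion task of the batch; the deletion
                // itself is performed once, below.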
                if to_delete.is_empty() {
                    return Ok(tasks);
                }

                let rtxn = index.read_txn()?;
                let db_fields_ids_map = index.fields_ids_map(&rtxn)?;
                let mut new_fields_ids_map = db_fields_ids_map.clone();

                // to_delete not empty => index not empty => primary key set
                let primary_key = index.primary_key(&rtxn)?.unwrap();

                let primary_key = PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
                    .map_err(milli::Error::from)?;
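
                // Mirror of the success check in the document-operation arm:
                // skip the indexing pass only when every task already failed.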
                if !tasks.iter().all(|res| res.error.is_some()) {
                    let local_pool;
                    let indexer_config = self.index_mapper.indexer_config();
                    let pool = match &indexer_config.thread_pool {
                        Some(pool) => pool,
                        None => {
                            local_pool = ThreadPoolNoAbortBuilder::new().build().unwrap();
                            &local_pool
                        }
                    };

                    let mut indexer = indexer::DocumentDeletion::new();
                    indexer.delete_documents_by_docids(to_delete);
                    let document_changes = indexer.into_changes(&indexer_alloc, primary_key);
                    let embedders = index.embedding_configs(index_wtxn)?;
                    let embedders = self.embedders(embedders)?;

                    pool.install(|| {
                        indexer::index(
                            index_wtxn,
                            index,
                            indexer_config.grenad_parameters(),
                            &db_fields_ids_map,
                            new_fields_ids_map,
                            None, // document deletion never changes primary key
                            &document_changes,
                            embedders,
                            &|| must_stop_processing.get(),
                            &send_progress,
                        )
                    })
                    .unwrap()?;

                    // tracing::info!(indexing_result = ?addition, processed_in = ?started_processing_at.elapsed(), "document indexing done");
                }

                Ok(tasks)
            }
            IndexOperation::Settings { index_uid: _, settings, mut tasks } => {
                let indexer_config = self.index_mapper.indexer_config();
                let mut builder = milli::update::Settings::new(index_wtxn, index, indexer_config);

                for (task, (_, settings)) in tasks.iter_mut().zip(settings) {
                    let checked_settings = settings.clone().check();
                    task.details = Some(Details::SettingsUpdate { settings: Box::new(settings) });
                    apply_settings_to_builder(&checked_settings, &mut builder);

                    // We can apply the status right now: if an update fails later,
                    // the whole batch will be marked as failed.
                    task.status = Status::Succeeded;
                }

                builder.execute(
                    |indexing_step| tracing::debug!(update = ?indexing_step),
                    || must_stop_processing.get(),
                )?;

                Ok(tasks)
            }
            IndexOperation::SettingsAndDocumentOperation {
                index_uid,
                primary_key,
                method,
                documents_counts,
                operations,
                document_import_tasks,
                settings,
                settings_tasks,
            } => {
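                // The settings are applied first so that the document import
                // below is indexed under the new configuration.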
                let settings_tasks = self.apply_index_operation(
                    index_wtxn,
                    index,
                    IndexOperation::Settings {
                        index_uid: index_uid.clone(),
                        settings,
                        tasks: settings_tasks,
                    },
                )?;

                let mut import_tasks = self.apply_index_operation(
                    index_wtxn,
                    index,
                    IndexOperation::DocumentOperation {
                        index_uid,
                        primary_key,
                        method,
                        documents_counts,
                        operations,
                        tasks: document_import_tasks,
                    },
                )?;

                let mut tasks = settings_tasks;
                tasks.append(&mut import_tasks);
                Ok(tasks)
            }
            IndexOperation::DocumentClearAndSetting {
                index_uid,
                cleared_tasks,
                settings,
                settings_tasks,
            } => {
                let mut import_tasks = self.apply_index_operation(
                    index_wtxn,
                    index,
                    IndexOperation::DocumentClear {
                        index_uid: index_uid.clone(),
                        tasks: cleared_tasks,
                    },
                )?;

                let settings_tasks = self.apply_index_operation(
                    index_wtxn,
                    index,
                    IndexOperation::Settings { index_uid, settings, tasks: settings_tasks },
                )?;

                let mut tasks = settings_tasks;
                tasks.append(&mut import_tasks);
                Ok(tasks)
            }
        }
    }

    /// Delete each given task from all the databases (if it is deletable).
    ///
    /// Return the set of tasks that were actually deleted.
    fn delete_matched_tasks(
        &self,
        wtxn: &mut RwTxn,
        matched_tasks: &RoaringBitmap,
    ) -> Result<RoaringBitmap> {
        // 1. Remove from this list the tasks that we are not allowed to delete
        let enqueued_tasks = self.get_status(wtxn, Status::Enqueued)?;
        let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();

        let all_task_ids = self.all_task_ids(wtxn)?;
        let mut to_delete_tasks = all_task_ids & matched_tasks;
        to_delete_tasks -= processing_tasks;
        to_delete_tasks -= enqueued_tasks;
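        // Only tasks in a finished state (succeeded, failed, or canceled)
        // remain deletable once the processing and enqueued ones are excluded.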

        // 2. We now have a list of tasks to delete, delete them

        let mut affected_indexes = HashSet::new();
        let mut affected_statuses = HashSet::new();
        let mut affected_kinds = HashSet::new();
        let mut affected_canceled_by = RoaringBitmap::new();

        for task_id in to_delete_tasks.iter() {
            let task = self.get_task(wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;

            affected_indexes.extend(task.indexes().into_iter().map(|x| x.to_owned()));
            affected_statuses.insert(task.status);
            affected_kinds.insert(task.kind.as_kind());
            // Note: don't delete the persisted task data since
            // we can only delete succeeded, failed, and canceled tasks.
            // In each of those cases, the persisted data is supposed to
            // have been deleted already.
            utils::remove_task_datetime(wtxn, self.enqueued_at, task.enqueued_at, task.uid)?;
            if let Some(started_at) = task.started_at {
                utils::remove_task_datetime(wtxn, self.started_at, started_at, task.uid)?;
            }
            if let Some(finished_at) = task.finished_at {
                utils::remove_task_datetime(wtxn, self.finished_at, finished_at, task.uid)?;
            }
            if let Some(canceled_by) = task.canceled_by {
                affected_canceled_by.insert(canceled_by);
            }
        }

        for index in affected_indexes {
            self.update_index(wtxn, &index, |bitmap| *bitmap -= &to_delete_tasks)?;
        }

        for status in affected_statuses {
            self.update_status(wtxn, status, |bitmap| *bitmap -= &to_delete_tasks)?;
        }

        for kind in affected_kinds {
            self.update_kind(wtxn, kind, |bitmap| *bitmap -= &to_delete_tasks)?;
        }

        for task in to_delete_tasks.iter() {
            self.all_tasks.delete(wtxn, &task)?;
        }
        for canceled_by in affected_canceled_by {
            if let Some(mut tasks) = self.canceled_by.get(wtxn, &canceled_by)? {
                tasks -= &to_delete_tasks;
                if tasks.is_empty() {
                    self.canceled_by.delete(wtxn, &canceled_by)?;
                } else {
                    self.canceled_by.put(wtxn, &canceled_by, &tasks)?;
                }
            }
        }

        Ok(to_delete_tasks)
    }

    /// Cancel each given task from all the databases (if it is cancelable).
    ///
    /// Returns the content files that the transaction owner must delete if the commit is successful.
    fn cancel_matched_tasks(
        &self,
        wtxn: &mut RwTxn,
        cancel_task_id: TaskId,
        matched_tasks: &RoaringBitmap,
        previous_started_at: OffsetDateTime,
        previous_processing_tasks: &RoaringBitmap,
    ) -> Result<Vec<Uuid>> {
        let now = OffsetDateTime::now_utc();

        // 1. Remove from this list the tasks that we are not allowed to cancel
        //    Notice that only the _enqueued_ ones are cancelable and we should
        //    have already aborted the indexation of the _processing_ ones
        let cancelable_tasks = self.get_status(wtxn, Status::Enqueued)?;
        let tasks_to_cancel = cancelable_tasks & matched_tasks;

        // 2. We now have a list of tasks to cancel, cancel them
        let mut content_files_to_delete = Vec::new();
        for mut task in self.get_existing_tasks(wtxn, tasks_to_cancel.iter())? {
            if let Some(uuid) = task.content_uuid() {
                content_files_to_delete.push(uuid);
            }
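            // Tasks that were being processed when the cancelation fired keep
            // the `started_at` recorded before the cancelation, so the reported
            // timings stay accurate.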
            if previous_processing_tasks.contains(task.uid) {
                task.started_at = Some(previous_started_at);
            }
            task.status = Status::Canceled;
            task.canceled_by = Some(cancel_task_id);
            task.finished_at = Some(now);
            task.details = task.details.map(|d| d.to_failed());
            self.update_task(wtxn, &task)?;
        }
        self.canceled_by.put(wtxn, &cancel_task_id, &tasks_to_cancel)?;

        Ok(content_files_to_delete)
    }
}