move the version check to the task queue
commit 3ef7a478cd (parent e70ac35e02)
9 changed files with 95 additions and 72 deletions
@@ -147,7 +147,7 @@ pub enum Error {
     #[error("Corrupted task queue.")]
     CorruptedTaskQueue,
     #[error(transparent)]
-    TaskDatabaseUpdate(Box<Self>),
+    TaskDatabaseUpgrade(Box<Self>),
     #[error(transparent)]
     HeedTransaction(heed::Error),
@@ -202,7 +202,7 @@ impl Error {
             | Error::Anyhow(_) => true,
             Error::CreateBatch(_)
             | Error::CorruptedTaskQueue
-            | Error::TaskDatabaseUpdate(_)
+            | Error::TaskDatabaseUpgrade(_)
             | Error::HeedTransaction(_) => false,
             #[cfg(test)]
             Error::PlannedFailure => false,
@@ -266,7 +266,7 @@ impl ErrorCode for Error {
             Error::Anyhow(_) => Code::Internal,
             Error::CorruptedTaskQueue => Code::Internal,
             Error::CorruptedDump => Code::Internal,
-            Error::TaskDatabaseUpdate(_) => Code::Internal,
+            Error::TaskDatabaseUpgrade(_) => Code::Internal,
             Error::CreateBatch(_) => Code::Internal,

             // This one should never be seen by the end user
@@ -369,6 +369,7 @@ impl IndexScheduler {
                 match ret {
                     Ok(Ok(TickOutcome::TickAgain(_))) => (),
                     Ok(Ok(TickOutcome::WaitForSignal)) => run.scheduler.wake_up.wait(),
+                    Ok(Ok(TickOutcome::StopProcessingForever)) => break,
                     Ok(Err(e)) => {
                         tracing::error!("{e}");
                         // Wait one second when an irrecoverable error occurs.
@@ -816,6 +817,8 @@ pub enum TickOutcome {
     TickAgain(u64),
     /// The scheduler should wait for an external signal before attempting another `tick`.
     WaitForSignal,
+    /// The scheduler exits the run-loop and will never process tasks again
+    StopProcessingForever,
 }

 /// How many indexes we can afford to have open simultaneously.
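Taken together, the two hunks above add a third `TickOutcome` variant and teach the run-loop to exit when it sees it. Below is a minimal, self-contained sketch of that control flow; the enum and loop are simplified stand-ins for illustration only, not the scheduler's actual definitions.

enum TickOutcome {
    /// The scheduler should immediately attempt another `tick`.
    TickAgain(u64),
    /// The scheduler should wait for an external signal before attempting another `tick`.
    WaitForSignal,
    /// The scheduler exits the run-loop and will never process tasks again.
    StopProcessingForever,
}

fn run_loop(mut tick: impl FnMut() -> Result<TickOutcome, String>) {
    loop {
        match tick() {
            // Keep ticking as long as the scheduler reports progress.
            Ok(TickOutcome::TickAgain(_)) => continue,
            // Nothing to do: the real scheduler parks here until it is woken up.
            Ok(TickOutcome::WaitForSignal) => std::thread::yield_now(),
            // New in this commit: an unrecoverable failure stops the loop for good.
            Ok(TickOutcome::StopProcessingForever) => break,
            // Recoverable errors are logged and the loop tries again.
            Err(e) => eprintln!("{e}"),
        }
    }
}

fn main() {
    let mut outcomes = vec![
        Ok(TickOutcome::TickAgain(1)),
        Err("transient failure".to_string()),
        Ok(TickOutcome::StopProcessingForever),
    ]
    .into_iter();
    run_loop(move || outcomes.next().unwrap());
}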
@@ -184,6 +184,7 @@ impl IndexScheduler {

         progress.update_progress(BatchProgress::WritingTasksToDisk);
         processing_batch.finished();
+        let mut stop_scheduler_forever = false;
         let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
         let mut canceled = RoaringBitmap::new();
@@ -222,7 +223,7 @@ impl IndexScheduler {
                     self.queue
                         .tasks
                         .update_task(&mut wtxn, &task)
-                        .map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?;
+                        .map_err(|e| Error::TaskDatabaseUpgrade(Box::new(e)))?;
                 }
                 if let Some(canceled_by) = canceled_by {
                     self.queue.tasks.canceled_by.put(&mut wtxn, &canceled_by, &canceled)?;
@@ -273,6 +274,12 @@ impl IndexScheduler {
                 let (task_progress, task_progress_obj) = AtomicTaskStep::new(ids.len() as u32);
                 progress.update_progress(task_progress_obj);

+                if matches!(err, Error::TaskDatabaseUpgrade(_)) {
+                    tracing::error!(
+                        "Upgrade task failed, tasks won't be processed until the following issue is fixed: {err}"
+                    );
+                    stop_scheduler_forever = true;
+                }
                 let error: ResponseError = err.into();
                 for id in ids.iter() {
                     task_progress.fetch_add(1, Ordering::Relaxed);
@@ -280,7 +287,7 @@ impl IndexScheduler {
                         .queue
                         .tasks
                         .get_task(&wtxn, id)
-                        .map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?
+                        .map_err(|e| Error::TaskDatabaseUpgrade(Box::new(e)))?
                         .ok_or(Error::CorruptedTaskQueue)?;
                     task.status = Status::Failed;
                     task.error = Some(error.clone());
@@ -297,7 +304,7 @@ impl IndexScheduler {
                     self.queue
                         .tasks
                         .update_task(&mut wtxn, &task)
-                        .map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?;
+                        .map_err(|e| Error::TaskDatabaseUpgrade(Box::new(e)))?;
                 }
             }
         }
@@ -327,7 +334,7 @@ impl IndexScheduler {
                 .queue
                 .tasks
                 .get_task(&rtxn, id)
-                .map_err(|e| Error::TaskDatabaseUpdate(Box::new(e)))?
+                .map_err(|e| Error::TaskDatabaseUpgrade(Box::new(e)))?
                 .ok_or(Error::CorruptedTaskQueue)?;
             if let Err(e) = self.queue.delete_persisted_task_data(&task) {
                 tracing::error!(
@@ -345,6 +352,10 @@ impl IndexScheduler {
         #[cfg(test)]
         self.breakpoint(crate::test_utils::Breakpoint::AfterProcessing);

-        Ok(TickOutcome::TickAgain(processed_tasks))
+        if stop_scheduler_forever {
+            Ok(TickOutcome::StopProcessingForever)
+        } else {
+            Ok(TickOutcome::TickAgain(processed_tasks))
+        }
     }
 }
@@ -1,4 +1,5 @@
 use std::collections::{BTreeSet, HashMap, HashSet};
+use std::panic::{catch_unwind, AssertUnwindSafe};
 use std::sync::atomic::Ordering;

 use meilisearch_types::batches::BatchId;
@@ -314,7 +315,24 @@ impl IndexScheduler {
                 task.status = Status::Succeeded;
                 Ok(vec![task])
             }
-            Batch::UpgradeDatabase { tasks } => self.process_upgrade(progress, tasks),
+            Batch::UpgradeDatabase { mut tasks } => {
+                let ret = catch_unwind(AssertUnwindSafe(|| self.process_upgrade(progress)));
+                match ret {
+                    Ok(Ok(())) => (),
+                    Ok(Err(e)) => return Err(Error::TaskDatabaseUpgrade(Box::new(e))),
+                    Err(_e) => {
+                        return Err(Error::TaskDatabaseUpgrade(Box::new(
+                            Error::ProcessBatchPanicked,
+                        )));
+                    }
+                }
+
+                for task in tasks.iter_mut() {
+                    task.status = Status::Succeeded;
+                }
+
+                Ok(tasks)
+            }
         }
     }
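The hunk above wraps `process_upgrade` in `catch_unwind` so that a panic during the database upgrade surfaces as a regular `Err` instead of taking down the scheduler. Here is a minimal sketch of that pattern, assuming a hypothetical `DemoError` type rather than the scheduler's own `Error` enum.

use std::panic::{catch_unwind, AssertUnwindSafe};

// Hypothetical error type for illustration only.
#[derive(Debug)]
enum DemoError {
    Panicked,
    Failed(String),
}

fn run_guarded(step: impl FnOnce() -> Result<(), String>) -> Result<(), DemoError> {
    // `AssertUnwindSafe` waives the unwind-safety bound, just as the hunk above
    // does for the closure that borrows `self`.
    match catch_unwind(AssertUnwindSafe(step)) {
        // The step ran to completion.
        Ok(Ok(())) => Ok(()),
        // The step returned an error: forward it.
        Ok(Err(e)) => Err(DemoError::Failed(e)),
        // The step panicked: convert the panic into an ordinary error value.
        Err(_panic_payload) => Err(DemoError::Panicked),
    }
}

fn main() {
    assert!(matches!(run_guarded(|| panic!("boom")), Err(DemoError::Panicked)));
    assert!(run_guarded(|| Ok(())).is_ok());
}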
@@ -1,24 +1,14 @@
 use meilisearch_types::{
     milli,
     milli::progress::{Progress, VariableNameStep},
     tasks::{KindWithContent, Status, Task},
 };

 use crate::{processing::UpgradeDatabaseProgress, Error, IndexScheduler, Result};

 impl IndexScheduler {
-    pub(super) fn process_upgrade(
-        &self,
-        progress: Progress,
-        mut tasks: Vec<Task>,
-    ) -> Result<Vec<Task>> {
+    pub(super) fn process_upgrade(&self, progress: Progress) -> Result<()> {
         progress.update_progress(UpgradeDatabaseProgress::EnsuringCorrectnessOfTheSwap);

-        // Since we should not have multiple upgrade tasks, we're only going to process the latest one:
-        let KindWithContent::UpgradeDatabase { from } = tasks.last().unwrap().kind else {
-            unreachable!()
-        };
-
         enum UpgradeIndex {}
         let indexes = self.index_names()?;
@@ -29,14 +19,10 @@ impl IndexScheduler {
                 indexes.len() as u32,
             ));
             let index = self.index(uid)?;
-            milli::update::upgrade::upgrade(&index, from, progress.clone())
+            milli::update::upgrade::upgrade(&index, progress.clone())
                 .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
         }

-        for task in tasks.iter_mut() {
-            task.status = Status::Succeeded;
-        }
-
-        Ok(tasks)
+        Ok(())
     }
 }
@@ -1,16 +1,53 @@
 use std::path::Path;

+use anyhow::bail;
 use meilisearch_types::{
     heed,
     tasks::{KindWithContent, Status, Task},
     versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH},
 };
 use time::OffsetDateTime;
+use tracing::info;

 use crate::queue::TaskQueue;

-pub fn upgrade_task_queue(tasks_path: &Path, version: (u32, u32, u32)) -> anyhow::Result<()> {
+pub fn upgrade_task_queue(tasks_path: &Path, from: (u32, u32, u32)) -> anyhow::Result<()> {
+    let current_major: u32 = VERSION_MAJOR.parse().unwrap();
+    let current_minor: u32 = VERSION_MINOR.parse().unwrap();
+    let current_patch: u32 = VERSION_PATCH.parse().unwrap();
+
+    let upgrade_functions =
+        [(v1_12_to_current as fn(&Path) -> anyhow::Result<()>, "Upgrading from v1.12 to v1.13")];
+
+    let start = match from {
+        (1, 12, _) => 0,
+        (major, minor, patch) => {
+            if major > current_major
+                || (major == current_major && minor > current_minor)
+                || (major == current_major && minor == current_minor && patch > current_patch)
+            {
+                bail!(
+                    "Database version {major}.{minor}.{patch} is higher than the binary version {current_major}.{current_minor}.{current_patch}. Downgrade is not supported",
+                );
+            } else if major < current_major
+                || (major == current_major && minor < current_minor)
+                || (major == current_major && minor == current_minor && patch < current_patch)
+            {
+                bail!(
+                    "Database version {major}.{minor}.{patch} is too old for the experimental dumpless upgrade feature. Please generate a dump using the v{major}.{minor}.{patch} and import it in the v{current_major}.{current_minor}.{current_patch}",
+                );
+            } else {
+                bail!("Unknown database version: v{major}.{minor}.{patch}");
+            }
+        }
+    };
+
+    info!("Upgrading the task queue");
+    for (upgrade, upgrade_name) in upgrade_functions[start..].iter() {
+        info!("{upgrade_name}");
+        (upgrade)(tasks_path)?;
+    }
+
     let env = unsafe {
         heed::EnvOpenOptions::new()
             .max_dbs(19)
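The chained major/minor/patch comparisons above implement a lexicographic version check by hand. As an aside, Rust tuples of equal arity already compare lexicographically, so the "database newer than binary" part of the check could be sketched as below. This is an illustration only, under the assumption of a plain (u32, u32, u32) version; it deliberately leaves out the `(1, 12, _)` fast path, the "too old" branch, and the dispatch into `upgrade_functions`.

use std::cmp::Ordering;

// Reject databases that are newer than the running binary; `(u32, u32, u32)`
// compares major first, then minor, then patch.
fn ensure_not_downgrade(from: (u32, u32, u32), current: (u32, u32, u32)) -> Result<(), String> {
    match from.cmp(&current) {
        Ordering::Greater => Err(format!(
            "Database version {}.{}.{} is higher than the binary version {}.{}.{}. Downgrade is not supported",
            from.0, from.1, from.2, current.0, current.1, current.2
        )),
        Ordering::Less | Ordering::Equal => Ok(()),
    }
}

fn main() {
    assert!(ensure_not_downgrade((1, 14, 0), (1, 13, 0)).is_err());
    assert!(ensure_not_downgrade((1, 12, 3), (1, 13, 0)).is_ok());
}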
@@ -33,7 +70,7 @@ pub fn upgrade_task_queue(tasks_path: &Path, version: (u32, u32, u32)) -> anyhow
             canceled_by: None,
             details: None,
             status: Status::Enqueued,
-            kind: KindWithContent::UpgradeDatabase { from: version },
+            kind: KindWithContent::UpgradeDatabase { from },
         },
     )?;
     wtxn.commit()?;
@@ -41,3 +78,8 @@ pub fn upgrade_task_queue(tasks_path: &Path, version: (u32, u32, u32)) -> anyhow
     env.prepare_for_closing().wait();
     Ok(())
 }
+
+/// The task queue is 100% compatible with the previous versions
+fn v1_12_to_current(_path: &Path) -> anyhow::Result<()> {
+    Ok(())
+}