Changes for tracking issue 138

- create a snapshot as well as a dump
- only detect inconsistencies in the facet -> document direction
- mark index as corrupted after creating snapshot and dump
- always abort tasks on indexes marked as corrupted
Louis Dureuil 2024-04-04 10:22:49 +02:00
parent e4f8ee00c8
commit e6ff45e3b9
4 changed files with 71 additions and 8 deletions
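Taken together, the four changes above set up a poison-pill flow: when the post-task facet -> document consistency check fails, the scheduler first preserves evidence (a dump and a snapshot), then writes a "corrupted" marker into the index, and every later task targeting that index aborts before doing any work. Below is a minimal, self-contained sketch of that flow; FakeIndex, handle_inconsistency and process_task are illustrative stand-ins for this note only, not the actual milli / index-scheduler types shown in the diffs.

// Illustrative stand-ins only; the real types live in milli and index-scheduler.
#[derive(Debug, PartialEq)]
enum Error {
    CorruptedIndex,
}

#[derive(Default)]
struct FakeIndex {
    // Stands in for the "corrupted" key written into the index's main database.
    corrupted: bool,
}

impl FakeIndex {
    fn mark_as_corrupted(&mut self) {
        self.corrupted = true;
    }
    fn is_corrupted(&self) -> bool {
        self.corrupted
    }
}

// Reaction to a detected facet -> document inconsistency:
// keep evidence first (dump + snapshot), then poison the index.
fn handle_inconsistency(index: &mut FakeIndex) {
    create_dump();     // placeholder for the dump batch
    create_snapshot(); // placeholder for the snapshot batch
    index.mark_as_corrupted();
}

// Guard run at the start of every task touching an index:
// a poisoned index aborts the task before any write happens.
fn process_task(index: &FakeIndex) -> Result<(), Error> {
    if index.is_corrupted() {
        return Err(Error::CorruptedIndex);
    }
    // ... apply the index operation ...
    Ok(())
}

fn create_dump() {}
fn create_snapshot() {}

fn main() {
    let mut index = FakeIndex::default();
    assert_eq!(process_task(&index), Ok(()));
    handle_inconsistency(&mut index);
    assert_eq!(process_task(&index), Err(Error::CorruptedIndex));
}

The dump and snapshot run before the marker is written, so the preserved data still reflects the inconsistent state; the marker itself is just a key in the index's main database (see the main_key::CORRUPTED addition in the last file below).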

View File

@@ -961,6 +961,13 @@ impl IndexScheduler {
.set_currently_updating_index(Some((index_uid.clone(), index.clone())));
let mut index_wtxn = index.write_txn()?;
if index.is_corrupted(&index_wtxn)? {
tracing::error!("Aborting task due to corrupted index");
index_wtxn.abort();
return Err(crate::Error::CorruptedIndex);
}
let tasks = self.apply_index_operation(&mut index_wtxn, &index, op)?;
index.check_document_facet_consistency(&index_wtxn)?.check();

View File

@@ -138,6 +138,8 @@ pub enum Error {
CreateBatch(Box<Self>),
#[error("Corrupted task queue.")]
CorruptedTaskQueue,
#[error("Corrupted index.")]
CorruptedIndex,
#[error(transparent)]
TaskDatabaseUpdate(Box<Self>),
#[error(transparent)]
@@ -192,6 +194,7 @@ impl Error {
| Error::Anyhow(_) => true,
Error::CreateBatch(_)
| Error::CorruptedTaskQueue
| Error::CorruptedIndex
| Error::TaskDatabaseUpdate(_)
| Error::HeedTransaction(_) => false,
#[cfg(test)]
@@ -242,6 +245,7 @@ impl ErrorCode for Error {
Error::CorruptedDump => Code::Internal,
Error::TaskDatabaseUpdate(_) => Code::Internal,
Error::CreateBatch(_) => Code::Internal,
Error::CorruptedIndex => Code::Internal,
// This one should never be seen by the end user
Error::AbortedTask => Code::Internal,

View File

@@ -1219,8 +1219,48 @@ impl IndexScheduler {
};
match res {
Ok(_) => tracing::info!("Created a dump after failed task"),
Err(error) => tracing::error!(%error, "Could not create a dump after failed task"),
Ok(_) => tracing::info!("Created a dump after panicked task"),
Err(error) => {
tracing::error!(%error, "Could not create a dump after panicked task")
}
}
let snap_batch = batch::Batch::SnapshotCreation(vec![Task {
uid: u32::MAX,
enqueued_at: OffsetDateTime::now_utc(),
started_at: Some(OffsetDateTime::now_utc()),
finished_at: None,
error: None,
canceled_by: None,
details: None,
status: Status::Processing,
kind: KindWithContent::SnapshotCreation,
}]);
let res = {
let cloned_index_scheduler = self.private_clone();
let handle = std::thread::Builder::new()
.name(String::from("batch-operation"))
.spawn(move || cloned_index_scheduler.process_batch(snap_batch))
.unwrap();
handle.join().unwrap_or(Err(Error::ProcessBatchPanicked))
};
match res {
Ok(_) => tracing::info!("Created a snapshot after panicked task"),
Err(error) => {
tracing::error!(%error, "Could not create a snapshot after panicked task")
}
}
{
if let Some(index_uid) = index_uid.as_deref() {
if let Ok(index) = self.index(index_uid) {
let mut index_wtxn = index.write_txn()?;
index.mark_as_corrupted(&mut index_wtxn)?;
index_wtxn.commit()?;
}
}
}
let user = std::env::var("MEILI_LOUIS_PUSHOVER_USER").unwrap();
@@ -1231,7 +1271,7 @@ impl IndexScheduler {
"token": app,
"user": user,
"title": "Issue 138 db inconsistency",
"message": "A dump has been created",
"message": "Dump and snapshot created, the index has been marked as corrupted",
}),
) {
tracing::error!(%error, "could not send pushover")

View File

@@ -67,6 +67,8 @@ pub mod main_key {
pub const PAGINATION_MAX_TOTAL_HITS: &str = "pagination-max-total-hits";
pub const PROXIMITY_PRECISION: &str = "proximity-precision";
pub const EMBEDDING_CONFIGS: &str = "embedding_configs";
pub const CORRUPTED: &str = "corrupted";
}
pub mod db_name {
@@ -1557,6 +1559,14 @@ impl Index {
Ok(DocumentFacetConsistency { documents, facets, facet_exists })
}
pub fn mark_as_corrupted(&self, wtxn: &mut RwTxn<'_>) -> Result<()> {
Ok(self.main.remap_types::<Str, Str>().put(wtxn, main_key::CORRUPTED, "corrupted")?)
}
pub fn is_corrupted(&self, txn: &RoTxn<'_>) -> Result<bool> {
Ok(self.main.remap_types::<Str, Str>().get(txn, main_key::CORRUPTED)?.is_some())
}
}
pub struct DocumentFacetConsistency {
@@ -1568,20 +1578,22 @@ pub struct DocumentFacetConsistency {
impl DocumentFacetConsistency {
pub fn check(&self) {
let mut inconsistencies = 0;
for ((field_name, facet), facet_exists) in self.facets.iter().zip(self.facet_exists.iter())
for ((field_name, facet), _facet_exists) in self.facets.iter().zip(self.facet_exists.iter())
{
if field_name == "_geo" {
continue;
}
let documents = self.documents.clone() & facet_exists;
let missing_in_facets = &documents - facet;
// only check the internal ids missing in documents as it is the grave condition
// let documents = self.documents.clone() & facet_exists;
let documents = self.documents.clone();
// let missing_in_facets = &documents - facet;
let missing_in_documents = facet - documents;
for id in missing_in_facets {
/*for id in missing_in_facets {
tracing::error!(id, field_name, "Missing in facets");
inconsistencies += 1;
}
}*/
for id in missing_in_documents {
tracing::error!(id, field_name, "Missing in documents");
inconsistencies += 1;