MeiliSearch/meilisearch-lib/src/index_controller/dump_actor/mod.rs

use std::fs::File;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::bail;
use chrono::{DateTime, Utc};
use log::{info, trace};
use serde::{Deserialize, Serialize};

pub use actor::DumpActor;
pub use handle_impl::*;
use meilisearch_auth::AuthController;
pub use message::DumpMsg;
use tempfile::TempDir;
use tokio::fs::create_dir_all;
use tokio::sync::{oneshot, RwLock};

use crate::analytics;
use crate::compression::{from_tar_gz, to_tar_gz};
use crate::index_controller::dump_actor::error::DumpActorError;
use crate::index_controller::dump_actor::loaders::{v2, v3, v4};
use crate::options::IndexerOpts;
use crate::tasks::task::Job;
use crate::tasks::Scheduler;
use crate::update_file_store::UpdateFileStore;
use error::Result;

mod actor;
mod compat;
pub mod error;
mod handle_impl;
mod loaders;
mod message;

const META_FILE_NAME: &str = "metadata.json";
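
/// Metadata stored in `metadata.json` at the root of a dump: the Meilisearch version that
/// produced the dump, the configured database sizes, and the creation date.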
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    db_version: String,
    index_db_size: usize,
    update_db_size: usize,
    dump_date: DateTime<Utc>,
}

impl Metadata {
    pub fn new(index_db_size: usize, update_db_size: usize) -> Self {
        Self {
            db_version: env!("CARGO_PKG_VERSION").to_string(),
            index_db_size,
            update_db_size,
            dump_date: Utc::now(),
        }
    }
}
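
/// Handle used by the rest of the application to communicate with the dump actor.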
#[async_trait::async_trait]
#[cfg_attr(test, mockall::automock)]
pub trait DumpActorHandle {
    /// Start the creation of a dump
    /// Implementation: [handle_impl::DumpActorHandleImpl::create_dump]
    async fn create_dump(&self) -> Result<DumpInfo>;

    /// Return the status of an already created dump
    /// Implementation: [handle_impl::DumpActorHandleImpl::dump_info]
    async fn dump_info(&self, uid: String) -> Result<DumpInfo>;
}
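
/// Metadata of the legacy v1 dump format, which only recorded the database version.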
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MetadataV1 {
    pub db_version: String,
}
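
/// Version-tagged dump metadata: the `dumpVersion` field of `metadata.json` selects the
/// variant, which in turn selects the loader used to import the dump. As an illustration
/// (field values are made up), a v4 `metadata.json` should look something like:
///
/// ```json
/// {
///   "dumpVersion": "V4",
///   "dbVersion": "0.25.0",
///   "indexDbSize": 104857600,
///   "updateDbSize": 104857600,
///   "dumpDate": "2022-01-19T11:21:19Z"
/// }
/// ```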
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "dumpVersion")]
pub enum MetadataVersion {
    V1(MetadataV1),
    V2(Metadata),
    V3(Metadata),
    V4(Metadata),
}

impl MetadataVersion {
    pub fn load_dump(
        self,
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        index_db_size: usize,
        meta_env_size: usize,
        indexing_options: &IndexerOpts,
    ) -> anyhow::Result<()> {
        match self {
            MetadataVersion::V1(_meta) => {
                anyhow::bail!("Version 1 of the dumps is no longer supported. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from version 0.25 onwards.")
            }
            MetadataVersion::V2(meta) => v2::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
            MetadataVersion::V3(meta) => v3::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
            MetadataVersion::V4(meta) => v4::load_dump(
                meta,
                src,
                dst,
                index_db_size,
                meta_env_size,
                indexing_options,
            )?,
        }

        Ok(())
    }

    pub fn new_v4(index_db_size: usize, update_db_size: usize) -> Self {
        let meta = Metadata::new(index_db_size, update_db_size);
        Self::V4(meta)
    }

    pub fn db_version(&self) -> &str {
        match self {
            Self::V1(meta) => &meta.db_version,
            Self::V2(meta) | Self::V3(meta) | Self::V4(meta) => &meta.db_version,
        }
    }

    pub fn version(&self) -> &str {
        match self {
            MetadataVersion::V1(_) => "V1",
            MetadataVersion::V2(_) => "V2",
            MetadataVersion::V3(_) => "V3",
            MetadataVersion::V4(_) => "V4",
        }
    }

    pub fn dump_date(&self) -> Option<&DateTime<Utc>> {
        match self {
            MetadataVersion::V1(_) => None,
            MetadataVersion::V2(meta) | MetadataVersion::V3(meta) | MetadataVersion::V4(meta) => {
                Some(&meta.dump_date)
            }
        }
    }
}
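
/// Lifecycle status of a dump task.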
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "snake_case")]
pub enum DumpStatus {
    Done,
    InProgress,
    Failed,
}
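
/// Status report for a single dump: its uid, current status, an optional error message,
/// and start/finish timestamps.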
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DumpInfo {
    pub uid: String,
    pub status: DumpStatus,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    started_at: DateTime<Utc>,
    #[serde(skip_serializing_if = "Option::is_none")]
    finished_at: Option<DateTime<Utc>>,
}

impl DumpInfo {
    pub fn new(uid: String, status: DumpStatus) -> Self {
        Self {
            uid,
            status,
            error: None,
            started_at: Utc::now(),
            finished_at: None,
        }
    }

    pub fn with_error(&mut self, error: String) {
        self.status = DumpStatus::Failed;
        self.finished_at = Some(Utc::now());
        self.error = Some(error);
    }

    pub fn done(&mut self) {
        self.finished_at = Some(Utc::now());
        self.status = DumpStatus::Done;
    }

    pub fn dump_already_in_progress(&self) -> bool {
        self.status == DumpStatus::InProgress
    }
}
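
/// Load the dump archive at `src_path` into the database directory at `dst_path`.
///
/// The dump is only loaded when the target database is empty and the archive exists; the
/// two `ignore_*` flags turn the corresponding error cases into no-ops.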
pub fn load_dump(
    dst_path: impl AsRef<Path>,
    src_path: impl AsRef<Path>,
    ignore_dump_if_db_exists: bool,
    ignore_missing_dump: bool,
    index_db_size: usize,
    update_db_size: usize,
    indexer_opts: &IndexerOpts,
) -> anyhow::Result<()> {
    let empty_db = crate::is_empty_db(&dst_path);
    let src_path_exists = src_path.as_ref().exists();

    if empty_db && src_path_exists {
        let (tmp_src, tmp_dst, meta) = extract_dump(&dst_path, &src_path)?;
        meta.load_dump(
            tmp_src.path(),
            tmp_dst.path(),
            index_db_size,
            update_db_size,
            indexer_opts,
        )?;
        persist_dump(&dst_path, tmp_dst)?;
        Ok(())
    } else if !empty_db && !ignore_dump_if_db_exists {
        bail!(
            "database already exists at {:?}, try deleting or renaming it",
            dst_path
                .as_ref()
                .canonicalize()
                .unwrap_or_else(|_| dst_path.as_ref().to_owned())
        )
    } else if !src_path_exists && !ignore_missing_dump {
        bail!("dump doesn't exist at {:?}", src_path.as_ref())
    } else {
        // There is nothing to do.
        Ok(())
    }
}
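
/// Extract the dump archive into a temporary directory and parse its `metadata.json`.
/// Returns the extracted source, the temporary directory the dump will be loaded into,
/// and the parsed metadata.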
fn extract_dump(
    dst_path: impl AsRef<Path>,
    src_path: impl AsRef<Path>,
) -> anyhow::Result<(TempDir, TempDir, MetadataVersion)> {
    // Set up the temporary directory in the same directory as the database, to prevent
    // cross-device references.
    let temp_path = dst_path
        .as_ref()
        .parent()
        .map(ToOwned::to_owned)
        .unwrap_or_else(|| ".".into());

    if cfg!(windows) {
        std::env::set_var("TMP", temp_path);
    } else {
        std::env::set_var("TMPDIR", temp_path);
    }

    let tmp_src = tempfile::tempdir()?;
    let tmp_src_path = tmp_src.path();

    from_tar_gz(&src_path, tmp_src_path)?;

    let meta_path = tmp_src_path.join(META_FILE_NAME);
    let mut meta_file = File::open(&meta_path)?;
    let meta: MetadataVersion = serde_json::from_reader(&mut meta_file)?;

    if !dst_path.as_ref().exists() {
        std::fs::create_dir_all(dst_path.as_ref())?;
    }

    let tmp_dst = tempfile::tempdir_in(dst_path.as_ref())?;

    info!(
        "Loading dump {}, dump database version: {}, dump version: {}",
        meta.dump_date()
            .map(|t| format!("from {}", t))
            .unwrap_or_else(String::new),
        meta.db_version(),
        meta.version()
    );

    Ok((tmp_src, tmp_dst, meta))
}
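
/// Replace the contents of the database directory with the loaded dump: delete everything
/// in `dst_path` except the tempdir, move the tempdir's contents into place, then remove
/// the now-empty tempdir.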
fn persist_dump(dst_path: impl AsRef<Path>, tmp_dst: TempDir) -> anyhow::Result<()> {
    let persisted_dump = tmp_dst.into_path();

    // Delete everything in `data.ms` except the tempdir.
    if dst_path.as_ref().exists() {
        for file in dst_path.as_ref().read_dir().unwrap() {
            let file = file.unwrap().path();
            if file.file_name() == persisted_dump.file_name() {
                continue;
            }

            if file.is_file() {
                std::fs::remove_file(&file)?;
            } else {
                std::fs::remove_dir_all(&file)?;
            }
        }
    }

    // Move the whole content of the tempdir into `data.ms`.
    for file in persisted_dump.read_dir().unwrap() {
        let file = file.unwrap().path();
        std::fs::rename(&file, &dst_path.as_ref().join(file.file_name().unwrap()))?;
    }

    // Delete the now-empty tempdir.
    std::fs::remove_dir_all(&persisted_dump)?;

    Ok(())
}
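
/// A dump creation job: writes the dump metadata, dumps the auth controller and the
/// scheduler's tasks into a temporary directory, then compresses everything into
/// `<dump_path>/<uid>.dump`.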
struct DumpJob {
    dump_path: PathBuf,
    db_path: PathBuf,
    update_file_store: UpdateFileStore,
    scheduler: Arc<RwLock<Scheduler>>,
    uid: String,
    update_db_size: usize,
    index_db_size: usize,
}

impl DumpJob {
    async fn run(self) -> Result<()> {
        trace!("Performing dump.");

        create_dir_all(&self.dump_path).await?;

        let temp_dump_dir = tokio::task::spawn_blocking(tempfile::TempDir::new).await??;
        let temp_dump_path = temp_dump_dir.path().to_owned();

        let meta = MetadataVersion::new_v4(self.index_db_size, self.update_db_size);
        let meta_path = temp_dump_path.join(META_FILE_NAME);
        let mut meta_file = File::create(&meta_path)?;
        serde_json::to_writer(&mut meta_file, &meta)?;
        analytics::copy_user_id(&self.db_path, &temp_dump_path);

        create_dir_all(&temp_dump_path.join("indexes")).await?;

        let (sender, receiver) = oneshot::channel();

        self.scheduler
            .write()
            .await
            .schedule_job(Job::Dump {
                ret: sender,
                path: temp_dump_path.clone(),
            })
            .await;

        // Wait until the scheduler has started performing the job before finishing the
        // dump process.
        let sender = receiver.await??;

        AuthController::dump(&self.db_path, &temp_dump_path)?;

        // TODO(marin): this is not right, the scheduler should dump itself, not do it here...
        self.scheduler
            .read()
            .await
            .dump(&temp_dump_path, self.update_file_store.clone())
            .await?;

        let dump_path = tokio::task::spawn_blocking(move || -> Result<PathBuf> {
            // For now we simply copy the updates/updates_files.
            // FIXME: We may copy more files than necessary, if new files are added while we are
            // performing the dump. We need a way to filter them out.
            let temp_dump_file = tempfile::NamedTempFile::new_in(&self.dump_path)?;
            to_tar_gz(temp_dump_path, temp_dump_file.path())
                .map_err(|e| DumpActorError::Internal(e.into()))?;

            let dump_path = self.dump_path.join(self.uid).with_extension("dump");
            temp_dump_file.persist(&dump_path)?;

            Ok(dump_path)
        })
        .await??;

        // Notify the update loop that we have finished performing the dump.
        let _ = sender.send(());

        info!("Created dump in {:?}.", dump_path);

        Ok(())
    }
}

#[cfg(test)]
mod test {
    use nelson::Mocker;
    use once_cell::sync::Lazy;

    use super::*;
    use crate::index_resolver::error::IndexResolverError;
    use crate::options::SchedulerConfig;
    use crate::tasks::error::Result as TaskResult;
    use crate::tasks::task::{Task, TaskId};
    use crate::tasks::{MockTaskPerformer, TaskFilter, TaskStore};
    use crate::update_file_store::UpdateFileStore;

    fn setup() {
        static SETUP: Lazy<()> = Lazy::new(|| {
            if cfg!(windows) {
                std::env::set_var("TMP", ".");
            } else {
                std::env::set_var("TMPDIR", ".");
            }
        });

        // Just deref to make sure the env is set up.
        *SETUP
    }

    #[actix_rt::test]
    async fn test_dump_normal() {
        setup();

        let tmp = tempfile::tempdir().unwrap();

        let mocker = Mocker::default();
        let update_file_store = UpdateFileStore::mock(mocker);

        let mut performer = MockTaskPerformer::new();
        performer
            .expect_process_job()
            .once()
            .returning(|j| match j {
                Job::Dump { ret, .. } => {
                    let (sender, _receiver) = oneshot::channel();
                    ret.send(Ok(sender)).unwrap();
                }
                _ => unreachable!(),
            });
        let performer = Arc::new(performer);

        let mocker = Mocker::default();
        mocker
            .when::<(&Path, UpdateFileStore), TaskResult<()>>("dump")
            .then(|_| Ok(()));
        mocker
            .when::<(Option<TaskId>, Option<TaskFilter>, Option<usize>), TaskResult<Vec<Task>>>(
                "list_tasks",
            )
            .then(|_| Ok(Vec::new()));
        let store = TaskStore::mock(mocker);
        let config = SchedulerConfig::default();

        let scheduler = Scheduler::new(store, performer, config).unwrap();

        let task = DumpJob {
            dump_path: tmp.path().into(),
            // This should do nothing.
            update_file_store,
            db_path: tmp.path().into(),
            uid: String::from("test"),
            update_db_size: 4096 * 10,
            index_db_size: 4096 * 10,
            scheduler,
        };

        task.run().await.unwrap();
    }

    #[actix_rt::test]
    async fn error_performing_dump() {
        let tmp = tempfile::tempdir().unwrap();

        let mocker = Mocker::default();
        let file_store = UpdateFileStore::mock(mocker);

        let mocker = Mocker::default();
        mocker
            .when::<(Option<TaskId>, Option<TaskFilter>, Option<usize>), TaskResult<Vec<Task>>>(
                "list_tasks",
            )
            .then(|_| Ok(Vec::new()));
        let task_store = TaskStore::mock(mocker);

        let mut performer = MockTaskPerformer::new();
        performer
            .expect_process_job()
            .once()
            .returning(|job| match job {
                Job::Dump { ret, .. } => drop(ret.send(Err(IndexResolverError::BadlyFormatted(
                    "blabla".to_string(),
                )))),
                _ => unreachable!(),
            });
        let performer = Arc::new(performer);

        let scheduler = Scheduler::new(task_store, performer, SchedulerConfig::default()).unwrap();

        let task = DumpJob {
            dump_path: tmp.path().into(),
            // This should do nothing.
            db_path: tmp.path().into(),
            update_file_store: file_store,
            uid: String::from("test"),
            update_db_size: 4096 * 10,
            index_db_size: 4096 * 10,
            scheduler,
        };

        assert!(task.run().await.is_err());
    }
}