2021-05-27 14:30:20 +02:00
|
|
|
use std::fs::File;
|
|
|
|
use std::path::{Path, PathBuf};
|
2021-05-10 20:25:09 +02:00
|
|
|
|
2021-05-30 15:55:17 +02:00
|
|
|
use chrono::{DateTime, Utc};
|
2021-06-23 10:41:55 +02:00
|
|
|
use log::{info, trace, warn};
|
2021-05-10 20:23:12 +02:00
|
|
|
use serde::{Deserialize, Serialize};
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-05-26 20:42:09 +02:00
|
|
|
use loaders::v1::MetadataV1;
|
2021-05-10 20:25:09 +02:00
|
|
|
|
|
|
|
pub use actor::DumpActor;
|
2021-05-10 20:20:36 +02:00
|
|
|
pub use handle_impl::*;
|
2021-05-10 20:25:09 +02:00
|
|
|
pub use message::DumpMsg;
|
2021-12-02 16:03:26 +01:00
|
|
|
use tokio::fs::create_dir_all;
|
|
|
|
use tokio::sync::oneshot;
|
2021-05-10 20:25:09 +02:00
|
|
|
|
2021-10-26 12:34:00 +02:00
|
|
|
use crate::analytics;
|
2021-09-29 12:02:27 +02:00
|
|
|
use crate::compression::{from_tar_gz, to_tar_gz};
|
2021-06-14 21:26:35 +02:00
|
|
|
use crate::index_controller::dump_actor::error::DumpActorError;
|
2021-12-02 16:03:26 +01:00
|
|
|
use crate::index_controller::dump_actor::loaders::{v2, v3, v4};
|
2021-09-21 13:23:22 +02:00
|
|
|
use crate::options::IndexerOpts;
|
2021-12-02 16:03:26 +01:00
|
|
|
use crate::tasks::task::Job;
|
|
|
|
use crate::tasks::TaskStore;
|
|
|
|
use crate::update_file_store::UpdateFileStore;
|
2021-06-14 21:26:35 +02:00
|
|
|
use error::Result;
|
2021-05-26 22:52:06 +02:00
|
|
|
|
|
|
|
mod actor;
|
2021-12-02 16:03:26 +01:00
|
|
|
mod compat;
|
2021-06-15 17:39:07 +02:00
|
|
|
pub mod error;
|
2021-05-26 22:52:06 +02:00
|
|
|
mod handle_impl;
|
|
|
|
mod loaders;
|
|
|
|
mod message;
|
|
|
|
|
2021-05-31 16:03:39 +02:00
|
|
|
/// Name of the metadata file written at the root of every dump archive.
const META_FILE_NAME: &str = "metadata.json";
|
2021-05-27 14:30:20 +02:00
|
|
|
|
2021-09-29 15:24:59 +02:00
|
|
|
/// Metadata stored in `metadata.json` at the root of a v2+ dump archive.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    // Crate version that produced the dump (from CARGO_PKG_VERSION).
    db_version: String,
    // Index database size limit recorded at dump time.
    index_db_size: usize,
    // Update database size limit recorded at dump time.
    update_db_size: usize,
    // UTC timestamp of when the dump was created.
    dump_date: DateTime<Utc>,
}
|
|
|
|
|
|
|
|
impl Metadata {
|
|
|
|
pub fn new(index_db_size: usize, update_db_size: usize) -> Self {
|
|
|
|
Self {
|
|
|
|
db_version: env!("CARGO_PKG_VERSION").to_string(),
|
|
|
|
index_db_size,
|
|
|
|
update_db_size,
|
|
|
|
dump_date: Utc::now(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
/// Handle used to communicate with the dump actor: trigger a new dump or
/// query the status of a previously requested one.
#[async_trait::async_trait]
#[cfg_attr(test, mockall::automock)]
pub trait DumpActorHandle {
    /// Start the creation of a dump
    /// Implementation: [handle_impl::DumpActorHandleImpl::create_dump]
    async fn create_dump(&self) -> Result<DumpInfo>;

    /// Return the status of an already created dump
    /// Implementation: [handle_impl::DumpActorHandleImpl::dump_info]
    async fn dump_info(&self, uid: String) -> Result<DumpInfo>;
}
|
|
|
|
|
2021-04-28 16:43:49 +02:00
|
|
|
/// Versioned dump metadata; the serialized `dumpVersion` tag selects which
/// loader (`v2`/`v3`/`v4`) is used when importing the dump.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "dumpVersion")]
pub enum MetadataVersion {
    // Legacy v1 format, with its own metadata layout.
    V1(MetadataV1),
    // v2 through v4 share the same metadata layout.
    V2(Metadata),
    V3(Metadata),
    V4(Metadata),
}
|
|
|
|
|
2021-09-29 15:24:59 +02:00
|
|
|
impl MetadataVersion {
|
2021-12-02 16:03:26 +01:00
|
|
|
pub fn new_v4(index_db_size: usize, update_db_size: usize) -> Self {
|
2021-09-29 15:24:59 +02:00
|
|
|
let meta = Metadata::new(index_db_size, update_db_size);
|
2021-12-02 16:03:26 +01:00
|
|
|
Self::V4(meta)
|
2021-05-27 10:51:19 +02:00
|
|
|
}
|
2021-09-29 15:41:25 +02:00
|
|
|
|
|
|
|
pub fn db_version(&self) -> &str {
|
|
|
|
match self {
|
|
|
|
Self::V1(meta) => &meta.db_version,
|
2021-12-02 16:03:26 +01:00
|
|
|
Self::V2(meta) | Self::V3(meta) | Self::V4(meta) => &meta.db_version,
|
2021-09-29 15:41:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn version(&self) -> &str {
|
|
|
|
match self {
|
|
|
|
MetadataVersion::V1(_) => "V1",
|
|
|
|
MetadataVersion::V2(_) => "V2",
|
|
|
|
MetadataVersion::V3(_) => "V3",
|
2021-12-02 16:03:26 +01:00
|
|
|
MetadataVersion::V4(_) => "V4",
|
2021-09-29 15:41:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn dump_date(&self) -> Option<&DateTime<Utc>> {
|
|
|
|
match self {
|
|
|
|
MetadataVersion::V1(_) => None,
|
2021-12-02 16:03:26 +01:00
|
|
|
MetadataVersion::V2(meta) | MetadataVersion::V3(meta) | MetadataVersion::V4(meta) => {
|
|
|
|
Some(&meta.dump_date)
|
|
|
|
}
|
2021-09-29 15:41:25 +02:00
|
|
|
}
|
|
|
|
}
|
2021-04-28 16:43:49 +02:00
|
|
|
}
|
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
/// Lifecycle state of a dump, serialized (snake_case) in the status report
/// returned to clients.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
#[serde(rename_all = "snake_case")]
pub enum DumpStatus {
    Done,
    InProgress,
    Failed,
}
|
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
/// Status report for a single dump, serialized (camelCase) to clients.
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DumpInfo {
    // Unique identifier of the dump.
    pub uid: String,
    // Current lifecycle state of the dump.
    pub status: DumpStatus,
    // Error message, only present when the dump failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    // When the dump was started (set at construction).
    started_at: DateTime<Utc>,
    // When the dump finished (success or failure); absent while in progress.
    #[serde(skip_serializing_if = "Option::is_none")]
    finished_at: Option<DateTime<Utc>>,
}
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
impl DumpInfo {
|
|
|
|
pub fn new(uid: String, status: DumpStatus) -> Self {
|
|
|
|
Self {
|
|
|
|
uid,
|
|
|
|
status,
|
|
|
|
error: None,
|
2021-05-30 15:55:17 +02:00
|
|
|
started_at: Utc::now(),
|
|
|
|
finished_at: None,
|
2021-05-10 20:25:09 +02:00
|
|
|
}
|
2021-05-05 14:11:56 +02:00
|
|
|
}
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
pub fn with_error(&mut self, error: String) {
|
|
|
|
self.status = DumpStatus::Failed;
|
2021-05-30 15:55:17 +02:00
|
|
|
self.finished_at = Some(Utc::now());
|
2021-05-24 17:33:42 +02:00
|
|
|
self.error = Some(error);
|
2021-05-05 14:11:56 +02:00
|
|
|
}
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
pub fn done(&mut self) {
|
2021-05-30 15:55:17 +02:00
|
|
|
self.finished_at = Some(Utc::now());
|
2021-05-10 20:25:09 +02:00
|
|
|
self.status = DumpStatus::Done;
|
|
|
|
}
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-05-10 20:25:09 +02:00
|
|
|
pub fn dump_already_in_progress(&self) -> bool {
|
|
|
|
self.status == DumpStatus::InProgress
|
|
|
|
}
|
2021-04-28 16:43:49 +02:00
|
|
|
}
|
|
|
|
|
2021-05-27 14:30:20 +02:00
|
|
|
pub fn load_dump(
|
2021-05-26 20:42:09 +02:00
|
|
|
dst_path: impl AsRef<Path>,
|
|
|
|
src_path: impl AsRef<Path>,
|
2021-05-31 16:40:59 +02:00
|
|
|
index_db_size: usize,
|
|
|
|
update_db_size: usize,
|
2021-05-26 22:52:06 +02:00
|
|
|
indexer_opts: &IndexerOpts,
|
2021-06-15 17:39:07 +02:00
|
|
|
) -> anyhow::Result<()> {
|
2021-09-29 12:34:39 +02:00
|
|
|
// Setup a temp directory path in the same path as the database, to prevent cross devices
|
|
|
|
// references.
|
2021-09-29 15:41:25 +02:00
|
|
|
let temp_path = dst_path
|
|
|
|
.as_ref()
|
|
|
|
.parent()
|
|
|
|
.map(ToOwned::to_owned)
|
|
|
|
.unwrap_or_else(|| ".".into());
|
2021-09-29 12:34:39 +02:00
|
|
|
if cfg!(windows) {
|
|
|
|
std::env::set_var("TMP", temp_path);
|
|
|
|
} else {
|
|
|
|
std::env::set_var("TMPDIR", temp_path);
|
|
|
|
}
|
|
|
|
|
2021-09-28 18:10:09 +02:00
|
|
|
let tmp_src = tempfile::tempdir()?;
|
2021-05-27 14:30:20 +02:00
|
|
|
let tmp_src_path = tmp_src.path();
|
|
|
|
|
2021-09-29 12:02:27 +02:00
|
|
|
from_tar_gz(&src_path, tmp_src_path)?;
|
2021-05-27 14:30:20 +02:00
|
|
|
|
|
|
|
let meta_path = tmp_src_path.join(META_FILE_NAME);
|
2021-05-26 20:42:09 +02:00
|
|
|
let mut meta_file = File::open(&meta_path)?;
|
2021-09-29 15:24:59 +02:00
|
|
|
let meta: MetadataVersion = serde_json::from_reader(&mut meta_file)?;
|
2021-04-28 16:43:49 +02:00
|
|
|
|
2021-09-28 18:10:09 +02:00
|
|
|
let tmp_dst = tempfile::tempdir()?;
|
2021-05-31 10:42:31 +02:00
|
|
|
|
2021-09-29 15:41:25 +02:00
|
|
|
info!(
|
|
|
|
"Loading dump {}, dump database version: {}, dump version: {}",
|
|
|
|
meta.dump_date()
|
|
|
|
.map(|t| format!("from {}", t))
|
|
|
|
.unwrap_or_else(String::new),
|
|
|
|
meta.db_version(),
|
|
|
|
meta.version()
|
|
|
|
);
|
2021-09-29 12:34:39 +02:00
|
|
|
|
2021-05-26 20:42:09 +02:00
|
|
|
match meta {
|
2021-12-02 16:03:26 +01:00
|
|
|
MetadataVersion::V1(_meta) => {
|
|
|
|
anyhow::bail!("This version (v1) of the dump is too old to be imported.")
|
|
|
|
// meta.load_dump(&tmp_src_path, tmp_dst.path(), index_db_size, indexer _opts)?
|
2021-05-31 16:03:39 +02:00
|
|
|
}
|
2021-09-29 15:41:25 +02:00
|
|
|
MetadataVersion::V2(meta) => v2::load_dump(
|
|
|
|
meta,
|
|
|
|
&tmp_src_path,
|
|
|
|
tmp_dst.path(),
|
|
|
|
index_db_size,
|
|
|
|
update_db_size,
|
|
|
|
indexer_opts,
|
|
|
|
)?,
|
2021-09-29 15:24:59 +02:00
|
|
|
MetadataVersion::V3(meta) => v3::load_dump(
|
|
|
|
meta,
|
2021-05-27 14:30:20 +02:00
|
|
|
&tmp_src_path,
|
2021-05-31 10:42:31 +02:00
|
|
|
tmp_dst.path(),
|
2021-05-27 14:30:20 +02:00
|
|
|
index_db_size,
|
|
|
|
update_db_size,
|
|
|
|
indexer_opts,
|
|
|
|
)?,
|
2021-12-02 16:03:26 +01:00
|
|
|
MetadataVersion::V4(meta) => v4::load_dump(
|
|
|
|
meta,
|
|
|
|
&tmp_src_path,
|
|
|
|
tmp_dst.path(),
|
|
|
|
index_db_size,
|
|
|
|
update_db_size,
|
|
|
|
indexer_opts,
|
|
|
|
)?,
|
2021-05-11 13:03:47 +02:00
|
|
|
}
|
2021-05-31 10:42:31 +02:00
|
|
|
// Persist and atomically rename the db
|
|
|
|
let persisted_dump = tmp_dst.into_path();
|
|
|
|
if dst_path.as_ref().exists() {
|
|
|
|
warn!("Overwriting database at {}", dst_path.as_ref().display());
|
|
|
|
std::fs::remove_dir_all(&dst_path)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::fs::rename(&persisted_dump, &dst_path)?;
|
2021-05-06 18:44:16 +02:00
|
|
|
|
2021-04-28 16:43:49 +02:00
|
|
|
Ok(())
|
|
|
|
}
|
2021-05-27 14:30:20 +02:00
|
|
|
|
2021-12-02 16:03:26 +01:00
|
|
|
/// All the data needed to perform one dump: where to write the archive, what
/// to dump, and the size limits to record in the dump metadata.
struct DumpJob {
    // Directory where the final `<uid>.dump` archive is written.
    dump_path: PathBuf,
    // Database directory; used to copy the analytics user id into the dump.
    db_path: PathBuf,
    // Update file store whose contents are dumped alongside the tasks.
    update_file_store: UpdateFileStore,
    // Task store that performs the dump job and dumps its own tasks.
    task_store: TaskStore,
    // Unique identifier of this dump; becomes the archive's file stem.
    uid: String,
    // Update database size limit recorded in the dump metadata.
    update_db_size: usize,
    // Index database size limit recorded in the dump metadata.
    index_db_size: usize,
}
|
|
|
|
|
2021-12-02 16:03:26 +01:00
|
|
|
impl DumpJob {
    /// Runs the dump to completion: writes the v4 metadata file into a temp
    /// directory, asks the task store to dump itself there, then archives the
    /// directory as `<dump_path>/<uid>.dump`.
    async fn run(self) -> Result<()> {
        trace!("Performing dump.");

        create_dir_all(&self.dump_path).await?;

        // TempDir creation touches the filesystem, so run it off the async runtime.
        let temp_dump_dir = tokio::task::spawn_blocking(tempfile::TempDir::new).await??;
        let temp_dump_path = temp_dump_dir.path().to_owned();

        // Current dumps are written in the v4 format.
        let meta = MetadataVersion::new_v4(self.index_db_size, self.update_db_size);
        let meta_path = temp_dump_path.join(META_FILE_NAME);
        let mut meta_file = File::create(&meta_path)?;
        serde_json::to_writer(&mut meta_file, &meta)?;
        analytics::copy_user_id(&self.db_path, &temp_dump_path);

        create_dir_all(&temp_dump_path.join("indexes")).await?;

        // Register a dump job with the task store and wait until it signals
        // completion before dumping the task store's own contents.
        let (sender, receiver) = oneshot::channel();

        self.task_store
            .register_job(Job::Dump {
                ret: sender,
                path: temp_dump_path.clone(),
            })
            .await;
        receiver.await??;
        self.task_store
            .dump(&temp_dump_path, self.update_file_store.clone())
            .await?;

        // Archiving is blocking I/O; run it on the blocking thread pool.
        let dump_path = tokio::task::spawn_blocking(move || -> Result<PathBuf> {
            // for now we simply copy the updates/updates_files
            // FIXME: We may copy more files than necessary, if new files are added while we are
            // performing the dump. We need a way to filter them out.

            // The temp file is created inside `dump_path` so persisting it
            // below is a same-directory rename.
            let temp_dump_file = tempfile::NamedTempFile::new_in(&self.dump_path)?;
            to_tar_gz(temp_dump_path, temp_dump_file.path())
                .map_err(|e| DumpActorError::Internal(e.into()))?;

            let dump_path = self.dump_path.join(self.uid).with_extension("dump");
            temp_dump_file.persist(&dump_path)?;

            Ok(dump_path)
        })
        .await??;

        info!("Created dump in {:?}.", dump_path);

        Ok(())
    }
}
|
2021-10-05 13:53:22 +02:00
|
|
|
|
|
|
|
#[cfg(test)]
mod test {
    use std::collections::HashSet;

    use futures::future::{err, ok};
    use nelson::Mocker;
    use once_cell::sync::Lazy;
    use uuid::Uuid;

    use super::*;
    use crate::index::error::Result as IndexResult;
    use crate::index::Index;
    use crate::index_resolver::error::IndexResolverError;
    use crate::index_resolver::index_store::MockIndexStore;
    use crate::index_resolver::meta_store::MockIndexMetaStore;
    use crate::update_file_store::UpdateFileStore;

    /// Points the temp-dir env var at the current directory, once per
    /// process, so tempfiles created by the dump land on the local
    /// filesystem.
    fn setup() {
        static SETUP: Lazy<()> = Lazy::new(|| {
            if cfg!(windows) {
                std::env::set_var("TMP", ".");
            } else {
                std::env::set_var("TMPDIR", ".");
            }
        });

        // just deref to make sure the env is setup
        *SETUP
    }

    // Happy path: all mocked stores succeed, so the dump job must complete.
    #[actix_rt::test]
    #[ignore]
    async fn test_dump_normal() {
        setup();

        let tmp = tempfile::tempdir().unwrap();

        // Four fake index uuids the mocked store is expected to hand back.
        let uuids = std::iter::repeat_with(Uuid::new_v4)
            .take(4)
            .collect::<HashSet<_>>();
        let mut uuid_store = MockIndexMetaStore::new();
        uuid_store
            .expect_dump()
            .once()
            .returning(move |_| Box::pin(ok(())));

        // Each retrieved index reports one of the expected uuids and accepts
        // exactly one dump call.
        let mut index_store = MockIndexStore::new();
        index_store.expect_get().times(4).returning(move |uuid| {
            let mocker = Mocker::default();
            let uuids_clone = uuids.clone();
            mocker.when::<(), Uuid>("uuid").once().then(move |_| {
                assert!(uuids_clone.contains(&uuid));
                uuid
            });
            mocker
                .when::<&Path, IndexResult<()>>("dump")
                .once()
                .then(move |_| Ok(()));
            Box::pin(ok(Some(Index::mock(mocker))))
        });

        let mocker = Mocker::default();
        let update_file_store = UpdateFileStore::mock(mocker);

        //let update_sender =
        // create_update_handler(index_resolver.clone(), tmp.path(), 4096 * 100).unwrap();

        //TODO: fix dump tests
        let mocker = Mocker::default();
        let task_store = TaskStore::mock(mocker);

        let task = DumpJob {
            dump_path: tmp.path().into(),
            // this should do nothing
            update_file_store,
            db_path: tmp.path().into(),
            task_store,
            uid: String::from("test"),
            update_db_size: 4096 * 10,
            index_db_size: 4096 * 10,
        };

        task.run().await.unwrap();
    }

    // Failure path: the meta store's dump errors out, so the job must fail.
    #[actix_rt::test]
    #[ignore]
    async fn error_performing_dump() {
        let tmp = tempfile::tempdir().unwrap();

        let mut uuid_store = MockIndexMetaStore::new();
        uuid_store
            .expect_dump()
            .once()
            .returning(move |_| Box::pin(err(IndexResolverError::ExistingPrimaryKey)));

        let mocker = Mocker::default();
        let file_store = UpdateFileStore::mock(mocker);

        let mocker = Mocker::default();
        let task_store = TaskStore::mock(mocker);

        let task = DumpJob {
            dump_path: tmp.path().into(),
            // this should do nothing
            db_path: tmp.path().into(),
            update_file_store: file_store,
            task_store,
            uid: String::from("test"),
            update_db_size: 4096 * 10,
            index_db_size: 4096 * 10,
        };

        assert!(task.run().await.is_err());
    }
}
|