Mirror of https://github.com/meilisearch/MeiliSearch
rename folder to dir

commit 834f3cc192 (parent e049aead16)
9 changed files with 64 additions and 64 deletions
@@ -27,7 +27,7 @@ impl Deref for Data {
 pub struct DataInner {
     pub db: Arc<Database>,
     pub db_path: String,
-    pub dumps_folder: PathBuf,
+    pub dumps_dir: PathBuf,
     pub dump_batch_size: usize,
     pub api_keys: ApiKeys,
     pub server_pid: u32,
@@ -61,7 +61,7 @@ impl ApiKeys {
 impl Data {
     pub fn new(opt: Opt) -> Result<Data, Box<dyn Error>> {
         let db_path = opt.db_path.clone();
-        let dumps_folder = opt.dumps_folder.clone();
+        let dumps_dir = opt.dumps_dir.clone();
         let dump_batch_size = opt.dump_batch_size;
         let server_pid = std::process::id();
 
@@ -85,7 +85,7 @@ impl Data {
         let inner_data = DataInner {
             db: db.clone(),
             db_path,
-            dumps_folder,
+            dumps_dir,
             dump_batch_size,
             api_keys,
             server_pid,
@@ -52,9 +52,9 @@ impl DumpMetadata {
         }
     }
 
-    /// Extract DumpMetadata from `metadata.json` file present at provided `folder_path`
-    fn from_path(folder_path: &Path) -> Result<Self, Error> {
-        let path = folder_path.join("metadata.json");
+    /// Extract DumpMetadata from `metadata.json` file present at provided `dir_path`
+    fn from_path(dir_path: &Path) -> Result<Self, Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::open(path)?;
         let reader = std::io::BufReader::new(file);
         let metadata = serde_json::from_reader(reader)?;
@@ -62,9 +62,9 @@ impl DumpMetadata {
         Ok(metadata)
     }
 
-    /// Write DumpMetadata in `metadata.json` file at provided `folder_path`
-    fn to_path(&self, folder_path: &Path) -> Result<(), Error> {
-        let path = folder_path.join("metadata.json");
+    /// Write DumpMetadata in `metadata.json` file at provided `dir_path`
+    fn to_path(&self, dir_path: &Path) -> Result<(), Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::create(path)?;
 
         serde_json::to_writer(file, &self)?;
@@ -73,9 +73,9 @@ impl DumpMetadata {
     }
 }
 
-/// Extract Settings from `settings.json` file present at provided `folder_path`
-fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
-    let path = folder_path.join("settings.json");
+/// Extract Settings from `settings.json` file present at provided `dir_path`
+fn settings_from_path(dir_path: &Path) -> Result<Settings, Error> {
+    let path = dir_path.join("settings.json");
     let file = File::open(path)?;
     let reader = std::io::BufReader::new(file);
     let metadata = serde_json::from_reader(reader)?;
@@ -83,9 +83,9 @@ fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
     Ok(metadata)
 }
 
-/// Write Settings in `settings.json` file at provided `folder_path`
-fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error> {
-    let path = folder_path.join("settings.json");
+/// Write Settings in `settings.json` file at provided `dir_path`
+fn settings_to_path(settings: &Settings, dir_path: &Path) -> Result<(), Error> {
+    let path = dir_path.join("settings.json");
     let file = File::create(path)?;
 
     serde_json::to_writer(file, settings)?;
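
(Illustrative aside, not part of this commit: a minimal standalone sketch of the read/write pattern the helpers above follow, using a hypothetical Metadata type and boxed errors instead of the crate's DumpMetadata, Settings and Error types.)

    use std::{fs::File, io::BufReader, path::Path};
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct Metadata { db_version: String }

    // Read `metadata.json` from the provided dump directory.
    fn from_path(dir_path: &Path) -> Result<Metadata, Box<dyn std::error::Error>> {
        let file = File::open(dir_path.join("metadata.json"))?;
        Ok(serde_json::from_reader(BufReader::new(file))?)
    }

    // Write `metadata.json` into the provided dump directory.
    fn to_path(metadata: &Metadata, dir_path: &Path) -> Result<(), Box<dyn std::error::Error>> {
        let file = File::create(dir_path.join("metadata.json"))?;
        serde_json::to_writer(file, metadata)?;
        Ok(())
    }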
@@ -96,7 +96,7 @@ fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error
 /// Import settings and documents of a dump with version `DumpVersion::V1` in specified index.
 fn import_index_v1(
     data: &Data,
-    dumps_folder: &Path,
+    dumps_dir: &Path,
     index_uid: &str,
     document_batch_size: usize,
     write_txn: &mut MainWriter,
@@ -108,8 +108,8 @@ fn import_index_v1(
         .open_index(index_uid)
         .ok_or(Error::index_not_found(index_uid))?;
 
-    // index folder path in dump folder
-    let index_path = &dumps_folder.join(index_uid);
+    // index dir path in dump dir
+    let index_path = &dumps_dir.join(index_uid);
 
     // extract `settings.json` file and import content
     let settings = settings_from_path(&index_path)?;
@@ -243,29 +243,29 @@ fn generate_uid() -> String {
     Utc::now().format("%Y%m%d-%H%M%S%3f").to_string()
 }
 
-/// Infer dumps_folder from dump_uid
-pub fn compressed_dumps_folder(dumps_folder: &Path, dump_uid: &str) -> PathBuf {
-    dumps_folder.join(format!("{}.tar.gz", dump_uid))
+/// Infer dumps_dir from dump_uid
+pub fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
+    dumps_dir.join(format!("{}.tar.gz", dump_uid))
 }
 
 /// Write metadata in dump
-fn dump_metadata(data: &web::Data<Data>, folder_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
+fn dump_metadata(data: &web::Data<Data>, dir_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
     let (db_major, db_minor, db_patch) = data.db.version();
     let metadata = DumpMetadata::new(indexes, format!("{}.{}.{}", db_major, db_minor, db_patch));
 
-    metadata.to_path(folder_path)
+    metadata.to_path(dir_path)
 }
 
 /// Export settings of provided index in dump
-fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
+fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
     let settings = crate::routes::setting::get_all_sync(data, reader, index_uid)?;
 
-    settings_to_path(&settings, folder_path)
+    settings_to_path(&settings, dir_path)
 }
 
 /// Export updates of provided index in dump
-fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let updates_path = folder_path.join("updates.jsonl");
+fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let updates_path = dir_path.join("updates.jsonl");
     let updates = crate::routes::index::get_all_updates_status_sync(data, reader, index_uid)?;
 
     let file = File::create(updates_path)?;
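
(Illustrative aside, not part of this commit: a self-contained usage sketch of the renamed path helper. The function body is restated from the hunk above; the uid value is only an example of the `%Y%m%d-%H%M%S%3f` shape that generate_uid produces.)

    use std::path::{Path, PathBuf};

    /// Infer dumps_dir from dump_uid (restated from the hunk above).
    pub fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
        dumps_dir.join(format!("{}.tar.gz", dump_uid))
    }

    fn main() {
        let uid = "20200924-100500123"; // illustrative uid
        let archive = compressed_dumps_dir(Path::new("dumps/"), uid);
        assert_eq!(archive, PathBuf::from("dumps/20200924-100500123.tar.gz"));
    }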
@@ -279,8 +279,8 @@ fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path
 }
 
 /// Export documents of provided index in dump
-fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let documents_path = folder_path.join("documents.jsonl");
+fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let documents_path = dir_path.join("documents.jsonl");
     let file = File::create(documents_path)?;
     let dump_batch_size = data.dump_batch_size;
 
@@ -307,7 +307,7 @@ fn fail_dump_process<E: std::error::Error>(dump_info: DumpInfo, context: &str, e
 }
 
 /// Main function of dump.
-fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInfo) {
+fn dump_process(data: web::Data<Data>, dumps_dir: PathBuf, dump_info: DumpInfo) {
     // open read transaction on Update
     let update_reader = match data.db.update_read_txn() {
         Ok(r) => r,
@@ -380,8 +380,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
         }
     }
 
-    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_folder`
-    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_folder(&dumps_folder, &dump_info.uid)) {
+    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_dir`
+    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_dir(&dumps_dir, &dump_info.uid)) {
         fail_dump_process(dump_info, "compressing dump", e);
         return ;
     }
@@ -395,8 +395,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
     resume.set_current();
 }
 
-pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<DumpInfo, Error> {
-    create_dir_all(dumps_folder).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;
+pub fn init_dump_process(data: &web::Data<Data>, dumps_dir: &Path) -> Result<DumpInfo, Error> {
+    create_dir_all(dumps_dir).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;
 
     // check if a dump is already in progress
     if let Some(resume) = DumpInfo::get_current() {
@@ -414,11 +414,11 @@ pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<
     info.set_current();
 
     let data = data.clone();
-    let dumps_folder = dumps_folder.to_path_buf();
+    let dumps_dir = dumps_dir.to_path_buf();
     let info_cloned = info.clone();
     // run dump process in a new thread
     thread::spawn(move ||
-        dump_process(data, dumps_folder, info_cloned)
+        dump_process(data, dumps_dir, info_cloned)
     );
 
     Ok(info)
@@ -74,8 +74,8 @@ async fn main() -> Result<(), MainError> {
         dump::import_dump(&data, path, opt.dump_batch_size)?;
     }
 
-    if let Some(path) = &opt.snapshot_path {
-        snapshot::schedule_snapshot(data.clone(), &path, opt.snapshot_interval_sec.unwrap_or(86400))?;
+    if let Some(dir) = &opt.snapshot_dir {
+        snapshot::schedule_snapshot(data.clone(), &dir, opt.snapshot_interval_sec.unwrap_or(86400))?;
     }
 
     print_launch_resume(&opt, &data);
@@ -109,16 +109,16 @@ pub struct Opt {
     pub ignore_snapshot_if_db_exists: bool,
 
     /// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
-    #[structopt(long, env = "MEILI_SNAPSHOT_PATH")]
-    pub snapshot_path: Option<PathBuf>,
+    #[structopt(long, env = "MEILI_SNAPSHOT_DIR")]
+    pub snapshot_dir: Option<PathBuf>,
 
     /// Defines time interval, in seconds, between each snapshot creation.
     #[structopt(long, requires = "snapshot-path", env = "MEILI_SNAPSHOT_INTERVAL_SEC")]
     pub snapshot_interval_sec: Option<u64>,
 
     /// Folder where dumps are created when the dump route is called.
-    #[structopt(long, env = "MEILI_DUMPS_FOLDER", default_value = "dumps/")]
-    pub dumps_folder: PathBuf,
+    #[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
+    pub dumps_dir: PathBuf,
 
     /// Import a dump from the specified path, must be a `.tar.gz` file.
     #[structopt(long, env = "MEILI_IMPORT_DUMP", conflicts_with = "load-from-snapshot")]
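
(Illustrative aside, not part of this commit: a minimal structopt sketch, using only an assumed subset of the real Opt struct, showing how the renamed fields surface as `--snapshot-dir` / MEILI_SNAPSHOT_DIR and `--dumps-dir` / MEILI_DUMPS_DIR.)

    use std::path::PathBuf;
    use structopt::StructOpt;

    #[derive(Debug, StructOpt)]
    struct Opt {
        /// Directory where snapshots are created.
        #[structopt(long, env = "MEILI_SNAPSHOT_DIR")]
        snapshot_dir: Option<PathBuf>,

        /// Directory where dumps are created when the dump route is called.
        #[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
        dumps_dir: PathBuf,
    }

    fn main() {
        // e.g. `app --snapshot-dir snapshots/` or `MEILI_DUMPS_DIR=out/ app`
        let opt = Opt::from_args();
        println!("dumps dir: {}", opt.dumps_dir.display());
    }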
@@ -5,7 +5,7 @@ use actix_web::{get, post};
 use actix_web::{HttpResponse, web};
 use serde::{Deserialize, Serialize};
 
-use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_folder, init_dump_process};
+use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_dir, init_dump_process};
 use crate::Data;
 use crate::error::{Error, ResponseError};
 use crate::helpers::Authentication;
@@ -19,8 +19,8 @@ pub fn services(cfg: &mut web::ServiceConfig) {
 async fn trigger_dump(
     data: web::Data<Data>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
-    match init_dump_process(&data, &dumps_folder) {
+    let dumps_dir = Path::new(&data.dumps_dir);
+    match init_dump_process(&data, &dumps_dir) {
         Ok(resume) => Ok(HttpResponse::Accepted().json(resume)),
         Err(e) => Err(e.into())
     }
@@ -42,7 +42,7 @@ async fn get_dump_status(
     data: web::Data<Data>,
     path: web::Path<DumpParam>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
+    let dumps_dir = Path::new(&data.dumps_dir);
     let dump_uid = &path.dump_uid;
 
     if let Some(resume) = DumpInfo::get_current() {
@@ -51,7 +51,7 @@ async fn get_dump_status(
         }
     }
 
-    if File::open(compressed_dumps_folder(Path::new(dumps_folder), dump_uid)).is_ok() {
+    if File::open(compressed_dumps_dir(Path::new(dumps_dir), dump_uid)).is_ok() {
         let resume = DumpInfo::new(
             dump_uid.into(),
             DumpStatus::Done
@@ -70,10 +70,10 @@ mod tests {
         let archive_path = test_dir.join("archive.tar.gz");
 
         let file_1_relative = Path::new("file1.txt");
-        let subfolder_relative = Path::new("subfolder/");
-        let file_2_relative = Path::new("subfolder/file2.txt");
+        let subdir_relative = Path::new("subdir/");
+        let file_2_relative = Path::new("subdir/file2.txt");
 
-        create_dir_all(src_dir.join(subfolder_relative)).unwrap();
+        create_dir_all(src_dir.join(subdir_relative)).unwrap();
         fs::File::create(src_dir.join(file_1_relative)).unwrap().write_all(b"Hello_file_1").unwrap();
         fs::File::create(src_dir.join(file_2_relative)).unwrap().write_all(b"Hello_file_2").unwrap();
 
@@ -84,7 +84,7 @@ mod tests {
 
         assert!(dest_dir.exists());
         assert!(dest_dir.join(file_1_relative).exists());
-        assert!(dest_dir.join(subfolder_relative).exists());
+        assert!(dest_dir.join(subdir_relative).exists());
         assert!(dest_dir.join(file_2_relative).exists());
 
         let contents = fs::read_to_string(dest_dir.join(file_1_relative)).unwrap();
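
(Illustrative aside, not part of this commit: the test above exercises the crate's compression helper. A rough sketch of how a to_tar_gz helper can be written with the tar and flate2 crates — an assumption about the approach, not the code from this repository — looks like this.)

    use std::{fs::File, io, path::Path};
    use flate2::{write::GzEncoder, Compression};

    // Pack the whole `src` directory into a `.tar.gz` archive at `dest`.
    fn to_tar_gz(src: &Path, dest: &Path) -> io::Result<()> {
        let file = File::create(dest)?;
        let gz = GzEncoder::new(file, Compression::default());
        let mut tar = tar::Builder::new(gz);
        tar.append_dir_all(".", src)?;   // add every file under `src`
        tar.into_inner()?.finish()?;     // flush the tar, then the gzip stream
        Ok(())
    }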