rename folder to dir

many 2020-10-13 11:17:02 +02:00
parent e049aead16
commit 834f3cc192
No known key found for this signature in database
GPG Key ID: 2CEF23B75189EACA
9 changed files with 64 additions and 64 deletions

View File

@@ -27,7 +27,7 @@ impl Deref for Data {
 pub struct DataInner {
     pub db: Arc<Database>,
     pub db_path: String,
-    pub dumps_folder: PathBuf,
+    pub dumps_dir: PathBuf,
     pub dump_batch_size: usize,
     pub api_keys: ApiKeys,
     pub server_pid: u32,
@@ -61,7 +61,7 @@ impl ApiKeys {
 impl Data {
     pub fn new(opt: Opt) -> Result<Data, Box<dyn Error>> {
         let db_path = opt.db_path.clone();
-        let dumps_folder = opt.dumps_folder.clone();
+        let dumps_dir = opt.dumps_dir.clone();
         let dump_batch_size = opt.dump_batch_size;
         let server_pid = std::process::id();
@@ -85,7 +85,7 @@ impl Data {
         let inner_data = DataInner {
             db: db.clone(),
             db_path,
-            dumps_folder,
+            dumps_dir,
             dump_batch_size,
             api_keys,
             server_pid,
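
A note for the later hunks: `dumps_dir` is a field of `DataInner`, yet call sites read it as `data.dumps_dir` (see the routes diff below). That works because `Data` derefs to `DataInner`, per the `impl Deref for Data` context in the first hunk header. A minimal sketch of the pattern; the `Arc<DataInner>` wrapper is an assumption, since this diff never shows `Data`'s own fields:

use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;

pub struct DataInner {
    pub dumps_dir: PathBuf,
    // remaining fields from the hunk above elided
}

#[derive(Clone)]
pub struct Data {
    inner: Arc<DataInner>, // assumed layout, not shown in this diff
}

impl Deref for Data {
    type Target = DataInner;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

fn main() {
    let data = Data {
        inner: Arc::new(DataInner { dumps_dir: PathBuf::from("dumps/") }),
    };
    // Auto-deref makes the inner field readable directly on `Data`.
    println!("{}", data.dumps_dir.display());
}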

View File

@@ -52,9 +52,9 @@ impl DumpMetadata {
         }
     }

-    /// Extract DumpMetadata from `metadata.json` file present at provided `folder_path`
-    fn from_path(folder_path: &Path) -> Result<Self, Error> {
-        let path = folder_path.join("metadata.json");
+    /// Extract DumpMetadata from `metadata.json` file present at provided `dir_path`
+    fn from_path(dir_path: &Path) -> Result<Self, Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::open(path)?;
         let reader = std::io::BufReader::new(file);
         let metadata = serde_json::from_reader(reader)?;
@@ -62,9 +62,9 @@ impl DumpMetadata {
         Ok(metadata)
     }

-    /// Write DumpMetadata in `metadata.json` file at provided `folder_path`
-    fn to_path(&self, folder_path: &Path) -> Result<(), Error> {
-        let path = folder_path.join("metadata.json");
+    /// Write DumpMetadata in `metadata.json` file at provided `dir_path`
+    fn to_path(&self, dir_path: &Path) -> Result<(), Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::create(path)?;
         serde_json::to_writer(file, &self)?;
@@ -73,9 +73,9 @@ impl DumpMetadata {
     }
 }

-/// Extract Settings from `settings.json` file present at provided `folder_path`
-fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
-    let path = folder_path.join("settings.json");
+/// Extract Settings from `settings.json` file present at provided `dir_path`
+fn settings_from_path(dir_path: &Path) -> Result<Settings, Error> {
+    let path = dir_path.join("settings.json");
     let file = File::open(path)?;
     let reader = std::io::BufReader::new(file);
     let metadata = serde_json::from_reader(reader)?;
@@ -83,9 +83,9 @@ fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
     Ok(metadata)
 }

-/// Write Settings in `settings.json` file at provided `folder_path`
-fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error> {
-    let path = folder_path.join("settings.json");
+/// Write Settings in `settings.json` file at provided `dir_path`
+fn settings_to_path(settings: &Settings, dir_path: &Path) -> Result<(), Error> {
+    let path = dir_path.join("settings.json");
     let file = File::create(path)?;
     serde_json::to_writer(file, settings)?;
@@ -96,7 +96,7 @@ fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error
 /// Import settings and documents of a dump with version `DumpVersion::V1` in specified index.
 fn import_index_v1(
     data: &Data,
-    dumps_folder: &Path,
+    dumps_dir: &Path,
     index_uid: &str,
     document_batch_size: usize,
     write_txn: &mut MainWriter,
@@ -108,8 +108,8 @@ fn import_index_v1(
         .open_index(index_uid)
         .ok_or(Error::index_not_found(index_uid))?;

-    // index folder path in dump folder
-    let index_path = &dumps_folder.join(index_uid);
+    // index dir path in dump dir
+    let index_path = &dumps_dir.join(index_uid);

     // extract `settings.json` file and import content
     let settings = settings_from_path(&index_path)?;
@@ -243,29 +243,29 @@ fn generate_uid() -> String {
     Utc::now().format("%Y%m%d-%H%M%S%3f").to_string()
 }

-/// Infer dumps_folder from dump_uid
-pub fn compressed_dumps_folder(dumps_folder: &Path, dump_uid: &str) -> PathBuf {
-    dumps_folder.join(format!("{}.tar.gz", dump_uid))
+/// Infer dumps_dir from dump_uid
+pub fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
+    dumps_dir.join(format!("{}.tar.gz", dump_uid))
 }

 /// Write metadata in dump
-fn dump_metadata(data: &web::Data<Data>, folder_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
+fn dump_metadata(data: &web::Data<Data>, dir_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
     let (db_major, db_minor, db_patch) = data.db.version();
     let metadata = DumpMetadata::new(indexes, format!("{}.{}.{}", db_major, db_minor, db_patch));
-    metadata.to_path(folder_path)
+    metadata.to_path(dir_path)
 }

 /// Export settings of provided index in dump
-fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
+fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
     let settings = crate::routes::setting::get_all_sync(data, reader, index_uid)?;
-    settings_to_path(&settings, folder_path)
+    settings_to_path(&settings, dir_path)
 }

 /// Export updates of provided index in dump
-fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let updates_path = folder_path.join("updates.jsonl");
+fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let updates_path = dir_path.join("updates.jsonl");
     let updates = crate::routes::index::get_all_updates_status_sync(data, reader, index_uid)?;
     let file = File::create(updates_path)?;
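
The renamed `compressed_dumps_dir` helper above is pure path arithmetic, which makes its contract easy to pin down. A standalone sketch of the expected behavior; the example uid follows the `%Y%m%d-%H%M%S%3f` format produced by `generate_uid` in the same hunk:

use std::path::{Path, PathBuf};

// Same one-liner as the renamed helper above.
fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
    dumps_dir.join(format!("{}.tar.gz", dump_uid))
}

fn main() {
    let path = compressed_dumps_dir(Path::new("dumps/"), "20201013-111702032");
    assert_eq!(path, PathBuf::from("dumps/20201013-111702032.tar.gz"));
}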
@@ -279,8 +279,8 @@ fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path
 }

 /// Export documents of provided index in dump
-fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let documents_path = folder_path.join("documents.jsonl");
+fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let documents_path = dir_path.join("documents.jsonl");
     let file = File::create(documents_path)?;

     let dump_batch_size = data.dump_batch_size;
@@ -307,7 +307,7 @@ fn fail_dump_process<E: std::error::Error>(dump_info: DumpInfo, context: &str, e
 }

 /// Main function of dump.
-fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInfo) {
+fn dump_process(data: web::Data<Data>, dumps_dir: PathBuf, dump_info: DumpInfo) {
     // open read transaction on Update
     let update_reader = match data.db.update_read_txn() {
         Ok(r) => r,
@@ -380,8 +380,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
         }
     }

-    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_folder`
-    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_folder(&dumps_folder, &dump_info.uid)) {
+    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_dir`
+    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_dir(&dumps_dir, &dump_info.uid)) {
         fail_dump_process(dump_info, "compressing dump", e);
         return ;
     }
@@ -395,8 +395,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
     resume.set_current();
 }

-pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<DumpInfo, Error> {
-    create_dir_all(dumps_folder).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;
+pub fn init_dump_process(data: &web::Data<Data>, dumps_dir: &Path) -> Result<DumpInfo, Error> {
+    create_dir_all(dumps_dir).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;

     // check if a dump is already in progress
     if let Some(resume) = DumpInfo::get_current() {
@@ -414,11 +414,11 @@ pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<
     info.set_current();

     let data = data.clone();
-    let dumps_folder = dumps_folder.to_path_buf();
+    let dumps_dir = dumps_dir.to_path_buf();
     let info_cloned = info.clone();
     // run dump process in a new thread
     thread::spawn(move ||
-        dump_process(data, dumps_folder, info_cloned)
+        dump_process(data, dumps_dir, info_cloned)
     );

     Ok(info)

View File

@@ -74,8 +74,8 @@ async fn main() -> Result<(), MainError> {
         dump::import_dump(&data, path, opt.dump_batch_size)?;
     }

-    if let Some(path) = &opt.snapshot_path {
-        snapshot::schedule_snapshot(data.clone(), &path, opt.snapshot_interval_sec.unwrap_or(86400))?;
+    if let Some(dir) = &opt.snapshot_dir {
+        snapshot::schedule_snapshot(data.clone(), &dir, opt.snapshot_interval_sec.unwrap_or(86400))?;
     }

     print_launch_resume(&opt, &data);
View File

@@ -109,16 +109,16 @@ pub struct Opt {
     pub ignore_snapshot_if_db_exists: bool,

     /// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
-    #[structopt(long, env = "MEILI_SNAPSHOT_PATH")]
-    pub snapshot_path: Option<PathBuf>,
+    #[structopt(long, env = "MEILI_SNAPSHOT_DIR")]
+    pub snapshot_dir: Option<PathBuf>,

     /// Defines time interval, in seconds, between each snapshot creation.
     #[structopt(long, requires = "snapshot-path", env = "MEILI_SNAPSHOT_INTERVAL_SEC")]
     pub snapshot_interval_sec: Option<u64>,

     /// Folder where dumps are created when the dump route is called.
-    #[structopt(long, env = "MEILI_DUMPS_FOLDER", default_value = "dumps/")]
-    pub dumps_folder: PathBuf,
+    #[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
+    pub dumps_dir: PathBuf,

     /// Import a dump from the specified path, must be a `.tar.gz` file.
     #[structopt(long, env = "MEILI_IMPORT_DUMP", conflicts_with = "load-from-snapshot")]
View File

@@ -5,7 +5,7 @@ use actix_web::{get, post};
 use actix_web::{HttpResponse, web};
 use serde::{Deserialize, Serialize};

-use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_folder, init_dump_process};
+use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_dir, init_dump_process};
 use crate::Data;
 use crate::error::{Error, ResponseError};
 use crate::helpers::Authentication;
@@ -19,8 +19,8 @@ pub fn services(cfg: &mut web::ServiceConfig) {
 async fn trigger_dump(
     data: web::Data<Data>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
-    match init_dump_process(&data, &dumps_folder) {
+    let dumps_dir = Path::new(&data.dumps_dir);
+    match init_dump_process(&data, &dumps_dir) {
         Ok(resume) => Ok(HttpResponse::Accepted().json(resume)),
         Err(e) => Err(e.into())
     }
@@ -42,7 +42,7 @@ async fn get_dump_status(
     data: web::Data<Data>,
     path: web::Path<DumpParam>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
+    let dumps_dir = Path::new(&data.dumps_dir);
     let dump_uid = &path.dump_uid;

     if let Some(resume) = DumpInfo::get_current() {
@@ -51,7 +51,7 @@ async fn get_dump_status(
         }
     }

-    if File::open(compressed_dumps_folder(Path::new(dumps_folder), dump_uid)).is_ok() {
+    if File::open(compressed_dumps_dir(Path::new(dumps_dir), dump_uid)).is_ok() {
         let resume = DumpInfo::new(
             dump_uid.into(),
             DumpStatus::Done
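
Taken together, the two handlers give the dump API a simple shape: the POST route spawns a dump and immediately replies 202 Accepted, while the GET route resolves a uid by checking the in-memory `DumpInfo` first and the on-disk archive second. A condensed, self-contained sketch of that lookup order; the `Status` enum here is a simplification, as the real handlers build `DumpInfo` responses:

use std::fs::File;
use std::path::{Path, PathBuf};

#[derive(Debug, PartialEq)]
enum Status { InProgress, Done, NotFound }

fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
    dumps_dir.join(format!("{}.tar.gz", dump_uid))
}

// Mirror of `get_dump_status`: a matching in-progress dump wins,
// otherwise probe `dumps_dir` for the finished archive.
fn dump_status(current_uid: Option<&str>, dumps_dir: &Path, dump_uid: &str) -> Status {
    if current_uid == Some(dump_uid) {
        return Status::InProgress;
    }
    if File::open(compressed_dumps_dir(dumps_dir, dump_uid)).is_ok() {
        return Status::Done;
    }
    Status::NotFound
}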

View File

@@ -70,10 +70,10 @@ mod tests {
         let archive_path = test_dir.join("archive.tar.gz");

         let file_1_relative = Path::new("file1.txt");
-        let subfolder_relative = Path::new("subfolder/");
-        let file_2_relative = Path::new("subfolder/file2.txt");
+        let subdir_relative = Path::new("subdir/");
+        let file_2_relative = Path::new("subdir/file2.txt");

-        create_dir_all(src_dir.join(subfolder_relative)).unwrap();
+        create_dir_all(src_dir.join(subdir_relative)).unwrap();
         fs::File::create(src_dir.join(file_1_relative)).unwrap().write_all(b"Hello_file_1").unwrap();
         fs::File::create(src_dir.join(file_2_relative)).unwrap().write_all(b"Hello_file_2").unwrap();
@@ -84,7 +84,7 @@ mod tests {
         assert!(dest_dir.exists());
         assert!(dest_dir.join(file_1_relative).exists());
-        assert!(dest_dir.join(subfolder_relative).exists());
+        assert!(dest_dir.join(subdir_relative).exists());
         assert!(dest_dir.join(file_2_relative).exists());

         let contents = fs::read_to_string(dest_dir.join(file_1_relative)).unwrap();
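
This test round-trips a directory through `to_tar_gz` and `from_tar_gz` from `crate::helpers::compression`, which this commit does not show. For readers following along, a plausible stand-alone sketch of the pair, assuming the `tar` and `flate2` crates:

use std::fs::File;
use std::io::Result;
use std::path::Path;

use flate2::read::GzDecoder;
use flate2::write::GzEncoder;
use flate2::Compression;
use tar::{Archive, Builder};

// Pack the whole directory `src` into the `.tar.gz` archive `dest`.
fn to_tar_gz(src: &Path, dest: &Path) -> Result<()> {
    let file = File::create(dest)?;
    let mut tar = Builder::new(GzEncoder::new(file, Compression::default()));
    tar.append_dir_all(".", src)?;
    tar.into_inner()?.finish()?;
    Ok(())
}

// Unpack the `.tar.gz` archive `src` into the directory `dest`.
fn from_tar_gz(src: &Path, dest: &Path) -> Result<()> {
    let file = File::open(src)?;
    let mut archive = Archive::new(GzDecoder::new(file));
    archive.unpack(dest)?;
    Ok(())
}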

View File

@@ -49,7 +49,7 @@ impl Server {
         let opt = Opt {
             db_path: tmp_dir.path().join("db").to_str().unwrap().to_string(),
-            dumps_folder: tmp_dir.path().join("dump"),
+            dumps_dir: tmp_dir.path().join("dump"),
             dump_batch_size: 16,
             http_addr: "127.0.0.1:7700".to_owned(),
             master_key: None,

View File

@@ -187,7 +187,7 @@ async fn add_document_with_long_field() {
         "relurl":"/configuration/app/web.html#locations",
         "section":"Web",
         "site":"docs",
-        "text":" The locations block is the most powerful, and potentially most involved, section of the .platform.app.yaml file. It allows you to control how the application container responds to incoming requests at a very fine-grained level. Common patterns also vary between language containers due to the way PHP-FPM handles incoming requests.\nEach entry of the locations block is an absolute URI path (with leading /) and its value includes the configuration directives for how the web server should handle matching requests. That is, if your domain is example.com then '/' means &ldquo;requests for example.com/&rdquo;, while '/admin' means &ldquo;requests for example.com/admin&rdquo;. If multiple blocks could match an incoming request then the most-specific will apply.\nweb:locations:&#39;/&#39;:# Rules for all requests that don&#39;t otherwise match....&#39;/sites/default/files&#39;:# Rules for any requests that begin with /sites/default/files....The simplest possible locations configuration is one that simply passes all requests on to your application unconditionally:\nweb:locations:&#39;/&#39;:passthru:trueThat is, all requests to /* should be forwarded to the process started by web.commands.start above. Note that for PHP containers the passthru key must specify what PHP file the request should be forwarded to, and must also specify a docroot under which the file lives. For example:\nweb:locations:&#39;/&#39;:root:&#39;web&#39;passthru:&#39;/app.php&#39;This block will serve requests to / from the web directory in the application, and if a file doesn&rsquo;t exist on disk then the request will be forwarded to the /app.php script.\nA full list of the possible subkeys for locations is below.\n root: The folder from which to serve static assets for this location relative to the application root. The application root is the directory in which the .platform.app.yaml file is located. Typical values for this property include public or web. Setting it to '' is not recommended, and its behavior may vary depending on the type of application. Absolute paths are not supported.\n passthru: Whether to forward disallowed and missing resources from this location to the application and can be true, false or an absolute URI path (with leading /). The default value is false. For non-PHP applications it will generally be just true or false. In a PHP application this will typically be the front controller such as /index.php or /app.php. This entry works similar to mod_rewrite under Apache. Note: If the value of passthru does not begin with the same value as the location key it is under, the passthru may evaluate to another entry. That may be useful when you want different cache settings for different paths, for instance, but want missing files in all of them to map back to the same front controller. See the example block below.\n index: The files to consider when serving a request for a directory: an array of file names or null. (typically ['index.html']). Note that in order for this to work, access to the static files named must be allowed by the allow or rules keys for this location.\n expires: How long to allow static assets from this location to be cached (this enables the Cache-Control and Expires headers) and can be a time or -1 for no caching (default). Times can be suffixed with &ldquo;ms&rdquo; (milliseconds), &ldquo;s&rdquo; (seconds), &ldquo;m&rdquo; (minutes), &ldquo;h&rdquo; (hours), &ldquo;d&rdquo; (days), &ldquo;w&rdquo; (weeks), &ldquo;M&rdquo; (months, 30d) or &ldquo;y&rdquo; (years, 365d).\n scripts: Whether to allow loading scripts in that location (true or false). This directive is only meaningful on PHP.\n allow: Whether to allow serving files which don&rsquo;t match a rule (true or false, default: true).\n headers: Any additional headers to apply to static assets. This section is a mapping of header names to header values. Responses from the application aren&rsquo;t affected, to avoid overlap with the application&rsquo;s own ability to include custom headers in the response.\n rules: Specific overrides for a specific location. The key is a PCRE (regular expression) that is matched against the full request path.\n request_buffering: Most application servers do not support chunked requests (e.g. fpm, uwsgi), so Platform.sh enables request_buffering by default to handle them. That default configuration would look like this if it was present in .platform.app.yaml:\nweb:locations:&#39;/&#39;:passthru:truerequest_buffering:enabled:truemax_request_size:250mIf the application server can already efficiently handle chunked requests, the request_buffering subkey can be modified to disable it entirely (enabled: false). Additionally, applications that frequently deal with uploads greater than 250MB in size can update the max_request_size key to the application&rsquo;s needs. Note that modifications to request_buffering will need to be specified at each location where it is desired.\n ",
+        "text":" The locations block is the most powerful, and potentially most involved, section of the .platform.app.yaml file. It allows you to control how the application container responds to incoming requests at a very fine-grained level. Common patterns also vary between language containers due to the way PHP-FPM handles incoming requests.\nEach entry of the locations block is an absolute URI path (with leading /) and its value includes the configuration directives for how the web server should handle matching requests. That is, if your domain is example.com then '/' means &ldquo;requests for example.com/&rdquo;, while '/admin' means &ldquo;requests for example.com/admin&rdquo;. If multiple blocks could match an incoming request then the most-specific will apply.\nweb:locations:&#39;/&#39;:# Rules for all requests that don&#39;t otherwise match....&#39;/sites/default/files&#39;:# Rules for any requests that begin with /sites/default/files....The simplest possible locations configuration is one that simply passes all requests on to your application unconditionally:\nweb:locations:&#39;/&#39;:passthru:trueThat is, all requests to /* should be forwarded to the process started by web.commands.start above. Note that for PHP containers the passthru key must specify what PHP file the request should be forwarded to, and must also specify a docroot under which the file lives. For example:\nweb:locations:&#39;/&#39;:root:&#39;web&#39;passthru:&#39;/app.php&#39;This block will serve requests to / from the web directory in the application, and if a file doesn&rsquo;t exist on disk then the request will be forwarded to the /app.php script.\nA full list of the possible subkeys for locations is below.\n root: The dir from which to serve static assets for this location relative to the application root. The application root is the directory in which the .platform.app.yaml file is located. Typical values for this property include public or web. Setting it to '' is not recommended, and its behavior may vary depending on the type of application. Absolute paths are not supported.\n passthru: Whether to forward disallowed and missing resources from this location to the application and can be true, false or an absolute URI path (with leading /). The default value is false. For non-PHP applications it will generally be just true or false. In a PHP application this will typically be the front controller such as /index.php or /app.php. This entry works similar to mod_rewrite under Apache. Note: If the value of passthru does not begin with the same value as the location key it is under, the passthru may evaluate to another entry. That may be useful when you want different cache settings for different paths, for instance, but want missing files in all of them to map back to the same front controller. See the example block below.\n index: The files to consider when serving a request for a directory: an array of file names or null. (typically ['index.html']). Note that in order for this to work, access to the static files named must be allowed by the allow or rules keys for this location.\n expires: How long to allow static assets from this location to be cached (this enables the Cache-Control and Expires headers) and can be a time or -1 for no caching (default). Times can be suffixed with &ldquo;ms&rdquo; (milliseconds), &ldquo;s&rdquo; (seconds), &ldquo;m&rdquo; (minutes), &ldquo;h&rdquo; (hours), &ldquo;d&rdquo; (days), &ldquo;w&rdquo; (weeks), &ldquo;M&rdquo; (months, 30d) or &ldquo;y&rdquo; (years, 365d).\n scripts: Whether to allow loading scripts in that location (true or false). This directive is only meaningful on PHP.\n allow: Whether to allow serving files which don&rsquo;t match a rule (true or false, default: true).\n headers: Any additional headers to apply to static assets. This section is a mapping of header names to header values. Responses from the application aren&rsquo;t affected, to avoid overlap with the application&rsquo;s own ability to include custom headers in the response.\n rules: Specific overrides for a specific location. The key is a PCRE (regular expression) that is matched against the full request path.\n request_buffering: Most application servers do not support chunked requests (e.g. fpm, uwsgi), so Platform.sh enables request_buffering by default to handle them. That default configuration would look like this if it was present in .platform.app.yaml:\nweb:locations:&#39;/&#39;:passthru:truerequest_buffering:enabled:truemax_request_size:250mIf the application server can already efficiently handle chunked requests, the request_buffering subkey can be modified to disable it entirely (enabled: false). Additionally, applications that frequently deal with uploads greater than 250MB in size can update the max_request_size key to the application&rsquo;s needs. Note that modifications to request_buffering will need to be specified at each location where it is desired.\n ",
         "title":"Locations",
         "url":"/configuration/app/web.html#locations"
     }]);

View File

@@ -159,7 +159,7 @@ async fn get_dump_status_should_return_error_provoking_it() {
     let (value, status_code) = server.trigger_dump().await;

     // removing destination directory provoking `No such file or directory` error
-    std::fs::remove_dir(server.data().dumps_folder.clone()).unwrap();
+    std::fs::remove_dir(server.data().dumps_dir.clone()).unwrap();

     assert_eq!(status_code, 202);
@@ -197,11 +197,11 @@ async fn dump_metadata_should_be_valid() {
     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("metadata.json")).unwrap();
     let mut metadata: serde_json::Value = serde_json::from_reader(file).unwrap();
@@ -238,9 +238,9 @@ async fn dump_gzip_should_have_been_created() {
     let dump_uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);

-    let compressed_path = dumps_folder.join(format!("{}.tar.gz", dump_uid));
+    let compressed_path = dumps_dir.join(format!("{}.tar.gz", dump_uid));
     assert!(File::open(compressed_path).is_ok());
 }
@@ -312,11 +312,11 @@ async fn dump_index_settings_should_be_valid() {
     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("settings.json")).unwrap();
     let settings: serde_json::Value = serde_json::from_reader(file).unwrap();
@@ -336,11 +336,11 @@ async fn dump_index_documents_should_be_valid() {
     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("documents.jsonl")).unwrap();
     let documents = read_all_jsonline(file);
@@ -360,11 +360,11 @@ async fn dump_index_updates_should_be_valid() {
     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("updates.jsonl")).unwrap();
     let mut updates = read_all_jsonline(file);
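
The four dump tests share one recipe: trigger a dump, wait for it, unpack `{uid}.tar.gz` from `dumps_dir` into a temp dir, then inspect the extracted JSON and JSONL files. The `read_all_jsonline` helper is not part of this diff; a plausible sketch of its shape, assuming it parses one JSON value per line:

use std::fs::File;
use std::io::{BufRead, BufReader};

// One JSON value per line, as in `documents.jsonl` and `updates.jsonl`.
fn read_all_jsonline(file: File) -> Vec<serde_json::Value> {
    BufReader::new(file)
        .lines()
        .map(|line| serde_json::from_str(&line.unwrap()).unwrap())
        .collect()
}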