mirror of https://github.com/meilisearch/MeiliSearch (synced 2024-11-23 13:24:27 +01:00)

rename folder to dir

commit 834f3cc192 (parent e049aead16)
@@ -27,7 +27,7 @@ impl Deref for Data {
 pub struct DataInner {
     pub db: Arc<Database>,
     pub db_path: String,
-    pub dumps_folder: PathBuf,
+    pub dumps_dir: PathBuf,
     pub dump_batch_size: usize,
     pub api_keys: ApiKeys,
     pub server_pid: u32,
@@ -61,7 +61,7 @@ impl ApiKeys {
 impl Data {
     pub fn new(opt: Opt) -> Result<Data, Box<dyn Error>> {
         let db_path = opt.db_path.clone();
-        let dumps_folder = opt.dumps_folder.clone();
+        let dumps_dir = opt.dumps_dir.clone();
         let dump_batch_size = opt.dump_batch_size;
         let server_pid = std::process::id();

@@ -85,7 +85,7 @@ impl Data {
         let inner_data = DataInner {
             db: db.clone(),
             db_path,
-            dumps_folder,
+            dumps_dir,
             dump_batch_size,
             api_keys,
             server_pid,
@@ -52,9 +52,9 @@ impl DumpMetadata {
         }
     }

-    /// Extract DumpMetadata from `metadata.json` file present at provided `folder_path`
-    fn from_path(folder_path: &Path) -> Result<Self, Error> {
-        let path = folder_path.join("metadata.json");
+    /// Extract DumpMetadata from `metadata.json` file present at provided `dir_path`
+    fn from_path(dir_path: &Path) -> Result<Self, Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::open(path)?;
         let reader = std::io::BufReader::new(file);
         let metadata = serde_json::from_reader(reader)?;
@@ -62,9 +62,9 @@ impl DumpMetadata {
         Ok(metadata)
     }

-    /// Write DumpMetadata in `metadata.json` file at provided `folder_path`
-    fn to_path(&self, folder_path: &Path) -> Result<(), Error> {
-        let path = folder_path.join("metadata.json");
+    /// Write DumpMetadata in `metadata.json` file at provided `dir_path`
+    fn to_path(&self, dir_path: &Path) -> Result<(), Error> {
+        let path = dir_path.join("metadata.json");
         let file = File::create(path)?;

         serde_json::to_writer(file, &self)?;
@@ -73,9 +73,9 @@ impl DumpMetadata {
     }
 }

-/// Extract Settings from `settings.json` file present at provided `folder_path`
-fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
-    let path = folder_path.join("settings.json");
+/// Extract Settings from `settings.json` file present at provided `dir_path`
+fn settings_from_path(dir_path: &Path) -> Result<Settings, Error> {
+    let path = dir_path.join("settings.json");
     let file = File::open(path)?;
     let reader = std::io::BufReader::new(file);
     let metadata = serde_json::from_reader(reader)?;
@@ -83,9 +83,9 @@ fn settings_from_path(folder_path: &Path) -> Result<Settings, Error> {
     Ok(metadata)
 }

-/// Write Settings in `settings.json` file at provided `folder_path`
-fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error> {
-    let path = folder_path.join("settings.json");
+/// Write Settings in `settings.json` file at provided `dir_path`
+fn settings_to_path(settings: &Settings, dir_path: &Path) -> Result<(), Error> {
+    let path = dir_path.join("settings.json");
     let file = File::create(path)?;

     serde_json::to_writer(file, settings)?;
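The `*_from_path` / `*_to_path` helpers renamed above are plain serde round-trips over a fixed file name inside the given directory. A minimal, self-contained sketch of that pattern (the `Metadata` struct and its field below are hypothetical; `DumpMetadata`'s real fields are not part of this diff):

    use std::error::Error;
    use std::fs::File;
    use std::path::Path;

    use serde::{Deserialize, Serialize};

    // Hypothetical stand-in for the dump metadata, used only to show the
    // round trip over `<dir_path>/metadata.json`.
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Metadata {
        db_version: String,
    }

    fn to_path(metadata: &Metadata, dir_path: &Path) -> Result<(), Box<dyn Error>> {
        let file = File::create(dir_path.join("metadata.json"))?;
        serde_json::to_writer(file, metadata)?;
        Ok(())
    }

    fn from_path(dir_path: &Path) -> Result<Metadata, Box<dyn Error>> {
        let file = File::open(dir_path.join("metadata.json"))?;
        Ok(serde_json::from_reader(std::io::BufReader::new(file))?)
    }

    fn main() -> Result<(), Box<dyn Error>> {
        let dir = std::env::temp_dir();
        let original = Metadata { db_version: "0.0.0".into() };
        to_path(&original, &dir)?;
        assert_eq!(from_path(&dir)?, original);
        Ok(())
    }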
@@ -96,7 +96,7 @@ fn settings_to_path(settings: &Settings, folder_path: &Path) -> Result<(), Error
 /// Import settings and documents of a dump with version `DumpVersion::V1` in specified index.
 fn import_index_v1(
     data: &Data,
-    dumps_folder: &Path,
+    dumps_dir: &Path,
     index_uid: &str,
     document_batch_size: usize,
     write_txn: &mut MainWriter,
@@ -108,8 +108,8 @@ fn import_index_v1(
         .open_index(index_uid)
         .ok_or(Error::index_not_found(index_uid))?;

-    // index folder path in dump folder
-    let index_path = &dumps_folder.join(index_uid);
+    // index dir path in dump dir
+    let index_path = &dumps_dir.join(index_uid);

     // extract `settings.json` file and import content
     let settings = settings_from_path(&index_path)?;
@@ -243,29 +243,29 @@ fn generate_uid() -> String {
     Utc::now().format("%Y%m%d-%H%M%S%3f").to_string()
 }

-/// Infer dumps_folder from dump_uid
-pub fn compressed_dumps_folder(dumps_folder: &Path, dump_uid: &str) -> PathBuf {
-    dumps_folder.join(format!("{}.tar.gz", dump_uid))
+/// Infer dumps_dir from dump_uid
+pub fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
+    dumps_dir.join(format!("{}.tar.gz", dump_uid))
 }

 /// Write metadata in dump
-fn dump_metadata(data: &web::Data<Data>, folder_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
+fn dump_metadata(data: &web::Data<Data>, dir_path: &Path, indexes: Vec<IndexResponse>) -> Result<(), Error> {
     let (db_major, db_minor, db_patch) = data.db.version();
     let metadata = DumpMetadata::new(indexes, format!("{}.{}.{}", db_major, db_minor, db_patch));

-    metadata.to_path(folder_path)
+    metadata.to_path(dir_path)
 }

 /// Export settings of provided index in dump
-fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
+fn dump_index_settings(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
     let settings = crate::routes::setting::get_all_sync(data, reader, index_uid)?;

-    settings_to_path(&settings, folder_path)
+    settings_to_path(&settings, dir_path)
 }

 /// Export updates of provided index in dump
-fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let updates_path = folder_path.join("updates.jsonl");
+fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let updates_path = dir_path.join("updates.jsonl");
     let updates = crate::routes::index::get_all_updates_status_sync(data, reader, index_uid)?;

     let file = File::create(updates_path)?;
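The renamed `compressed_dumps_dir` helper only composes a path. A small sketch of what it resolves to, using a made-up uid in the `%Y%m%d-%H%M%S%3f` shape produced by `generate_uid`:

    use std::path::{Path, PathBuf};

    // Same body as the renamed helper above: `<dumps_dir>/<dump_uid>.tar.gz`.
    pub fn compressed_dumps_dir(dumps_dir: &Path, dump_uid: &str) -> PathBuf {
        dumps_dir.join(format!("{}.tar.gz", dump_uid))
    }

    fn main() {
        // Hypothetical uid; real uids come from `generate_uid()`.
        let archive = compressed_dumps_dir(Path::new("dumps/"), "20201123-132427123");
        assert_eq!(archive, PathBuf::from("dumps/20201123-132427123.tar.gz"));
    }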
@@ -279,8 +279,8 @@ fn dump_index_updates(data: &web::Data<Data>, reader: &UpdateReader, folder_path
 }

 /// Export documents of provided index in dump
-fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, folder_path: &Path, index_uid: &str) -> Result<(), Error> {
-    let documents_path = folder_path.join("documents.jsonl");
+fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, dir_path: &Path, index_uid: &str) -> Result<(), Error> {
+    let documents_path = dir_path.join("documents.jsonl");
     let file = File::create(documents_path)?;
     let dump_batch_size = data.dump_batch_size;

@@ -307,7 +307,7 @@ fn fail_dump_process<E: std::error::Error>(dump_info: DumpInfo, context: &str, e
 }

 /// Main function of dump.
-fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInfo) {
+fn dump_process(data: web::Data<Data>, dumps_dir: PathBuf, dump_info: DumpInfo) {
     // open read transaction on Update
     let update_reader = match data.db.update_read_txn() {
         Ok(r) => r,
@@ -380,8 +380,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
         }
     }

-    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_folder`
-    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_folder(&dumps_folder, &dump_info.uid)) {
+    // compress dump in a file named `{dump_uid}.tar.gz` in `dumps_dir`
+    if let Err(e) = crate::helpers::compression::to_tar_gz(&tmp_dir_path, &compressed_dumps_dir(&dumps_dir, &dump_info.uid)) {
         fail_dump_process(dump_info, "compressing dump", e);
         return ;
     }
@@ -395,8 +395,8 @@ fn dump_process(data: web::Data<Data>, dumps_folder: PathBuf, dump_info: DumpInf
     resume.set_current();
 }

-pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<DumpInfo, Error> {
-    create_dir_all(dumps_folder).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;
+pub fn init_dump_process(data: &web::Data<Data>, dumps_dir: &Path) -> Result<DumpInfo, Error> {
+    create_dir_all(dumps_dir).map_err(|e| Error::dump_failed(format!("creating temporary directory {}", e)))?;

     // check if a dump is already in progress
     if let Some(resume) = DumpInfo::get_current() {
@@ -414,11 +414,11 @@ pub fn init_dump_process(data: &web::Data<Data>, dumps_folder: &Path) -> Result<
     info.set_current();

     let data = data.clone();
-    let dumps_folder = dumps_folder.to_path_buf();
+    let dumps_dir = dumps_dir.to_path_buf();
     let info_cloned = info.clone();
     // run dump process in a new thread
     thread::spawn(move ||
-        dump_process(data, dumps_folder, info_cloned)
+        dump_process(data, dumps_dir, info_cloned)
     );

     Ok(info)
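One detail worth noting in `init_dump_process`: the borrowed `&Path` is turned into an owned `PathBuf` with `to_path_buf()` before the spawn, because `thread::spawn` requires `'static` ownership of everything the closure captures. An illustrative sketch of that pattern (the function and names below are placeholders, not the real dump job):

    use std::path::Path;
    use std::thread;

    // Placeholder for the real dump job; shows why an owned copy is needed:
    // a borrowed `&Path` cannot be moved into a spawned thread.
    fn start_background_job(dumps_dir: &Path) -> thread::JoinHandle<()> {
        let dumps_dir = dumps_dir.to_path_buf(); // owned copy moved into the thread
        thread::spawn(move || {
            println!("dumping into {:?}", dumps_dir);
        })
    }

    fn main() {
        start_background_job(Path::new("dumps/")).join().unwrap();
    }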
@@ -74,8 +74,8 @@ async fn main() -> Result<(), MainError> {
         dump::import_dump(&data, path, opt.dump_batch_size)?;
     }

-    if let Some(path) = &opt.snapshot_path {
-        snapshot::schedule_snapshot(data.clone(), &path, opt.snapshot_interval_sec.unwrap_or(86400))?;
+    if let Some(dir) = &opt.snapshot_dir {
+        snapshot::schedule_snapshot(data.clone(), &dir, opt.snapshot_interval_sec.unwrap_or(86400))?;
     }

     print_launch_resume(&opt, &data);
@@ -109,16 +109,16 @@ pub struct Opt {
     pub ignore_snapshot_if_db_exists: bool,

     /// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
-    #[structopt(long, env = "MEILI_SNAPSHOT_PATH")]
-    pub snapshot_path: Option<PathBuf>,
+    #[structopt(long, env = "MEILI_SNAPSHOT_DIR")]
+    pub snapshot_dir: Option<PathBuf>,

     /// Defines time interval, in seconds, between each snapshot creation.
     #[structopt(long, requires = "snapshot-path", env = "MEILI_SNAPSHOT_INTERVAL_SEC")]
     pub snapshot_interval_sec: Option<u64>,

     /// Folder where dumps are created when the dump route is called.
-    #[structopt(long, env = "MEILI_DUMPS_FOLDER", default_value = "dumps/")]
-    pub dumps_folder: PathBuf,
+    #[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
+    pub dumps_dir: PathBuf,

     /// Import a dump from the specified path, must be a `.tar.gz` file.
     #[structopt(long, env = "MEILI_IMPORT_DUMP", conflicts_with = "load-from-snapshot")]
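Because structopt derives the long flag from the field name (kebab-cased), renaming the field also renames the flag: `dumps_folder` / `--dumps-folder` becomes `dumps_dir` / `--dumps-dir`, while the environment variable is spelled out explicitly in the attribute and is renamed alongside it. A minimal sketch assuming structopt's default naming rules:

    use std::path::PathBuf;
    use structopt::StructOpt;

    // The field `dumps_dir` is exposed as `--dumps-dir`, falls back to the
    // `MEILI_DUMPS_DIR` environment variable, and defaults to `dumps/`.
    #[derive(Debug, StructOpt)]
    struct Opt {
        #[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
        dumps_dir: PathBuf,
    }

    fn main() {
        // e.g. `app --dumps-dir /tmp/dumps` or `MEILI_DUMPS_DIR=/tmp/dumps app`
        let opt = Opt::from_args();
        println!("dumps are written under {:?}", opt.dumps_dir);
    }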
@@ -5,7 +5,7 @@ use actix_web::{get, post};
 use actix_web::{HttpResponse, web};
 use serde::{Deserialize, Serialize};

-use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_folder, init_dump_process};
+use crate::dump::{DumpInfo, DumpStatus, compressed_dumps_dir, init_dump_process};
 use crate::Data;
 use crate::error::{Error, ResponseError};
 use crate::helpers::Authentication;
@@ -19,8 +19,8 @@ pub fn services(cfg: &mut web::ServiceConfig) {
 async fn trigger_dump(
     data: web::Data<Data>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
-    match init_dump_process(&data, &dumps_folder) {
+    let dumps_dir = Path::new(&data.dumps_dir);
+    match init_dump_process(&data, &dumps_dir) {
         Ok(resume) => Ok(HttpResponse::Accepted().json(resume)),
         Err(e) => Err(e.into())
     }
@@ -42,7 +42,7 @@ async fn get_dump_status(
     data: web::Data<Data>,
     path: web::Path<DumpParam>,
 ) -> Result<HttpResponse, ResponseError> {
-    let dumps_folder = Path::new(&data.dumps_folder);
+    let dumps_dir = Path::new(&data.dumps_dir);
     let dump_uid = &path.dump_uid;

     if let Some(resume) = DumpInfo::get_current() {
@@ -51,7 +51,7 @@ async fn get_dump_status(
         }
     }

-    if File::open(compressed_dumps_folder(Path::new(dumps_folder), dump_uid)).is_ok() {
+    if File::open(compressed_dumps_dir(Path::new(dumps_dir), dump_uid)).is_ok() {
         let resume = DumpInfo::new(
             dump_uid.into(),
             DumpStatus::Done
@@ -70,10 +70,10 @@ mod tests {
         let archive_path = test_dir.join("archive.tar.gz");

         let file_1_relative = Path::new("file1.txt");
-        let subfolder_relative = Path::new("subfolder/");
-        let file_2_relative = Path::new("subfolder/file2.txt");
+        let subdir_relative = Path::new("subdir/");
+        let file_2_relative = Path::new("subdir/file2.txt");

-        create_dir_all(src_dir.join(subfolder_relative)).unwrap();
+        create_dir_all(src_dir.join(subdir_relative)).unwrap();
         fs::File::create(src_dir.join(file_1_relative)).unwrap().write_all(b"Hello_file_1").unwrap();
         fs::File::create(src_dir.join(file_2_relative)).unwrap().write_all(b"Hello_file_2").unwrap();

@@ -84,7 +84,7 @@ mod tests {

         assert!(dest_dir.exists());
         assert!(dest_dir.join(file_1_relative).exists());
-        assert!(dest_dir.join(subfolder_relative).exists());
+        assert!(dest_dir.join(subdir_relative).exists());
         assert!(dest_dir.join(file_2_relative).exists());

         let contents = fs::read_to_string(dest_dir.join(file_1_relative)).unwrap();
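This test exercises `helpers::compression::{to_tar_gz, from_tar_gz}`, whose implementation is not shown in this diff. For context, a minimal sketch of such a tar.gz round trip built on the `tar` and `flate2` crates; this is an assumption about the approach, not the project's actual code:

    use std::fs::{self, File};
    use std::path::Path;

    use flate2::read::GzDecoder;
    use flate2::write::GzEncoder;
    use flate2::Compression;
    use tar::{Archive, Builder};

    // Pack the whole contents of `src` into a gzip-compressed tarball at `dest`.
    fn to_tar_gz(src: &Path, dest: &Path) -> Result<(), Box<dyn std::error::Error>> {
        let gz = GzEncoder::new(File::create(dest)?, Compression::default());
        let mut tar = Builder::new(gz);
        tar.append_dir_all(".", src)?;
        tar.into_inner()?.finish()?;
        Ok(())
    }

    // Unpack a gzip-compressed tarball at `src` into the `dest` directory.
    fn from_tar_gz(src: &Path, dest: &Path) -> Result<(), Box<dyn std::error::Error>> {
        let mut archive = Archive::new(GzDecoder::new(File::open(src)?));
        fs::create_dir_all(dest)?;
        archive.unpack(dest)?;
        Ok(())
    }

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let base = std::env::temp_dir().join("tar_gz_roundtrip_demo");
        let (src, out) = (base.join("src"), base.join("out"));
        fs::create_dir_all(&src)?;
        fs::write(src.join("file1.txt"), b"Hello_file_1")?;

        let archive = base.join("archive.tar.gz");
        to_tar_gz(&src, &archive)?;
        from_tar_gz(&archive, &out)?;

        assert_eq!(fs::read_to_string(out.join("file1.txt"))?, "Hello_file_1");
        Ok(())
    }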
@@ -49,7 +49,7 @@ impl Server {

         let opt = Opt {
             db_path: tmp_dir.path().join("db").to_str().unwrap().to_string(),
-            dumps_folder: tmp_dir.path().join("dump"),
+            dumps_dir: tmp_dir.path().join("dump"),
             dump_batch_size: 16,
             http_addr: "127.0.0.1:7700".to_owned(),
             master_key: None,
@@ -187,7 +187,7 @@ async fn add_document_with_long_field() {
         "relurl":"/configuration/app/web.html#locations",
         "section":"Web",
         "site":"docs",
-        "text":" The locations block is the most powerful, and potentially most involved, section of the .platform.app.yaml file. It allows you to control how the application container responds to incoming requests at a very fine-grained level. Common patterns also vary between language containers due to the way PHP-FPM handles incoming requests.\nEach entry of the locations block is an absolute URI path (with leading /) and its value includes the configuration directives for how the web server should handle matching requests. That is, if your domain is example.com then '/' means “requests for example.com/”, while '/admin' means “requests for example.com/admin”. If multiple blocks could match an incoming request then the most-specific will apply.\nweb:locations:'/':# Rules for all requests that don't otherwise match....'/sites/default/files':# Rules for any requests that begin with /sites/default/files....The simplest possible locations configuration is one that simply passes all requests on to your application unconditionally:\nweb:locations:'/':passthru:trueThat is, all requests to /* should be forwarded to the process started by web.commands.start above. Note that for PHP containers the passthru key must specify what PHP file the request should be forwarded to, and must also specify a docroot under which the file lives. For example:\nweb:locations:'/':root:'web'passthru:'/app.php'This block will serve requests to / from the web directory in the application, and if a file doesn’t exist on disk then the request will be forwarded to the /app.php script.\nA full list of the possible subkeys for locations is below.\n root: The folder from which to serve static assets for this location relative to the application root. The application root is the directory in which the .platform.app.yaml file is located. Typical values for this property include public or web. Setting it to '' is not recommended, and its behavior may vary depending on the type of application. Absolute paths are not supported.\n passthru: Whether to forward disallowed and missing resources from this location to the application and can be true, false or an absolute URI path (with leading /). The default value is false. For non-PHP applications it will generally be just true or false. In a PHP application this will typically be the front controller such as /index.php or /app.php. This entry works similar to mod_rewrite under Apache. Note: If the value of passthru does not begin with the same value as the location key it is under, the passthru may evaluate to another entry. That may be useful when you want different cache settings for different paths, for instance, but want missing files in all of them to map back to the same front controller. See the example block below.\n index: The files to consider when serving a request for a directory: an array of file names or null. (typically ['index.html']). Note that in order for this to work, access to the static files named must be allowed by the allow or rules keys for this location.\n expires: How long to allow static assets from this location to be cached (this enables the Cache-Control and Expires headers) and can be a time or -1 for no caching (default). Times can be suffixed with “ms” (milliseconds), “s” (seconds), “m” (minutes), “h” (hours), “d” (days), “w” (weeks), “M” (months, 30d) or “y” (years, 365d).\n scripts: Whether to allow loading scripts in that location (true or false). This directive is only meaningful on PHP.\n allow: Whether to allow serving files which don’t match a rule (true or false, default: true).\n headers: Any additional headers to apply to static assets. This section is a mapping of header names to header values. Responses from the application aren’t affected, to avoid overlap with the application’s own ability to include custom headers in the response.\n rules: Specific overrides for a specific location. The key is a PCRE (regular expression) that is matched against the full request path.\n request_buffering: Most application servers do not support chunked requests (e.g. fpm, uwsgi), so Platform.sh enables request_buffering by default to handle them. That default configuration would look like this if it was present in .platform.app.yaml:\nweb:locations:'/':passthru:truerequest_buffering:enabled:truemax_request_size:250mIf the application server can already efficiently handle chunked requests, the request_buffering subkey can be modified to disable it entirely (enabled: false). Additionally, applications that frequently deal with uploads greater than 250MB in size can update the max_request_size key to the application’s needs. Note that modifications to request_buffering will need to be specified at each location where it is desired.\n ",
+        "text":" The locations block is the most powerful, and potentially most involved, section of the .platform.app.yaml file. It allows you to control how the application container responds to incoming requests at a very fine-grained level. Common patterns also vary between language containers due to the way PHP-FPM handles incoming requests.\nEach entry of the locations block is an absolute URI path (with leading /) and its value includes the configuration directives for how the web server should handle matching requests. That is, if your domain is example.com then '/' means “requests for example.com/”, while '/admin' means “requests for example.com/admin”. If multiple blocks could match an incoming request then the most-specific will apply.\nweb:locations:'/':# Rules for all requests that don't otherwise match....'/sites/default/files':# Rules for any requests that begin with /sites/default/files....The simplest possible locations configuration is one that simply passes all requests on to your application unconditionally:\nweb:locations:'/':passthru:trueThat is, all requests to /* should be forwarded to the process started by web.commands.start above. Note that for PHP containers the passthru key must specify what PHP file the request should be forwarded to, and must also specify a docroot under which the file lives. For example:\nweb:locations:'/':root:'web'passthru:'/app.php'This block will serve requests to / from the web directory in the application, and if a file doesn’t exist on disk then the request will be forwarded to the /app.php script.\nA full list of the possible subkeys for locations is below.\n root: The dir from which to serve static assets for this location relative to the application root. The application root is the directory in which the .platform.app.yaml file is located. Typical values for this property include public or web. Setting it to '' is not recommended, and its behavior may vary depending on the type of application. Absolute paths are not supported.\n passthru: Whether to forward disallowed and missing resources from this location to the application and can be true, false or an absolute URI path (with leading /). The default value is false. For non-PHP applications it will generally be just true or false. In a PHP application this will typically be the front controller such as /index.php or /app.php. This entry works similar to mod_rewrite under Apache. Note: If the value of passthru does not begin with the same value as the location key it is under, the passthru may evaluate to another entry. That may be useful when you want different cache settings for different paths, for instance, but want missing files in all of them to map back to the same front controller. See the example block below.\n index: The files to consider when serving a request for a directory: an array of file names or null. (typically ['index.html']). Note that in order for this to work, access to the static files named must be allowed by the allow or rules keys for this location.\n expires: How long to allow static assets from this location to be cached (this enables the Cache-Control and Expires headers) and can be a time or -1 for no caching (default). Times can be suffixed with “ms” (milliseconds), “s” (seconds), “m” (minutes), “h” (hours), “d” (days), “w” (weeks), “M” (months, 30d) or “y” (years, 365d).\n scripts: Whether to allow loading scripts in that location (true or false). This directive is only meaningful on PHP.\n allow: Whether to allow serving files which don’t match a rule (true or false, default: true).\n headers: Any additional headers to apply to static assets. This section is a mapping of header names to header values. Responses from the application aren’t affected, to avoid overlap with the application’s own ability to include custom headers in the response.\n rules: Specific overrides for a specific location. The key is a PCRE (regular expression) that is matched against the full request path.\n request_buffering: Most application servers do not support chunked requests (e.g. fpm, uwsgi), so Platform.sh enables request_buffering by default to handle them. That default configuration would look like this if it was present in .platform.app.yaml:\nweb:locations:'/':passthru:truerequest_buffering:enabled:truemax_request_size:250mIf the application server can already efficiently handle chunked requests, the request_buffering subkey can be modified to disable it entirely (enabled: false). Additionally, applications that frequently deal with uploads greater than 250MB in size can update the max_request_size key to the application’s needs. Note that modifications to request_buffering will need to be specified at each location where it is desired.\n ",
         "title":"Locations",
         "url":"/configuration/app/web.html#locations"
     }]);
@@ -159,7 +159,7 @@ async fn get_dump_status_should_return_error_provoking_it() {
     let (value, status_code) = server.trigger_dump().await;

     // removing destination directory provoking `No such file or directory` error
-    std::fs::remove_dir(server.data().dumps_folder.clone()).unwrap();
+    std::fs::remove_dir(server.data().dumps_dir.clone()).unwrap();

     assert_eq!(status_code, 202);

@@ -197,11 +197,11 @@ async fn dump_metadata_should_be_valid() {

     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("metadata.json")).unwrap();
     let mut metadata: serde_json::Value = serde_json::from_reader(file).unwrap();
@@ -238,9 +238,9 @@ async fn dump_gzip_should_have_been_created() {


     let dump_uid = trigger_and_wait_dump(&mut server).await;
-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);

-    let compressed_path = dumps_folder.join(format!("{}.tar.gz", dump_uid));
+    let compressed_path = dumps_dir.join(format!("{}.tar.gz", dump_uid));
     assert!(File::open(compressed_path).is_ok());
 }

@@ -312,11 +312,11 @@ async fn dump_index_settings_should_be_valid() {

     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("settings.json")).unwrap();
     let settings: serde_json::Value = serde_json::from_reader(file).unwrap();
@@ -336,11 +336,11 @@ async fn dump_index_documents_should_be_valid() {

     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("documents.jsonl")).unwrap();
     let documents = read_all_jsonline(file);
@@ -360,11 +360,11 @@ async fn dump_index_updates_should_be_valid() {

     let uid = trigger_and_wait_dump(&mut server).await;

-    let dumps_folder = Path::new(&server.data().dumps_folder);
+    let dumps_dir = Path::new(&server.data().dumps_dir);
     let tmp_dir = TempDir::new().unwrap();
     let tmp_dir_path = tmp_dir.path();

-    compression::from_tar_gz(&dumps_folder.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();
+    compression::from_tar_gz(&dumps_dir.join(&format!("{}.tar.gz", uid)), tmp_dir_path).unwrap();

     let file = File::open(tmp_dir_path.join("test").join("updates.jsonl")).unwrap();
     let mut updates = read_all_jsonline(file);