Merge branch 'main' into fix-3037

jiangbo212 2022-12-13 05:16:03 +08:00
commit 7c24fea9f2
99 changed files with 144 additions and 114 deletions

meilisearch/Cargo.toml (new file)

@@ -0,0 +1,119 @@
[package]
authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
description = "Meilisearch HTTP server"
edition = "2021"
license = "MIT"
name = "meilisearch"
version = "0.30.1"

[dependencies]
actix-cors = "0.6.3"
actix-http = { version = "3.2.2", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls"] }
actix-web = { version = "4.2.1", default-features = false, features = ["macros", "compress-brotli", "compress-gzip", "cookies", "rustls"] }
actix-web-static-files = { git = "https://github.com/kilork/actix-web-static-files.git", rev = "2d3b6160", optional = true }
anyhow = { version = "1.0.65", features = ["backtrace"] }
async-stream = "0.3.3"
async-trait = "0.1.57"
bstr = "1.0.1"
byte-unit = { version = "4.0.14", default-features = false, features = ["std", "serde"] }
bytes = "1.2.1"
clap = { version = "4.0.9", features = ["derive", "env"] }
crossbeam-channel = "0.5.6"
dump = { path = "../dump" }
either = "1.8.0"
env_logger = "0.9.1"
file-store = { path = "../file-store" }
flate2 = "1.0.24"
fst = "0.4.7"
futures = "0.3.24"
futures-util = "0.3.24"
http = "0.2.8"
index-scheduler = { path = "../index-scheduler" }
indexmap = { version = "1.9.1", features = ["serde-1"] }
itertools = "0.10.5"
jsonwebtoken = "8.1.1"
lazy_static = "1.4.0"
log = "0.4.17"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
mimalloc = { version = "0.1.29", default-features = false }
mime = "0.3.16"
num_cpus = "1.13.1"
obkv = "0.2.0"
once_cell = "1.15.0"
parking_lot = "0.12.1"
permissive-json-pointer = { path = "../permissive-json-pointer" }
pin-project-lite = "0.2.9"
platform-dirs = "0.3.0"
prometheus = { version = "0.13.2", features = ["process"], optional = true }
rand = "0.8.5"
rayon = "1.5.3"
regex = "1.6.0"
reqwest = { version = "0.11.12", features = ["rustls-tls", "json"], default-features = false }
rustls = "0.20.6"
rustls-pemfile = "1.0.1"
segment = { version = "0.2.1", optional = true }
serde = { version = "1.0.145", features = ["derive"] }
serde-cs = "0.2.4"
serde_json = { version = "1.0.85", features = ["preserve_order"] }
sha2 = "0.10.6"
siphasher = "0.3.10"
slice-group-by = "0.3.0"
static-files = { version = "0.2.3", optional = true }
sysinfo = "0.26.4"
tar = "0.4.38"
tempfile = "3.3.0"
thiserror = "1.0.37"
time = { version = "0.3.15", features = ["serde-well-known", "formatting", "parsing", "macros"] }
tokio = { version = "1.21.2", features = ["full"] }
tokio-stream = "0.1.10"
toml = "0.5.9"
uuid = { version = "1.1.2", features = ["serde", "v4"] }
walkdir = "2.3.2"
yaup = "0.2.0"

[dev-dependencies]
actix-rt = "2.7.0"
assert-json-diff = "2.0.2"
brotli = "3.3.4"
manifest-dir-macros = "0.1.16"
maplit = "1.0.2"
meili-snap = {path = "../meili-snap"}
temp-env = "0.3.1"
urlencoding = "2.1.2"
yaup = "0.2.1"

[build-dependencies]
anyhow = { version = "1.0.65", optional = true }
cargo_toml = { version = "0.13.0", optional = true }
hex = { version = "0.4.3", optional = true }
reqwest = { version = "0.11.12", features = ["blocking", "rustls-tls"], default-features = false, optional = true }
sha-1 = { version = "0.10.0", optional = true }
static-files = { version = "0.2.3", optional = true }
tempfile = { version = "3.3.0", optional = true }
vergen = { version = "7.4.2", default-features = false, features = ["git"] }
zip = { version = "0.6.2", optional = true }

[features]
default = ["analytics", "meilisearch-types/default", "mini-dashboard"]
metrics = ["prometheus"]
analytics = ["segment"]
mini-dashboard = [
"actix-web-static-files",
"static-files",
"anyhow",
"cargo_toml",
"hex",
"reqwest",
"sha-1",
"tempfile",
"zip",
]
chinese = ["meilisearch-types/chinese"]
hebrew = ["meilisearch-types/hebrew"]
japanese = ["meilisearch-types/japanese"]
thai = ["meilisearch-types/thai"]

[package.metadata.mini-dashboard]
assets-url = "https://github.com/meilisearch/mini-dashboard/releases/download/v0.2.4/build.zip"
sha1 = "b53c2edb51d4ce1984d5586333b91c4ad3a1b4e4"
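Each feature above simply toggles the matching `optional = true` dependencies. A minimal, hypothetical sketch of what such a gate looks like on the Rust side (the `metrics_summary` function is illustrative and not part of this diff; only `prometheus::Registry` is a real API):

#[cfg(feature = "metrics")]
fn metrics_summary() -> String {
    // `prometheus` is only compiled in when the `metrics` feature is enabled.
    let registry = prometheus::Registry::new();
    format!("{} metric families registered", registry.gather().len())
}
#[cfg(not(feature = "metrics"))]
fn metrics_summary() -> String {
    String::from("metrics feature disabled")
}
fn main() {
    println!("{}", metrics_summary());
}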

meilisearch/build.rs (new file)

@@ -0,0 +1,83 @@
use vergen::{vergen, Config};
fn main() {
if let Err(e) = vergen(Config::default()) {
println!("cargo:warning=vergen: {}", e);
}
#[cfg(feature = "mini-dashboard")]
mini_dashboard::setup_mini_dashboard().expect("Could not load the mini-dashboard assets");
}
#[cfg(feature = "mini-dashboard")]
mod mini_dashboard {
use std::env;
use std::fs::{create_dir_all, File, OpenOptions};
use std::io::{Cursor, Read, Write};
use std::path::PathBuf;
use anyhow::Context;
use cargo_toml::Manifest;
use reqwest::blocking::get;
use sha1::{Digest, Sha1};
use static_files::resource_dir;
pub fn setup_mini_dashboard() -> anyhow::Result<()> {
let cargo_manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
let cargo_toml = cargo_manifest_dir.join("Cargo.toml");
let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
let sha1_path = out_dir.join(".mini-dashboard.sha1");
let dashboard_dir = out_dir.join("mini-dashboard");
let manifest = Manifest::from_path(cargo_toml).unwrap();
let meta = &manifest
.package
.as_ref()
.context("package not specified in Cargo.toml")?
.metadata
.as_ref()
.context("no metadata specified in Cargo.toml")?["mini-dashboard"];
// Check if there already is a dashboard built, and if it is up to date.
if sha1_path.exists() && dashboard_dir.exists() {
let mut sha1_file = File::open(&sha1_path)?;
let mut sha1 = String::new();
sha1_file.read_to_string(&mut sha1)?;
if sha1 == meta["sha1"].as_str().unwrap() {
// Nothing to do.
return Ok(());
}
}
let url = meta["assets-url"].as_str().unwrap();
let dashboard_assets_bytes = get(url)?.bytes()?;
let mut hasher = Sha1::new();
hasher.update(&dashboard_assets_bytes);
let sha1 = hex::encode(hasher.finalize());
assert_eq!(
meta["sha1"].as_str().unwrap(),
sha1,
"Downloaded mini-dashboard shasum differs from the one specified in the Cargo.toml"
);
create_dir_all(&dashboard_dir)?;
let cursor = Cursor::new(&dashboard_assets_bytes);
let mut zip = zip::read::ZipArchive::new(cursor)?;
zip.extract(&dashboard_dir)?;
resource_dir(&dashboard_dir).build()?;
// Write the sha1 for the dashboard back to file.
let mut file =
OpenOptions::new().write(true).create(true).truncate(true).open(sha1_path)?;
file.write_all(sha1.as_bytes())?;
file.flush()?;
Ok(())
}
}
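For context, a minimal sketch of how the artifacts produced by `resource_dir(&dashboard_dir).build()` are typically consumed at runtime with `actix-web-static-files` (assuming the crate's default `generated.rs` output and `generate()` function; this wiring is illustrative and not part of this diff):

use actix_web_static_files::ResourceFiles;
// The build script wrote a `generated.rs` into OUT_DIR; including it brings a
// `generate()` function mapping asset paths to embedded file contents.
include!(concat!(env!("OUT_DIR"), "/generated.rs"));
fn dashboard_files() -> ResourceFiles {
    // Serve the embedded mini-dashboard from the scope root.
    ResourceFiles::new("/", generate())
}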

meilisearch/src/analytics/mock_analytics.rs (new file)

@@ -0,0 +1,63 @@
use std::any::Any;
use std::sync::Arc;
use actix_web::HttpRequest;
use meilisearch_types::InstanceUid;
use serde_json::Value;
use super::{find_user_id, Analytics, DocumentDeletionKind};
use crate::routes::indexes::documents::UpdateDocumentsQuery;
use crate::routes::tasks::TasksFilterQueryRaw;
use crate::Opt;
pub struct MockAnalytics {
instance_uid: Option<InstanceUid>,
}
#[derive(Default)]
pub struct SearchAggregator;
#[allow(dead_code)]
impl SearchAggregator {
pub fn from_query(_: &dyn Any, _: &dyn Any) -> Self {
Self::default()
}
pub fn succeed(&mut self, _: &dyn Any) {}
}
impl MockAnalytics {
#[allow(clippy::new_ret_no_self)]
pub fn new(opt: &Opt) -> Arc<dyn Analytics> {
let instance_uid = find_user_id(&opt.db_path);
Arc::new(Self { instance_uid })
}
}
impl Analytics for MockAnalytics {
fn instance_uid(&self) -> Option<&meilisearch_types::InstanceUid> {
self.instance_uid.as_ref()
}
// These methods are no-ops and should be optimized out
fn publish(&self, _event_name: String, _send: Value, _request: Option<&HttpRequest>) {}
fn get_search(&self, _aggregate: super::SearchAggregator) {}
fn post_search(&self, _aggregate: super::SearchAggregator) {}
fn add_documents(
&self,
_documents_query: &UpdateDocumentsQuery,
_index_creation: bool,
_request: &HttpRequest,
) {
}
fn delete_documents(&self, _kind: DocumentDeletionKind, _request: &HttpRequest) {}
fn update_documents(
&self,
_documents_query: &UpdateDocumentsQuery,
_index_creation: bool,
_request: &HttpRequest,
) {
}
fn get_tasks(&self, _query: &TasksFilterQueryRaw, _request: &HttpRequest) {}
fn health_seen(&self, _request: &HttpRequest) {}
}
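The `#[allow(clippy::new_ret_no_self)]` constructor above returns `Arc<dyn Analytics>` instead of `Self`, so callers never name the concrete type. A self-contained sketch of that pattern (all names here are illustrative):

use std::sync::Arc;
trait Analytics: Send + Sync {
    fn publish(&self, event: &str);
}
struct Mock;
impl Analytics for Mock {
    // No-op in the mock; release builds can optimize these calls away.
    fn publish(&self, _event: &str) {}
}
impl Mock {
    // Returning a trait object keeps callers independent of whether the mock
    // or the real, Segment-backed implementation is behind it.
    #[allow(clippy::new_ret_no_self)]
    fn new() -> Arc<dyn Analytics> {
        Arc::new(Mock)
    }
}
fn main() {
    let analytics = Mock::new();
    analytics.publish("Health Seen");
}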

meilisearch/src/analytics/mod.rs (new file)

@@ -0,0 +1,101 @@
mod mock_analytics;
// if we are in release mode and the analytics feature is enabled
#[cfg(all(not(debug_assertions), feature = "analytics"))]
mod segment_analytics;
use std::fs;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use actix_web::HttpRequest;
use meilisearch_types::InstanceUid;
pub use mock_analytics::MockAnalytics;
use once_cell::sync::Lazy;
use platform_dirs::AppDirs;
use serde_json::Value;
use crate::routes::indexes::documents::UpdateDocumentsQuery;
use crate::routes::tasks::TasksFilterQueryRaw;
// if we are in debug mode OR the analytics feature is disabled,
// the `SegmentAnalytics` type points to the mock instead of the real analytics
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SegmentAnalytics = mock_analytics::MockAnalytics;
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SearchAggregator = mock_analytics::SearchAggregator;
// if we are in release mode and the analytics feature is enabled,
// we use the real analytics
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SegmentAnalytics = segment_analytics::SegmentAnalytics;
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SearchAggregator = segment_analytics::SearchAggregator;
/// The Meilisearch config dir:
/// `~/.config/Meilisearch` on *NIX or *BSD.
/// `~/Library/Application Support/Meilisearch` on macOS.
/// `%APPDATA%` (= `C:\Users\%USERNAME%\AppData\Roaming`) on Windows.
static MEILISEARCH_CONFIG_PATH: Lazy<Option<PathBuf>> =
Lazy::new(|| AppDirs::new(Some("Meilisearch"), false).map(|appdir| appdir.config_dir));
fn config_user_id_path(db_path: &Path) -> Option<PathBuf> {
db_path
.canonicalize()
.ok()
.map(|path| path.join("instance-uid").display().to_string().replace('/', "-"))
.zip(MEILISEARCH_CONFIG_PATH.as_ref())
.map(|(filename, config_path)| config_path.join(filename.trim_start_matches('-')))
}
/// Look for the instance-uid in the `data.ms` or in `~/.config/Meilisearch/path-to-db-instance-uid`
fn find_user_id(db_path: &Path) -> Option<InstanceUid> {
fs::read_to_string(db_path.join("instance-uid"))
.ok()
.or_else(|| fs::read_to_string(&config_user_id_path(db_path)?).ok())
.and_then(|uid| InstanceUid::from_str(&uid).ok())
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DocumentDeletionKind {
PerDocumentId,
ClearAll,
PerBatch,
}
pub trait Analytics: Sync + Send {
fn instance_uid(&self) -> Option<&InstanceUid>;
/// The method used to publish most analytics that do not need to be batched every hour
fn publish(&self, event_name: String, send: Value, request: Option<&HttpRequest>);
/// This method should be called to aggregate a GET search request
fn get_search(&self, aggregate: SearchAggregator);
/// This method should be called to aggregate a POST search request
fn post_search(&self, aggregate: SearchAggregator);
// this method should be called to aggregate an add documents request
fn add_documents(
&self,
documents_query: &UpdateDocumentsQuery,
index_creation: bool,
request: &HttpRequest,
);
// this method should be called to aggregate a delete documents request
fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest);
// this method should be called to batch an update documents request
fn update_documents(
&self,
documents_query: &UpdateDocumentsQuery,
index_creation: bool,
request: &HttpRequest,
);
// this method should be called to aggregate the get tasks requests.
fn get_tasks(&self, query: &TasksFilterQueryRaw, request: &HttpRequest);
// this method should be called to aggregate a health check request
fn health_seen(&self, request: &HttpRequest);
}
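A minimal sketch of the `cfg`-based switch used above, with stub modules standing in for `mock_analytics` and `segment_analytics` (illustrative only; in a build without the `analytics` feature the alias resolves to the mock):

mod mock {
    pub struct Analytics;
    impl Analytics {
        pub fn new() -> Self {
            Analytics
        }
    }
}
mod segment {
    pub struct Analytics;
    impl Analytics {
        pub fn new() -> Self {
            Analytics
        }
    }
}
// Debug builds, or builds without the feature, get the mock...
#[cfg(any(debug_assertions, not(feature = "analytics")))]
pub type SelectedAnalytics = mock::Analytics;
// ...release builds with the feature get the real implementation.
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub type SelectedAnalytics = segment::Analytics;
fn main() {
    let _analytics = SelectedAnalytics::new();
}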

meilisearch/src/analytics/segment_analytics.rs (new file)

@@ -0,0 +1,991 @@
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant};
use actix_web::http::header::USER_AGENT;
use actix_web::HttpRequest;
use byte_unit::Byte;
use http::header::CONTENT_TYPE;
use index_scheduler::IndexScheduler;
use meilisearch_auth::SearchRules;
use meilisearch_types::InstanceUid;
use once_cell::sync::Lazy;
use regex::Regex;
use segment::message::{Identify, Track, User};
use segment::{AutoBatcher, Batcher, HttpClient};
use serde::Serialize;
use serde_json::{json, Value};
use sysinfo::{DiskExt, System, SystemExt};
use time::OffsetDateTime;
use tokio::select;
use tokio::sync::mpsc::{self, Receiver, Sender};
use uuid::Uuid;
use super::{config_user_id_path, DocumentDeletionKind, MEILISEARCH_CONFIG_PATH};
use crate::analytics::Analytics;
use crate::option::{default_http_addr, IndexerOpts, MaxMemory, MaxThreads, SchedulerConfig};
use crate::routes::indexes::documents::UpdateDocumentsQuery;
use crate::routes::tasks::TasksFilterQueryRaw;
use crate::routes::{create_all_stats, Stats};
use crate::search::{
SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
};
use crate::Opt;
const ANALYTICS_HEADER: &str = "X-Meilisearch-Client";
/// Write the instance-uid in the `data.ms` and in `~/.config/Meilisearch/path-to-db-instance-uid`. Ignore the errors.
fn write_user_id(db_path: &Path, user_id: &InstanceUid) {
let _ = fs::write(db_path.join("instance-uid"), user_id.to_string());
if let Some((meilisearch_config_path, user_id_path)) =
MEILISEARCH_CONFIG_PATH.as_ref().zip(config_user_id_path(db_path))
{
let _ = fs::create_dir_all(&meilisearch_config_path);
let _ = fs::write(user_id_path, user_id.to_string());
}
}
const SEGMENT_API_KEY: &str = "P3FWhhEsJiEDCuEHpmcN9DHcK4hVfBvb";
pub fn extract_user_agents(request: &HttpRequest) -> Vec<String> {
request
.headers()
.get(ANALYTICS_HEADER)
.or_else(|| request.headers().get(USER_AGENT))
.map(|header| header.to_str().ok())
.flatten()
.unwrap_or("unknown")
.split(';')
.map(str::trim)
.map(ToString::to_string)
.collect()
}
pub enum AnalyticsMsg {
BatchMessage(Track),
AggregateGetSearch(SearchAggregator),
AggregatePostSearch(SearchAggregator),
AggregateAddDocuments(DocumentsAggregator),
AggregateDeleteDocuments(DocumentsDeletionAggregator),
AggregateUpdateDocuments(DocumentsAggregator),
AggregateTasks(TasksAggregator),
AggregateHealth(HealthAggregator),
}
pub struct SegmentAnalytics {
instance_uid: InstanceUid,
sender: Sender<AnalyticsMsg>,
user: User,
}
impl SegmentAnalytics {
pub async fn new(opt: &Opt, index_scheduler: Arc<IndexScheduler>) -> Arc<dyn Analytics> {
let instance_uid = super::find_user_id(&opt.db_path);
let first_time_run = instance_uid.is_none();
let instance_uid = instance_uid.unwrap_or_else(|| Uuid::new_v4());
write_user_id(&opt.db_path, &instance_uid);
let client = reqwest::Client::builder().connect_timeout(Duration::from_secs(10)).build();
// if reqwest fails to build the client we won't be able to send analytics
if client.is_err() {
return super::MockAnalytics::new(opt);
}
let client =
HttpClient::new(client.unwrap(), "https://telemetry.meilisearch.com".to_string());
let user = User::UserId { user_id: instance_uid.to_string() };
let mut batcher = AutoBatcher::new(client, Batcher::new(None), SEGMENT_API_KEY.to_string());
// If Meilisearch is launched for the first time:
// 1. Send an event `Launched` associated with the user `total_launch`.
// 2. Batch an event `Launched` with the real instance-id and send it in one hour.
if first_time_run {
let _ = batcher
.push(Track {
user: User::UserId { user_id: "total_launch".to_string() },
event: "Launched".to_string(),
..Default::default()
})
.await;
let _ = batcher.flush().await;
let _ = batcher
.push(Track {
user: user.clone(),
event: "Launched".to_string(),
..Default::default()
})
.await;
}
let (sender, inbox) = mpsc::channel(100); // how many analytics events we can buffer
let segment = Box::new(Segment {
inbox,
user: user.clone(),
opt: opt.clone(),
batcher,
post_search_aggregator: SearchAggregator::default(),
get_search_aggregator: SearchAggregator::default(),
add_documents_aggregator: DocumentsAggregator::default(),
delete_documents_aggregator: DocumentsDeletionAggregator::default(),
update_documents_aggregator: DocumentsAggregator::default(),
get_tasks_aggregator: TasksAggregator::default(),
health_aggregator: HealthAggregator::default(),
});
tokio::spawn(segment.run(index_scheduler.clone()));
let this = Self { instance_uid, sender, user: user.clone() };
Arc::new(this)
}
}
impl super::Analytics for SegmentAnalytics {
fn instance_uid(&self) -> Option<&InstanceUid> {
Some(&self.instance_uid)
}
fn publish(&self, event_name: String, mut send: Value, request: Option<&HttpRequest>) {
let user_agent = request.map(|req| extract_user_agents(req));
send["user-agent"] = json!(user_agent);
let event = Track {
user: self.user.clone(),
event: event_name.clone(),
properties: send,
..Default::default()
};
let _ = self.sender.try_send(AnalyticsMsg::BatchMessage(event.into()));
}
fn get_search(&self, aggregate: SearchAggregator) {
let _ = self.sender.try_send(AnalyticsMsg::AggregateGetSearch(aggregate));
}
fn post_search(&self, aggregate: SearchAggregator) {
let _ = self.sender.try_send(AnalyticsMsg::AggregatePostSearch(aggregate));
}
fn add_documents(
&self,
documents_query: &UpdateDocumentsQuery,
index_creation: bool,
request: &HttpRequest,
) {
let aggregate = DocumentsAggregator::from_query(documents_query, index_creation, request);
let _ = self.sender.try_send(AnalyticsMsg::AggregateAddDocuments(aggregate));
}
fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest) {
let aggregate = DocumentsDeletionAggregator::from_query(kind, request);
let _ = self.sender.try_send(AnalyticsMsg::AggregateDeleteDocuments(aggregate));
}
fn update_documents(
&self,
documents_query: &UpdateDocumentsQuery,
index_creation: bool,
request: &HttpRequest,
) {
let aggregate = DocumentsAggregator::from_query(documents_query, index_creation, request);
let _ = self.sender.try_send(AnalyticsMsg::AggregateUpdateDocuments(aggregate));
}
fn get_tasks(&self, query: &TasksFilterQueryRaw, request: &HttpRequest) {
let aggregate = TasksAggregator::from_query(query, request);
let _ = self.sender.try_send(AnalyticsMsg::AggregateTasks(aggregate));
}
fn health_seen(&self, request: &HttpRequest) {
let aggregate = HealthAggregator::from_query(request);
let _ = self.sender.try_send(AnalyticsMsg::AggregateHealth(aggregate));
}
}
/// This structure represents the `infos` field we send in the analytics.
/// It's quite close to the `Opt` structure except all sensitive information
/// has been reduced to a boolean.
/// It's sent as-is to Amplitude, so you should never rename a field of this
/// struct without the approval of the PM.
#[derive(Debug, Clone, Serialize)]
struct Infos {
env: String,
db_path: bool,
import_dump: bool,
dump_dir: bool,
ignore_missing_dump: bool,
ignore_dump_if_db_exists: bool,
import_snapshot: bool,
schedule_snapshot: bool,
snapshot_dir: bool,
snapshot_interval_sec: u64,
ignore_missing_snapshot: bool,
ignore_snapshot_if_db_exists: bool,
http_addr: bool,
max_index_size: Byte,
max_task_db_size: Byte,
http_payload_size_limit: Byte,
disable_auto_batching: bool,
log_level: String,
max_indexing_memory: MaxMemory,
max_indexing_threads: MaxThreads,
with_configuration_file: bool,
ssl_auth_path: bool,
ssl_cert_path: bool,
ssl_key_path: bool,
ssl_ocsp_path: bool,
ssl_require_auth: bool,
ssl_resumption: bool,
ssl_tickets: bool,
}
impl From<Opt> for Infos {
fn from(options: Opt) -> Self {
// We want to destructure the whole struct by hand here to be sure we don't
// forget to add analytics when a new field is added to `Opt`.
// That is why we must not use `..` at the end.
let Opt {
db_path,
http_addr,
master_key: _,
env,
max_index_size,
max_task_db_size,
http_payload_size_limit,
ssl_cert_path,
ssl_key_path,
ssl_auth_path,
ssl_ocsp_path,
ssl_require_auth,
ssl_resumption,
ssl_tickets,
import_snapshot,
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,
snapshot_dir,
schedule_snapshot,
snapshot_interval_sec,
import_dump,
ignore_missing_dump,
ignore_dump_if_db_exists,
dump_dir,
log_level,
indexer_options,
scheduler_options,
config_file_path,
#[cfg(all(not(debug_assertions), feature = "analytics"))]
no_analytics: _,
} = options;
let SchedulerConfig { disable_auto_batching } = scheduler_options;
let IndexerOpts {
log_every_n: _,
max_nb_chunks: _,
max_indexing_memory,
max_indexing_threads,
} = indexer_options;
// We're going to override every sensitive piece of information.
// We consider information sensitive if it contains a path, an address, or a key.
Self {
env,
db_path: db_path != PathBuf::from("./data.ms"),
import_dump: import_dump.is_some(),
dump_dir: dump_dir != PathBuf::from("dumps/"),
ignore_missing_dump,
ignore_dump_if_db_exists,
import_snapshot: import_snapshot.is_some(),
schedule_snapshot,
snapshot_dir: snapshot_dir != PathBuf::from("snapshots/"),
snapshot_interval_sec,
ignore_missing_snapshot,
ignore_snapshot_if_db_exists,
http_addr: http_addr != default_http_addr(),
max_index_size,
max_task_db_size,
http_payload_size_limit,
disable_auto_batching,
log_level,
max_indexing_memory,
max_indexing_threads,
with_configuration_file: config_file_path.is_some(),
ssl_auth_path: ssl_auth_path.is_some(),
ssl_cert_path: ssl_cert_path.is_some(),
ssl_key_path: ssl_key_path.is_some(),
ssl_ocsp_path: ssl_ocsp_path.is_some(),
ssl_require_auth,
ssl_resumption,
ssl_tickets,
}
}
}
pub struct Segment {
inbox: Receiver<AnalyticsMsg>,
user: User,
opt: Opt,
batcher: AutoBatcher,
get_search_aggregator: SearchAggregator,
post_search_aggregator: SearchAggregator,
add_documents_aggregator: DocumentsAggregator,
delete_documents_aggregator: DocumentsDeletionAggregator,
update_documents_aggregator: DocumentsAggregator,
get_tasks_aggregator: TasksAggregator,
health_aggregator: HealthAggregator,
}
impl Segment {
fn compute_traits(opt: &Opt, stats: Stats) -> Value {
static FIRST_START_TIMESTAMP: Lazy<Instant> = Lazy::new(Instant::now);
static SYSTEM: Lazy<Value> = Lazy::new(|| {
let mut sys = System::new_all();
sys.refresh_all();
let kernel_version = sys
.kernel_version()
.map(|k| k.split_once("-").map(|(k, _)| k.to_string()))
.flatten();
json!({
"distribution": sys.name(),
"kernel_version": kernel_version,
"cores": sys.cpus().len(),
"ram_size": sys.total_memory(),
"disk_size": sys.disks().iter().map(|disk| disk.total_space()).max(),
"server_provider": std::env::var("MEILI_SERVER_PROVIDER").ok(),
})
});
let number_of_documents =
stats.indexes.values().map(|index| index.number_of_documents).collect::<Vec<u64>>();
json!({
"start_since_days": FIRST_START_TIMESTAMP.elapsed().as_secs() / (60 * 60 * 24), // one day
"system": *SYSTEM,
"stats": {
"database_size": stats.database_size,
"indexes_number": stats.indexes.len(),
"documents_number": number_of_documents,
},
"infos": Infos::from(opt.clone()),
})
}
async fn run(mut self, index_scheduler: Arc<IndexScheduler>) {
const INTERVAL: Duration = Duration::from_secs(60 * 60); // one hour
// The first batch must be sent after one hour.
let mut interval =
tokio::time::interval_at(tokio::time::Instant::now() + INTERVAL, INTERVAL);
loop {
select! {
_ = interval.tick() => {
self.tick(index_scheduler.clone()).await;
},
msg = self.inbox.recv() => {
match msg {
Some(AnalyticsMsg::BatchMessage(msg)) => drop(self.batcher.push(msg).await),
Some(AnalyticsMsg::AggregateGetSearch(agreg)) => self.get_search_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregatePostSearch(agreg)) => self.post_search_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregateTasks(agreg)) => self.get_tasks_aggregator.aggregate(agreg),
Some(AnalyticsMsg::AggregateHealth(agreg)) => self.health_aggregator.aggregate(agreg),
None => (),
}
}
}
}
}
async fn tick(&mut self, index_scheduler: Arc<IndexScheduler>) {
if let Ok(stats) = create_all_stats(index_scheduler.into(), &SearchRules::default()) {
let _ = self
.batcher
.push(Identify {
context: Some(json!({
"app": {
"version": env!("CARGO_PKG_VERSION").to_string(),
},
})),
user: self.user.clone(),
traits: Self::compute_traits(&self.opt, stats),
..Default::default()
})
.await;
}
let get_search = std::mem::take(&mut self.get_search_aggregator)
.into_event(&self.user, "Documents Searched GET");
let post_search = std::mem::take(&mut self.post_search_aggregator)
.into_event(&self.user, "Documents Searched POST");
let add_documents = std::mem::take(&mut self.add_documents_aggregator)
.into_event(&self.user, "Documents Added");
let delete_documents = std::mem::take(&mut self.delete_documents_aggregator)
.into_event(&self.user, "Documents Deleted");
let update_documents = std::mem::take(&mut self.update_documents_aggregator)
.into_event(&self.user, "Documents Updated");
let get_tasks =
std::mem::take(&mut self.get_tasks_aggregator).into_event(&self.user, "Tasks Seen");
let health =
std::mem::take(&mut self.health_aggregator).into_event(&self.user, "Health Seen");
if let Some(get_search) = get_search {
let _ = self.batcher.push(get_search).await;
}
if let Some(post_search) = post_search {
let _ = self.batcher.push(post_search).await;
}
if let Some(add_documents) = add_documents {
let _ = self.batcher.push(add_documents).await;
}
if let Some(delete_documents) = delete_documents {
let _ = self.batcher.push(delete_documents).await;
}
if let Some(update_documents) = update_documents {
let _ = self.batcher.push(update_documents).await;
}
if let Some(get_tasks) = get_tasks {
let _ = self.batcher.push(get_tasks).await;
}
if let Some(health) = health {
let _ = self.batcher.push(health).await;
}
let _ = self.batcher.flush().await;
}
}
#[derive(Default)]
pub struct SearchAggregator {
timestamp: Option<OffsetDateTime>,
// context
user_agents: HashSet<String>,
// requests
total_received: usize,
total_succeeded: usize,
time_spent: BinaryHeap<usize>,
// sort
sort_with_geo_point: bool,
// every time a request has a sort, this field must be incremented by the number of terms it contains
sort_sum_of_criteria_terms: usize,
// every time a request has a sort, this field must be incremented by one
sort_total_number_of_criteria: usize,
// filter
filter_with_geo_radius: bool,
// every time a request has a filter, this field must be incremented by the number of terms it contains
filter_sum_of_criteria_terms: usize,
// every time a request has a filter, this field must be incremented by one
filter_total_number_of_criteria: usize,
used_syntax: HashMap<String, usize>,
// q
// The maximum number of terms in a q request
max_terms_number: usize,
// every time a search is done, we increment the counter linked to the used settings
matching_strategy: HashMap<String, usize>,
// pagination
max_limit: usize,
max_offset: usize,
finite_pagination: usize,
// formatting
max_attributes_to_retrieve: usize,
max_attributes_to_highlight: usize,
highlight_pre_tag: bool,
highlight_post_tag: bool,
max_attributes_to_crop: usize,
crop_marker: bool,
show_matches_position: bool,
crop_length: bool,
// facets
facets_sum_of_terms: usize,
facets_total_number_of_facets: usize,
}
impl SearchAggregator {
pub fn from_query(query: &SearchQuery, request: &HttpRequest) -> Self {
let mut ret = Self::default();
ret.timestamp = Some(OffsetDateTime::now_utc());
ret.total_received = 1;
ret.user_agents = extract_user_agents(request).into_iter().collect();
if let Some(ref sort) = query.sort {
ret.sort_total_number_of_criteria = 1;
ret.sort_with_geo_point = sort.iter().any(|s| s.contains("_geoPoint("));
ret.sort_sum_of_criteria_terms = sort.len();
}
if let Some(ref filter) = query.filter {
static RE: Lazy<Regex> = Lazy::new(|| Regex::new("AND | OR").unwrap());
ret.filter_total_number_of_criteria = 1;
let syntax = match filter {
Value::String(_) => "string".to_string(),
Value::Array(values) => {
if values.iter().map(|v| v.to_string()).any(|s| RE.is_match(&s)) {
"mixed".to_string()
} else {
"array".to_string()
}
}
_ => "none".to_string(),
};
// record which filter syntax was used for this request
ret.used_syntax.insert(syntax, 1);
let stringified_filters = filter.to_string();
ret.filter_with_geo_radius = stringified_filters.contains("_geoRadius(");
ret.filter_sum_of_criteria_terms = RE.split(&stringified_filters).count();
}
if let Some(ref q) = query.q {
ret.max_terms_number = q.split_whitespace().count();
}
if query.is_finite_pagination() {
let limit = query.hits_per_page.unwrap_or_else(DEFAULT_SEARCH_LIMIT);
ret.max_limit = limit;
ret.max_offset = query.page.unwrap_or(1).saturating_sub(1) * limit;
ret.finite_pagination = 1;
} else {
ret.max_limit = query.limit;
ret.max_offset = query.offset;
ret.finite_pagination = 0;
}
ret.matching_strategy.insert(format!("{:?}", query.matching_strategy), 1);
ret.highlight_pre_tag = query.highlight_pre_tag != DEFAULT_HIGHLIGHT_PRE_TAG();
ret.highlight_post_tag = query.highlight_post_tag != DEFAULT_HIGHLIGHT_POST_TAG();
ret.crop_marker = query.crop_marker != DEFAULT_CROP_MARKER();
ret.crop_length = query.crop_length != DEFAULT_CROP_LENGTH();
ret.show_matches_position = query.show_matches_position;
ret
}
pub fn succeed(&mut self, result: &SearchResult) {
self.total_succeeded = self.total_succeeded.saturating_add(1);
self.time_spent.push(result.processing_time_ms as usize);
}
/// Aggregate one [SearchAggregator] into another.
pub fn aggregate(&mut self, mut other: Self) {
if self.timestamp.is_none() {
self.timestamp = other.timestamp;
}
// context
for user_agent in other.user_agents.into_iter() {
self.user_agents.insert(user_agent);
}
// request
self.total_received = self.total_received.saturating_add(other.total_received);
self.total_succeeded = self.total_succeeded.saturating_add(other.total_succeeded);
self.time_spent.append(&mut other.time_spent);
// sort
self.sort_with_geo_point |= other.sort_with_geo_point;
self.sort_sum_of_criteria_terms =
self.sort_sum_of_criteria_terms.saturating_add(other.sort_sum_of_criteria_terms);
self.sort_total_number_of_criteria =
self.sort_total_number_of_criteria.saturating_add(other.sort_total_number_of_criteria);
// filter
self.filter_with_geo_radius |= other.filter_with_geo_radius;
self.filter_sum_of_criteria_terms =
self.filter_sum_of_criteria_terms.saturating_add(other.filter_sum_of_criteria_terms);
self.filter_total_number_of_criteria = self
.filter_total_number_of_criteria
.saturating_add(other.filter_total_number_of_criteria);
for (key, value) in other.used_syntax.into_iter() {
let used_syntax = self.used_syntax.entry(key).or_insert(0);
*used_syntax = used_syntax.saturating_add(value);
}
// q
self.max_terms_number = self.max_terms_number.max(other.max_terms_number);
// pagination
self.max_limit = self.max_limit.max(other.max_limit);
self.max_offset = self.max_offset.max(other.max_offset);
self.finite_pagination += other.finite_pagination;
// formatting
self.max_attributes_to_retrieve =
self.max_attributes_to_retrieve.max(other.max_attributes_to_retrieve);
self.max_attributes_to_highlight =
self.max_attributes_to_highlight.max(other.max_attributes_to_highlight);
self.highlight_pre_tag |= other.highlight_pre_tag;
self.highlight_post_tag |= other.highlight_post_tag;
self.max_attributes_to_crop = self.max_attributes_to_crop.max(other.max_attributes_to_crop);
self.crop_marker |= other.crop_marker;
self.show_matches_position |= other.show_matches_position;
self.crop_length |= other.crop_length;
// facets
self.facets_sum_of_terms =
self.facets_sum_of_terms.saturating_add(other.facets_sum_of_terms);
self.facets_total_number_of_facets =
self.facets_total_number_of_facets.saturating_add(other.facets_total_number_of_facets);
// matching strategy
for (key, value) in other.matching_strategy.into_iter() {
let matching_strategy = self.matching_strategy.entry(key).or_insert(0);
*matching_strategy = matching_strategy.saturating_add(value);
}
}
pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
if self.total_received == 0 {
None
} else {
// the index of the 99th percentile value
let percentile_99th = 0.99 * (self.total_succeeded as f64 - 1.) + 1.;
// we get all the values in a sorted manner
let time_spent = self.time_spent.into_sorted_vec();
// we are only interested in the slowest of the 99% fastest responses
let time_spent = time_spent.get(percentile_99th as usize);
let properties = json!({
"user-agent": self.user_agents,
"requests": {
"99th_response_time": time_spent.map(|t| format!("{:.2}", t)),
"total_succeeded": self.total_succeeded,
"total_failed": self.total_received.saturating_sub(self.total_succeeded), // just to be sure we never panics
"total_received": self.total_received,
},
"sort": {
"with_geoPoint": self.sort_with_geo_point,
"avg_criteria_number": format!("{:.2}", self.sort_sum_of_criteria_terms as f64 / self.sort_total_number_of_criteria as f64),
},
"filter": {
"with_geoRadius": self.filter_with_geo_radius,
"avg_criteria_number": format!("{:.2}", self.filter_sum_of_criteria_terms as f64 / self.filter_total_number_of_criteria as f64),
"most_used_syntax": self.used_syntax.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)),
},
"q": {
"max_terms_number": self.max_terms_number,
},
"pagination": {
"max_limit": self.max_limit,
"max_offset": self.max_offset,
"most_used_navigation": if self.finite_pagination > (self.total_received / 2) { "exhaustive" } else { "estimated" },
},
"formatting": {
"max_attributes_to_retrieve": self.max_attributes_to_retrieve,
"max_attributes_to_highlight": self.max_attributes_to_highlight,
"highlight_pre_tag": self.highlight_pre_tag,
"highlight_post_tag": self.highlight_post_tag,
"max_attributes_to_crop": self.max_attributes_to_crop,
"crop_marker": self.crop_marker,
"show_matches_position": self.show_matches_position,
"crop_length": self.crop_length,
},
"facets": {
"avg_facets_number": format!("{:.2}", self.facets_sum_of_terms as f64 / self.facets_total_number_of_facets as f64),
},
"matching_strategy": {
"most_used_strategy": self.matching_strategy.iter().max_by_key(|(_, v)| *v).map(|(k, _)| json!(k)).unwrap_or_else(|| json!(null)),
}
});
Some(Track {
timestamp: self.timestamp,
user: user.clone(),
event: event_name.to_string(),
properties,
..Default::default()
})
}
}
}
#[derive(Default)]
pub struct DocumentsAggregator {
timestamp: Option<OffsetDateTime>,
// set to true when at least one request was received
updated: bool,
// context
user_agents: HashSet<String>,
content_types: HashSet<String>,
primary_keys: HashSet<String>,
index_creation: bool,
}
impl DocumentsAggregator {
pub fn from_query(
documents_query: &UpdateDocumentsQuery,
index_creation: bool,
request: &HttpRequest,
) -> Self {
let mut ret = Self::default();
ret.timestamp = Some(OffsetDateTime::now_utc());
ret.updated = true;
ret.user_agents = extract_user_agents(request).into_iter().collect();
if let Some(primary_key) = documents_query.primary_key.clone() {
ret.primary_keys.insert(primary_key);
}
let content_type = request
.headers()
.get(CONTENT_TYPE)
.and_then(|s| s.to_str().ok())
.unwrap_or("unknown")
.to_string();
ret.content_types.insert(content_type);
ret.index_creation = index_creation;
ret
}
/// Aggregate one [DocumentsAggregator] into another.
pub fn aggregate(&mut self, other: Self) {
if self.timestamp.is_none() {
self.timestamp = other.timestamp;
}
self.updated |= other.updated;
// we can't create a union because there is no `into_union` method
for user_agent in other.user_agents {
self.user_agents.insert(user_agent);
}
for primary_key in other.primary_keys {
self.primary_keys.insert(primary_key);
}
for content_type in other.content_types {
self.content_types.insert(content_type);
}
self.index_creation |= other.index_creation;
}
pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
if !self.updated {
None
} else {
let properties = json!({
"user-agent": self.user_agents,
"payload_type": self.content_types,
"primary_key": self.primary_keys,
"index_creation": self.index_creation,
});
Some(Track {
timestamp: self.timestamp,
user: user.clone(),
event: event_name.to_string(),
properties,
..Default::default()
})
}
}
}
#[derive(Default, Serialize)]
pub struct DocumentsDeletionAggregator {
#[serde(skip)]
timestamp: Option<OffsetDateTime>,
// context
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,
total_received: usize,
per_document_id: bool,
clear_all: bool,
per_batch: bool,
}
impl DocumentsDeletionAggregator {
pub fn from_query(kind: DocumentDeletionKind, request: &HttpRequest) -> Self {
let mut ret = Self::default();
ret.timestamp = Some(OffsetDateTime::now_utc());
ret.user_agents = extract_user_agents(request).into_iter().collect();
ret.total_received = 1;
match kind {
DocumentDeletionKind::PerDocumentId => ret.per_document_id = true,
DocumentDeletionKind::ClearAll => ret.clear_all = true,
DocumentDeletionKind::PerBatch => ret.per_batch = true,
}
ret
}
/// Aggregate one [DocumentsDeletionAggregator] into another.
pub fn aggregate(&mut self, other: Self) {
if self.timestamp.is_none() {
self.timestamp = other.timestamp;
}
// we can't create a union because there is no `into_union` method
for user_agent in other.user_agents {
self.user_agents.insert(user_agent);
}
self.total_received = self.total_received.saturating_add(other.total_received);
self.per_document_id |= other.per_document_id;
self.clear_all |= other.clear_all;
self.per_batch |= other.per_batch;
}
pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
// if we had no timestamp it means we never encountered any events and
// thus we don't need to send this event.
let timestamp = self.timestamp?;
Some(Track {
timestamp: Some(timestamp),
user: user.clone(),
event: event_name.to_string(),
properties: serde_json::to_value(self).ok()?,
..Default::default()
})
}
}
#[derive(Default, Serialize)]
pub struct TasksAggregator {
#[serde(skip)]
timestamp: Option<OffsetDateTime>,
// context
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,
filtered_by_uid: bool,
filtered_by_index_uid: bool,
filtered_by_type: bool,
filtered_by_status: bool,
filtered_by_canceled_by: bool,
filtered_by_before_enqueued_at: bool,
filtered_by_after_enqueued_at: bool,
filtered_by_before_started_at: bool,
filtered_by_after_started_at: bool,
filtered_by_before_finished_at: bool,
filtered_by_after_finished_at: bool,
total_received: usize,
}
impl TasksAggregator {
pub fn from_query(query: &TasksFilterQueryRaw, request: &HttpRequest) -> Self {
Self {
timestamp: Some(OffsetDateTime::now_utc()),
user_agents: extract_user_agents(request).into_iter().collect(),
filtered_by_uid: query.common.uids.is_some(),
filtered_by_index_uid: query.common.index_uids.is_some(),
filtered_by_type: query.common.types.is_some(),
filtered_by_status: query.common.statuses.is_some(),
filtered_by_canceled_by: query.common.canceled_by.is_some(),
filtered_by_before_enqueued_at: query.dates.before_enqueued_at.is_some(),
filtered_by_after_enqueued_at: query.dates.after_enqueued_at.is_some(),
filtered_by_before_started_at: query.dates.before_started_at.is_some(),
filtered_by_after_started_at: query.dates.after_started_at.is_some(),
filtered_by_before_finished_at: query.dates.before_finished_at.is_some(),
filtered_by_after_finished_at: query.dates.after_finished_at.is_some(),
total_received: 1,
}
}
/// Aggregate one [TasksAggregator] into another.
pub fn aggregate(&mut self, other: Self) {
if self.timestamp.is_none() {
self.timestamp = other.timestamp;
}
// we can't create a union because there is no `into_union` method
for user_agent in other.user_agents {
self.user_agents.insert(user_agent);
}
self.filtered_by_uid |= other.filtered_by_uid;
self.filtered_by_index_uid |= other.filtered_by_index_uid;
self.filtered_by_type |= other.filtered_by_type;
self.filtered_by_status |= other.filtered_by_status;
self.filtered_by_canceled_by |= other.filtered_by_canceled_by;
self.filtered_by_before_enqueued_at |= other.filtered_by_before_enqueued_at;
self.filtered_by_after_enqueued_at |= other.filtered_by_after_enqueued_at;
self.filtered_by_before_started_at |= other.filtered_by_before_started_at;
self.filtered_by_after_started_at |= other.filtered_by_after_started_at;
self.filtered_by_before_finished_at |= other.filtered_by_before_finished_at;
self.filtered_by_after_finished_at |= other.filtered_by_after_finished_at;
self.total_received = self.total_received.saturating_add(other.total_received);
}
pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
// if we had no timestamp it means we never encountered any events and
// thus we don't need to send this event.
let timestamp = self.timestamp?;
Some(Track {
timestamp: Some(timestamp),
user: user.clone(),
event: event_name.to_string(),
properties: serde_json::to_value(self).ok()?,
..Default::default()
})
}
}
#[derive(Default, Serialize)]
pub struct HealthAggregator {
#[serde(skip)]
timestamp: Option<OffsetDateTime>,
// context
#[serde(rename = "user-agent")]
user_agents: HashSet<String>,
total_received: usize,
}
impl HealthAggregator {
pub fn from_query(request: &HttpRequest) -> Self {
let mut ret = Self::default();
ret.timestamp = Some(OffsetDateTime::now_utc());
ret.user_agents = extract_user_agents(request).into_iter().collect();
ret.total_received = 1;
ret
}
/// Aggregate one [HealthAggregator] into another.
pub fn aggregate(&mut self, other: Self) {
if self.timestamp.is_none() {
self.timestamp = other.timestamp;
}
// we can't create a union because there is no `into_union` method
for user_agent in other.user_agents {
self.user_agents.insert(user_agent);
}
self.total_received = self.total_received.saturating_add(other.total_received);
}
pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
// if we had no timestamp it means we never encountered any events and
// thus we don't need to send this event.
let timestamp = self.timestamp?;
Some(Track {
timestamp: Some(timestamp),
user: user.clone(),
event: event_name.to_string(),
properties: serde_json::to_value(self).ok()?,
..Default::default()
})
}
}
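The `let _ = sender.try_send(..)` calls above rely on a bounded channel so a slow analytics worker can never block a request. A runnable sketch of that trade-off (the message type and names are illustrative):

use tokio::sync::mpsc;
#[derive(Debug)]
enum Msg {
    Health,
}
#[tokio::main]
async fn main() {
    // Bounded channel: if the worker falls behind and the buffer is full,
    // `try_send` fails immediately and the event is dropped rather than
    // making the request path wait.
    let (tx, mut rx) = mpsc::channel::<Msg>(100);
    let _ = tx.try_send(Msg::Health);
    drop(tx); // close the channel so the receive loop below terminates
    while let Some(msg) = rx.recv().await {
        println!("aggregated: {msg:?}");
    }
}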

meilisearch/src/error.rs (new file)

@@ -0,0 +1,166 @@
use actix_web as aweb;
use aweb::error::{JsonPayloadError, QueryPayloadError};
use meilisearch_types::document_formats::{DocumentFormatError, PayloadType};
use meilisearch_types::error::{Code, ErrorCode, ResponseError};
use meilisearch_types::index_uid::IndexUidFormatError;
use serde_json::Value;
use tokio::task::JoinError;
#[derive(Debug, thiserror::Error)]
pub enum MeilisearchHttpError {
#[error("A Content-Type header is missing. Accepted values for the Content-Type header are: {}",
.0.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", "))]
MissingContentType(Vec<String>),
#[error(
"The Content-Type `{0}` is invalid. Accepted values for the Content-Type header are: {}",
.1.iter().map(|s| format!("`{}`", s)).collect::<Vec<_>>().join(", ")
)]
InvalidContentType(String, Vec<String>),
#[error("Document `{0}` not found.")]
DocumentNotFound(String),
#[error("Invalid syntax for the filter parameter: `expected {}, found: {1}`.", .0.join(", "))]
InvalidExpression(&'static [&'static str], Value),
#[error("A {0} payload is missing.")]
MissingPayload(PayloadType),
#[error("The provided payload reached the size limit.")]
PayloadTooLarge,
#[error("Two indexes must be given for each swap. The list `{:?}` contains {} indexes.",
.0, .0.len()
)]
SwapIndexPayloadWrongLength(Vec<String>),
#[error(transparent)]
IndexUid(#[from] IndexUidFormatError),
#[error(transparent)]
SerdeJson(#[from] serde_json::Error),
#[error(transparent)]
HeedError(#[from] meilisearch_types::heed::Error),
#[error(transparent)]
IndexScheduler(#[from] index_scheduler::Error),
#[error(transparent)]
Milli(#[from] meilisearch_types::milli::Error),
#[error(transparent)]
Payload(#[from] PayloadError),
#[error(transparent)]
FileStore(#[from] file_store::Error),
#[error(transparent)]
DocumentFormat(#[from] DocumentFormatError),
#[error(transparent)]
Join(#[from] JoinError),
}
impl ErrorCode for MeilisearchHttpError {
fn error_code(&self) -> Code {
match self {
MeilisearchHttpError::MissingContentType(_) => Code::MissingContentType,
MeilisearchHttpError::MissingPayload(_) => Code::MissingPayload,
MeilisearchHttpError::InvalidContentType(_, _) => Code::InvalidContentType,
MeilisearchHttpError::DocumentNotFound(_) => Code::DocumentNotFound,
MeilisearchHttpError::InvalidExpression(_, _) => Code::Filter,
MeilisearchHttpError::PayloadTooLarge => Code::PayloadTooLarge,
MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::BadRequest,
MeilisearchHttpError::IndexUid(e) => e.error_code(),
MeilisearchHttpError::SerdeJson(_) => Code::Internal,
MeilisearchHttpError::HeedError(_) => Code::Internal,
MeilisearchHttpError::IndexScheduler(e) => e.error_code(),
MeilisearchHttpError::Milli(e) => e.error_code(),
MeilisearchHttpError::Payload(e) => e.error_code(),
MeilisearchHttpError::FileStore(_) => Code::Internal,
MeilisearchHttpError::DocumentFormat(e) => e.error_code(),
MeilisearchHttpError::Join(_) => Code::Internal,
}
}
}
impl From<MeilisearchHttpError> for aweb::Error {
fn from(other: MeilisearchHttpError) -> Self {
aweb::Error::from(ResponseError::from(other))
}
}
impl From<aweb::error::PayloadError> for MeilisearchHttpError {
fn from(error: aweb::error::PayloadError) -> Self {
MeilisearchHttpError::Payload(PayloadError::Payload(error))
}
}
#[derive(Debug, thiserror::Error)]
pub enum PayloadError {
#[error(transparent)]
Payload(aweb::error::PayloadError),
#[error(transparent)]
Json(JsonPayloadError),
#[error(transparent)]
Query(QueryPayloadError),
#[error("The json payload provided is malformed. `{0}`.")]
MalformedPayload(serde_json::error::Error),
#[error("A json payload is missing.")]
MissingPayload,
#[error("Error while writing the playload to disk: `{0}`.")]
ReceivePayloadErr(Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl ErrorCode for PayloadError {
fn error_code(&self) -> Code {
match self {
PayloadError::Payload(e) => match e {
aweb::error::PayloadError::Incomplete(_) => Code::Internal,
aweb::error::PayloadError::EncodingCorrupted => Code::Internal,
aweb::error::PayloadError::Overflow => Code::PayloadTooLarge,
aweb::error::PayloadError::UnknownLength => Code::Internal,
aweb::error::PayloadError::Http2Payload(_) => Code::Internal,
aweb::error::PayloadError::Io(_) => Code::Internal,
_ => todo!(),
},
PayloadError::Json(err) => match err {
JsonPayloadError::Overflow { .. } => Code::PayloadTooLarge,
JsonPayloadError::ContentType => Code::UnsupportedMediaType,
JsonPayloadError::Payload(aweb::error::PayloadError::Overflow) => {
Code::PayloadTooLarge
}
JsonPayloadError::Payload(_) => Code::BadRequest,
JsonPayloadError::Deserialize(_) => Code::BadRequest,
JsonPayloadError::Serialize(_) => Code::Internal,
_ => Code::Internal,
},
PayloadError::Query(err) => match err {
QueryPayloadError::Deserialize(_) => Code::BadRequest,
_ => Code::Internal,
},
PayloadError::MissingPayload => Code::MissingPayload,
PayloadError::MalformedPayload(_) => Code::MalformedPayload,
PayloadError::ReceivePayloadErr(_) => Code::Internal,
}
}
}
impl From<JsonPayloadError> for PayloadError {
fn from(other: JsonPayloadError) -> Self {
match other {
JsonPayloadError::Deserialize(e)
if e.classify() == serde_json::error::Category::Eof
&& e.line() == 1
&& e.column() == 0 =>
{
Self::MissingPayload
}
JsonPayloadError::Deserialize(e)
if e.classify() != serde_json::error::Category::Data =>
{
Self::MalformedPayload(e)
}
_ => Self::Json(other),
}
}
}
impl From<QueryPayloadError> for PayloadError {
fn from(other: QueryPayloadError) -> Self {
Self::Query(other)
}
}
impl From<PayloadError> for aweb::Error {
fn from(other: PayloadError) -> Self {
aweb::Error::from(ResponseError::from(other))
}
}
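The enums above pair a `thiserror` display message with a stable `Code` for the API response. A self-contained sketch of the same pattern (the `DemoError` type and string codes are illustrative, not Meilisearch's real `Code` enum):

use thiserror::Error;
#[derive(Debug, Error)]
enum DemoError {
    #[error("Document `{0}` not found.")]
    DocumentNotFound(String),
    #[error("The provided payload reached the size limit.")]
    PayloadTooLarge,
}
fn error_code(err: &DemoError) -> &'static str {
    // One stable machine-readable code per variant, independent of the
    // human-readable display message.
    match err {
        DemoError::DocumentNotFound(_) => "document_not_found",
        DemoError::PayloadTooLarge => "payload_too_large",
    }
}
fn main() {
    let err = DemoError::DocumentNotFound("movie-42".into());
    println!("{err} -> {}", error_code(&err));
}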

meilisearch/src/extractors/authentication/error.rs (new file)

@@ -0,0 +1,25 @@
use meilisearch_types::error::{Code, ErrorCode};
#[derive(Debug, thiserror::Error)]
pub enum AuthenticationError {
#[error("The Authorization header is missing. It must use the bearer authorization method.")]
MissingAuthorizationHeader,
#[error("The provided API key is invalid.")]
InvalidToken,
// Triggered on configuration error.
#[error("An internal error has occurred. `Irretrievable state`.")]
IrretrievableState,
#[error("Meilisearch is running without a master key. To access this API endpoint, you must have set a master key at launch.")]
MissingMasterKey,
}
impl ErrorCode for AuthenticationError {
fn error_code(&self) -> Code {
match self {
AuthenticationError::MissingAuthorizationHeader => Code::MissingAuthorizationHeader,
AuthenticationError::InvalidToken => Code::InvalidToken,
AuthenticationError::IrretrievableState => Code::Internal,
AuthenticationError::MissingMasterKey => Code::MissingMasterKey,
}
}
}

meilisearch/src/extractors/authentication/mod.rs (new file)

@@ -0,0 +1,247 @@
mod error;
use std::marker::PhantomData;
use std::ops::Deref;
use std::pin::Pin;
use actix_web::FromRequest;
pub use error::AuthenticationError;
use futures::future::err;
use futures::Future;
use meilisearch_auth::{AuthController, AuthFilter};
use meilisearch_types::error::{Code, ResponseError};
pub struct GuardedData<P, D> {
data: D,
filters: AuthFilter,
_marker: PhantomData<P>,
}
impl<P, D> GuardedData<P, D> {
pub fn filters(&self) -> &AuthFilter {
&self.filters
}
async fn auth_bearer(
auth: AuthController,
token: String,
index: Option<String>,
data: Option<D>,
) -> Result<Self, ResponseError>
where
P: Policy + 'static,
{
let missing_master_key = auth.get_master_key().is_none();
match Self::authenticate(auth, token, index).await? {
Some(filters) => match data {
Some(data) => Ok(Self { data, filters, _marker: PhantomData }),
None => Err(AuthenticationError::IrretrievableState.into()),
},
None if missing_master_key => Err(AuthenticationError::MissingMasterKey.into()),
None => Err(AuthenticationError::InvalidToken.into()),
}
}
async fn auth_token(auth: AuthController, data: Option<D>) -> Result<Self, ResponseError>
where
P: Policy + 'static,
{
let missing_master_key = auth.get_master_key().is_none();
match Self::authenticate(auth, String::new(), None).await? {
Some(filters) => match data {
Some(data) => Ok(Self { data, filters, _marker: PhantomData }),
None => Err(AuthenticationError::IrretrievableState.into()),
},
None if missing_master_key => Err(AuthenticationError::MissingMasterKey.into()),
None => Err(AuthenticationError::MissingAuthorizationHeader.into()),
}
}
async fn authenticate(
auth: AuthController,
token: String,
index: Option<String>,
) -> Result<Option<AuthFilter>, ResponseError>
where
P: Policy + 'static,
{
tokio::task::spawn_blocking(move || P::authenticate(auth, token.as_ref(), index.as_deref()))
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))
}
}
impl<P, D> Deref for GuardedData<P, D> {
type Target = D;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<P: Policy + 'static, D: 'static + Clone> FromRequest for GuardedData<P, D> {
type Error = ResponseError;
type Future = Pin<Box<dyn Future<Output = Result<Self, Self::Error>>>>;
fn from_request(
req: &actix_web::HttpRequest,
_payload: &mut actix_web::dev::Payload,
) -> Self::Future {
match req.app_data::<AuthController>().cloned() {
Some(auth) => match req
.headers()
.get("Authorization")
.map(|type_token| type_token.to_str().unwrap_or_default().splitn(2, ' '))
{
Some(mut type_token) => match type_token.next() {
Some("Bearer") => {
// TODO: find a less hardcoded way?
let index = req.match_info().get("index_uid");
match type_token.next() {
Some(token) => Box::pin(Self::auth_bearer(
auth,
token.to_string(),
index.map(String::from),
req.app_data::<D>().cloned(),
)),
None => Box::pin(err(AuthenticationError::InvalidToken.into())),
}
}
_otherwise => {
Box::pin(err(AuthenticationError::MissingAuthorizationHeader.into()))
}
},
None => Box::pin(Self::auth_token(auth, req.app_data::<D>().cloned())),
},
None => Box::pin(err(AuthenticationError::IrretrievableState.into())),
}
}
}
pub trait Policy {
fn authenticate(auth: AuthController, token: &str, index: Option<&str>) -> Option<AuthFilter>;
}
pub mod policies {
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use meilisearch_auth::{AuthController, AuthFilter, SearchRules};
// Re-export actions in policies so they can be used in the routes configuration.
pub use meilisearch_types::keys::{actions, Action};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
use uuid::Uuid;
use crate::extractors::authentication::Policy;
fn tenant_token_validation() -> Validation {
let mut validation = Validation::default();
validation.validate_exp = false;
validation.required_spec_claims.remove("exp");
validation.algorithms = vec![Algorithm::HS256, Algorithm::HS384, Algorithm::HS512];
validation
}
/// Extracts the key id used to sign the payload, without performing any validation.
fn extract_key_id(token: &str) -> Option<Uuid> {
let mut validation = tenant_token_validation();
validation.insecure_disable_signature_validation();
let dummy_key = DecodingKey::from_secret(b"secret");
let token_data = decode::<Claims>(token, &dummy_key, &validation).ok()?;
// get token fields without validating it.
let Claims { api_key_uid, .. } = token_data.claims;
Some(api_key_uid)
}
fn is_keys_action(action: u8) -> bool {
use actions::*;
matches!(action, KEYS_GET | KEYS_CREATE | KEYS_UPDATE | KEYS_DELETE)
}
pub struct ActionPolicy<const A: u8>;
impl<const A: u8> Policy for ActionPolicy<A> {
fn authenticate(
auth: AuthController,
token: &str,
index: Option<&str>,
) -> Option<AuthFilter> {
// authenticate if the token is the master key.
// only the master key has access to the keys routes;
// if no master key is set, only the keys routes are inaccessible.
if auth.get_master_key().map_or_else(|| !is_keys_action(A), |mk| mk == token) {
return Some(AuthFilter::default());
}
// Tenant token
if let Some(filters) = ActionPolicy::<A>::authenticate_tenant_token(&auth, token, index)
{
return Some(filters);
} else if let Some(action) = Action::from_repr(A) {
// API key
if let Ok(Some(uid)) = auth.get_optional_uid_from_encoded_key(token.as_bytes()) {
if let Ok(true) = auth.is_key_authorized(uid, action, index) {
return auth.get_key_filters(uid, None).ok();
}
}
}
None
}
}
impl<const A: u8> ActionPolicy<A> {
fn authenticate_tenant_token(
auth: &AuthController,
token: &str,
index: Option<&str>,
) -> Option<AuthFilter> {
// Only search action can be accessed by a tenant token.
if A != actions::SEARCH {
return None;
}
let uid = extract_key_id(token)?;
// check if parent key is authorized to do the action.
if auth.is_key_authorized(uid, Action::Search, index).ok()? {
// Check if tenant token is valid.
let key = auth.generate_key(uid)?;
let data = decode::<Claims>(
token,
&DecodingKey::from_secret(key.as_bytes()),
&tenant_token_validation(),
)
.ok()?;
// Check index access if an index restriction is provided.
if let Some(index) = index {
if !data.claims.search_rules.is_index_authorized(index) {
return None;
}
}
// Check if token is expired.
if let Some(exp) = data.claims.exp {
if OffsetDateTime::now_utc().unix_timestamp() > exp {
return None;
}
}
return auth.get_key_filters(uid, Some(data.claims.search_rules)).ok();
}
None
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Claims {
search_rules: SearchRules,
exp: Option<i64>,
api_key_uid: Uuid,
}
}
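A runnable sketch of the validation set up by `tenant_token_validation` above, using the same `jsonwebtoken` v8 API (the claims shape and the key here are illustrative):

use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
struct DemoClaims {
    exp: Option<i64>,
}
fn main() -> Result<(), jsonwebtoken::errors::Error> {
    let key = b"parent-api-key";
    let claims = DemoClaims { exp: None };
    let token = encode(&Header::new(Algorithm::HS256), &claims, &EncodingKey::from_secret(key))?;
    // Same shape as `tenant_token_validation`: accept the three HMAC
    // algorithms and check expiry by hand instead of letting the library do it.
    let mut validation = Validation::default();
    validation.validate_exp = false;
    validation.required_spec_claims.remove("exp");
    validation.algorithms = vec![Algorithm::HS256, Algorithm::HS384, Algorithm::HS512];
    let data = decode::<DemoClaims>(&token, &DecodingKey::from_secret(key), &validation)?;
    println!("claims: {:?}", data.claims);
    Ok(())
}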

meilisearch/src/extractors/mod.rs (new file)

@@ -0,0 +1,4 @@
pub mod payload;
#[macro_use]
pub mod authentication;
pub mod sequential_extractor;

meilisearch/src/extractors/payload.rs (new file)

@ -0,0 +1,69 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_http::encoding::Decoder as Decompress;
use actix_web::{dev, web, FromRequest, HttpRequest};
use futures::future::{ready, Ready};
use futures::Stream;
use crate::error::MeilisearchHttpError;
pub struct Payload {
payload: Decompress<dev::Payload>,
limit: usize,
}
pub struct PayloadConfig {
limit: usize,
}
impl PayloadConfig {
pub fn new(limit: usize) -> Self {
Self { limit }
}
}
impl Default for PayloadConfig {
fn default() -> Self {
Self { limit: 256 * 1024 }
}
}
impl FromRequest for Payload {
type Error = MeilisearchHttpError;
type Future = Ready<Result<Payload, Self::Error>>;
#[inline]
fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future {
let limit = req
.app_data::<PayloadConfig>()
.map(|c| c.limit)
.unwrap_or(PayloadConfig::default().limit);
ready(Ok(Payload {
payload: Decompress::from_headers(payload.take(), req.headers()),
limit,
}))
}
}
impl Stream for Payload {
type Item = Result<web::Bytes, MeilisearchHttpError>;
#[inline]
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.payload).poll_next(cx) {
Poll::Ready(Some(result)) => match result {
Ok(bytes) => match self.limit.checked_sub(bytes.len()) {
Some(new_limit) => {
self.limit = new_limit;
Poll::Ready(Some(Ok(bytes)))
}
None => Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge))),
},
x => Poll::Ready(Some(x.map_err(MeilisearchHttpError::from))),
},
otherwise => otherwise.map(|o| o.map(|o| o.map_err(MeilisearchHttpError::from))),
}
}
}
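// Illustrative sketch (not part of the original code): an application opts into a
// custom limit by registering a `PayloadConfig` as app data; handlers then take this
// `Payload` type instead of `actix_web::web::Payload`. The route and handler names
// below are hypothetical.
//
//     let app = actix_web::App::new()
//         .app_data(PayloadConfig::new(10 * 1024 * 1024)) // 10 MiB
//         .route("/documents", actix_web::web::post().to(my_handler));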


@ -0,0 +1,151 @@
#![allow(non_snake_case)]
use std::future::Future;
use std::pin::Pin;
use std::task::Poll;
use actix_web::dev::Payload;
use actix_web::{FromRequest, Handler, HttpRequest};
use pin_project_lite::pin_project;
/// `SeqHandler` is an actix `Handler` that enforces that extractor errors are returned in the
/// same order as the extractors are defined in the wrapped handler. This is needed because, by
/// default, actix resolves the extractors concurrently, whereas we always need the
/// authentication extractor to fail first.
#[derive(Clone)]
pub struct SeqHandler<H>(pub H);
pub struct SeqFromRequest<T>(T);
/// This macro implements `FromRequest` for handlers of arbitrary arity, except arity one, for
/// which it is not needed anyway.
macro_rules! gen_seq {
($ty:ident; $($T:ident)+) => {
pin_project! {
pub struct $ty<$($T: FromRequest), +> {
$(
#[pin]
$T: ExtractFuture<$T::Future, $T, $T::Error>,
)+
}
}
impl<$($T: FromRequest), +> Future for $ty<$($T),+> {
type Output = Result<SeqFromRequest<($($T),+)>, actix_web::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
let mut count_fut = 0;
let mut count_finished = 0;
$(
count_fut += 1;
match this.$T.as_mut().project() {
ExtractProj::Future { fut } => match fut.poll(cx) {
Poll::Ready(Ok(output)) => {
count_finished += 1;
let _ = this
.$T
.as_mut()
.project_replace(ExtractFuture::Done { output });
}
Poll::Ready(Err(error)) => {
count_finished += 1;
let _ = this
.$T
.as_mut()
.project_replace(ExtractFuture::Error { error });
}
Poll::Pending => (),
},
ExtractProj::Done { .. } => count_finished += 1,
ExtractProj::Error { .. } => {
// Short-circuit if all the previous extractors have finished and one of them errored.
if count_finished == count_fut {
match this.$T.project_replace(ExtractFuture::Empty) {
ExtractReplaceProj::Error { error } => {
return Poll::Ready(Err(error.into()))
}
_ => unreachable!("Invalid future state"),
}
} else {
count_finished += 1;
}
}
ExtractProj::Empty => unreachable!("From request polled after being finished. {}", stringify!($T)),
}
)+
if count_fut == count_finished {
let result = (
$(
match this.$T.project_replace(ExtractFuture::Empty) {
ExtractReplaceProj::Done { output } => output,
ExtractReplaceProj::Error { error } => return Poll::Ready(Err(error.into())),
_ => unreachable!("Invalid future state"),
},
)+
);
Poll::Ready(Ok(SeqFromRequest(result)))
} else {
Poll::Pending
}
}
}
impl<$($T: FromRequest,)+> FromRequest for SeqFromRequest<($($T,)+)> {
type Error = actix_web::Error;
type Future = $ty<$($T),+>;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
$ty {
$(
$T: ExtractFuture::Future {
fut: $T::from_request(req, payload),
},
)+
}
}
}
impl<Han, $($T: FromRequest),+> Handler<SeqFromRequest<($($T),+)>> for SeqHandler<Han>
where
Han: Handler<($($T),+)>,
{
type Output = Han::Output;
type Future = Han::Future;
fn call(&self, args: SeqFromRequest<($($T),+)>) -> Self::Future {
self.0.call(args.0)
}
}
};
}
// Not implemented for a single argument, but it is not really needed in that case anyway.
// gen_seq! { SeqFromRequestFut1; A }
gen_seq! { SeqFromRequestFut2; A B }
gen_seq! { SeqFromRequestFut3; A B C }
gen_seq! { SeqFromRequestFut4; A B C D }
gen_seq! { SeqFromRequestFut5; A B C D E }
gen_seq! { SeqFromRequestFut6; A B C D E F }
pin_project! {
#[project = ExtractProj]
#[project_replace = ExtractReplaceProj]
enum ExtractFuture<Fut, Res, Err> {
Future {
#[pin]
fut: Fut,
},
Done {
output: Res,
},
Error {
error: Err,
},
Empty,
}
}
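// Illustrative sketch (not part of the original code): routes wrap their handlers in
// `SeqHandler` so the authentication extractor is always resolved (and fails) first.
// The resource path and handler name are hypothetical; see `routes/api_key.rs` for
// real usages.
//
//     cfg.service(web::resource("/demo").route(web::get().to(SeqHandler(my_handler))));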

417
meilisearch/src/lib.rs Normal file

@ -0,0 +1,417 @@
#![allow(rustdoc::private_intra_doc_links)]
#[macro_use]
pub mod error;
pub mod analytics;
#[macro_use]
pub mod extractors;
pub mod option;
pub mod routes;
pub mod search;
#[cfg(feature = "metrics")]
pub mod metrics;
#[cfg(feature = "metrics")]
pub mod route_metrics;
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use actix_cors::Cors;
use actix_http::body::MessageBody;
use actix_web::dev::{ServiceFactory, ServiceResponse};
use actix_web::error::JsonPayloadError;
use actix_web::web::Data;
use actix_web::{middleware, web, HttpRequest};
use analytics::Analytics;
use anyhow::bail;
use error::PayloadError;
use extractors::payload::PayloadConfig;
use http::header::CONTENT_TYPE;
use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
use log::error;
use meilisearch_auth::AuthController;
use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
use meilisearch_types::settings::apply_settings_to_builder;
use meilisearch_types::tasks::KindWithContent;
use meilisearch_types::versioning::{check_version_file, create_version_file};
use meilisearch_types::{compression, milli, VERSION_FILE_NAME};
pub use option::Opt;
use crate::error::MeilisearchHttpError;
pub static AUTOBATCHING_ENABLED: AtomicBool = AtomicBool::new(false);
/// Check if a db is empty. It does not provide any information on the
/// validity of the data in it.
/// We consider a database as non-empty when it's a non-empty directory.
fn is_empty_db(db_path: impl AsRef<Path>) -> bool {
let db_path = db_path.as_ref();
if !db_path.exists() {
true
// If we encounter an error or if the db path is a file, we consider the db empty.
} else if let Ok(dir) = db_path.read_dir() {
dir.count() == 0
} else {
true
}
}
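// Illustrative sketch (not part of the original code): the expected behavior of
// `is_empty_db`, using the `tempfile` dependency that is already available.
#[cfg(test)]
mod is_empty_db_tests {
use super::is_empty_db;
#[test]
fn empty_and_non_empty_dbs() {
let dir = tempfile::tempdir().unwrap();
// A path that doesn't exist yet counts as an empty database.
assert!(is_empty_db(dir.path().join("does-not-exist")));
// An existing but empty directory is also considered empty.
assert!(is_empty_db(dir.path()));
// As soon as the directory contains an entry, the database is non-empty.
std::fs::write(dir.path().join("data.mdb"), b"x").unwrap();
assert!(!is_empty_db(dir.path()));
}
}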
pub fn create_app(
index_scheduler: Data<IndexScheduler>,
auth_controller: AuthController,
opt: Opt,
analytics: Arc<dyn Analytics>,
enable_dashboard: bool,
) -> actix_web::App<
impl ServiceFactory<
actix_web::dev::ServiceRequest,
Config = (),
Response = ServiceResponse<impl MessageBody>,
Error = actix_web::Error,
InitError = (),
>,
> {
let app = actix_web::App::new()
.configure(|s| {
configure_data(
s,
index_scheduler.clone(),
auth_controller.clone(),
&opt,
analytics.clone(),
)
})
.configure(routes::configure)
.configure(|s| dashboard(s, enable_dashboard));
#[cfg(feature = "metrics")]
let app = app.configure(|s| configure_metrics_route(s, opt.enable_metrics_route));
#[cfg(feature = "metrics")]
let app = app.wrap(middleware::Condition::new(opt.enable_metrics_route, route_metrics::RouteMetrics));
app.wrap(
Cors::default()
.send_wildcard()
.allow_any_header()
.allow_any_origin()
.allow_any_method()
.max_age(86_400), // 24h
)
.wrap(middleware::Logger::default())
.wrap(middleware::Compress::default())
.wrap(middleware::NormalizePath::new(middleware::TrailingSlash::Trim))
}
// TODO: TAMO: Finish setting up things
pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(Arc<IndexScheduler>, AuthController)> {
// We don't want to create anything in the data.ms directory yet, so we
// wrap our two builders in closures that will be executed later.
let auth_controller_builder = || AuthController::new(&opt.db_path, &opt.master_key);
let index_scheduler_builder = || {
IndexScheduler::new(IndexSchedulerOptions {
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
auth_path: opt.db_path.join("auth"),
tasks_path: opt.db_path.join("tasks"),
update_file_path: opt.db_path.join("update_files"),
indexes_path: opt.db_path.join("indexes"),
snapshots_path: opt.snapshot_dir.clone(),
dumps_path: opt.dump_dir.clone(),
task_db_size: opt.max_task_db_size.get_bytes() as usize,
index_size: opt.max_index_size.get_bytes() as usize,
indexer_config: (&opt.indexer_options).try_into()?,
autobatching_enabled: !opt.scheduler_options.disable_auto_batching,
})
};
enum OnFailure {
RemoveDb,
KeepDb,
}
let meilisearch_builder = |on_failure: OnFailure| -> anyhow::Result<_> {
// If anything goes wrong, we delete the `data.ms` directory entirely.
match (
index_scheduler_builder().map_err(anyhow::Error::from),
auth_controller_builder().map_err(anyhow::Error::from),
create_version_file(&opt.db_path).map_err(anyhow::Error::from),
) {
(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
(Err(e), _, _) | (_, Err(e), _) | (_, _, Err(e)) => {
if matches!(on_failure, OnFailure::RemoveDb) {
std::fs::remove_dir_all(&opt.db_path)?;
}
Err(e)
}
}
};
let empty_db = is_empty_db(&opt.db_path);
let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
let snapshot_path_exists = snapshot_path.exists();
if empty_db && snapshot_path_exists {
match compression::from_tar_gz(snapshot_path, &opt.db_path) {
Ok(()) => meilisearch_builder(OnFailure::RemoveDb)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
}
}
} else if !empty_db && !opt.ignore_snapshot_if_db_exists {
bail!(
"database already exists at {:?}, try to delete it or rename it",
opt.db_path.canonicalize().unwrap_or_else(|_| opt.db_path.to_owned())
)
} else if !snapshot_path_exists && !opt.ignore_missing_snapshot {
bail!("snapshot doesn't exist at {}", snapshot_path.display())
} else {
meilisearch_builder(OnFailure::RemoveDb)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
if empty_db && src_path_exists {
let (mut index_scheduler, mut auth_controller) =
meilisearch_builder(OnFailure::RemoveDb)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
}
}
} else if !empty_db && !opt.ignore_dump_if_db_exists {
bail!(
"database already exists at {:?}, try to delete it or rename it",
opt.db_path.canonicalize().unwrap_or_else(|_| opt.db_path.to_owned())
)
} else if !src_path_exists && !opt.ignore_missing_dump {
bail!("dump doesn't exist at {:?}", path)
} else {
let (mut index_scheduler, mut auth_controller) =
meilisearch_builder(OnFailure::RemoveDb)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
}
}
}
} else {
if !empty_db {
check_version_file(&opt.db_path)?;
}
meilisearch_builder(OnFailure::KeepDb)?
};
// We spawn a thread running a loop that periodically registers SnapshotCreation tasks.
let index_scheduler = Arc::new(index_scheduler);
if opt.schedule_snapshot {
let snapshot_delay = Duration::from_secs(opt.snapshot_interval_sec);
let index_scheduler = index_scheduler.clone();
thread::Builder::new()
.name(String::from("register-snapshot-tasks"))
.spawn(move || loop {
thread::sleep(snapshot_delay);
if let Err(e) = index_scheduler.register(KindWithContent::SnapshotCreation) {
error!("Error while registering snapshot: {}", e);
}
})
.unwrap();
}
Ok((index_scheduler, auth_controller))
}
fn import_dump(
db_path: &Path,
dump_path: &Path,
index_scheduler: &mut IndexScheduler,
auth: &mut AuthController,
) -> Result<(), anyhow::Error> {
let reader = File::open(dump_path)?;
let mut dump_reader = dump::DumpReader::open(reader)?;
if let Some(date) = dump_reader.date() {
log::info!(
"Importing a dump of meilisearch `{:?}` from the {}",
dump_reader.version(), // TODO: get the meilisearch version instead of the dump version
date
);
} else {
log::info!(
"Importing a dump of meilisearch `{:?}`",
dump_reader.version(), // TODO: get the meilisearch version instead of the dump version
);
}
let instance_uid = dump_reader.instance_uid()?;
// 1. Import the instance-uid.
if let Some(ref instance_uid) = instance_uid {
// We don't want to panic if there is an error with the instance-uid.
let _ = std::fs::write(db_path.join("instance-uid"), instance_uid.to_string().as_bytes());
};
// 2. Import the `Key`s.
let mut keys = Vec::new();
auth.raw_delete_all_keys()?;
for key in dump_reader.keys()? {
let key = key?;
auth.raw_insert_key(key.clone())?;
keys.push(key);
}
let indexer_config = index_scheduler.indexer_config();
// /!\ The tasks must be imported AFTER importing the indexes or else the scheduler might
// try to process tasks while we're trying to import the indexes.
// 3. Import the indexes.
for index_reader in dump_reader.indexes()? {
let mut index_reader = index_reader?;
let metadata = index_reader.metadata();
log::info!("Importing index `{}`.", metadata.uid);
let index = index_scheduler.create_raw_index(&metadata.uid)?;
let mut wtxn = index.write_txn()?;
let mut builder = milli::update::Settings::new(&mut wtxn, &index, indexer_config);
// 3.1 Import the primary key if there is one.
if let Some(ref primary_key) = metadata.primary_key {
builder.set_primary_key(primary_key.to_string());
}
// 3.2 Import the settings.
log::info!("Importing the settings.");
let settings = index_reader.settings()?;
apply_settings_to_builder(&settings, &mut builder);
builder.execute(|indexing_step| log::debug!("update: {:?}", indexing_step), || false)?;
// 3.3 Import the documents.
// 3.3.1 We need to recreate the grenad+obkv format accepted by the index.
log::info!("Importing the documents.");
let file = tempfile::tempfile()?;
let mut builder = DocumentsBatchBuilder::new(BufWriter::new(file));
for document in index_reader.documents()? {
builder.append_json_object(&document?)?;
}
// This flushes the content of the batch builder.
let file = builder.into_inner()?.into_inner()?;
// 3.3.2 We feed it to the milli index.
let reader = BufReader::new(file);
let reader = DocumentsBatchReader::from_reader(reader)?;
let builder = milli::update::IndexDocuments::new(
&mut wtxn,
&index,
indexer_config,
IndexDocumentsConfig {
update_method: IndexDocumentsMethod::ReplaceDocuments,
..Default::default()
},
|indexing_step| log::debug!("update: {:?}", indexing_step),
|| false,
)?;
let (builder, user_result) = builder.add_documents(reader)?;
log::info!("{} documents found.", user_result?);
builder.execute()?;
wtxn.commit()?;
log::info!("All documents successfully imported.");
}
// 4. Import the tasks.
for ret in dump_reader.tasks()? {
let (task, file) = ret?;
index_scheduler.register_dumped_task(task, file)?;
}
Ok(())
}
pub fn configure_data(
config: &mut web::ServiceConfig,
index_scheduler: Data<IndexScheduler>,
auth: AuthController,
opt: &Opt,
analytics: Arc<dyn Analytics>,
) {
let http_payload_size_limit = opt.http_payload_size_limit.get_bytes() as usize;
config
.app_data(index_scheduler)
.app_data(auth)
.app_data(web::Data::from(analytics))
.app_data(
web::JsonConfig::default()
.content_type(|mime| mime == mime::APPLICATION_JSON)
.error_handler(|err, req: &HttpRequest| match err {
JsonPayloadError::ContentType => match req.headers().get(CONTENT_TYPE) {
Some(content_type) => MeilisearchHttpError::InvalidContentType(
content_type.to_str().unwrap_or("unknown").to_string(),
vec![mime::APPLICATION_JSON.to_string()],
)
.into(),
None => MeilisearchHttpError::MissingContentType(vec![
mime::APPLICATION_JSON.to_string(),
])
.into(),
},
err => PayloadError::from(err).into(),
}),
)
.app_data(PayloadConfig::new(http_payload_size_limit))
.app_data(
web::QueryConfig::default().error_handler(|err, _req| PayloadError::from(err).into()),
);
}
#[cfg(feature = "mini-dashboard")]
pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
use actix_web::HttpResponse;
use static_files::Resource;
mod generated {
include!(concat!(env!("OUT_DIR"), "/generated.rs"));
}
if enable_frontend {
let generated = generated::generate();
// Generate routes for mini-dashboard assets
for (path, resource) in generated.into_iter() {
let Resource { mime_type, data, .. } = resource;
// Serve index.html on the root route `/`
if path == "index.html" {
config.service(web::resource("/").route(web::get().to(move || async move {
HttpResponse::Ok().content_type(mime_type).body(data)
})));
} else {
config.service(web::resource(path).route(web::get().to(move || async move {
HttpResponse::Ok().content_type(mime_type).body(data)
})));
}
}
} else {
config.service(web::resource("/").route(web::get().to(routes::running)));
}
}
#[cfg(not(feature = "mini-dashboard"))]
pub fn dashboard(config: &mut web::ServiceConfig, _enable_frontend: bool) {
config.service(web::resource("/").route(web::get().to(routes::running)));
}
#[cfg(feature = "metrics")]
pub fn configure_metrics_route(config: &mut web::ServiceConfig, enable_metrics_route: bool) {
if enable_metrics_route {
config.service(
web::resource("/metrics").route(web::get().to(crate::route_metrics::get_metrics)),
);
}
}

166
meilisearch/src/main.rs Normal file

@ -0,0 +1,166 @@
use std::env;
use std::path::PathBuf;
use std::sync::Arc;
use actix_web::http::KeepAlive;
use actix_web::web::Data;
use actix_web::HttpServer;
use index_scheduler::IndexScheduler;
use meilisearch::analytics::Analytics;
use meilisearch::{analytics, create_app, setup_meilisearch, Opt};
use meilisearch_auth::AuthController;
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
/// Does all the setup required before Meilisearch is launched.
fn setup(opt: &Opt) -> anyhow::Result<()> {
let mut log_builder = env_logger::Builder::new();
log_builder.parse_filters(&opt.log_level);
if opt.log_level == "info" {
// If the log level is `info`, we only allow the `warn` log level for milli.
log_builder.filter_module("milli", log::LevelFilter::Warn);
}
log_builder.init();
Ok(())
}
#[actix_web::main]
async fn main() -> anyhow::Result<()> {
let (opt, config_read_from) = Opt::try_build()?;
setup(&opt)?;
match opt.env.as_ref() {
"production" => {
if opt.master_key.is_none() {
anyhow::bail!(
"In production mode, the environment variable MEILI_MASTER_KEY is mandatory"
)
}
}
"development" => (),
_ => unreachable!(),
}
let (index_scheduler, auth_controller) = setup_meilisearch(&opt)?;
#[cfg(all(not(debug_assertions), feature = "analytics"))]
let analytics = if !opt.no_analytics {
analytics::SegmentAnalytics::new(&opt, index_scheduler.clone()).await
} else {
analytics::MockAnalytics::new(&opt)
};
#[cfg(any(debug_assertions, not(feature = "analytics")))]
let analytics = analytics::MockAnalytics::new(&opt);
print_launch_resume(&opt, analytics.clone(), config_read_from);
run_http(index_scheduler, auth_controller, opt, analytics).await?;
Ok(())
}
async fn run_http(
index_scheduler: Arc<IndexScheduler>,
auth_controller: AuthController,
opt: Opt,
analytics: Arc<dyn Analytics>,
) -> anyhow::Result<()> {
let enable_dashboard = &opt.env == "development";
let opt_clone = opt.clone();
let index_scheduler = Data::from(index_scheduler);
let http_server = HttpServer::new(move || {
create_app(
index_scheduler.clone(),
auth_controller.clone(),
opt.clone(),
analytics.clone(),
enable_dashboard,
)
})
// Disabling signals allows the server to terminate immediately when a user enters CTRL-C.
.disable_signals()
.keep_alive(KeepAlive::Os);
if let Some(config) = opt_clone.get_ssl_config()? {
http_server.bind_rustls(opt_clone.http_addr, config)?.run().await?;
} else {
http_server.bind(&opt_clone.http_addr)?.run().await?;
}
Ok(())
}
pub fn print_launch_resume(
opt: &Opt,
analytics: Arc<dyn Analytics>,
config_read_from: Option<PathBuf>,
) {
let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
let commit_date = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP").unwrap_or("unknown");
let protocol =
if opt.ssl_cert_path.is_some() && opt.ssl_key_path.is_some() { "https" } else { "http" };
let ascii_name = r#"
888b d888 d8b 888 d8b 888
8888b d8888 Y8P 888 Y8P 888
88888b.d88888 888 888
888Y88888P888 .d88b. 888 888 888 .d8888b .d88b. 8888b. 888d888 .d8888b 88888b.
888 Y888P 888 d8P Y8b 888 888 888 88K d8P Y8b "88b 888P" d88P" 888 "88b
888 Y8P 888 88888888 888 888 888 "Y8888b. 88888888 .d888888 888 888 888 888
888 " 888 Y8b. 888 888 888 X88 Y8b. 888 888 888 Y88b. 888 888
888 888 "Y8888 888 888 888 88888P' "Y8888 "Y888888 888 "Y8888P 888 888
"#;
eprintln!("{}", ascii_name);
eprintln!(
"Config file path:\t{:?}",
config_read_from
.map(|config_file_path| config_file_path.display().to_string())
.unwrap_or_else(|| "none".to_string())
);
eprintln!("Database path:\t\t{:?}", opt.db_path);
eprintln!("Server listening on:\t\"{}://{}\"", protocol, opt.http_addr);
eprintln!("Environment:\t\t{:?}", opt.env);
eprintln!("Commit SHA:\t\t{:?}", commit_sha.to_string());
eprintln!("Commit date:\t\t{:?}", commit_date.to_string());
eprintln!("Package version:\t{:?}", env!("CARGO_PKG_VERSION").to_string());
#[cfg(all(not(debug_assertions), feature = "analytics"))]
{
if !opt.no_analytics {
eprintln!(
"
Thank you for using Meilisearch!
We collect anonymized analytics to improve our product and your experience. To learn more, including how to turn off analytics, visit our dedicated documentation page: https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html
Anonymous telemetry:\t\"Enabled\""
);
} else {
eprintln!("Anonymous telemetry:\t\"Disabled\"");
}
}
if let Some(instance_uid) = analytics.instance_uid() {
eprintln!("Instance UID:\t\t\"{}\"", instance_uid);
}
eprintln!();
if opt.master_key.is_some() {
eprintln!("A Master Key has been set. Requests to Meilisearch won't be authorized unless you provide an authentication key.");
} else {
eprintln!("No master key found; The server will accept unidentified requests. \
If you need some protection in development mode, please export a key: export MEILI_MASTER_KEY=xxx");
}
eprintln!();
eprintln!("Documentation:\t\thttps://docs.meilisearch.com");
eprintln!("Source code:\t\thttps://github.com/meilisearch/meilisearch");
eprintln!("Contact:\t\thttps://docs.meilisearch.com/resources/contact.html");
eprintln!();
}


@ -0,0 +1,36 @@
use lazy_static::lazy_static;
use prometheus::{
opts, register_histogram_vec, register_int_counter_vec, register_int_gauge,
register_int_gauge_vec, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
};
const HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: &[f64; 14] = &[
0.0005, 0.0008, 0.00085, 0.0009, 0.00095, 0.001, 0.00105, 0.0011, 0.00115, 0.0012, 0.0015,
0.002, 0.003, 1.0,
];
lazy_static! {
pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
opts!("http_requests_total", "HTTP requests total"),
&["method", "path"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_DB_SIZE_BYTES: IntGauge =
register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch Db Size In Bytes"))
.expect("Can't create a metric");
pub static ref MEILISEARCH_INDEX_COUNT: IntGauge =
register_int_gauge!(opts!("meilisearch_index_count", "Meilisearch Index Count"))
.expect("Can't create a metric");
pub static ref MEILISEARCH_INDEX_DOCS_COUNT: IntGaugeVec = register_int_gauge_vec!(
opts!("meilisearch_index_docs_count", "Meilisearch Index Docs Count"),
&["index"]
)
.expect("Can't create a metric");
pub static ref HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
"http_response_time_seconds",
"HTTP response times",
&["method", "path"],
HTTP_RESPONSE_TIME_CUSTOM_BUCKETS.to_vec()
)
.expect("Can't create a metric");
}
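// Illustrative sketch (not part of the original code): how the metrics above are
// recorded by the middleware in `route_metrics.rs`; the label values are hypothetical.
//
//     HTTP_REQUESTS_TOTAL.with_label_values(&["GET", "/health"]).inc();
//     let timer = HTTP_RESPONSE_TIME_SECONDS
//         .with_label_values(&["GET", "/health"])
//         .start_timer();
//     // ... handle the request ...
//     timer.observe_duration();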

761
meilisearch/src/option.rs Normal file

@ -0,0 +1,761 @@
use std::convert::TryFrom;
use std::env::VarError;
use std::ffi::OsStr;
use std::io::{BufReader, Read};
use std::num::ParseIntError;
use std::ops::Deref;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::{env, fmt, fs};
use byte_unit::{Byte, ByteError};
use clap::Parser;
use meilisearch_types::milli::update::IndexerConfig;
use rustls::server::{
AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient, ServerSessionMemoryCache,
};
use rustls::RootCertStore;
use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
use serde::{Deserialize, Serialize};
use sysinfo::{RefreshKind, System, SystemExt};
const POSSIBLE_ENV: [&str; 2] = ["development", "production"];
const MEILI_DB_PATH: &str = "MEILI_DB_PATH";
const MEILI_HTTP_ADDR: &str = "MEILI_HTTP_ADDR";
const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
const MEILI_ENV: &str = "MEILI_ENV";
#[cfg(all(not(debug_assertions), feature = "analytics"))]
const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
const MEILI_MAX_INDEX_SIZE: &str = "MEILI_MAX_INDEX_SIZE";
const MEILI_MAX_TASK_DB_SIZE: &str = "MEILI_MAX_TASK_DB_SIZE";
const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
const MEILI_SSL_CERT_PATH: &str = "MEILI_SSL_CERT_PATH";
const MEILI_SSL_KEY_PATH: &str = "MEILI_SSL_KEY_PATH";
const MEILI_SSL_AUTH_PATH: &str = "MEILI_SSL_AUTH_PATH";
const MEILI_SSL_OCSP_PATH: &str = "MEILI_SSL_OCSP_PATH";
const MEILI_SSL_REQUIRE_AUTH: &str = "MEILI_SSL_REQUIRE_AUTH";
const MEILI_SSL_RESUMPTION: &str = "MEILI_SSL_RESUMPTION";
const MEILI_SSL_TICKETS: &str = "MEILI_SSL_TICKETS";
const MEILI_IMPORT_SNAPSHOT: &str = "MEILI_IMPORT_SNAPSHOT";
const MEILI_IGNORE_MISSING_SNAPSHOT: &str = "MEILI_IGNORE_MISSING_SNAPSHOT";
const MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS: &str = "MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS";
const MEILI_SNAPSHOT_DIR: &str = "MEILI_SNAPSHOT_DIR";
const MEILI_SCHEDULE_SNAPSHOT: &str = "MEILI_SCHEDULE_SNAPSHOT";
const MEILI_SNAPSHOT_INTERVAL_SEC: &str = "MEILI_SNAPSHOT_INTERVAL_SEC";
const MEILI_IMPORT_DUMP: &str = "MEILI_IMPORT_DUMP";
const MEILI_IGNORE_MISSING_DUMP: &str = "MEILI_IGNORE_MISSING_DUMP";
const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR";
const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
#[cfg(feature = "metrics")]
const MEILI_ENABLE_METRICS_ROUTE: &str = "MEILI_ENABLE_METRICS_ROUTE";
const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
const DEFAULT_DB_PATH: &str = "./data.ms";
const DEFAULT_HTTP_ADDR: &str = "localhost:7700";
const DEFAULT_ENV: &str = "development";
const DEFAULT_MAX_INDEX_SIZE: &str = "100 GiB";
const DEFAULT_MAX_TASK_DB_SIZE: &str = "100 GiB";
const DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT: &str = "100 MB";
const DEFAULT_SNAPSHOT_DIR: &str = "snapshots/";
const DEFAULT_SNAPSHOT_INTERVAL_SEC: u64 = 86400;
const DEFAULT_DUMP_DIR: &str = "dumps/";
const DEFAULT_LOG_LEVEL: &str = "INFO";
const MEILI_MAX_INDEXING_MEMORY: &str = "MEILI_MAX_INDEXING_MEMORY";
const MEILI_MAX_INDEXING_THREADS: &str = "MEILI_MAX_INDEXING_THREADS";
const DISABLE_AUTO_BATCHING: &str = "DISABLE_AUTO_BATCHING";
const DEFAULT_LOG_EVERY_N: usize = 100000;
#[derive(Debug, Clone, Parser, Deserialize)]
#[clap(version, next_display_order = None)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Opt {
/// Designates the location where database files will be created and retrieved.
#[clap(long, env = MEILI_DB_PATH, default_value_os_t = default_db_path())]
#[serde(default = "default_db_path")]
pub db_path: PathBuf,
/// Sets the HTTP address and port Meilisearch will use.
#[clap(long, env = MEILI_HTTP_ADDR, default_value_t = default_http_addr())]
#[serde(default = "default_http_addr")]
pub http_addr: String,
/// Sets the instance's master key, automatically protecting all routes except `GET /health`.
#[clap(long, env = MEILI_MASTER_KEY)]
pub master_key: Option<String>,
/// Configures the instance's environment. Value must be either `production` or `development`.
#[clap(long, env = MEILI_ENV, default_value_t = default_env(), value_parser = POSSIBLE_ENV)]
#[serde(default = "default_env")]
pub env: String,
/// Deactivates Meilisearch's built-in telemetry when provided.
///
/// Meilisearch automatically collects data from all instances that do not opt out using this flag.
/// All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted
/// at any time.
#[cfg(all(not(debug_assertions), feature = "analytics"))]
#[serde(default)] // we can't send true
#[clap(long, env = MEILI_NO_ANALYTICS)]
pub no_analytics: bool,
/// Sets the maximum size of the index. Value must be given in bytes or with an explicit base unit (for instance: 107374182400, '107.7Gb', or '107374 Mb').
#[clap(long, env = MEILI_MAX_INDEX_SIZE, default_value_t = default_max_index_size())]
#[serde(default = "default_max_index_size")]
pub max_index_size: Byte,
/// Sets the maximum size of the task database. Value must be given in bytes or with an
/// explicit base unit (for instance: 107374182400, '107.7Gb', or '107374 Mb').
#[clap(long, env = MEILI_MAX_TASK_DB_SIZE, default_value_t = default_max_task_db_size())]
#[serde(default = "default_max_task_db_size")]
pub max_task_db_size: Byte,
/// Sets the maximum size of accepted payloads. Value must be given in bytes or with an
/// explicit base unit (for instance: 107374182400, '107.7Gb', or '107374 Mb').
#[clap(long, env = MEILI_HTTP_PAYLOAD_SIZE_LIMIT, default_value_t = default_http_payload_size_limit())]
#[serde(default = "default_http_payload_size_limit")]
pub http_payload_size_limit: Byte,
/// Sets the server's SSL certificates.
#[clap(long, env = MEILI_SSL_CERT_PATH, value_parser)]
pub ssl_cert_path: Option<PathBuf>,
/// Sets the server's SSL key files.
#[clap(long, env = MEILI_SSL_KEY_PATH, value_parser)]
pub ssl_key_path: Option<PathBuf>,
/// Enables client authentication in the specified path.
#[clap(long, env = MEILI_SSL_AUTH_PATH, value_parser)]
pub ssl_auth_path: Option<PathBuf>,
/// Sets the server's OCSP file. *Optional*
///
/// Reads a DER-encoded OCSP response from OCSPFILE and staples it to the certificate.
#[clap(long, env = MEILI_SSL_OCSP_PATH, value_parser)]
pub ssl_ocsp_path: Option<PathBuf>,
/// Makes SSL authentication mandatory.
#[serde(default)]
#[clap(long, env = MEILI_SSL_REQUIRE_AUTH)]
pub ssl_require_auth: bool,
/// Activates SSL session resumption.
#[serde(default)]
#[clap(long, env = MEILI_SSL_RESUMPTION)]
pub ssl_resumption: bool,
/// Activates SSL tickets.
#[serde(default)]
#[clap(long, env = MEILI_SSL_TICKETS)]
pub ssl_tickets: bool,
/// Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
#[clap(long, env = MEILI_IMPORT_SNAPSHOT)]
pub import_snapshot: Option<PathBuf>,
/// Prevents a Meilisearch instance from throwing an error when `--import-snapshot`
/// does not point to a valid snapshot file.
///
/// This option will trigger an error if `--import-snapshot` is not defined.
#[clap(
long,
env = MEILI_IGNORE_MISSING_SNAPSHOT,
requires = "import_snapshot"
)]
#[serde(default)]
pub ignore_missing_snapshot: bool,
/// Prevents a Meilisearch instance with an existing database from throwing an
/// error when using `--import-snapshot`. Instead, the snapshot will be ignored
/// and Meilisearch will launch using the existing database.
///
/// This option will trigger an error if `--import-snapshot` is not defined.
#[clap(
long,
env = MEILI_IGNORE_SNAPSHOT_IF_DB_EXISTS,
requires = "import_snapshot"
)]
#[serde(default)]
pub ignore_snapshot_if_db_exists: bool,
/// Sets the directory where Meilisearch will store snapshots.
#[clap(long, env = MEILI_SNAPSHOT_DIR, default_value_os_t = default_snapshot_dir())]
#[serde(default = "default_snapshot_dir")]
pub snapshot_dir: PathBuf,
/// Activates scheduled snapshots when provided. Snapshots are disabled by default.
#[clap(long, env = MEILI_SCHEDULE_SNAPSHOT)]
#[serde(default)]
pub schedule_snapshot: bool,
/// Defines the interval between each snapshot. Value must be given in seconds.
#[clap(long, env = MEILI_SNAPSHOT_INTERVAL_SEC, default_value_t = default_snapshot_interval_sec())]
#[serde(default = "default_snapshot_interval_sec")]
pub snapshot_interval_sec: u64,
/// Imports the dump file located at the specified path. Path must point to a `.dump` file.
/// If a database already exists, Meilisearch will throw an error and abort launch.
#[clap(long, env = MEILI_IMPORT_DUMP, conflicts_with = "import_snapshot")]
pub import_dump: Option<PathBuf>,
/// Prevents Meilisearch from throwing an error when `--import-dump` does not point to
/// a valid dump file. Instead, Meilisearch will start normally without importing any dump.
///
/// This option will trigger an error if `--import-dump` is not defined.
#[clap(long, env = MEILI_IGNORE_MISSING_DUMP, requires = "import_dump")]
#[serde(default)]
pub ignore_missing_dump: bool,
/// Prevents a Meilisearch instance with an existing database from throwing an error
/// when using `--import-dump`. Instead, the dump will be ignored and Meilisearch will
/// launch using the existing database.
///
/// This option will trigger an error if `--import-dump` is not defined.
#[clap(long, env = MEILI_IGNORE_DUMP_IF_DB_EXISTS, requires = "import_dump")]
#[serde(default)]
pub ignore_dump_if_db_exists: bool,
/// Sets the directory where Meilisearch will create dump files.
#[clap(long, env = MEILI_DUMP_DIR, default_value_os_t = default_dump_dir())]
#[serde(default = "default_dump_dir")]
pub dump_dir: PathBuf,
/// Defines how much detail should be present in Meilisearch's logs.
///
/// Meilisearch currently supports five log levels, listed in order of increasing verbosity: ERROR, WARN, INFO, DEBUG, TRACE.
#[clap(long, env = MEILI_LOG_LEVEL, default_value_t = default_log_level())]
#[serde(default = "default_log_level")]
pub log_level: String,
/// Enables Prometheus metrics and /metrics route.
#[cfg(feature = "metrics")]
#[clap(long, env = MEILI_ENABLE_METRICS_ROUTE)]
#[serde(default)]
pub enable_metrics_route: bool,
#[serde(flatten)]
#[clap(flatten)]
pub indexer_options: IndexerOpts,
#[serde(flatten)]
#[clap(flatten)]
pub scheduler_options: SchedulerConfig,
/// Sets the path to a configuration file that should be used to set up the engine.
/// Format must be TOML.
#[clap(long)]
pub config_file_path: Option<PathBuf>,
}
impl Opt {
/// Whether analytics should be enabled or not.
#[cfg(all(not(debug_assertions), feature = "analytics"))]
pub fn analytics(&self) -> bool {
!self.no_analytics
}
/// Build a new Opt from config file, env vars and cli args.
pub fn try_build() -> anyhow::Result<(Self, Option<PathBuf>)> {
// Parse the args to get the config_file_path.
let mut opts = Opt::parse();
let mut config_read_from = None;
let user_specified_config_file_path = opts
.config_file_path
.clone()
.or_else(|| env::var("MEILI_CONFIG_FILE_PATH").map(PathBuf::from).ok());
let config_file_path = user_specified_config_file_path
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_CONFIG_FILE_PATH));
match std::fs::read(&config_file_path) {
Ok(config) => {
// If the file is successfully read, we deserialize it with `toml`.
let opt_from_config = toml::from_slice::<Opt>(&config)?;
// Return an error if config file contains 'config_file_path'
// Using that key in the config file doesn't make sense because it creates a logical loop (the config file referencing itself).
if opt_from_config.config_file_path.is_some() {
anyhow::bail!("`config_file_path` is not supported in the configuration file")
}
// We inject the values from the toml in the corresponding env vars if need be. Doing so, we respect the priority toml < env vars < cli args.
opt_from_config.export_to_env();
// Once injected, we parse the cli args once again to take the new env vars into account.
opts = Opt::parse();
config_read_from = Some(config_file_path);
}
Err(e) => {
if let Some(path) = user_specified_config_file_path {
// If we have an error while reading the file defined by the user.
anyhow::bail!(
"unable to open or read the {:?} configuration file: {}.",
path,
e,
)
}
}
}
Ok((opts, config_read_from))
}
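// Illustrative sketch (not part of the original code): a minimal `config.toml`
// honoring the `toml < env vars < cli args` priority described above. Keys are the
// snake_case names of the `Opt` fields; the values shown are hypothetical.
//
//     db_path = "./data.ms"
//     http_addr = "localhost:7700"
//     env = "development"
//     max_indexing_memory = "2 GiB"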
/// Exports the opts values to their corresponding env vars if they are not set.
fn export_to_env(self) {
let Opt {
db_path,
http_addr,
master_key,
env,
max_index_size,
max_task_db_size,
http_payload_size_limit,
ssl_cert_path,
ssl_key_path,
ssl_auth_path,
ssl_ocsp_path,
ssl_require_auth,
ssl_resumption,
ssl_tickets,
snapshot_dir,
schedule_snapshot,
snapshot_interval_sec,
dump_dir,
log_level,
indexer_options,
scheduler_options,
import_snapshot: _,
ignore_missing_snapshot: _,
ignore_snapshot_if_db_exists: _,
import_dump: _,
ignore_missing_dump: _,
ignore_dump_if_db_exists: _,
config_file_path: _,
#[cfg(all(not(debug_assertions), feature = "analytics"))]
no_analytics,
#[cfg(feature = "metrics")]
enable_metrics_route,
} = self;
export_to_env_if_not_present(MEILI_DB_PATH, db_path);
export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
if let Some(master_key) = master_key {
export_to_env_if_not_present(MEILI_MASTER_KEY, master_key);
}
export_to_env_if_not_present(MEILI_ENV, env);
#[cfg(all(not(debug_assertions), feature = "analytics"))]
{
export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
}
export_to_env_if_not_present(MEILI_MAX_INDEX_SIZE, max_index_size.to_string());
export_to_env_if_not_present(MEILI_MAX_TASK_DB_SIZE, max_task_db_size.to_string());
export_to_env_if_not_present(
MEILI_HTTP_PAYLOAD_SIZE_LIMIT,
http_payload_size_limit.to_string(),
);
if let Some(ssl_cert_path) = ssl_cert_path {
export_to_env_if_not_present(MEILI_SSL_CERT_PATH, ssl_cert_path);
}
if let Some(ssl_key_path) = ssl_key_path {
export_to_env_if_not_present(MEILI_SSL_KEY_PATH, ssl_key_path);
}
if let Some(ssl_auth_path) = ssl_auth_path {
export_to_env_if_not_present(MEILI_SSL_AUTH_PATH, ssl_auth_path);
}
if let Some(ssl_ocsp_path) = ssl_ocsp_path {
export_to_env_if_not_present(MEILI_SSL_OCSP_PATH, ssl_ocsp_path);
}
export_to_env_if_not_present(MEILI_SSL_REQUIRE_AUTH, ssl_require_auth.to_string());
export_to_env_if_not_present(MEILI_SSL_RESUMPTION, ssl_resumption.to_string());
export_to_env_if_not_present(MEILI_SSL_TICKETS, ssl_tickets.to_string());
export_to_env_if_not_present(MEILI_SNAPSHOT_DIR, snapshot_dir);
export_to_env_if_not_present(MEILI_SCHEDULE_SNAPSHOT, schedule_snapshot.to_string());
export_to_env_if_not_present(
MEILI_SNAPSHOT_INTERVAL_SEC,
snapshot_interval_sec.to_string(),
);
export_to_env_if_not_present(MEILI_DUMP_DIR, dump_dir);
export_to_env_if_not_present(MEILI_LOG_LEVEL, log_level);
#[cfg(feature = "metrics")]
{
export_to_env_if_not_present(
MEILI_ENABLE_METRICS_ROUTE,
enable_metrics_route.to_string(),
);
}
indexer_options.export_to_env();
scheduler_options.export_to_env();
}
pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
if let (Some(cert_path), Some(key_path)) = (&self.ssl_cert_path, &self.ssl_key_path) {
let config = rustls::ServerConfig::builder().with_safe_defaults();
let config = match &self.ssl_auth_path {
Some(auth_path) => {
let roots = load_certs(auth_path.to_path_buf())?;
let mut client_auth_roots = RootCertStore::empty();
for root in roots {
client_auth_roots.add(&root).unwrap();
}
if self.ssl_require_auth {
let verifier = AllowAnyAuthenticatedClient::new(client_auth_roots);
config.with_client_cert_verifier(verifier)
} else {
let verifier =
AllowAnyAnonymousOrAuthenticatedClient::new(client_auth_roots);
config.with_client_cert_verifier(verifier)
}
}
None => config.with_no_client_auth(),
};
let certs = load_certs(cert_path.to_path_buf())?;
let privkey = load_private_key(key_path.to_path_buf())?;
let ocsp = load_ocsp(&self.ssl_ocsp_path)?;
let mut config = config
.with_single_cert_with_ocsp_and_sct(certs, privkey, ocsp, vec![])
.map_err(|_| anyhow::anyhow!("bad certificates/private key"))?;
config.key_log = Arc::new(rustls::KeyLogFile::new());
if self.ssl_resumption {
config.session_storage = ServerSessionMemoryCache::new(256);
}
if self.ssl_tickets {
config.ticketer = rustls::Ticketer::new().unwrap();
}
Ok(Some(config))
} else {
Ok(None)
}
}
}
#[derive(Debug, Clone, Parser, Deserialize)]
pub struct IndexerOpts {
/// Sets the number of documents to process before printing
/// a log line reporting the indexing progress.
#[serde(default = "default_log_every_n")]
#[clap(long, default_value_t = default_log_every_n(), hide = true)] // 100k
pub log_every_n: usize,
/// Grenad's maximum number of chunks.
#[clap(long, hide = true)]
pub max_nb_chunks: Option<usize>,
/// Sets the maximum amount of RAM Meilisearch can use when indexing. By default, Meilisearch
/// uses no more than two thirds of available memory.
#[clap(long, env = MEILI_MAX_INDEXING_MEMORY, default_value_t)]
#[serde(default)]
pub max_indexing_memory: MaxMemory,
/// Sets the maximum number of threads Meilisearch can use during indexation. By default, the
/// indexer avoids using more than half of a machine's total processing units. This ensures
/// Meilisearch is always ready to perform searches, even while you are updating an index.
#[clap(long, env = MEILI_MAX_INDEXING_THREADS, default_value_t)]
#[serde(default)]
pub max_indexing_threads: MaxThreads,
}
impl IndexerOpts {
/// Exports the values to their corresponding env vars if they are not set.
pub fn export_to_env(self) {
let IndexerOpts {
max_indexing_memory,
max_indexing_threads,
log_every_n: _,
max_nb_chunks: _,
} = self;
if let Some(max_indexing_memory) = max_indexing_memory.0 {
export_to_env_if_not_present(
MEILI_MAX_INDEXING_MEMORY,
max_indexing_memory.to_string(),
);
}
export_to_env_if_not_present(
MEILI_MAX_INDEXING_THREADS,
max_indexing_threads.0.to_string(),
);
}
}
#[derive(Debug, Clone, Parser, Default, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct SchedulerConfig {
/// Deactivates auto-batching when provided.
#[clap(long, env = DISABLE_AUTO_BATCHING)]
#[serde(default)]
pub disable_auto_batching: bool,
}
impl SchedulerConfig {
pub fn export_to_env(self) {
let SchedulerConfig { disable_auto_batching } = self;
export_to_env_if_not_present(DISABLE_AUTO_BATCHING, disable_auto_batching.to_string());
}
}
impl TryFrom<&IndexerOpts> for IndexerConfig {
type Error = anyhow::Error;
fn try_from(other: &IndexerOpts) -> Result<Self, Self::Error> {
let thread_pool = rayon::ThreadPoolBuilder::new()
.thread_name(|index| format!("indexing-thread:{index}"))
.num_threads(*other.max_indexing_threads)
.build()?;
Ok(Self {
log_every_n: Some(other.log_every_n),
max_nb_chunks: other.max_nb_chunks,
max_memory: other.max_indexing_memory.map(|b| b.get_bytes() as usize),
thread_pool: Some(thread_pool),
max_positions_per_attributes: None,
..Default::default()
})
}
}
impl Default for IndexerOpts {
fn default() -> Self {
Self {
log_every_n: 100_000,
max_nb_chunks: None,
max_indexing_memory: MaxMemory::default(),
max_indexing_threads: MaxThreads::default(),
}
}
}
/// A type used to detect the max memory available and use 2/3 of it.
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxMemory(Option<Byte>);
impl FromStr for MaxMemory {
type Err = ByteError;
fn from_str(s: &str) -> Result<MaxMemory, ByteError> {
Byte::from_str(s).map(Some).map(MaxMemory)
}
}
impl Default for MaxMemory {
fn default() -> MaxMemory {
MaxMemory(total_memory_bytes().map(|bytes| bytes * 2 / 3).map(Byte::from_bytes))
}
}
impl fmt::Display for MaxMemory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
Some(memory) => write!(f, "{}", memory.get_appropriate_unit(true)),
None => f.write_str("unknown"),
}
}
}
impl Deref for MaxMemory {
type Target = Option<Byte>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl MaxMemory {
pub fn unlimited() -> Self {
Self(None)
}
}
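// Illustrative sketch (not part of the original code): `MaxMemory` accepts any size
// that `byte_unit::Byte` can parse, e.g. the value of `--max-indexing-memory`.
#[cfg(test)]
mod max_memory_tests {
use std::str::FromStr;
use super::MaxMemory;
#[test]
fn parses_human_readable_sizes() {
let mem = MaxMemory::from_str("4 GiB").unwrap();
assert_eq!(mem.map(|b| b.get_bytes() as u64), Some(4u64 << 30));
}
}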
/// Returns the total amount of memory in bytes, or `None` if this system isn't supported.
fn total_memory_bytes() -> Option<u64> {
if System::IS_SUPPORTED {
let memory_kind = RefreshKind::new().with_memory();
let mut system = System::new_with_specifics(memory_kind);
system.refresh_memory();
Some(system.total_memory())
} else {
None
}
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
pub struct MaxThreads(usize);
impl FromStr for MaxThreads {
type Err = ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
usize::from_str(s).map(Self)
}
}
impl Default for MaxThreads {
fn default() -> Self {
MaxThreads(num_cpus::get() / 2)
}
}
impl fmt::Display for MaxThreads {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl Deref for MaxThreads {
type Target = usize;
fn deref(&self) -> &Self::Target {
&self.0
}
}
fn load_certs(filename: PathBuf) -> anyhow::Result<Vec<rustls::Certificate>> {
let certfile =
fs::File::open(filename).map_err(|_| anyhow::anyhow!("cannot open certificate file"))?;
let mut reader = BufReader::new(certfile);
certs(&mut reader)
.map(|certs| certs.into_iter().map(rustls::Certificate).collect())
.map_err(|_| anyhow::anyhow!("cannot read certificate file"))
}
fn load_private_key(filename: PathBuf) -> anyhow::Result<rustls::PrivateKey> {
let rsa_keys = {
let keyfile = fs::File::open(filename.clone())
.map_err(|_| anyhow::anyhow!("cannot open private key file"))?;
let mut reader = BufReader::new(keyfile);
rsa_private_keys(&mut reader)
.map_err(|_| anyhow::anyhow!("file contains invalid rsa private key"))?
};
let pkcs8_keys = {
let keyfile = fs::File::open(filename)
.map_err(|_| anyhow::anyhow!("cannot open private key file"))?;
let mut reader = BufReader::new(keyfile);
pkcs8_private_keys(&mut reader).map_err(|_| {
anyhow::anyhow!(
"file contains invalid pkcs8 private key (encrypted keys not supported)"
)
})?
};
// Prefer PKCS#8 keys over RSA keys.
if !pkcs8_keys.is_empty() {
Ok(rustls::PrivateKey(pkcs8_keys[0].clone()))
} else {
assert!(!rsa_keys.is_empty());
Ok(rustls::PrivateKey(rsa_keys[0].clone()))
}
}
fn load_ocsp(filename: &Option<PathBuf>) -> anyhow::Result<Vec<u8>> {
let mut ret = Vec::new();
if let Some(ref name) = filename {
fs::File::open(name)
.map_err(|_| anyhow::anyhow!("cannot open ocsp file"))?
.read_to_end(&mut ret)
.map_err(|_| anyhow::anyhow!("cannot read oscp file"))?;
}
Ok(ret)
}
/// Checks if the key is defined in the environment variables.
/// If not, inserts it with the given value.
pub fn export_to_env_if_not_present<T>(key: &str, value: T)
where
T: AsRef<OsStr>,
{
if let Err(VarError::NotPresent) = std::env::var(key) {
std::env::set_var(key, value);
}
}
/// Functions used to get the default values for `Opt` fields; they need to be functions because of serde's `default` attribute.
fn default_db_path() -> PathBuf {
PathBuf::from(DEFAULT_DB_PATH)
}
pub fn default_http_addr() -> String {
DEFAULT_HTTP_ADDR.to_string()
}
fn default_env() -> String {
DEFAULT_ENV.to_string()
}
fn default_max_index_size() -> Byte {
Byte::from_str(DEFAULT_MAX_INDEX_SIZE).unwrap()
}
fn default_max_task_db_size() -> Byte {
Byte::from_str(DEFAULT_MAX_TASK_DB_SIZE).unwrap()
}
fn default_http_payload_size_limit() -> Byte {
Byte::from_str(DEFAULT_HTTP_PAYLOAD_SIZE_LIMIT).unwrap()
}
fn default_snapshot_dir() -> PathBuf {
PathBuf::from(DEFAULT_SNAPSHOT_DIR)
}
fn default_snapshot_interval_sec() -> u64 {
DEFAULT_SNAPSHOT_INTERVAL_SEC
}
fn default_dump_dir() -> PathBuf {
PathBuf::from(DEFAULT_DUMP_DIR)
}
fn default_log_level() -> String {
DEFAULT_LOG_LEVEL.to_string()
}
fn default_log_every_n() -> usize {
DEFAULT_LOG_EVERY_N
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_valid_opt() {
assert!(Opt::try_parse_from(Some("")).is_ok());
}
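// Illustrative sketch (not part of the original code): the precedence contract of
// `export_to_env_if_not_present`, checked with the `temp-env` dev-dependency. The
// variable name is hypothetical.
#[test]
fn test_export_to_env_if_not_present_does_not_override() {
temp_env::with_var("MEILI_EXAMPLE_VAR", Some("from-env"), || {
export_to_env_if_not_present("MEILI_EXAMPLE_VAR", "from-config");
assert_eq!(std::env::var("MEILI_EXAMPLE_VAR").unwrap(), "from-env");
});
}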
#[test]
#[ignore]
fn test_meili_config_file_path_valid() {
temp_env::with_vars(
vec![("MEILI_CONFIG_FILE_PATH", Some("../config.toml"))], // Relative path in meilisearch package
|| {
assert!(Opt::try_build().is_ok());
},
);
}
#[test]
#[ignore]
fn test_meili_config_file_path_invalid() {
temp_env::with_vars(vec![("MEILI_CONFIG_FILE_PATH", Some("../configgg.toml"))], || {
let possible_error_messages = [
"unable to open or read the \"../configgg.toml\" configuration file: No such file or directory (os error 2).",
"unable to open or read the \"../configgg.toml\" configuration file: The system cannot find the file specified. (os error 2).", // Windows
];
let error_message = Opt::try_build().unwrap_err().to_string();
assert!(
possible_error_messages.contains(&error_message.as_str()),
"Expected onf of {:?}, got {:?}.",
possible_error_messages,
error_message
);
});
}
}


@ -0,0 +1,104 @@
use std::future::{ready, Ready};
use actix_web::dev::{self, Service, ServiceRequest, ServiceResponse, Transform};
use actix_web::http::header;
use actix_web::{Error, HttpResponse};
use futures_util::future::LocalBoxFuture;
use meilisearch_auth::actions;
use meilisearch_lib::MeiliSearch;
use meilisearch_types::error::ResponseError;
use prometheus::{Encoder, HistogramTimer, TextEncoder};
use crate::extractors::authentication::policies::ActionPolicy;
use crate::extractors::authentication::GuardedData;
pub async fn get_metrics(
meilisearch: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, MeiliSearch>,
) -> Result<HttpResponse, ResponseError> {
let search_rules = &meilisearch.filters().search_rules;
let response = meilisearch.get_all_stats(search_rules).await?;
crate::metrics::MEILISEARCH_DB_SIZE_BYTES.set(response.database_size as i64);
crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);
for (index, value) in response.indexes.iter() {
crate::metrics::MEILISEARCH_INDEX_DOCS_COUNT
.with_label_values(&[index])
.set(value.number_of_documents as i64);
}
let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder.encode(&prometheus::gather(), &mut buffer).expect("Failed to encode metrics");
let response = String::from_utf8(buffer).expect("Failed to convert bytes to string");
Ok(HttpResponse::Ok().insert_header(header::ContentType(mime::TEXT_PLAIN)).body(response))
}
pub struct RouteMetrics;
// The middleware factory is the `Transform` trait from the actix-service crate.
// `S` - the type of the next service
// `B` - the type of the response body
impl<S, B> Transform<S, ServiceRequest> for RouteMetrics
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<B>;
type Error = Error;
type InitError = ();
type Transform = RouteMetricsMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(RouteMetricsMiddleware { service }))
}
}
pub struct RouteMetricsMiddleware<S> {
service: S,
}
impl<S, B> Service<ServiceRequest> for RouteMetricsMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<B>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::forward_ready!(service);
fn call(&self, req: ServiceRequest) -> Self::Future {
let mut histogram_timer: Option<HistogramTimer> = None;
let request_path = req.path();
let is_registered_resource = req.resource_map().has_resource(request_path);
if is_registered_resource {
let request_method = req.method().to_string();
histogram_timer = Some(
crate::metrics::HTTP_RESPONSE_TIME_SECONDS
.with_label_values(&[&request_method, request_path])
.start_timer(),
);
crate::metrics::HTTP_REQUESTS_TOTAL
.with_label_values(&[&request_method, request_path])
.inc();
}
let fut = self.service.call(req);
Box::pin(async move {
let res = fut.await?;
if let Some(histogram_timer) = histogram_timer {
histogram_timer.observe_duration();
};
Ok(res)
})
}
}
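// Illustrative sketch (not part of the original code): this middleware is meant to
// wrap the whole application, gated on the metrics option (see `create_app` in
// `lib.rs`):
//
//     let app = app.wrap(middleware::Condition::new(opt.enable_metrics_route, RouteMetrics));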


@ -0,0 +1,158 @@
use std::str;
use actix_web::{web, HttpRequest, HttpResponse};
use meilisearch_auth::error::AuthControllerError;
use meilisearch_auth::AuthController;
use meilisearch_types::error::{Code, ResponseError};
use meilisearch_types::keys::{Action, Key};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use time::OffsetDateTime;
use uuid::Uuid;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::Pagination;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(
web::resource("")
.route(web::post().to(SeqHandler(create_api_key)))
.route(web::get().to(SeqHandler(list_api_keys))),
)
.service(
web::resource("/{key}")
.route(web::get().to(SeqHandler(get_api_key)))
.route(web::patch().to(SeqHandler(patch_api_key)))
.route(web::delete().to(SeqHandler(delete_api_key))),
);
}
pub async fn create_api_key(
auth_controller: GuardedData<ActionPolicy<{ actions::KEYS_CREATE }>, AuthController>,
body: web::Json<Value>,
_req: HttpRequest,
) -> Result<HttpResponse, ResponseError> {
let v = body.into_inner();
let res = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let key = auth_controller.create_key(v)?;
Ok(KeyView::from_key(key, &auth_controller))
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
Ok(HttpResponse::Created().json(res))
}
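// Illustrative sketch (not part of the original code): a JSON body accepted by
// `create_api_key`; every value below is hypothetical. The response is the
// corresponding `KeyView`, with the generated `key` string included.
//
//     {
//       "name": "Search-only key",
//       "actions": ["search"],
//       "indexes": ["movies"],
//       "expiresAt": "2024-01-01T00:00:00Z"
//     }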
pub async fn list_api_keys(
auth_controller: GuardedData<ActionPolicy<{ actions::KEYS_GET }>, AuthController>,
paginate: web::Query<Pagination>,
) -> Result<HttpResponse, ResponseError> {
let page_view = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let keys = auth_controller.list_keys()?;
let page_view = paginate
.auto_paginate_sized(keys.into_iter().map(|k| KeyView::from_key(k, &auth_controller)));
Ok(page_view)
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
Ok(HttpResponse::Ok().json(page_view))
}
pub async fn get_api_key(
auth_controller: GuardedData<ActionPolicy<{ actions::KEYS_GET }>, AuthController>,
path: web::Path<AuthParam>,
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
let res = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let uid =
Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
let key = auth_controller.get_key(uid)?;
Ok(KeyView::from_key(key, &auth_controller))
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
Ok(HttpResponse::Ok().json(res))
}
pub async fn patch_api_key(
auth_controller: GuardedData<ActionPolicy<{ actions::KEYS_UPDATE }>, AuthController>,
body: web::Json<Value>,
path: web::Path<AuthParam>,
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
let body = body.into_inner();
let res = tokio::task::spawn_blocking(move || -> Result<_, AuthControllerError> {
let uid =
Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
let key = auth_controller.update_key(uid, body)?;
Ok(KeyView::from_key(key, &auth_controller))
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
Ok(HttpResponse::Ok().json(res))
}
pub async fn delete_api_key(
auth_controller: GuardedData<ActionPolicy<{ actions::KEYS_DELETE }>, AuthController>,
path: web::Path<AuthParam>,
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
tokio::task::spawn_blocking(move || {
let uid =
Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
auth_controller.delete_key(uid)
})
.await
.map_err(|e| ResponseError::from_msg(e.to_string(), Code::Internal))??;
Ok(HttpResponse::NoContent().finish())
}
#[derive(Deserialize)]
pub struct AuthParam {
key: String,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct KeyView {
name: Option<String>,
description: Option<String>,
key: String,
uid: Uuid,
actions: Vec<Action>,
indexes: Vec<String>,
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
expires_at: Option<OffsetDateTime>,
#[serde(serialize_with = "time::serde::rfc3339::serialize")]
created_at: OffsetDateTime,
#[serde(serialize_with = "time::serde::rfc3339::serialize")]
updated_at: OffsetDateTime,
}
impl KeyView {
fn from_key(key: Key, auth: &AuthController) -> Self {
let generated_key = auth.generate_key(key.uid).unwrap_or_default();
KeyView {
name: key.name,
description: key.description,
key: generated_key,
uid: key.uid,
actions: key.actions,
indexes: key.indexes.into_iter().map(String::from).collect(),
expires_at: key.expires_at,
created_at: key.created_at,
updated_at: key.updated_at,
}
}
}


@ -0,0 +1,37 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_auth::AuthController;
use meilisearch_types::error::ResponseError;
use meilisearch_types::tasks::KindWithContent;
use serde_json::json;
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::SummarizedTaskView;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(create_dump))));
}
pub async fn create_dump(
index_scheduler: GuardedData<ActionPolicy<{ actions::DUMPS_CREATE }>, Data<IndexScheduler>>,
auth_controller: GuardedData<ActionPolicy<{ actions::DUMPS_CREATE }>, AuthController>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.publish("Dump Created".to_string(), json!({}), Some(&req));
let task = KindWithContent::DumpCreation {
keys: auth_controller.list_keys()?,
instance_uid: analytics.instance_uid().cloned(),
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}


@ -0,0 +1,438 @@
use actix_web::http::header::CONTENT_TYPE;
use actix_web::web::Data;
use actix_web::{web, HttpMessage, HttpRequest, HttpResponse};
use bstr::ByteSlice;
use futures::StreamExt;
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::document_formats::{read_csv, read_json, read_ndjson, PayloadType};
use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::RoTxn;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::milli::update::IndexDocumentsMethod;
use meilisearch_types::star_or::StarOr;
use meilisearch_types::tasks::KindWithContent;
use meilisearch_types::{milli, Document, Index};
use mime::Mime;
use once_cell::sync::Lazy;
use serde::Deserialize;
use serde_cs::vec::CS;
use serde_json::Value;
use std::io::ErrorKind;
use tempfile::tempfile;
use tokio::fs::File;
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
use crate::analytics::{Analytics, DocumentDeletionKind};
use crate::error::MeilisearchHttpError;
use crate::error::PayloadError::ReceivePayloadErr;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::payload::Payload;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::routes::{fold_star_or, PaginationView, SummarizedTaskView};
static ACCEPTED_CONTENT_TYPE: Lazy<Vec<String>> = Lazy::new(|| {
vec!["application/json".to_string(), "application/x-ndjson".to_string(), "text/csv".to_string()]
});
/// Extracts the mime type from the content type, returning
/// a Meilisearch error if anything goes wrong.
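/// An unparseable `Content-Type` header maps to `InvalidContentType`; an
/// absent header yields `Ok(None)`, which callers treat as a missing content type.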
fn extract_mime_type(req: &HttpRequest) -> Result<Option<Mime>, MeilisearchHttpError> {
match req.mime_type() {
Ok(Some(mime)) => Ok(Some(mime)),
Ok(None) => Ok(None),
Err(_) => match req.headers().get(CONTENT_TYPE) {
Some(content_type) => Err(MeilisearchHttpError::InvalidContentType(
content_type.as_bytes().as_bstr().to_string(),
ACCEPTED_CONTENT_TYPE.clone(),
)),
None => Err(MeilisearchHttpError::MissingContentType(ACCEPTED_CONTENT_TYPE.clone())),
},
}
}
#[derive(Deserialize)]
pub struct DocumentParam {
index_uid: String,
document_id: String,
}
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(
web::resource("")
.route(web::get().to(SeqHandler(get_all_documents)))
.route(web::post().to(SeqHandler(add_documents)))
.route(web::put().to(SeqHandler(update_documents)))
.route(web::delete().to(SeqHandler(clear_all_documents))),
)
    // this route needs to be registered before /documents/{document_id} to match properly
.service(web::resource("/delete-batch").route(web::post().to(SeqHandler(delete_documents))))
.service(
web::resource("/{document_id}")
.route(web::get().to(SeqHandler(get_document)))
.route(web::delete().to(SeqHandler(delete_document))),
);
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct GetDocument {
fields: Option<CS<StarOr<String>>>,
}
pub async fn get_document(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
path: web::Path<DocumentParam>,
params: web::Query<GetDocument>,
) -> Result<HttpResponse, ResponseError> {
let GetDocument { fields } = params.into_inner();
let attributes_to_retrieve = fields.and_then(fold_star_or);
let index = index_scheduler.index(&path.index_uid)?;
let document = retrieve_document(&index, &path.document_id, attributes_to_retrieve)?;
debug!("returns: {:?}", document);
Ok(HttpResponse::Ok().json(document))
}
pub async fn delete_document(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
path: web::Path<DocumentParam>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.delete_documents(DocumentDeletionKind::PerDocumentId, &req);
let DocumentParam { document_id, index_uid } = path.into_inner();
let task = KindWithContent::DocumentDeletion { index_uid, documents_ids: vec![document_id] };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct BrowseQuery {
#[serde(default)]
offset: usize,
#[serde(default = "crate::routes::PAGINATION_DEFAULT_LIMIT")]
limit: usize,
fields: Option<CS<StarOr<String>>>,
}
pub async fn get_all_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: web::Query<BrowseQuery>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", params);
let BrowseQuery { limit, offset, fields } = params.into_inner();
let attributes_to_retrieve = fields.and_then(fold_star_or);
let index = index_scheduler.index(&index_uid)?;
let (total, documents) = retrieve_documents(&index, offset, limit, attributes_to_retrieve)?;
let ret = PaginationView::new(offset, limit, total as usize, documents);
debug!("returns: {:?}", ret);
Ok(HttpResponse::Ok().json(ret))
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct UpdateDocumentsQuery {
pub primary_key: Option<String>,
}
pub async fn add_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ADD }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: web::Query<UpdateDocumentsQuery>,
body: Payload,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", params);
let params = params.into_inner();
analytics.add_documents(&params, index_scheduler.index(&index_uid).is_err(), &req);
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let task = document_addition(
extract_mime_type(&req)?,
index_scheduler,
index_uid.into_inner(),
params.primary_key,
body,
IndexDocumentsMethod::ReplaceDocuments,
allow_index_creation,
)
.await?;
Ok(HttpResponse::Accepted().json(task))
}
pub async fn update_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ADD }>, Data<IndexScheduler>>,
path: web::Path<String>,
params: web::Query<UpdateDocumentsQuery>,
body: Payload,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", params);
let index_uid = path.into_inner();
analytics.update_documents(&params, index_scheduler.index(&index_uid).is_err(), &req);
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let task = document_addition(
extract_mime_type(&req)?,
index_scheduler,
index_uid,
params.into_inner().primary_key,
body,
IndexDocumentsMethod::UpdateDocuments,
allow_index_creation,
)
.await?;
Ok(HttpResponse::Accepted().json(task))
}
async fn document_addition(
mime_type: Option<Mime>,
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_ADD }>, Data<IndexScheduler>>,
index_uid: String,
primary_key: Option<String>,
mut body: Payload,
method: IndexDocumentsMethod,
allow_index_creation: bool,
) -> Result<SummarizedTaskView, MeilisearchHttpError> {
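    // Overview: detect the payload format from the mime type, stream the body
    // into a temporary file, parse and persist it as an update file, then
    // register the indexing task.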
let format = match mime_type.as_ref().map(|m| (m.type_().as_str(), m.subtype().as_str())) {
Some(("application", "json")) => PayloadType::Json,
Some(("application", "x-ndjson")) => PayloadType::Ndjson,
Some(("text", "csv")) => PayloadType::Csv,
Some((type_, subtype)) => {
return Err(MeilisearchHttpError::InvalidContentType(
format!("{}/{}", type_, subtype),
ACCEPTED_CONTENT_TYPE.clone(),
))
}
None => {
return Err(MeilisearchHttpError::MissingContentType(ACCEPTED_CONTENT_TYPE.clone()))
}
};
    // Check that the index uid is valid.
let index_uid = IndexUid::try_from(index_uid)?.into_inner();
let (uuid, mut update_file) = index_scheduler.create_update_file()?;
let temp_file = match tempfile() {
Ok(temp_file) => temp_file,
Err(e) => {
return Err(MeilisearchHttpError::Payload(ReceivePayloadErr(Box::new(e))));
}
};
let mut buffer = File::from_std(temp_file);
let mut buffer_write_size: usize = 0;
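    // Counts written chunks; only used to detect an entirely empty payload.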
while let Some(bytes) = body.next().await {
let byte = &bytes?;
if byte.is_empty() && buffer_write_size == 0 {
return Err(MeilisearchHttpError::MissingPayload(format));
}
match buffer.write_all(byte).await {
Ok(()) => buffer_write_size += 1,
Err(e) => {
return Err(MeilisearchHttpError::Payload(ReceivePayloadErr(Box::new(e))));
}
};
}
if let Err(e) = buffer.flush().await {
return Err(MeilisearchHttpError::Payload(ReceivePayloadErr(Box::new(e))));
}
if buffer_write_size == 0 {
return Err(MeilisearchHttpError::MissingPayload(format));
}
if let Err(e) = buffer.seek(std::io::SeekFrom::Start(0)).await {
return Err(MeilisearchHttpError::Payload(ReceivePayloadErr(Box::new(e))));
};
let read_file = buffer.into_std().await;
let documents_count =
tokio::task::spawn_blocking(move || -> Result<_, MeilisearchHttpError> {
let documents_count = match format {
PayloadType::Json => read_json(&read_file, update_file.as_file_mut())?,
PayloadType::Csv => read_csv(&read_file, update_file.as_file_mut())?,
PayloadType::Ndjson => read_ndjson(&read_file, update_file.as_file_mut())?,
};
            // we NEED to persist the file here because we moved the `update_file` into another task.
update_file.persist()?;
Ok(documents_count)
})
.await;
let documents_count = match documents_count {
Ok(Ok(documents_count)) => documents_count as u64,
        // in this case the file cannot have been persisted.
Ok(Err(e)) => return Err(e),
Err(e) => {
            // Here the file MAY or may not have been persisted.
            // We don't know, thus we ignore the file-not-found error.
match index_scheduler.delete_update_file(uuid) {
Ok(()) => (),
Err(index_scheduler::Error::FileStore(file_store::Error::IoError(e)))
if e.kind() == ErrorKind::NotFound => {}
Err(e) => {
log::warn!("Unknown error happened while deleting a malformed update file with uuid {uuid}: {e}");
}
}
// We still want to return the original error to the end user.
return Err(e.into());
}
};
let task = KindWithContent::DocumentAdditionOrUpdate {
method,
content_file: uuid,
documents_count,
primary_key,
allow_index_creation,
index_uid,
};
let scheduler = index_scheduler.clone();
let task = match tokio::task::spawn_blocking(move || scheduler.register(task)).await? {
Ok(task) => task,
Err(e) => {
index_scheduler.delete_update_file(uuid)?;
return Err(e.into());
}
};
debug!("returns: {:?}", task);
Ok(task.into())
}
pub async fn delete_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
path: web::Path<String>,
body: web::Json<Vec<Value>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", body);
analytics.delete_documents(DocumentDeletionKind::PerBatch, &req);
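    // Document ids may be provided as strings or numbers; stringify the latter.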
let ids = body
.iter()
.map(|v| v.as_str().map(String::from).unwrap_or_else(|| v.to_string()))
.collect();
let task =
KindWithContent::DocumentDeletion { index_uid: path.into_inner(), documents_ids: ids };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
pub async fn clear_all_documents(
index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_DELETE }>, Data<IndexScheduler>>,
path: web::Path<String>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.delete_documents(DocumentDeletionKind::ClearAll, &req);
let task = KindWithContent::DocumentClear { index_uid: path.into_inner() };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
fn all_documents<'a>(
index: &Index,
rtxn: &'a RoTxn,
) -> Result<impl Iterator<Item = Result<Document, ResponseError>> + 'a, ResponseError> {
let fields_ids_map = index.fields_ids_map(rtxn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
Ok(index.all_documents(rtxn)?.map(move |ret| {
ret.map_err(ResponseError::from).and_then(|(_key, document)| -> Result<_, ResponseError> {
Ok(milli::obkv_to_json(&all_fields, &fields_ids_map, document)?)
})
}))
}
fn retrieve_documents<S: AsRef<str>>(
index: &Index,
offset: usize,
limit: usize,
attributes_to_retrieve: Option<Vec<S>>,
) -> Result<(u64, Vec<Document>), ResponseError> {
let rtxn = index.read_txn()?;
let mut documents = Vec::new();
for document in all_documents(index, &rtxn)?.skip(offset).take(limit) {
let document = match &attributes_to_retrieve {
Some(attributes_to_retrieve) => permissive_json_pointer::select_values(
&document?,
attributes_to_retrieve.iter().map(|s| s.as_ref()),
),
None => document?,
};
documents.push(document);
}
let number_of_documents = index.number_of_documents(&rtxn)?;
Ok((number_of_documents, documents))
}
fn retrieve_document<S: AsRef<str>>(
index: &Index,
doc_id: &str,
attributes_to_retrieve: Option<Vec<S>>,
) -> Result<Document, ResponseError> {
let txn = index.read_txn()?;
let fields_ids_map = index.fields_ids_map(&txn)?;
let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
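    // Resolve the user-facing document id to milli's internal document id.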
let internal_id = index
.external_documents_ids(&txn)?
.get(doc_id.as_bytes())
.ok_or_else(|| MeilisearchHttpError::DocumentNotFound(doc_id.to_string()))?;
let document = index
.documents(&txn, std::iter::once(internal_id))?
.into_iter()
.next()
.map(|(_, d)| d)
.ok_or_else(|| MeilisearchHttpError::DocumentNotFound(doc_id.to_string()))?;
let document = meilisearch_types::milli::obkv_to_json(&all_fields, &fields_ids_map, document)?;
let document = match &attributes_to_retrieve {
Some(attributes_to_retrieve) => permissive_json_pointer::select_values(
&document,
attributes_to_retrieve.iter().map(|s| s.as_ref()),
),
None => document,
};
Ok(document)
}


@ -0,0 +1,214 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::milli::{self, FieldDistribution, Index};
use meilisearch_types::tasks::KindWithContent;
use serde::{Deserialize, Serialize};
use serde_json::json;
use time::OffsetDateTime;
use super::{Pagination, SummarizedTaskView};
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::{AuthenticationError, GuardedData};
use crate::extractors::sequential_extractor::SeqHandler;
pub mod documents;
pub mod search;
pub mod settings;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(
web::resource("")
.route(web::get().to(list_indexes))
.route(web::post().to(SeqHandler(create_index))),
)
.service(
web::scope("/{index_uid}")
.service(
web::resource("")
.route(web::get().to(SeqHandler(get_index)))
.route(web::patch().to(SeqHandler(update_index)))
.route(web::delete().to(SeqHandler(delete_index))),
)
.service(web::resource("/stats").route(web::get().to(SeqHandler(get_index_stats))))
.service(web::scope("/documents").configure(documents::configure))
.service(web::scope("/search").configure(search::configure))
.service(web::scope("/settings").configure(settings::configure)),
);
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexView {
pub uid: String,
#[serde(with = "time::serde::rfc3339")]
pub created_at: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub updated_at: OffsetDateTime,
pub primary_key: Option<String>,
}
impl IndexView {
fn new(uid: String, index: &Index) -> Result<IndexView, milli::Error> {
let rtxn = index.read_txn()?;
Ok(IndexView {
uid,
created_at: index.created_at(&rtxn)?,
updated_at: index.updated_at(&rtxn)?,
primary_key: index.primary_key(&rtxn)?.map(String::from),
})
}
}
pub async fn list_indexes(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, Data<IndexScheduler>>,
paginate: web::Query<Pagination>,
) -> Result<HttpResponse, ResponseError> {
let search_rules = &index_scheduler.filters().search_rules;
let indexes: Vec<_> = index_scheduler.indexes()?;
let indexes = indexes
.into_iter()
.filter(|(name, _)| search_rules.is_index_authorized(name))
.map(|(name, index)| IndexView::new(name, &index))
.collect::<Result<Vec<_>, _>>()?;
let ret = paginate.auto_paginate_sized(indexes.into_iter());
debug!("returns: {:?}", ret);
Ok(HttpResponse::Ok().json(ret))
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct IndexCreateRequest {
uid: String,
primary_key: Option<String>,
}
pub async fn create_index(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_CREATE }>, Data<IndexScheduler>>,
body: web::Json<IndexCreateRequest>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
let IndexCreateRequest { primary_key, uid } = body.into_inner();
let uid = IndexUid::try_from(uid)?.into_inner();
let allow_index_creation = index_scheduler.filters().search_rules.is_index_authorized(&uid);
if allow_index_creation {
analytics.publish(
"Index Created".to_string(),
json!({ "primary_key": primary_key }),
Some(&req),
);
let task = KindWithContent::IndexCreation { index_uid: uid, primary_key };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
Ok(HttpResponse::Accepted().json(task))
} else {
Err(AuthenticationError::InvalidToken.into())
}
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
#[allow(dead_code)]
pub struct UpdateIndexRequest {
uid: Option<String>,
primary_key: Option<String>,
}
pub async fn get_index(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
let index_view = IndexView::new(index_uid.into_inner(), &index)?;
debug!("returns: {:?}", index_view);
Ok(HttpResponse::Ok().json(index_view))
}
pub async fn update_index(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_UPDATE }>, Data<IndexScheduler>>,
path: web::Path<String>,
body: web::Json<UpdateIndexRequest>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", body);
let body = body.into_inner();
analytics.publish(
"Index Updated".to_string(),
json!({ "primary_key": body.primary_key}),
Some(&req),
);
let task = KindWithContent::IndexUpdate {
index_uid: path.into_inner(),
primary_key: body.primary_key,
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
pub async fn delete_index(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_DELETE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let task = KindWithContent::IndexDeletion { index_uid: index_uid.into_inner() };
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
Ok(HttpResponse::Accepted().json(task))
}
pub async fn get_index_stats(
index_scheduler: GuardedData<ActionPolicy<{ actions::STATS_GET }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.publish("Stats Seen".to_string(), json!({ "per_index_uid": true }), Some(&req));
let stats = IndexStats::new((*index_scheduler).clone(), index_uid.into_inner())?;
debug!("returns: {:?}", stats);
Ok(HttpResponse::Ok().json(stats))
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct IndexStats {
pub number_of_documents: u64,
pub is_indexing: bool,
pub field_distribution: FieldDistribution,
}
impl IndexStats {
pub fn new(
index_scheduler: Data<IndexScheduler>,
index_uid: String,
) -> Result<Self, ResponseError> {
// we check if there is currently a task processing associated with this index.
let is_processing = index_scheduler.is_index_processing(&index_uid)?;
let index = index_scheduler.index(&index_uid)?;
let rtxn = index.read_txn()?;
Ok(IndexStats {
number_of_documents: index.number_of_documents(&rtxn)?,
is_indexing: is_processing,
field_distribution: index.field_distribution(&rtxn)?,
})
}
}


@ -0,0 +1,225 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_auth::IndexSearchRules;
use meilisearch_types::error::ResponseError;
use serde::Deserialize;
use serde_cs::vec::CS;
use serde_json::Value;
use crate::analytics::{Analytics, SearchAggregator};
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::extractors::sequential_extractor::SeqHandler;
use crate::search::{
perform_search, MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
DEFAULT_SEARCH_OFFSET,
};
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(
web::resource("")
.route(web::get().to(SeqHandler(search_with_url_query)))
.route(web::post().to(SeqHandler(search_with_post))),
);
}
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SearchQueryGet {
q: Option<String>,
#[serde(default = "DEFAULT_SEARCH_OFFSET")]
offset: usize,
#[serde(default = "DEFAULT_SEARCH_LIMIT")]
limit: usize,
page: Option<usize>,
hits_per_page: Option<usize>,
attributes_to_retrieve: Option<CS<String>>,
attributes_to_crop: Option<CS<String>>,
#[serde(default = "DEFAULT_CROP_LENGTH")]
crop_length: usize,
attributes_to_highlight: Option<CS<String>>,
filter: Option<String>,
sort: Option<String>,
#[serde(default = "Default::default")]
show_matches_position: bool,
facets: Option<CS<String>>,
#[serde(default = "DEFAULT_HIGHLIGHT_PRE_TAG")]
highlight_pre_tag: String,
#[serde(default = "DEFAULT_HIGHLIGHT_POST_TAG")]
highlight_post_tag: String,
#[serde(default = "DEFAULT_CROP_MARKER")]
crop_marker: String,
#[serde(default)]
matching_strategy: MatchingStrategy,
}
impl From<SearchQueryGet> for SearchQuery {
fn from(other: SearchQueryGet) -> Self {
let filter = match other.filter {
Some(f) => match serde_json::from_str(&f) {
Ok(v) => Some(v),
_ => Some(Value::String(f)),
},
None => None,
};
Self {
q: other.q,
offset: other.offset,
limit: other.limit,
page: other.page,
hits_per_page: other.hits_per_page,
attributes_to_retrieve: other.attributes_to_retrieve.map(|o| o.into_iter().collect()),
attributes_to_crop: other.attributes_to_crop.map(|o| o.into_iter().collect()),
crop_length: other.crop_length,
attributes_to_highlight: other.attributes_to_highlight.map(|o| o.into_iter().collect()),
filter,
sort: other.sort.map(|attr| fix_sort_query_parameters(&attr)),
show_matches_position: other.show_matches_position,
facets: other.facets.map(|o| o.into_iter().collect()),
highlight_pre_tag: other.highlight_pre_tag,
highlight_post_tag: other.highlight_post_tag,
crop_marker: other.crop_marker,
matching_strategy: other.matching_strategy,
}
}
}
/// Incorporates the given search rules into the search query's filter.
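/// For instance (illustrative filters), a query filter `"genre = horror"`
/// combined with a rule filter `"user_id = 1"` becomes the array
/// `["genre = horror", "user_id = 1"]`.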
fn add_search_rules(query: &mut SearchQuery, rules: IndexSearchRules) {
query.filter = match (query.filter.take(), rules.filter) {
(None, rules_filter) => rules_filter,
(filter, None) => filter,
(Some(filter), Some(rules_filter)) => {
let filter = match filter {
Value::Array(filter) => filter,
filter => vec![filter],
};
let rules_filter = match rules_filter {
Value::Array(rules_filter) => rules_filter,
rules_filter => vec![rules_filter],
};
Some(Value::Array([filter, rules_filter].concat()))
}
}
}
// TODO: TAMO: split on :asc, and :desc, instead of doing some weird things
/// Transforms the sort query parameter into the format expected by the POST route.
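/// For instance, `"price:asc, _geoPoint(12, 13):desc"` becomes
/// `["price:asc", "_geoPoint(12,13):desc"]` (`price` being a hypothetical field).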
fn fix_sort_query_parameters(sort_query: &str) -> Vec<String> {
let mut sort_parameters = Vec::new();
let mut merge = false;
for current_sort in sort_query.trim_matches('"').split(',').map(|s| s.trim()) {
if current_sort.starts_with("_geoPoint(") {
sort_parameters.push(current_sort.to_string());
merge = true;
} else if merge && !sort_parameters.is_empty() {
let s = sort_parameters.last_mut().unwrap();
s.push(',');
s.push_str(current_sort);
if current_sort.ends_with("):desc") || current_sort.ends_with("):asc") {
merge = false;
}
} else {
sort_parameters.push(current_sort.to_string());
merge = false;
}
}
sort_parameters
}
pub async fn search_with_url_query(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: web::Query<SearchQueryGet>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
debug!("called with params: {:?}", params);
let mut query: SearchQuery = params.into_inner().into();
// Tenant token search_rules.
if let Some(search_rules) =
index_scheduler.filters().search_rules.get_index_search_rules(&index_uid)
{
add_search_rules(&mut query, search_rules);
}
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
let search_result = tokio::task::spawn_blocking(move || perform_search(&index, query)).await?;
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
analytics.get_search(aggregate);
let search_result = search_result?;
debug!("returns: {:?}", search_result);
Ok(HttpResponse::Ok().json(search_result))
}
pub async fn search_with_post(
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
params: web::Json<SearchQuery>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
let mut query = params.into_inner();
debug!("search called with params: {:?}", query);
// Tenant token search_rules.
if let Some(search_rules) =
index_scheduler.filters().search_rules.get_index_search_rules(&index_uid)
{
add_search_rules(&mut query, search_rules);
}
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
let search_result = tokio::task::spawn_blocking(move || perform_search(&index, query)).await?;
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
analytics.post_search(aggregate);
let search_result = search_result?;
debug!("returns: {:?}", search_result);
Ok(HttpResponse::Ok().json(search_result))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_fix_sort_query_parameters() {
let sort = fix_sort_query_parameters("_geoPoint(12, 13):asc");
assert_eq!(sort, vec!["_geoPoint(12,13):asc".to_string()]);
let sort = fix_sort_query_parameters("doggo:asc,_geoPoint(12.45,13.56):desc");
assert_eq!(sort, vec!["doggo:asc".to_string(), "_geoPoint(12.45,13.56):desc".to_string(),]);
let sort = fix_sort_query_parameters(
"doggo:asc , _geoPoint(12.45, 13.56, 2590352):desc , catto:desc",
);
assert_eq!(
sort,
vec![
"doggo:asc".to_string(),
"_geoPoint(12.45,13.56,2590352):desc".to_string(),
"catto:desc".to_string(),
]
);
let sort = fix_sort_query_parameters("doggo:asc , _geoPoint(1, 2), catto:desc");
// This is ugly but eh, I don't want to write a full parser just for this unused route
assert_eq!(sort, vec!["doggo:asc".to_string(), "_geoPoint(1,2),catto:desc".to_string(),]);
}
}


@ -0,0 +1,565 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::settings::{settings, Settings, Unchecked};
use meilisearch_types::tasks::KindWithContent;
use serde_json::json;
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
use crate::routes::SummarizedTaskView;
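// `make_setting_route!` generates, for a single index setting, a module with
// `get`, `update`, and `delete` handlers plus a `resources()` helper that
// mounts them on the given route with the given update verb.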
#[macro_export]
macro_rules! make_setting_route {
($route:literal, $update_verb:ident, $type:ty, $attr:ident, $camelcase_attr:literal, $analytics_var:ident, $analytics:expr) => {
pub mod $attr {
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse, Resource};
use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::milli::update::Setting;
use meilisearch_types::settings::{settings, Settings};
use meilisearch_types::tasks::KindWithContent;
use $crate::analytics::Analytics;
use $crate::extractors::authentication::policies::*;
use $crate::extractors::authentication::GuardedData;
use $crate::extractors::sequential_extractor::SeqHandler;
use $crate::routes::SummarizedTaskView;
pub async fn delete(
index_scheduler: GuardedData<
ActionPolicy<{ actions::SETTINGS_UPDATE }>,
Data<IndexScheduler>,
>,
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let new_settings = Settings { $attr: Setting::Reset, ..Default::default() };
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
let task = KindWithContent::SettingsUpdate {
index_uid,
new_settings: Box::new(new_settings),
is_deletion: true,
allow_index_creation,
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task))
.await??
.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
pub async fn update(
index_scheduler: GuardedData<
ActionPolicy<{ actions::SETTINGS_UPDATE }>,
Data<IndexScheduler>,
>,
index_uid: actix_web::web::Path<String>,
body: actix_web::web::Json<Option<$type>>,
req: HttpRequest,
$analytics_var: web::Data<dyn Analytics>,
) -> std::result::Result<HttpResponse, ResponseError> {
let body = body.into_inner();
$analytics(&body, &req);
let new_settings = Settings {
$attr: match body {
Some(inner_body) => Setting::Set(inner_body),
None => Setting::Reset,
},
..Default::default()
};
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
let task = KindWithContent::SettingsUpdate {
index_uid,
new_settings: Box::new(new_settings),
is_deletion: false,
allow_index_creation,
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task))
.await??
.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
pub async fn get(
index_scheduler: GuardedData<
ActionPolicy<{ actions::SETTINGS_GET }>,
Data<IndexScheduler>,
>,
index_uid: actix_web::web::Path<String>,
) -> std::result::Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
let rtxn = index.read_txn()?;
let settings = settings(&index, &rtxn)?;
debug!("returns: {:?}", settings);
let mut json = serde_json::json!(&settings);
let val = json[$camelcase_attr].take();
Ok(HttpResponse::Ok().json(val))
}
pub fn resources() -> Resource {
Resource::new($route)
.route(web::get().to(SeqHandler(get)))
.route(web::$update_verb().to(SeqHandler(update)))
.route(web::delete().to(SeqHandler(delete)))
}
}
};
}
make_setting_route!(
"/filterable-attributes",
put,
std::collections::BTreeSet<String>,
filterable_attributes,
"filterableAttributes",
analytics,
|setting: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"FilterableAttributes Updated".to_string(),
json!({
"filterable_attributes": {
"total": setting.as_ref().map(|filter| filter.len()).unwrap_or(0),
"has_geo": setting.as_ref().map(|filter| filter.contains("_geo")).unwrap_or(false),
}
}),
Some(req),
);
}
);
make_setting_route!(
"/sortable-attributes",
put,
std::collections::BTreeSet<String>,
sortable_attributes,
"sortableAttributes",
analytics,
|setting: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"SortableAttributes Updated".to_string(),
json!({
"sortable_attributes": {
"total": setting.as_ref().map(|sort| sort.len()),
"has_geo": setting.as_ref().map(|sort| sort.contains("_geo")),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/displayed-attributes",
put,
Vec<String>,
displayed_attributes,
"displayedAttributes",
analytics,
|displayed: &Option<Vec<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"DisplayedAttributes Updated".to_string(),
json!({
"displayed_attributes": {
"total": displayed.as_ref().map(|displayed| displayed.len()),
"with_wildcard": displayed.as_ref().map(|displayed| displayed.iter().any(|displayed| displayed == "*")),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/typo-tolerance",
patch,
meilisearch_types::settings::TypoSettings,
typo_tolerance,
"typoTolerance",
analytics,
|setting: &Option<meilisearch_types::settings::TypoSettings>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"TypoTolerance Updated".to_string(),
json!({
"typo_tolerance": {
"enabled": setting.as_ref().map(|s| !matches!(s.enabled, Setting::Set(false))),
"disable_on_attributes": setting
.as_ref()
.and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())),
"disable_on_words": setting
.as_ref()
.and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())),
"min_word_size_for_one_typo": setting
.as_ref()
.and_then(|s| s.min_word_size_for_typos
.as_ref()
.set()
.map(|s| s.one_typo.set()))
.flatten(),
"min_word_size_for_two_typos": setting
.as_ref()
.and_then(|s| s.min_word_size_for_typos
.as_ref()
.set()
.map(|s| s.two_typos.set()))
.flatten(),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/searchable-attributes",
put,
Vec<String>,
searchable_attributes,
"searchableAttributes",
analytics,
|setting: &Option<Vec<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"SearchableAttributes Updated".to_string(),
json!({
"searchable_attributes": {
"total": setting.as_ref().map(|searchable| searchable.len()),
"with_wildcard": setting.as_ref().map(|searchable| searchable.iter().any(|searchable| searchable == "*")),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/stop-words",
put,
std::collections::BTreeSet<String>,
stop_words,
"stopWords",
analytics,
|stop_words: &Option<std::collections::BTreeSet<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"StopWords Updated".to_string(),
json!({
"stop_words": {
"total": stop_words.as_ref().map(|stop_words| stop_words.len()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/synonyms",
put,
std::collections::BTreeMap<String, Vec<String>>,
synonyms,
"synonyms",
analytics,
|synonyms: &Option<std::collections::BTreeMap<String, Vec<String>>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"Synonyms Updated".to_string(),
json!({
"synonyms": {
"total": synonyms.as_ref().map(|synonyms| synonyms.len()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/distinct-attribute",
put,
String,
distinct_attribute,
"distinctAttribute",
analytics,
|distinct: &Option<String>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"DistinctAttribute Updated".to_string(),
json!({
"distinct_attribute": {
"set": distinct.is_some(),
}
}),
Some(req),
);
}
);
make_setting_route!(
"/ranking-rules",
put,
Vec<String>,
ranking_rules,
"rankingRules",
analytics,
|setting: &Option<Vec<String>>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"RankingRules Updated".to_string(),
json!({
"ranking_rules": {
"words_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "words")),
"typo_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "typo")),
"proximity_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "proximity")),
"attribute_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "attribute")),
"sort_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "sort")),
"exactness_position": setting.as_ref().map(|rr| rr.iter().position(|s| s == "exactness")),
"values": setting.as_ref().map(|rr| rr.iter().filter(|s| !s.contains(':')).cloned().collect::<Vec<_>>().join(", ")),
}
}),
Some(req),
);
}
);
make_setting_route!(
"/faceting",
patch,
meilisearch_types::settings::FacetingSettings,
faceting,
"faceting",
analytics,
|setting: &Option<meilisearch_types::settings::FacetingSettings>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"Faceting Updated".to_string(),
json!({
"faceting": {
"max_values_per_facet": setting.as_ref().and_then(|s| s.max_values_per_facet.set()),
},
}),
Some(req),
);
}
);
make_setting_route!(
"/pagination",
patch,
meilisearch_types::settings::PaginationSettings,
pagination,
"pagination",
analytics,
|setting: &Option<meilisearch_types::settings::PaginationSettings>, req: &HttpRequest| {
use serde_json::json;
analytics.publish(
"Pagination Updated".to_string(),
json!({
"pagination": {
"max_total_hits": setting.as_ref().and_then(|s| s.max_total_hits.set()),
},
}),
Some(req),
);
}
);
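// `generate_configure!` mounts the root settings routes (get, patch, delete)
// and chains the per-setting `resources()` services generated above.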
macro_rules! generate_configure {
($($mod:ident),*) => {
pub fn configure(cfg: &mut web::ServiceConfig) {
use crate::extractors::sequential_extractor::SeqHandler;
cfg.service(
web::resource("")
.route(web::patch().to(SeqHandler(update_all)))
.route(web::get().to(SeqHandler(get_all)))
.route(web::delete().to(SeqHandler(delete_all))))
$(.service($mod::resources()))*;
}
};
}
generate_configure!(
filterable_attributes,
sortable_attributes,
displayed_attributes,
searchable_attributes,
distinct_attribute,
stop_words,
synonyms,
ranking_rules,
typo_tolerance,
pagination,
faceting
);
pub async fn update_all(
index_scheduler: GuardedData<ActionPolicy<{ actions::SETTINGS_UPDATE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
body: web::Json<Settings<Unchecked>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
let new_settings = body.into_inner();
analytics.publish(
"Settings Updated".to_string(),
json!({
"ranking_rules": {
"words_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "words")),
"typo_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "typo")),
"proximity_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "proximity")),
"attribute_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "attribute")),
"sort_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "sort")),
"exactness_position": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().position(|s| s == "exactness")),
"values": new_settings.ranking_rules.as_ref().set().map(|rr| rr.iter().filter(|s| !s.contains(':')).cloned().collect::<Vec<_>>().join(", ")),
},
"searchable_attributes": {
"total": new_settings.searchable_attributes.as_ref().set().map(|searchable| searchable.len()),
"with_wildcard": new_settings.searchable_attributes.as_ref().set().map(|searchable| searchable.iter().any(|searchable| searchable == "*")),
},
"displayed_attributes": {
"total": new_settings.displayed_attributes.as_ref().set().map(|displayed| displayed.len()),
"with_wildcard": new_settings.displayed_attributes.as_ref().set().map(|displayed| displayed.iter().any(|displayed| displayed == "*")),
},
"sortable_attributes": {
"total": new_settings.sortable_attributes.as_ref().set().map(|sort| sort.len()),
"has_geo": new_settings.sortable_attributes.as_ref().set().map(|sort| sort.iter().any(|s| s == "_geo")),
},
"filterable_attributes": {
"total": new_settings.filterable_attributes.as_ref().set().map(|filter| filter.len()),
"has_geo": new_settings.filterable_attributes.as_ref().set().map(|filter| filter.iter().any(|s| s == "_geo")),
},
"distinct_attribute": {
"set": new_settings.distinct_attribute.as_ref().set().is_some()
},
"typo_tolerance": {
"enabled": new_settings.typo_tolerance
.as_ref()
.set()
.and_then(|s| s.enabled.as_ref().set())
.copied(),
"disable_on_attributes": new_settings.typo_tolerance
.as_ref()
.set()
.and_then(|s| s.disable_on_attributes.as_ref().set().map(|m| !m.is_empty())),
"disable_on_words": new_settings.typo_tolerance
.as_ref()
.set()
.and_then(|s| s.disable_on_words.as_ref().set().map(|m| !m.is_empty())),
"min_word_size_for_one_typo": new_settings.typo_tolerance
.as_ref()
.set()
.and_then(|s| s.min_word_size_for_typos
.as_ref()
.set()
.map(|s| s.one_typo.set()))
.flatten(),
"min_word_size_for_two_typos": new_settings.typo_tolerance
.as_ref()
.set()
.and_then(|s| s.min_word_size_for_typos
.as_ref()
.set()
.map(|s| s.two_typos.set()))
.flatten(),
},
"faceting": {
"max_values_per_facet": new_settings.faceting
.as_ref()
.set()
.and_then(|s| s.max_values_per_facet.as_ref().set()),
},
"pagination": {
"max_total_hits": new_settings.pagination
.as_ref()
.set()
.and_then(|s| s.max_total_hits.as_ref().set()),
},
"stop_words": {
"total": new_settings.stop_words.as_ref().set().map(|stop_words| stop_words.len()),
},
"synonyms": {
"total": new_settings.synonyms.as_ref().set().map(|synonyms| synonyms.len()),
},
}),
Some(&req),
);
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
let task = KindWithContent::SettingsUpdate {
index_uid,
new_settings: Box::new(new_settings),
is_deletion: false,
allow_index_creation,
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
pub async fn get_all(
index_scheduler: GuardedData<ActionPolicy<{ actions::SETTINGS_GET }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
let rtxn = index.read_txn()?;
let new_settings = settings(&index, &rtxn)?;
debug!("returns: {:?}", new_settings);
Ok(HttpResponse::Ok().json(new_settings))
}
pub async fn delete_all(
index_scheduler: GuardedData<ActionPolicy<{ actions::SETTINGS_UPDATE }>, Data<IndexScheduler>>,
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let new_settings = Settings::cleared().into_unchecked();
let allow_index_creation = index_scheduler.filters().allow_index_creation;
let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
let task = KindWithContent::SettingsUpdate {
index_uid,
new_settings: Box::new(new_settings),
is_deletion: true,
allow_index_creation,
};
let task: SummarizedTaskView =
tokio::task::spawn_blocking(move || index_scheduler.register(task)).await??.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}


@ -0,0 +1,339 @@
use std::collections::BTreeMap;
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::{IndexScheduler, Query};
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::settings::{Settings, Unchecked};
use meilisearch_types::star_or::StarOr;
use meilisearch_types::tasks::{Kind, Status, Task, TaskId};
use serde::{Deserialize, Serialize};
use serde_json::json;
use time::OffsetDateTime;
use self::indexes::IndexStats;
use crate::analytics::Analytics;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::GuardedData;
mod api_key;
mod dump;
pub mod indexes;
mod swap_indexes;
pub mod tasks;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::scope("/tasks").configure(tasks::configure))
.service(web::resource("/health").route(web::get().to(get_health)))
.service(web::scope("/keys").configure(api_key::configure))
.service(web::scope("/dumps").configure(dump::configure))
.service(web::resource("/stats").route(web::get().to(get_stats)))
.service(web::resource("/version").route(web::get().to(get_version)))
.service(web::scope("/indexes").configure(indexes::configure))
.service(web::scope("/swap-indexes").configure(swap_indexes::configure));
}
/// Extracts the raw values from the `StarOr` types and
/// returns `None` if a `StarOr::Star` is encountered.
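/// For example, `[Other("a"), Other("b")]` folds into `Some(vec!["a", "b"])`,
/// while any `Star` in the input makes the whole result `None`.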
pub fn fold_star_or<T, O>(content: impl IntoIterator<Item = StarOr<T>>) -> Option<O>
where
O: FromIterator<T>,
{
content
.into_iter()
.map(|value| match value {
StarOr::Star => None,
StarOr::Other(val) => Some(val),
})
.collect()
}
const PAGINATION_DEFAULT_LIMIT: fn() -> usize = || 20;
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SummarizedTaskView {
task_uid: TaskId,
index_uid: Option<String>,
status: Status,
#[serde(rename = "type")]
kind: Kind,
#[serde(serialize_with = "time::serde::rfc3339::serialize")]
enqueued_at: OffsetDateTime,
}
impl From<Task> for SummarizedTaskView {
fn from(task: Task) -> Self {
SummarizedTaskView {
task_uid: task.uid,
index_uid: task.index_uid().map(|s| s.to_string()),
status: task.status,
kind: task.kind.as_kind(),
enqueued_at: task.enqueued_at,
}
}
}
#[derive(Debug, Clone, Copy, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Pagination {
#[serde(default)]
pub offset: usize,
#[serde(default = "PAGINATION_DEFAULT_LIMIT")]
pub limit: usize,
}
#[derive(Debug, Clone, Serialize)]
pub struct PaginationView<T> {
pub results: Vec<T>,
pub offset: usize,
pub limit: usize,
pub total: usize,
}
impl Pagination {
/// Given the full data to paginate, returns the selected section.
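    /// For instance, `offset: 20, limit: 10` over 35 items keeps items
    /// 20..30 and reports `total: 35`.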
pub fn auto_paginate_sized<T>(
self,
content: impl IntoIterator<Item = T> + ExactSizeIterator,
) -> PaginationView<T>
where
T: Serialize,
{
let total = content.len();
let content: Vec<_> = content.into_iter().skip(self.offset).take(self.limit).collect();
self.format_with(total, content)
}
/// Given an iterator and the total number of elements, returns the selected section.
pub fn auto_paginate_unsized<T>(
self,
total: usize,
content: impl IntoIterator<Item = T>,
) -> PaginationView<T>
where
T: Serialize,
{
let content: Vec<_> = content.into_iter().skip(self.offset).take(self.limit).collect();
self.format_with(total, content)
}
    /// Given the data already paginated + the total number of elements, stores
    /// everything in a [PaginationView].
pub fn format_with<T>(self, total: usize, results: Vec<T>) -> PaginationView<T>
where
T: Serialize,
{
PaginationView { results, offset: self.offset, limit: self.limit, total }
}
}
impl<T> PaginationView<T> {
pub fn new(offset: usize, limit: usize, total: usize, results: Vec<T>) -> Self {
Self { offset, limit, results, total }
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(clippy::large_enum_variant)]
#[serde(tag = "name")]
pub enum UpdateType {
ClearAll,
Customs,
DocumentsAddition {
#[serde(skip_serializing_if = "Option::is_none")]
number: Option<usize>,
},
DocumentsPartial {
#[serde(skip_serializing_if = "Option::is_none")]
number: Option<usize>,
},
DocumentsDeletion {
#[serde(skip_serializing_if = "Option::is_none")]
number: Option<usize>,
},
Settings {
settings: Settings<Unchecked>,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProcessedUpdateResult {
pub update_id: u64,
#[serde(rename = "type")]
pub update_type: UpdateType,
pub duration: f64, // in seconds
#[serde(with = "time::serde::rfc3339")]
pub enqueued_at: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub processed_at: OffsetDateTime,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct FailedUpdateResult {
pub update_id: u64,
#[serde(rename = "type")]
pub update_type: UpdateType,
pub error: ResponseError,
pub duration: f64, // in seconds
#[serde(with = "time::serde::rfc3339")]
pub enqueued_at: OffsetDateTime,
#[serde(with = "time::serde::rfc3339")]
pub processed_at: OffsetDateTime,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EnqueuedUpdateResult {
pub update_id: u64,
#[serde(rename = "type")]
pub update_type: UpdateType,
#[serde(with = "time::serde::rfc3339")]
pub enqueued_at: OffsetDateTime,
#[serde(skip_serializing_if = "Option::is_none", with = "time::serde::rfc3339::option")]
pub started_processing_at: Option<OffsetDateTime>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "status")]
pub enum UpdateStatusResponse {
Enqueued {
#[serde(flatten)]
content: EnqueuedUpdateResult,
},
Processing {
#[serde(flatten)]
content: EnqueuedUpdateResult,
},
Failed {
#[serde(flatten)]
content: FailedUpdateResult,
},
Processed {
#[serde(flatten)]
content: ProcessedUpdateResult,
},
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct IndexUpdateResponse {
pub update_id: u64,
}
impl IndexUpdateResponse {
pub fn with_id(update_id: u64) -> Self {
Self { update_id }
}
}
/// Always returns a 200 with:
/// ```json
/// {
/// "status": "Meilisearch is running"
/// }
/// ```
pub async fn running() -> HttpResponse {
HttpResponse::Ok().json(serde_json::json!({ "status": "Meilisearch is running" }))
}
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Stats {
pub database_size: u64,
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
pub last_update: Option<OffsetDateTime>,
pub indexes: BTreeMap<String, IndexStats>,
}
async fn get_stats(
index_scheduler: GuardedData<ActionPolicy<{ actions::STATS_GET }>, Data<IndexScheduler>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.publish("Stats Seen".to_string(), json!({ "per_index_uid": false }), Some(&req));
let search_rules = &index_scheduler.filters().search_rules;
let stats = create_all_stats((*index_scheduler).clone(), search_rules)?;
debug!("returns: {:?}", stats);
Ok(HttpResponse::Ok().json(stats))
}
pub fn create_all_stats(
index_scheduler: Data<IndexScheduler>,
search_rules: &meilisearch_auth::SearchRules,
) -> Result<Stats, ResponseError> {
let mut last_task: Option<OffsetDateTime> = None;
let mut indexes = BTreeMap::new();
let mut database_size = 0;
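    // Fetch the currently processing task (if any) so its index can be
    // flagged as indexing below.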
let processing_task = index_scheduler.get_tasks_from_authorized_indexes(
Query { statuses: Some(vec![Status::Processing]), limit: Some(1), ..Query::default() },
search_rules.authorized_indexes(),
)?;
let processing_index = processing_task.first().and_then(|task| task.index_uid());
for (name, index) in index_scheduler.indexes()? {
if !search_rules.is_index_authorized(&name) {
continue;
}
database_size += index.on_disk_size()?;
let rtxn = index.read_txn()?;
let stats = IndexStats {
number_of_documents: index.number_of_documents(&rtxn)?,
is_indexing: processing_index.map_or(false, |index_name| name == index_name),
field_distribution: index.field_distribution(&rtxn)?,
};
let updated_at = index.updated_at(&rtxn)?;
last_task = last_task.map_or(Some(updated_at), |last| Some(last.max(updated_at)));
indexes.insert(name, stats);
}
let stats = Stats { database_size, last_update: last_task, indexes };
Ok(stats)
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct VersionResponse {
commit_sha: String,
commit_date: String,
pkg_version: String,
}
async fn get_version(
_index_scheduler: GuardedData<ActionPolicy<{ actions::VERSION }>, Data<IndexScheduler>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> HttpResponse {
analytics.publish("Version Seen".to_string(), json!(null), Some(&req));
let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
let commit_date = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP").unwrap_or("unknown");
HttpResponse::Ok().json(VersionResponse {
commit_sha: commit_sha.to_string(),
commit_date: commit_date.to_string(),
pkg_version: env!("CARGO_PKG_VERSION").to_string(),
})
}
#[derive(Serialize)]
struct KeysResponse {
private: Option<String>,
public: Option<String>,
}
pub async fn get_health(
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.health_seen(&req);
Ok(HttpResponse::Ok().json(serde_json::json!({ "status": "available" })))
}


@ -0,0 +1,59 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index_scheduler::IndexScheduler;
use meilisearch_types::error::ResponseError;
use meilisearch_types::tasks::{IndexSwap, KindWithContent};
use serde::Deserialize;
use serde_json::json;
use super::SummarizedTaskView;
use crate::analytics::Analytics;
use crate::error::MeilisearchHttpError;
use crate::extractors::authentication::policies::*;
use crate::extractors::authentication::{AuthenticationError, GuardedData};
use crate::extractors::sequential_extractor::SeqHandler;
pub fn configure(cfg: &mut web::ServiceConfig) {
cfg.service(web::resource("").route(web::post().to(SeqHandler(swap_indexes))));
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SwapIndexesPayload {
indexes: Vec<String>,
}
pub async fn swap_indexes(
index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_SWAP }>, Data<IndexScheduler>>,
params: web::Json<Vec<SwapIndexesPayload>>,
req: HttpRequest,
analytics: web::Data<dyn Analytics>,
) -> Result<HttpResponse, ResponseError> {
analytics.publish(
"Indexes Swapped".to_string(),
json!({
"swap_operation_number": params.len(),
}),
Some(&req),
);
let search_rules = &index_scheduler.filters().search_rules;
let mut swaps = vec![];
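    // Each payload entry must name exactly two indexes, both authorized for
    // the current key.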
for SwapIndexesPayload { indexes } in params.into_inner().into_iter() {
let (lhs, rhs) = match indexes.as_slice() {
[lhs, rhs] => (lhs, rhs),
_ => {
return Err(MeilisearchHttpError::SwapIndexPayloadWrongLength(indexes).into());
}
};
if !search_rules.is_index_authorized(lhs) || !search_rules.is_index_authorized(rhs) {
return Err(AuthenticationError::InvalidToken.into());
}
swaps.push(IndexSwap { indexes: (lhs.clone(), rhs.clone()) });
}
let task = KindWithContent::IndexSwap { swaps };
let task = index_scheduler.register(task)?;
let task: SummarizedTaskView = task.into();
Ok(HttpResponse::Accepted().json(task))
}

File diff suppressed because it is too large.

701
meilisearch/src/search.rs Normal file

@ -0,0 +1,701 @@
use std::cmp::min;
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::str::FromStr;
use std::time::Instant;
use either::Either;
use meilisearch_types::settings::DEFAULT_PAGINATION_MAX_TOTAL_HITS;
use meilisearch_types::{milli, Document};
use milli::tokenizer::TokenizerBuilder;
use milli::{
AscDesc, FieldId, FieldsIdsMap, Filter, FormatOptions, Index, MatchBounds, MatcherBuilder,
SortError, TermsMatchingStrategy, DEFAULT_VALUES_PER_FACET,
};
use regex::Regex;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use crate::error::MeilisearchHttpError;
type MatchesPosition = BTreeMap<String, Vec<MatchBounds>>;
pub const DEFAULT_SEARCH_OFFSET: fn() -> usize = || 0;
pub const DEFAULT_SEARCH_LIMIT: fn() -> usize = || 20;
pub const DEFAULT_CROP_LENGTH: fn() -> usize = || 10;
pub const DEFAULT_CROP_MARKER: fn() -> String = || "…".to_string();
pub const DEFAULT_HIGHLIGHT_PRE_TAG: fn() -> String = || "<em>".to_string();
pub const DEFAULT_HIGHLIGHT_POST_TAG: fn() -> String = || "</em>".to_string();
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct SearchQuery {
pub q: Option<String>,
#[serde(default = "DEFAULT_SEARCH_OFFSET")]
pub offset: usize,
#[serde(default = "DEFAULT_SEARCH_LIMIT")]
pub limit: usize,
pub page: Option<usize>,
pub hits_per_page: Option<usize>,
pub attributes_to_retrieve: Option<BTreeSet<String>>,
pub attributes_to_crop: Option<Vec<String>>,
#[serde(default = "DEFAULT_CROP_LENGTH")]
pub crop_length: usize,
pub attributes_to_highlight: Option<HashSet<String>>,
// Default to false
#[serde(default = "Default::default")]
pub show_matches_position: bool,
pub filter: Option<Value>,
pub sort: Option<Vec<String>>,
pub facets: Option<Vec<String>>,
#[serde(default = "DEFAULT_HIGHLIGHT_PRE_TAG")]
pub highlight_pre_tag: String,
#[serde(default = "DEFAULT_HIGHLIGHT_POST_TAG")]
pub highlight_post_tag: String,
#[serde(default = "DEFAULT_CROP_MARKER")]
pub crop_marker: String,
#[serde(default)]
pub matching_strategy: MatchingStrategy,
}
impl SearchQuery {
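    /// `true` when the client asked for page-based, exhaustive pagination
    /// (`page` or `hitsPerPage` set) rather than plain `offset`/`limit`.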
pub fn is_finite_pagination(&self) -> bool {
self.page.or(self.hits_per_page).is_some()
}
}
#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub enum MatchingStrategy {
/// Remove query words from last to first
Last,
/// All query words are mandatory
All,
}
impl Default for MatchingStrategy {
fn default() -> Self {
Self::Last
}
}
impl From<MatchingStrategy> for TermsMatchingStrategy {
fn from(other: MatchingStrategy) -> Self {
match other {
MatchingStrategy::Last => Self::Last,
MatchingStrategy::All => Self::All,
}
}
}
#[derive(Debug, Clone, Serialize, PartialEq, Eq)]
pub struct SearchHit {
#[serde(flatten)]
pub document: Document,
#[serde(rename = "_formatted", skip_serializing_if = "Document::is_empty")]
pub formatted: Document,
#[serde(rename = "_matchesPosition", skip_serializing_if = "Option::is_none")]
pub matches_position: Option<MatchesPosition>,
}
#[derive(Serialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct SearchResult {
pub hits: Vec<SearchHit>,
pub query: String,
pub processing_time_ms: u128,
#[serde(flatten)]
pub hits_info: HitsInfo,
#[serde(skip_serializing_if = "Option::is_none")]
pub facet_distribution: Option<BTreeMap<String, BTreeMap<String, u64>>>,
}
#[derive(Serialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum HitsInfo {
#[serde(rename_all = "camelCase")]
Pagination { hits_per_page: usize, page: usize, total_pages: usize, total_hits: usize },
#[serde(rename_all = "camelCase")]
OffsetLimit { limit: usize, offset: usize, estimated_total_hits: usize },
}
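// Since `HitsInfo` is `#[serde(untagged)]` and flattened into `SearchResult`,
// its fields appear directly in the response body. Illustrative shapes:
// finite pagination: {"hitsPerPage":20,"page":2,"totalPages":5,"totalHits":100}
// offset/limit mode: {"limit":20,"offset":40,"estimatedTotalHits":1000}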
pub fn perform_search(
index: &Index,
query: SearchQuery,
) -> Result<SearchResult, MeilisearchHttpError> {
let before_search = Instant::now();
let rtxn = index.read_txn()?;
let mut search = index.search(&rtxn);
if let Some(ref query) = query.q {
search.query(query);
}
let is_finite_pagination = query.is_finite_pagination();
search.terms_matching_strategy(query.matching_strategy.into());
let max_total_hits = index
.pagination_max_total_hits(&rtxn)
.map_err(milli::Error::from)?
.unwrap_or(DEFAULT_PAGINATION_MAX_TOTAL_HITS);
search.exhaustive_number_hits(is_finite_pagination);
// Compute the offset and the limit depending on the pagination mode.
let (offset, limit) = if is_finite_pagination {
let limit = query.hits_per_page.unwrap_or_else(DEFAULT_SEARCH_LIMIT);
let page = query.page.unwrap_or(1);
// page 0 gives a limit of 0, forcing Meilisearch to return no documents.
page.checked_sub(1).map_or((0, 0), |p| (limit * p, limit))
} else {
(query.offset, query.limit)
};
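// Worked example (assuming the default hits_per_page of 20): page 3 yields
// (offset, limit) = (40, 20), while page 0 hits the `checked_sub` branch and
// yields (0, 0), so no document is returned.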
// Make sure that a user can't get more documents than the hard limit,
// we align that on the offset too.
let offset = min(offset, max_total_hits);
let limit = min(limit, max_total_hits.saturating_sub(offset));
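// e.g. with max_total_hits = 1000 and offset = 995, limit is clamped to at
// most 5, so offset + limit can never exceed the hard limit.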
search.offset(offset);
search.limit(limit);
if let Some(ref filter) = query.filter {
if let Some(facets) = parse_filter(filter)? {
search.filter(facets);
}
}
if let Some(ref sort) = query.sort {
let sort = match sort.iter().map(|s| AscDesc::from_str(s)).collect() {
Ok(sorts) => sorts,
Err(asc_desc_error) => {
return Err(milli::Error::from(SortError::from(asc_desc_error)).into())
}
};
search.sort_criteria(sort);
}
let milli::SearchResult { documents_ids, matching_words, candidates, .. } = search.execute()?;
let fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
let displayed_ids = index
.displayed_fields_ids(&rtxn)?
.map(|fields| fields.into_iter().collect::<BTreeSet<_>>())
.unwrap_or_else(|| fields_ids_map.iter().map(|(id, _)| id).collect());
let fids = |attrs: &BTreeSet<String>| {
let mut ids = BTreeSet::new();
for attr in attrs {
if attr == "*" {
ids = displayed_ids.clone();
break;
}
if let Some(id) = fields_ids_map.id(attr) {
ids.insert(id);
}
}
ids
};
// The attributes to retrieve are the ones explicitly marked as to retrieve (all by default),
// but these attributes must also be present
// - in the fields_ids_map
// - in the displayed attributes
let to_retrieve_ids: BTreeSet<_> = query
.attributes_to_retrieve
.as_ref()
.map(fids)
.unwrap_or_else(|| displayed_ids.clone())
.intersection(&displayed_ids)
.cloned()
.collect();
let attr_to_highlight = query.attributes_to_highlight.unwrap_or_default();
let attr_to_crop = query.attributes_to_crop.unwrap_or_default();
// Attributes in `formatted_options` correspond to the attributes that will be in `_formatted`
// These attributes are:
// - the attributes asked to be highlighted or cropped (with `attributesToCrop` or `attributesToHighlight`)
// - the attributes asked to be retrieved: these attributes will not be highlighted/cropped
// But these attributes must also be present in the displayed attributes
let formatted_options = compute_formatted_options(
&attr_to_highlight,
&attr_to_crop,
query.crop_length,
&to_retrieve_ids,
&fields_ids_map,
&displayed_ids,
);
let tokenizer = TokenizerBuilder::default().build();
let mut formatter_builder = MatcherBuilder::new(matching_words, tokenizer);
formatter_builder.crop_marker(query.crop_marker);
formatter_builder.highlight_prefix(query.highlight_pre_tag);
formatter_builder.highlight_suffix(query.highlight_post_tag);
let mut documents = Vec::new();
let documents_iter = index.documents(&rtxn, documents_ids)?;
for (_id, obkv) in documents_iter {
// First generate a document with all the displayed fields
let displayed_document = make_document(&displayed_ids, &fields_ids_map, obkv)?;
// select the attributes to retrieve
let attributes_to_retrieve = to_retrieve_ids
.iter()
.map(|&fid| fields_ids_map.name(fid).expect("Missing field name"));
let mut document =
permissive_json_pointer::select_values(&displayed_document, attributes_to_retrieve);
let (matches_position, formatted) = format_fields(
&displayed_document,
&fields_ids_map,
&formatter_builder,
&formatted_options,
query.show_matches_position,
&displayed_ids,
)?;
if let Some(sort) = query.sort.as_ref() {
insert_geo_distance(sort, &mut document);
}
let hit = SearchHit { document, formatted, matches_position };
documents.push(hit);
}
let number_of_hits = min(candidates.len() as usize, max_total_hits);
let hits_info = if is_finite_pagination {
let hits_per_page = query.hits_per_page.unwrap_or_else(DEFAULT_SEARCH_LIMIT);
// If hits_per_page is 0, the number of pages can't be computed, so we answer 0.
let total_pages = (number_of_hits + hits_per_page.saturating_sub(1))
.checked_div(hits_per_page)
.unwrap_or(0);
HitsInfo::Pagination {
hits_per_page,
page: query.page.unwrap_or(1),
total_pages,
total_hits: number_of_hits,
}
} else {
HitsInfo::OffsetLimit { limit: query.limit, offset, estimated_total_hits: number_of_hits }
};
let facet_distribution = match query.facets {
Some(ref fields) => {
let mut facet_distribution = index.facets_distribution(&rtxn);
let max_values_by_facet = index
.max_values_per_facet(&rtxn)
.map_err(milli::Error::from)?
.unwrap_or(DEFAULT_VALUES_PER_FACET);
facet_distribution.max_values_per_facet(max_values_by_facet);
if fields.iter().all(|f| f != "*") {
facet_distribution.facets(fields);
}
let distribution = facet_distribution.candidates(candidates).execute()?;
Some(distribution)
}
None => None,
};
let result = SearchResult {
hits: documents,
hits_info,
query: query.q.clone().unwrap_or_default(),
processing_time_ms: before_search.elapsed().as_millis(),
facet_distribution,
};
Ok(result)
}
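// A minimal usage sketch (hypothetical caller, not part of this file); in the
// HTTP routes the query is deserialized from the request instead:
//
// let query: SearchQuery = serde_json::from_str(r#"{"q":"shoe","limit":5}"#)?;
// let result = perform_search(&index, query)?;
// println!("{} hits in {} ms", result.hits.len(), result.processing_time_ms);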
fn insert_geo_distance(sorts: &[String], document: &mut Document) {
lazy_static::lazy_static! {
static ref GEO_REGEX: Regex =
Regex::new(r"_geoPoint\(\s*([[:digit:].\-]+)\s*,\s*([[:digit:].\-]+)\s*\)").unwrap();
};
if let Some(capture_group) = sorts.iter().find_map(|sort| GEO_REGEX.captures(sort)) {
// TODO: TAMO: milli encountered an internal error, what do we want to do?
let base = [capture_group[1].parse().unwrap(), capture_group[2].parse().unwrap()];
let geo_point = &document.get("_geo").unwrap_or(&json!(null));
if let Some((lat, lng)) = geo_point["lat"].as_f64().zip(geo_point["lng"].as_f64()) {
let distance = milli::distance_between_two_points(&base, &[lat, lng]);
document.insert("_geoDistance".to_string(), json!(distance.round() as usize));
}
}
}
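// e.g. a sort expression `_geoPoint(48.86, 2.35):asc` gives base = [48.86, 2.35];
// a document with `"_geo": {"lat": 48.86, "lng": 2.35}` then gets
// `"_geoDistance": 0` inserted. Documents without a valid `_geo` object are
// left untouched.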
fn compute_formatted_options(
attr_to_highlight: &HashSet<String>,
attr_to_crop: &[String],
query_crop_length: usize,
to_retrieve_ids: &BTreeSet<FieldId>,
fields_ids_map: &FieldsIdsMap,
displayed_ids: &BTreeSet<FieldId>,
) -> BTreeMap<FieldId, FormatOptions> {
let mut formatted_options = BTreeMap::new();
add_highlight_to_formatted_options(
&mut formatted_options,
attr_to_highlight,
fields_ids_map,
displayed_ids,
);
add_crop_to_formatted_options(
&mut formatted_options,
attr_to_crop,
query_crop_length,
fields_ids_map,
displayed_ids,
);
// Should not return `_formatted` if there are no valid attributes to highlight or crop
if !formatted_options.is_empty() {
add_non_formatted_ids_to_formatted_options(&mut formatted_options, to_retrieve_ids);
}
formatted_options
}
fn add_highlight_to_formatted_options(
formatted_options: &mut BTreeMap<FieldId, FormatOptions>,
attr_to_highlight: &HashSet<String>,
fields_ids_map: &FieldsIdsMap,
displayed_ids: &BTreeSet<FieldId>,
) {
for attr in attr_to_highlight {
let new_format = FormatOptions { highlight: true, crop: None };
if attr == "*" {
for id in displayed_ids {
formatted_options.insert(*id, new_format);
}
break;
}
if let Some(id) = fields_ids_map.id(attr) {
if displayed_ids.contains(&id) {
formatted_options.insert(id, new_format);
}
}
}
}
fn add_crop_to_formatted_options(
formatted_options: &mut BTreeMap<FieldId, FormatOptions>,
attr_to_crop: &[String],
crop_length: usize,
fields_ids_map: &FieldsIdsMap,
displayed_ids: &BTreeSet<FieldId>,
) {
for attr in attr_to_crop {
let mut split = attr.rsplitn(2, ':');
let (attr_name, attr_len) = match split.next().zip(split.next()) {
Some((len, name)) => {
let crop_len = len.parse::<usize>().unwrap_or(crop_length);
(name, crop_len)
}
None => (attr.as_str(), crop_length),
};
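// e.g. "overview:25" parses to ("overview", 25); a bare "overview" and a
// non-numeric length such as "overview:x" both fall back to the query-level
// crop_length.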
if attr_name == "*" {
for id in displayed_ids {
formatted_options
.entry(*id)
.and_modify(|f| f.crop = Some(attr_len))
.or_insert(FormatOptions { highlight: false, crop: Some(attr_len) });
}
}
if let Some(id) = fields_ids_map.id(attr_name) {
if displayed_ids.contains(&id) {
formatted_options
.entry(id)
.and_modify(|f| f.crop = Some(attr_len))
.or_insert(FormatOptions { highlight: false, crop: Some(attr_len) });
}
}
}
}
fn add_non_formatted_ids_to_formatted_options(
formatted_options: &mut BTreeMap<FieldId, FormatOptions>,
to_retrieve_ids: &BTreeSet<FieldId>,
) {
for id in to_retrieve_ids {
formatted_options.entry(*id).or_insert(FormatOptions { highlight: false, crop: None });
}
}
fn make_document(
displayed_attributes: &BTreeSet<FieldId>,
field_ids_map: &FieldsIdsMap,
obkv: obkv::KvReaderU16,
) -> Result<Document, MeilisearchHttpError> {
let mut document = serde_json::Map::new();
// recreate the original json
for (key, value) in obkv.iter() {
let value = serde_json::from_slice(value)?;
let key = field_ids_map.name(key).expect("Missing field name").to_string();
document.insert(key, value);
}
// select the attributes to retrieve
let displayed_attributes = displayed_attributes
.iter()
.map(|&fid| field_ids_map.name(fid).expect("Missing field name"));
let document = permissive_json_pointer::select_values(&document, displayed_attributes);
Ok(document)
}
fn format_fields<'a, A: AsRef<[u8]>>(
document: &Document,
field_ids_map: &FieldsIdsMap,
builder: &MatcherBuilder<'a, A>,
formatted_options: &BTreeMap<FieldId, FormatOptions>,
compute_matches: bool,
displayable_ids: &BTreeSet<FieldId>,
) -> Result<(Option<MatchesPosition>, Document), MeilisearchHttpError> {
let mut matches_position = compute_matches.then(BTreeMap::new);
let mut document = document.clone();
// select the attributes to retrieve
let displayable_names =
displayable_ids.iter().map(|&fid| field_ids_map.name(fid).expect("Missing field name"));
permissive_json_pointer::map_leaf_values(&mut document, displayable_names, |key, value| {
// To get the formatting option of each key we need to see all the rules that apply
// to the value and merge them together. E.g. if a user wants to highlight `doggo`
// and crop `doggo.name`, then `doggo.name` needs to be highlighted + cropped while
// `doggo.age` is only highlighted.
let format = formatted_options
.iter()
.filter(|(field, _option)| {
let name = field_ids_map.name(**field).unwrap();
milli::is_faceted_by(name, key) || milli::is_faceted_by(key, name)
})
.map(|(_, option)| *option)
.reduce(|acc, option| acc.merge(option));
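// e.g. highlighting `doggo` while cropping `doggo.name:10` merges, for the
// `doggo.name` leaf, into FormatOptions { highlight: true, crop: Some(10) },
// whereas `doggo.age` only gets FormatOptions { highlight: true, crop: None }.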
let mut infos = Vec::new();
*value = format_value(std::mem::take(value), builder, format, &mut infos, compute_matches);
if let Some(matches) = matches_position.as_mut() {
if !infos.is_empty() {
matches.insert(key.to_owned(), infos);
}
}
});
let selectors = formatted_options
.keys()
// This unwrap must be safe since we got the ids from the fields_ids_map just
// before.
.map(|&fid| field_ids_map.name(fid).unwrap());
let document = permissive_json_pointer::select_values(&document, selectors);
Ok((matches_position, document))
}
fn format_value<'a, A: AsRef<[u8]>>(
value: Value,
builder: &MatcherBuilder<'a, A>,
format_options: Option<FormatOptions>,
infos: &mut Vec<MatchBounds>,
compute_matches: bool,
) -> Value {
match value {
Value::String(old_string) => {
let mut matcher = builder.build(&old_string);
if compute_matches {
let matches = matcher.matches();
infos.extend_from_slice(&matches[..]);
}
match format_options {
Some(format_options) => {
let value = matcher.format(format_options);
Value::String(value.into_owned())
}
None => Value::String(old_string),
}
}
Value::Array(values) => Value::Array(
values
.into_iter()
.map(|v| {
format_value(
v,
builder,
format_options.map(|format_options| FormatOptions {
highlight: format_options.highlight,
crop: None,
}),
infos,
compute_matches,
)
})
.collect(),
),
Value::Object(object) => Value::Object(
object
.into_iter()
.map(|(k, v)| {
(
k,
format_value(
v,
builder,
format_options.map(|format_options| FormatOptions {
highlight: format_options.highlight,
crop: None,
}),
infos,
compute_matches,
),
)
})
.collect(),
),
Value::Number(number) => {
let s = number.to_string();
let mut matcher = builder.build(&s);
if compute_matches {
let matches = matcher.matches();
infos.extend_from_slice(&matches[..]);
}
match format_options {
Some(format_options) => {
let value = matcher.format(format_options);
Value::String(value.into_owned())
}
None => Value::Number(number),
}
}
value => value,
}
}
fn parse_filter(facets: &Value) -> Result<Option<Filter>, MeilisearchHttpError> {
match facets {
Value::String(expr) => {
let condition = Filter::from_str(expr)?;
Ok(condition)
}
Value::Array(arr) => parse_filter_array(arr),
v => Err(MeilisearchHttpError::InvalidExpression(&["Array"], v.clone())),
}
}
fn parse_filter_array(arr: &[Value]) -> Result<Option<Filter>, MeilisearchHttpError> {
let mut ands = Vec::new();
for value in arr {
match value {
Value::String(s) => ands.push(Either::Right(s.as_str())),
Value::Array(arr) => {
let mut ors = Vec::new();
for value in arr {
match value {
Value::String(s) => ors.push(s.as_str()),
v => {
return Err(MeilisearchHttpError::InvalidExpression(
&["String"],
v.clone(),
))
}
}
}
ands.push(Either::Left(ors));
}
v => {
return Err(MeilisearchHttpError::InvalidExpression(
&["String", "[String]"],
v.clone(),
))
}
}
}
Ok(Filter::from_array(ands)?)
}
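// Accepted filter shapes (illustrative): a plain string like "genre = horror"
// is parsed as-is; `["genre = horror", ["year > 2000", "year < 1990"]]` means
// genre = horror AND (year > 2000 OR year < 1990); any non-string leaf is
// rejected with `InvalidExpression`.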
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_insert_geo_distance() {
let value: Document = serde_json::from_str(
r#"{
"_geo": {
"lat": 50.629973371633746,
"lng": 3.0569447399419567
},
"city": "Lille",
"id": "1"
}"#,
)
.unwrap();
let sorters = &["_geoPoint(50.629973371633746,3.0569447399419567):desc".to_string()];
let mut document = value.clone();
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), Some(&json!(0)));
let sorters = &["_geoPoint(50.629973371633746, 3.0569447399419567):asc".to_string()];
let mut document = value.clone();
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), Some(&json!(0)));
let sorters =
&["_geoPoint( 50.629973371633746 , 3.0569447399419567 ):desc".to_string()];
let mut document = value.clone();
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), Some(&json!(0)));
let sorters = &[
"prix:asc",
"villeneuve:desc",
"_geoPoint(50.629973371633746, 3.0569447399419567):asc",
"ubu:asc",
]
.map(|s| s.to_string());
let mut document = value.clone();
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), Some(&json!(0)));
// only the first _geoPoint is used to compute the distance
let sorters = &[
"chien:desc",
"_geoPoint(50.629973371633746, 3.0569447399419567):asc",
"pangolin:desc",
"_geoPoint(100.0, -80.0):asc",
"chat:asc",
]
.map(|s| s.to_string());
let mut document = value.clone();
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), Some(&json!(0)));
// there is no _geoPoint, so nothing is inserted into the document
let sorters = &["chien:asc".to_string()];
let mut document = value;
insert_geo_distance(sorters, &mut document);
assert_eq!(document.get("_geoDistance"), None);
}
}

View file

@ -0,0 +1,12 @@
{
"indices": [{
"uid": "test",
"primaryKey": "id"
}, {
"uid": "test2",
"primaryKey": "test2_id"
}
],
"dbVersion": "0.13.0",
"dumpVersion": "1"
}

View file

@ -0,0 +1,77 @@
{"id":0,"isActive":false,"balance":"$2,668.55","picture":"http://placehold.it/32x32","age":36,"color":"Green","name":"Lucas Hess","gender":"male","email":"lucashess@chorizon.com","phone":"+1 (998) 478-2597","address":"412 Losee Terrace, Blairstown, Georgia, 2825","about":"Mollit ad in exercitation quis. Anim est ut consequat fugiat duis magna aliquip velit nisi. Commodo eiusmod est consequat proident consectetur aliqua enim fugiat. Aliqua adipisicing laboris elit proident enim veniam laboris mollit. Incididunt fugiat minim ad nostrud deserunt tempor in. Id irure officia labore qui est labore nulla nisi. Magna sit quis tempor esse consectetur amet labore duis aliqua consequat.\r\n","registered":"2016-06-21T09:30:25 -02:00","latitude":-44.174957,"longitude":-145.725388,"tags":["bug","bug"]}
{"id":1,"isActive":true,"balance":"$1,706.13","picture":"http://placehold.it/32x32","age":27,"color":"Green","name":"Cherry Orr","gender":"female","email":"cherryorr@chorizon.com","phone":"+1 (995) 479-3174","address":"442 Beverly Road, Ventress, New Mexico, 3361","about":"Exercitation officia mollit proident nostrud ea. Pariatur voluptate labore nostrud magna duis non elit et incididunt Lorem velit duis amet commodo. Irure in velit laboris pariatur. Do tempor ex deserunt duis minim amet.\r\n","registered":"2020-03-18T11:12:21 -01:00","latitude":-24.356932,"longitude":27.184808,"tags":["new issue","bug"]}
{"id":2,"isActive":true,"balance":"$2,467.47","picture":"http://placehold.it/32x32","age":34,"color":"blue","name":"Patricia Goff","gender":"female","email":"patriciagoff@chorizon.com","phone":"+1 (864) 463-2277","address":"866 Hornell Loop, Cresaptown, Ohio, 1700","about":"Non culpa duis dolore Lorem aliqua. Labore veniam laborum cupidatat nostrud ea exercitation. Esse nostrud sit veniam laborum minim ullamco nulla aliqua est cillum magna. Duis non esse excepteur veniam voluptate sunt cupidatat nostrud consequat sint adipisicing ut excepteur. Incididunt sit aliquip non id magna amet deserunt esse quis dolor.\r\n","registered":"2014-10-28T12:59:30 -01:00","latitude":-64.008555,"longitude":11.867098,"tags":["good first issue"]}
{"id":3,"isActive":true,"balance":"$3,344.40","picture":"http://placehold.it/32x32","age":35,"color":"blue","name":"Adeline Flynn","gender":"female","email":"adelineflynn@chorizon.com","phone":"+1 (994) 600-2840","address":"428 Paerdegat Avenue, Hollymead, Pennsylvania, 948","about":"Ex velit magna minim labore dolor id laborum incididunt. Proident dolor fugiat exercitation ad adipisicing amet dolore. Veniam nisi pariatur aute eu amet sint elit duis exercitation. Eu fugiat Lorem nostrud consequat aute sunt. Minim excepteur cillum laboris enim tempor adipisicing nulla reprehenderit ea velit Lorem qui in incididunt. Esse ipsum mollit deserunt ea exercitation ex aliqua anim magna cupidatat culpa.\r\n","registered":"2014-03-27T06:24:45 -01:00","latitude":-74.485173,"longitude":-11.059859,"tags":["bug","good first issue","wontfix","new issue"]}
{"id":4,"isActive":false,"balance":"$2,575.78","picture":"http://placehold.it/32x32","age":39,"color":"Green","name":"Mariana Pacheco","gender":"female","email":"marianapacheco@chorizon.com","phone":"+1 (820) 414-2223","address":"664 Rapelye Street, Faywood, California, 7320","about":"Sint cillum enim eu Lorem dolore. Est excepteur cillum consequat incididunt. Ut consectetur et do culpa eiusmod ex ut id proident aliqua. Sunt dolor anim minim labore incididunt deserunt enim velit sunt ut in velit. Nulla ipsum cillum qui est minim officia in occaecat exercitation Lorem sunt. Aliqua minim excepteur tempor incididunt dolore. Quis amet ullamco et proident aliqua magna consequat.\r\n","registered":"2015-09-02T03:23:35 -02:00","latitude":75.763501,"longitude":-78.777124,"tags":["new issue"]}
{"id":5,"isActive":true,"balance":"$3,793.09","picture":"http://placehold.it/32x32","age":20,"color":"Green","name":"Warren Watson","gender":"male","email":"warrenwatson@chorizon.com","phone":"+1 (807) 583-2427","address":"671 Prince Street, Faxon, Connecticut, 4275","about":"Cillum incididunt mollit labore ipsum elit ea. Lorem labore consectetur nulla ea fugiat sint esse cillum ea commodo id qui. Sint cillum mollit dolore enim quis esse. Nisi labore duis dolor tempor laborum laboris ad minim pariatur in excepteur sit. Aliqua anim amet sunt ullamco labore amet culpa irure esse eiusmod deserunt consequat Lorem nostrud.\r\n","registered":"2017-06-04T06:02:17 -02:00","latitude":29.979223,"longitude":25.358943,"tags":["wontfix","wontfix","wontfix"]}
{"id":6,"isActive":true,"balance":"$2,919.70","picture":"http://placehold.it/32x32","age":20,"color":"blue","name":"Shelia Berry","gender":"female","email":"sheliaberry@chorizon.com","phone":"+1 (853) 511-2651","address":"437 Forrest Street, Coventry, Illinois, 2056","about":"Id occaecat qui voluptate proident culpa cillum nisi reprehenderit. Pariatur nostrud proident adipisicing reprehenderit eiusmod qui minim proident aliqua id cupidatat laboris deserunt. Proident sint laboris sit mollit dolor qui incididunt quis veniam cillum cupidatat ad nostrud ut. Aliquip consequat eiusmod eiusmod irure tempor do incididunt id culpa laboris eiusmod.\r\n","registered":"2018-07-11T02:45:01 -02:00","latitude":54.815991,"longitude":-118.690609,"tags":["good first issue","bug","wontfix","new issue"]}
{"id":7,"isActive":true,"balance":"$1,349.50","picture":"http://placehold.it/32x32","age":28,"color":"Green","name":"Chrystal Boyd","gender":"female","email":"chrystalboyd@chorizon.com","phone":"+1 (936) 563-2802","address":"670 Croton Loop, Sussex, Florida, 4692","about":"Consequat ex voluptate consectetur laborum nulla. Qui voluptate Lorem amet labore est esse sunt. Nulla cupidatat consequat quis incididunt exercitation aliquip reprehenderit ea ea adipisicing reprehenderit id consectetur quis. Exercitation est incididunt ullamco non proident consequat. Nisi veniam aliquip fugiat voluptate ex id aute duis ullamco magna ipsum ad laborum ipsum. Cupidatat velit dolore esse nisi.\r\n","registered":"2016-11-01T07:36:04 -01:00","latitude":-24.711933,"longitude":147.246705,"tags":[]}
{"id":8,"isActive":false,"balance":"$3,999.56","picture":"http://placehold.it/32x32","age":30,"color":"brown","name":"Martin Porter","gender":"male","email":"martinporter@chorizon.com","phone":"+1 (895) 580-2304","address":"577 Regent Place, Aguila, Guam, 6554","about":"Nostrud nulla labore ex excepteur labore enim cillum pariatur in do Lorem eiusmod ullamco est. Labore aliquip id ut nisi commodo pariatur ea esse laboris. Incididunt eu dolor esse excepteur nulla minim proident non cillum nisi dolore incididunt ipsum tempor.\r\n","registered":"2014-09-20T02:08:30 -02:00","latitude":-88.344273,"longitude":37.964466,"tags":[]}
{"id":9,"isActive":true,"balance":"$3,729.71","picture":"http://placehold.it/32x32","age":26,"color":"blue","name":"Kelli Mendez","gender":"female","email":"kellimendez@chorizon.com","phone":"+1 (936) 401-2236","address":"242 Caton Place, Grazierville, Alabama, 3968","about":"Consectetur occaecat dolore esse eiusmod enim ea aliqua eiusmod amet velit laborum. Velit quis consequat consectetur velit fugiat labore commodo amet do. Magna minim est ad commodo consequat fugiat. Laboris duis Lorem ipsum irure sit ipsum consequat tempor sit. Est ad nulla duis quis velit anim id nulla. Cupidatat ea esse laboris eu veniam cupidatat proident veniam quis.\r\n","registered":"2018-05-04T10:35:30 -02:00","latitude":49.37551,"longitude":41.872323,"tags":["new issue","new issue"]}
{"id":10,"isActive":false,"balance":"$1,127.47","picture":"http://placehold.it/32x32","age":27,"color":"blue","name":"Maddox Johns","gender":"male","email":"maddoxjohns@chorizon.com","phone":"+1 (892) 470-2357","address":"756 Beard Street, Avalon, Louisiana, 114","about":"Voluptate et dolor magna do do. Id do enim ut nulla esse culpa fugiat excepteur quis. Nostrud ad aliquip aliqua qui esse ut consequat proident deserunt esse cupidatat do elit fugiat. Sint cillum aliquip cillum laboris laborum laboris ad aliquip enim reprehenderit cillum eu sint. Sint ut ad duis do culpa non eiusmod amet non ipsum commodo. Pariatur aliquip sit deserunt non. Ut consequat pariatur deserunt veniam est sit eiusmod officia aliquip commodo sunt in eu duis.\r\n","registered":"2016-04-22T06:41:25 -02:00","latitude":66.640229,"longitude":-17.222666,"tags":["new issue","good first issue","good first issue","new issue"]}
{"id":11,"isActive":true,"balance":"$1,351.43","picture":"http://placehold.it/32x32","age":28,"color":"Green","name":"Evans Wagner","gender":"male","email":"evanswagner@chorizon.com","phone":"+1 (889) 496-2332","address":"118 Monaco Place, Lutsen, Delaware, 6209","about":"Sunt consectetur enim ipsum consectetur occaecat reprehenderit nulla pariatur. Cupidatat do exercitation tempor voluptate duis nostrud dolor consectetur. Excepteur aliquip Lorem voluptate cillum est. Nisi velit nulla nostrud ea id officia laboris et.\r\n","registered":"2016-10-27T01:26:31 -02:00","latitude":-77.673222,"longitude":-142.657214,"tags":["good first issue","good first issue"]}
{"id":12,"isActive":false,"balance":"$3,394.96","picture":"http://placehold.it/32x32","age":25,"color":"blue","name":"Aida Kirby","gender":"female","email":"aidakirby@chorizon.com","phone":"+1 (942) 532-2325","address":"797 Engert Avenue, Wilsonia, Idaho, 6532","about":"Mollit aute esse Lorem do laboris anim reprehenderit excepteur. Ipsum culpa esse voluptate officia cupidatat minim. Velit officia proident nostrud sunt irure labore. Culpa ex commodo amet dolor amet voluptate Lorem ex esse commodo fugiat quis non. Ex est adipisicing veniam sunt dolore ut aliqua nisi ex sit. Esse voluptate esse anim id adipisicing enim aute ea exercitation tempor cillum.\r\n","registered":"2018-06-18T04:39:57 -02:00","latitude":-58.062041,"longitude":34.999254,"tags":["new issue","wontfix","bug","new issue"]}
{"id":13,"isActive":true,"balance":"$2,812.62","picture":"http://placehold.it/32x32","age":40,"color":"blue","name":"Nelda Burris","gender":"female","email":"neldaburris@chorizon.com","phone":"+1 (813) 600-2576","address":"160 Opal Court, Fowlerville, Tennessee, 2170","about":"Ipsum aliquip adipisicing elit magna. Veniam irure quis laborum laborum sint velit amet. Irure non eiusmod laborum fugiat qui quis Lorem culpa veniam commodo. Fugiat cupidatat dolore et consequat pariatur enim ex velit consequat deserunt quis. Deserunt et quis laborum cupidatat cillum minim cupidatat nisi do commodo commodo labore cupidatat ea. In excepteur sit nostrud nulla nostrud dolor sint. Et anim culpa aliquip laborum Lorem elit.\r\n","registered":"2015-08-15T12:39:53 -02:00","latitude":66.6871,"longitude":179.549488,"tags":["wontfix"]}
{"id":14,"isActive":true,"balance":"$1,718.33","picture":"http://placehold.it/32x32","age":35,"color":"blue","name":"Jennifer Hart","gender":"female","email":"jenniferhart@chorizon.com","phone":"+1 (850) 537-2513","address":"124 Veranda Place, Nash, Utah, 985","about":"Amet amet voluptate in occaecat pariatur. Nulla ipsum esse quis qui in quis qui. Non est non nisi qui tempor commodo consequat fugiat. Sint eu ipsum aute anim anim. Ea nostrud excepteur exercitation consectetur Lorem.\r\n","registered":"2016-09-04T11:46:59 -02:00","latitude":-66.827751,"longitude":99.220079,"tags":["wontfix","bug","new issue","new issue"]}
{"id":15,"isActive":false,"balance":"$2,698.16","picture":"http://placehold.it/32x32","age":28,"color":"blue","name":"Aurelia Contreras","gender":"female","email":"aureliacontreras@chorizon.com","phone":"+1 (932) 442-3103","address":"655 Dwight Street, Grapeview, Palau, 8356","about":"Qui adipisicing consectetur aute veniam culpa ipsum. Occaecat occaecat ut mollit enim enim elit Lorem nostrud Lorem. Consequat laborum mollit nulla aute cillum sunt mollit commodo velit culpa. Pariatur pariatur velit nostrud tempor. In minim enim cillum exercitation in laboris labore ea sunt in incididunt fugiat.\r\n","registered":"2014-09-11T10:43:15 -02:00","latitude":-71.328973,"longitude":133.404895,"tags":["wontfix","bug","good first issue"]}
{"id":16,"isActive":true,"balance":"$3,303.25","picture":"http://placehold.it/32x32","age":28,"color":"brown","name":"Estella Bass","gender":"female","email":"estellabass@chorizon.com","phone":"+1 (825) 436-2909","address":"435 Rockwell Place, Garberville, Wisconsin, 2230","about":"Sit eiusmod mollit velit non. Qui ea in exercitation elit reprehenderit occaecat tempor minim officia. Culpa amet voluptate sit eiusmod pariatur.\r\n","registered":"2017-11-23T09:32:09 -01:00","latitude":81.17014,"longitude":-145.262693,"tags":["new issue"]}
{"id":17,"isActive":false,"balance":"$3,579.20","picture":"http://placehold.it/32x32","age":25,"color":"brown","name":"Ortega Brennan","gender":"male","email":"ortegabrennan@chorizon.com","phone":"+1 (906) 526-2287","address":"440 Berry Street, Rivera, Maine, 1849","about":"Veniam velit non laboris consectetur sit aliquip enim proident velit in ipsum reprehenderit reprehenderit. Dolor qui nulla adipisicing ad magna dolore do ut duis et aute est. Qui est elit cupidatat nostrud. Laboris voluptate reprehenderit minim sint exercitation cupidatat ipsum sint consectetur velit sunt et officia incididunt. Ut amet Lorem minim deserunt officia officia irure qui et Lorem deserunt culpa sit.\r\n","registered":"2016-03-31T02:17:13 -02:00","latitude":-68.407524,"longitude":-113.642067,"tags":["new issue","wontfix"]}
{"id":18,"isActive":false,"balance":"$1,484.92","picture":"http://placehold.it/32x32","age":39,"color":"blue","name":"Leonard Tillman","gender":"male","email":"leonardtillman@chorizon.com","phone":"+1 (864) 541-3456","address":"985 Provost Street, Charco, New Hampshire, 8632","about":"Consectetur ut magna sit id officia nostrud ipsum. Lorem cupidatat laborum nostrud aliquip magna qui est cupidatat exercitation et. Officia qui magna commodo id cillum magna ut ad veniam sunt sint ex. Id minim do in do exercitation aliquip incididunt ex esse. Nisi aliqua quis excepteur qui aute excepteur dolore eu pariatur irure id eu cupidatat eiusmod. Aliqua amet et dolore enim et eiusmod qui irure pariatur qui officia adipisicing nulla duis.\r\n","registered":"2018-05-06T08:21:27 -02:00","latitude":-8.581801,"longitude":-61.910062,"tags":["wontfix","new issue","bug","bug"]}
{"id":19,"isActive":true,"balance":"$3,572.55","picture":"http://placehold.it/32x32","age":33,"color":"brown","name":"Dale Payne","gender":"male","email":"dalepayne@chorizon.com","phone":"+1 (814) 469-3499","address":"536 Dare Court, Ironton, Arkansas, 8605","about":"Et velit cupidatat velit incididunt mollit. Occaecat do labore aliqua dolore excepteur occaecat ut veniam ad ullamco tempor. Ut anim laboris deserunt culpa esse. Pariatur Lorem nulla cillum cupidatat nostrud Lorem commodo reprehenderit ut est. In dolor cillum reprehenderit laboris incididunt ad reprehenderit aute ipsum officia id in consequat. Culpa exercitation voluptate fugiat est Lorem ipsum in dolore dolor consequat Lorem et.\r\n","registered":"2019-10-11T01:01:33 -02:00","latitude":-18.280968,"longitude":-126.091797,"tags":["bug","wontfix","wontfix","wontfix"]}
{"id":20,"isActive":true,"balance":"$1,986.48","picture":"http://placehold.it/32x32","age":38,"color":"Green","name":"Florence Long","gender":"female","email":"florencelong@chorizon.com","phone":"+1 (972) 557-3858","address":"519 Hendrickson Street, Templeton, Hawaii, 2389","about":"Quis officia occaecat veniam veniam. Ex minim enim labore cupidatat qui. Proident esse deserunt laborum laboris sunt nostrud.\r\n","registered":"2016-05-02T09:18:59 -02:00","latitude":-27.110866,"longitude":-45.09445,"tags":[]}
{"id":21,"isActive":true,"balance":"$1,440.09","picture":"http://placehold.it/32x32","age":40,"color":"blue","name":"Levy Whitley","gender":"male","email":"levywhitley@chorizon.com","phone":"+1 (911) 458-2411","address":"187 Thomas Street, Hachita, North Carolina, 2989","about":"Velit laboris non minim elit sint deserunt fugiat. Aute minim ex commodo aute cillum aliquip fugiat pariatur nulla eiusmod pariatur consectetur. Qui ex ea qui laborum veniam adipisicing magna minim ut. In irure anim voluptate mollit et. Adipisicing labore ea mollit magna aliqua culpa velit est. Excepteur nisi veniam enim velit in ad officia irure laboris.\r\n","registered":"2014-04-30T07:31:38 -02:00","latitude":-6.537315,"longitude":171.813536,"tags":["bug"]}
{"id":22,"isActive":false,"balance":"$2,938.57","picture":"http://placehold.it/32x32","age":35,"color":"blue","name":"Bernard Mcfarland","gender":"male","email":"bernardmcfarland@chorizon.com","phone":"+1 (979) 442-3386","address":"409 Hall Street, Keyport, Federated States Of Micronesia, 7011","about":"Reprehenderit irure aute et anim ullamco enim est tempor id ipsum mollit veniam aute ullamco. Consectetur dolor velit tempor est reprehenderit ut id non est ullamco voluptate. Commodo aute ullamco culpa non voluptate incididunt non culpa culpa nisi id proident cupidatat.\r\n","registered":"2017-08-10T10:07:59 -02:00","latitude":63.766795,"longitude":68.177069,"tags":[]}
{"id":23,"isActive":true,"balance":"$1,678.49","picture":"http://placehold.it/32x32","age":31,"color":"brown","name":"Blanca Mcclain","gender":"female","email":"blancamcclain@chorizon.com","phone":"+1 (976) 439-2772","address":"176 Crooke Avenue, Valle, Virginia, 5373","about":"Aliquip sunt irure ut consectetur elit. Cillum amet incididunt et anim elit in incididunt adipisicing fugiat veniam esse veniam. Nisi qui sit occaecat tempor nostrud est aute cillum anim excepteur laboris magna in. Fugiat fugiat veniam cillum laborum ut pariatur amet nulla nulla. Nostrud mollit in laborum minim exercitation aute. Lorem aute ipsum laboris est adipisicing qui ullamco tempor adipisicing cupidatat mollit.\r\n","registered":"2015-10-12T11:57:28 -02:00","latitude":-8.944564,"longitude":-150.711709,"tags":["bug","wontfix","good first issue"]}
{"id":24,"isActive":true,"balance":"$2,276.87","picture":"http://placehold.it/32x32","age":28,"color":"brown","name":"Espinoza Ford","gender":"male","email":"espinozaford@chorizon.com","phone":"+1 (945) 429-3975","address":"137 Bowery Street, Itmann, District Of Columbia, 1864","about":"Deserunt nisi aliquip esse occaecat laborum qui aliqua excepteur ea cupidatat dolore magna consequat. Culpa aliquip cillum incididunt proident est officia consequat duis. Elit tempor ut cupidatat nisi ea sint non labore aliquip amet. Deserunt labore cupidatat laboris dolor duis occaecat velit aliquip reprehenderit esse. Sit ad qui consectetur id anim nisi amet eiusmod.\r\n","registered":"2014-03-26T02:16:08 -01:00","latitude":-37.137666,"longitude":-51.811757,"tags":["wontfix","bug"]}
{"id":25,"isActive":true,"balance":"$3,973.43","picture":"http://placehold.it/32x32","age":29,"color":"Green","name":"Sykes Conley","gender":"male","email":"sykesconley@chorizon.com","phone":"+1 (851) 401-3916","address":"345 Grand Street, Woodlands, Missouri, 4461","about":"Pariatur ullamco duis reprehenderit ad sit dolore. Dolore ex fugiat labore incididunt nostrud. Minim deserunt officia sunt enim magna elit veniam reprehenderit nisi cupidatat dolor eiusmod. Veniam laboris sint cillum et laboris nostrud culpa laboris anim. Incididunt velit pariatur cupidatat sit dolore in. Voluptate consectetur officia id nostrud velit mollit dolor. Id laboris consectetur culpa sunt pariatur minim sunt laboris sit.\r\n","registered":"2015-09-12T06:03:56 -02:00","latitude":67.282955,"longitude":-64.341323,"tags":["wontfix"]}
{"id":26,"isActive":false,"balance":"$1,431.50","picture":"http://placehold.it/32x32","age":35,"color":"blue","name":"Barlow Duran","gender":"male","email":"barlowduran@chorizon.com","phone":"+1 (995) 436-2562","address":"481 Everett Avenue, Allison, Nebraska, 3065","about":"Proident quis eu officia adipisicing aliquip. Lorem laborum magna dolor et incididunt cillum excepteur et amet. Veniam consectetur officia fugiat magna consequat dolore elit aute exercitation fugiat excepteur ullamco. Sit qui proident reprehenderit ea ad qui culpa exercitation reprehenderit anim cupidatat. Nulla et duis Lorem cillum duis pariatur amet voluptate labore ut aliqua mollit anim ea. Nostrud incididunt et proident adipisicing non consequat tempor ullamco adipisicing incididunt. Incididunt cupidatat tempor fugiat officia qui eiusmod reprehenderit.\r\n","registered":"2017-06-29T04:28:43 -02:00","latitude":-38.70606,"longitude":55.02816,"tags":["new issue"]}
{"id":27,"isActive":true,"balance":"$3,478.27","picture":"http://placehold.it/32x32","age":31,"color":"blue","name":"Schwartz Morgan","gender":"male","email":"schwartzmorgan@chorizon.com","phone":"+1 (861) 507-2067","address":"451 Lincoln Road, Fairlee, Washington, 2717","about":"Labore eiusmod sint dolore sunt eiusmod esse et in id aliquip. Aliqua consequat occaecat laborum labore ipsum enim non nostrud adipisicing adipisicing cillum occaecat. Duis minim est culpa sunt nulla ullamco adipisicing magna irure. Occaecat quis irure eiusmod fugiat quis commodo reprehenderit labore cillum commodo id et.\r\n","registered":"2016-05-10T08:34:54 -02:00","latitude":-75.886403,"longitude":93.044471,"tags":["bug","bug","wontfix","wontfix"]}
{"id":28,"isActive":true,"balance":"$2,825.59","picture":"http://placehold.it/32x32","age":32,"color":"blue","name":"Kristy Leon","gender":"female","email":"kristyleon@chorizon.com","phone":"+1 (948) 465-2563","address":"594 Macon Street, Floris, South Dakota, 3565","about":"Proident veniam voluptate magna id do. Laboris enim dolor culpa quis. Esse voluptate elit commodo duis incididunt velit aliqua. Qui aute commodo incididunt elit eu Lorem dolore. Non esse duis do reprehenderit culpa minim. Ullamco consequat id do exercitation exercitation mollit ipsum velit eiusmod quis.\r\n","registered":"2014-12-14T04:10:29 -01:00","latitude":-50.01615,"longitude":-68.908804,"tags":["wontfix","good first issue"]}
{"id":29,"isActive":false,"balance":"$3,028.03","picture":"http://placehold.it/32x32","age":39,"color":"blue","name":"Ashley Pittman","gender":"male","email":"ashleypittman@chorizon.com","phone":"+1 (928) 507-3523","address":"646 Adelphi Street, Clara, Colorado, 6056","about":"Incididunt cillum consectetur nulla sit sit labore nulla sit. Ullamco nisi mollit reprehenderit tempor irure in Lorem duis. Sunt eu aute laboris dolore commodo ipsum sint cupidatat veniam amet culpa incididunt aute ad. Quis dolore aliquip id aute mollit eiusmod nisi ipsum ut labore adipisicing do culpa.\r\n","registered":"2016-01-07T10:40:48 -01:00","latitude":-58.766037,"longitude":-124.828485,"tags":["wontfix"]}
{"id":30,"isActive":true,"balance":"$2,021.11","picture":"http://placehold.it/32x32","age":32,"color":"blue","name":"Stacy Espinoza","gender":"female","email":"stacyespinoza@chorizon.com","phone":"+1 (999) 487-3253","address":"931 Alabama Avenue, Bangor, Alaska, 8215","about":"Id reprehenderit cupidatat exercitation anim ad nisi irure. Minim est proident mollit laborum. Duis ad duis eiusmod quis.\r\n","registered":"2014-07-16T06:15:53 -02:00","latitude":41.560197,"longitude":177.697,"tags":["new issue","new issue","bug"]}
{"id":31,"isActive":false,"balance":"$3,609.82","picture":"http://placehold.it/32x32","age":32,"color":"blue","name":"Vilma Garza","gender":"female","email":"vilmagarza@chorizon.com","phone":"+1 (944) 585-2021","address":"565 Tech Place, Sedley, Puerto Rico, 858","about":"Excepteur et fugiat mollit incididunt cupidatat. Mollit nisi veniam sint eu exercitation amet labore. Voluptate est magna est amet qui minim excepteur cupidatat dolor quis id excepteur aliqua reprehenderit. Proident nostrud ex veniam officia nisi enim occaecat ex magna officia id consectetur ad eu. In et est reprehenderit cupidatat ad minim veniam proident nulla elit nisi veniam proident ex. Eu in irure sit veniam amet incididunt fugiat proident quis ullamco laboris.\r\n","registered":"2017-06-30T07:43:52 -02:00","latitude":-12.574889,"longitude":-54.771186,"tags":["new issue","wontfix","wontfix"]}
{"id":32,"isActive":false,"balance":"$2,882.34","picture":"http://placehold.it/32x32","age":38,"color":"brown","name":"June Dunlap","gender":"female","email":"junedunlap@chorizon.com","phone":"+1 (997) 504-2937","address":"353 Cozine Avenue, Goodville, Indiana, 1438","about":"Non dolore ut Lorem dolore amet veniam fugiat reprehenderit ut amet ea ut. Non aliquip cillum ad occaecat non et sint quis proident velit laborum ullamco et. Quis qui tempor eu voluptate et proident duis est commodo laboris ex enim. Nisi aliquip laboris nostrud veniam aliqua ullamco. Et officia proident dolor aliqua incididunt veniam proident.\r\n","registered":"2016-08-23T08:54:11 -02:00","latitude":-27.883363,"longitude":-163.919683,"tags":["new issue","new issue","bug","wontfix"]}
{"id":33,"isActive":true,"balance":"$3,556.54","picture":"http://placehold.it/32x32","age":33,"color":"brown","name":"Cecilia Greer","gender":"female","email":"ceciliagreer@chorizon.com","phone":"+1 (977) 573-3498","address":"696 Withers Street, Lydia, Oklahoma, 3220","about":"Dolor pariatur veniam ad enim eiusmod fugiat ullamco nulla veniam. Dolore dolor sit excepteur veniam adipisicing adipisicing excepteur commodo qui reprehenderit magna exercitation enim reprehenderit. Cupidatat eu ullamco excepteur sint do. Et cupidatat ex adipisicing veniam eu tempor reprehenderit ut eiusmod amet proident veniam nostrud. Tempor ex enim mollit laboris magna tempor. Et aliqua nostrud esse pariatur quis. Ut pariatur ea ipsum pariatur.\r\n","registered":"2017-01-13T11:30:12 -01:00","latitude":60.467215,"longitude":84.684575,"tags":["wontfix","good first issue","good first issue","wontfix"]}
{"id":34,"isActive":true,"balance":"$1,413.35","picture":"http://placehold.it/32x32","age":33,"color":"brown","name":"Mckay Schroeder","gender":"male","email":"mckayschroeder@chorizon.com","phone":"+1 (816) 480-3657","address":"958 Miami Court, Rehrersburg, Northern Mariana Islands, 567","about":"Amet do velit excepteur tempor sit eu voluptate. Excepteur amet culpa ipsum in pariatur mollit amet nisi veniam. Laboris elit consectetur id anim qui laboris. Reprehenderit mollit laboris occaecat esse sunt Lorem Lorem sunt occaecat.\r\n","registered":"2016-02-08T04:50:15 -01:00","latitude":-72.413287,"longitude":-159.254371,"tags":["good first issue"]}
{"id":35,"isActive":true,"balance":"$2,306.53","picture":"http://placehold.it/32x32","age":34,"color":"blue","name":"Sawyer Mccormick","gender":"male","email":"sawyermccormick@chorizon.com","phone":"+1 (829) 569-3012","address":"749 Apollo Street, Eastvale, Texas, 7373","about":"Est irure ex occaecat aute. Lorem ad ullamco esse cillum deserunt qui proident anim officia dolore. Incididunt tempor cupidatat nulla cupidatat ullamco reprehenderit Lorem. Laboris tempor do pariatur sint non officia id qui deserunt amet Lorem pariatur consectetur exercitation. Adipisicing reprehenderit pariatur duis ex cupidatat cillum ad laboris ex. Sunt voluptate pariatur esse amet dolore minim aliquip reprehenderit nisi velit mollit.\r\n","registered":"2019-11-30T11:53:23 -01:00","latitude":-48.978194,"longitude":110.950191,"tags":["good first issue","new issue","new issue","bug"]}
{"id":36,"isActive":false,"balance":"$1,844.54","picture":"http://placehold.it/32x32","age":37,"color":"brown","name":"Barbra Valenzuela","gender":"female","email":"barbravalenzuela@chorizon.com","phone":"+1 (992) 512-2649","address":"617 Schenck Court, Reinerton, Michigan, 2908","about":"Deserunt adipisicing nisi et amet aliqua amet. Veniam occaecat et elit excepteur veniam. Aute irure culpa nostrud occaecat. Excepteur sit aute mollit commodo. Do ex pariatur consequat sint Lorem veniam laborum excepteur. Non voluptate ex laborum enim irure. Adipisicing excepteur anim elit esse.\r\n","registered":"2019-03-29T01:59:31 -01:00","latitude":45.193723,"longitude":-12.486778,"tags":["new issue","new issue","wontfix","wontfix"]}
{"id":37,"isActive":false,"balance":"$3,469.82","picture":"http://placehold.it/32x32","age":39,"color":"brown","name":"Opal Weiss","gender":"female","email":"opalweiss@chorizon.com","phone":"+1 (809) 400-3079","address":"535 Bogart Street, Frizzleburg, Arizona, 5222","about":"Reprehenderit nostrud minim adipisicing voluptate nisi consequat id sint. Proident tempor est esse cupidatat minim irure esse do do sint dolor. In officia duis et voluptate Lorem minim cupidatat ipsum enim qui dolor quis in Lorem. Aliquip commodo ex quis exercitation reprehenderit. Lorem id reprehenderit cillum adipisicing sunt ipsum incididunt incididunt.\r\n","registered":"2019-09-04T07:22:28 -02:00","latitude":72.50376,"longitude":61.656435,"tags":["bug","bug","good first issue","good first issue"]}
{"id":38,"isActive":true,"balance":"$1,992.38","picture":"http://placehold.it/32x32","age":40,"color":"Green","name":"Christina Short","gender":"female","email":"christinashort@chorizon.com","phone":"+1 (884) 589-2705","address":"594 Willmohr Street, Dexter, Montana, 660","about":"Quis commodo eu dolor incididunt. Nisi magna mollit nostrud do consequat irure exercitation mollit aute deserunt. Magna aute quis occaecat incididunt deserunt tempor nostrud sint ullamco ipsum. Anim in occaecat exercitation laborum nostrud eiusmod reprehenderit ea culpa et sit. Culpa voluptate consectetur nostrud do eu fugiat excepteur officia pariatur enim duis amet.\r\n","registered":"2014-01-21T09:31:56 -01:00","latitude":-42.762739,"longitude":77.052349,"tags":["bug","new issue"]}
{"id":39,"isActive":false,"balance":"$1,722.85","picture":"http://placehold.it/32x32","age":29,"color":"brown","name":"Golden Horton","gender":"male","email":"goldenhorton@chorizon.com","phone":"+1 (903) 426-2489","address":"191 Schenck Avenue, Mayfair, North Dakota, 5000","about":"Cillum velit aliqua velit in quis do mollit in et veniam. Nostrud proident non irure commodo. Ea culpa duis enim adipisicing do sint et est culpa reprehenderit officia laborum. Non et nostrud tempor nostrud nostrud ea duis esse laboris occaecat laborum. In eu ipsum sit tempor esse eiusmod enim aliquip aute. Officia ea anim ea ea. Consequat aute deserunt tempor nulla nisi tempor velit.\r\n","registered":"2015-08-19T02:56:41 -02:00","latitude":69.922534,"longitude":9.881433,"tags":["bug"]}
{"id":40,"isActive":false,"balance":"$1,656.54","picture":"http://placehold.it/32x32","age":21,"color":"blue","name":"Stafford Emerson","gender":"male","email":"staffordemerson@chorizon.com","phone":"+1 (992) 455-2573","address":"523 Thornton Street, Conway, Vermont, 6331","about":"Adipisicing cupidatat elit minim elit nostrud elit non eiusmod sunt ut. Enim minim irure officia irure occaecat mollit eu nostrud eiusmod adipisicing sunt. Elit deserunt commodo minim dolor qui. Nostrud officia ex proident mollit et dolor tempor pariatur. Ex consequat tempor eiusmod irure mollit cillum laboris est veniam ea mollit deserunt. Tempor sit voluptate excepteur elit ullamco.\r\n","registered":"2019-02-16T04:07:08 -01:00","latitude":-29.143111,"longitude":-57.207703,"tags":["wontfix","good first issue","good first issue"]}
{"id":41,"isActive":false,"balance":"$1,861.56","picture":"http://placehold.it/32x32","age":21,"color":"brown","name":"Salinas Gamble","gender":"male","email":"salinasgamble@chorizon.com","phone":"+1 (901) 525-2373","address":"991 Nostrand Avenue, Kansas, Mississippi, 6756","about":"Consequat tempor adipisicing cupidatat aliquip. Mollit proident incididunt ad ipsum laborum. Dolor in elit minim aliquip aliquip voluptate reprehenderit mollit eiusmod excepteur aliquip minim nulla cupidatat.\r\n","registered":"2017-08-21T05:47:53 -02:00","latitude":-22.593819,"longitude":-63.613004,"tags":["good first issue","bug","bug","wontfix"]}
{"id":42,"isActive":true,"balance":"$3,179.74","picture":"http://placehold.it/32x32","age":34,"color":"brown","name":"Graciela Russell","gender":"female","email":"gracielarussell@chorizon.com","phone":"+1 (893) 464-3951","address":"361 Greenpoint Avenue, Shrewsbury, New Jersey, 4713","about":"Ex amet duis incididunt consequat minim dolore deserunt reprehenderit adipisicing in mollit aliqua adipisicing sunt. In ullamco eu qui est eiusmod qui. Fugiat esse est Lorem dolore nisi mollit exercitation. Aliquip occaecat esse exercitation ex non aute velit excepteur duis aliquip id. Velit id non aliquip fugiat minim qui exercitation culpa tempor consectetur. Minim dolor labore ea aute aute eu.\r\n","registered":"2015-05-18T09:52:56 -02:00","latitude":-14.634444,"longitude":12.931783,"tags":["wontfix","bug","wontfix"]}
{"id":43,"isActive":true,"balance":"$1,777.38","picture":"http://placehold.it/32x32","age":25,"color":"blue","name":"Arnold Bender","gender":"male","email":"arnoldbender@chorizon.com","phone":"+1 (945) 581-3808","address":"781 Lorraine Street, Gallina, American Samoa, 1832","about":"Et mollit laboris duis ut duis eiusmod aute laborum duis irure labore deserunt. Ut occaecat ullamco quis excepteur. Et commodo non sint laboris tempor laboris aliqua consequat magna ea aute minim tempor pariatur. Dolore occaecat qui irure Lorem nulla consequat non.\r\n","registered":"2018-12-23T02:26:30 -01:00","latitude":41.208579,"longitude":51.948925,"tags":["bug","good first issue","good first issue","wontfix"]}
{"id":44,"isActive":true,"balance":"$2,893.45","picture":"http://placehold.it/32x32","age":22,"color":"Green","name":"Joni Spears","gender":"female","email":"jonispears@chorizon.com","phone":"+1 (916) 565-2124","address":"307 Harwood Place, Canterwood, Maryland, 2047","about":"Dolore consequat deserunt aliquip duis consequat minim occaecat enim est. Nulla aute reprehenderit est enim duis cillum ullamco aliquip eiusmod sunt. Labore eiusmod aliqua Lorem velit aliqua quis ex mollit mollit duis culpa et qui in. Cupidatat est id ullamco irure dolor nulla.\r\n","registered":"2015-03-01T12:38:28 -01:00","latitude":8.19071,"longitude":146.323808,"tags":["wontfix","new issue","good first issue","good first issue"]}
{"id":45,"isActive":true,"balance":"$2,830.36","picture":"http://placehold.it/32x32","age":20,"color":"brown","name":"Irene Bennett","gender":"female","email":"irenebennett@chorizon.com","phone":"+1 (904) 431-2211","address":"353 Ridgecrest Terrace, Springdale, Marshall Islands, 2686","about":"Consectetur Lorem dolor reprehenderit sunt duis. Pariatur non velit velit veniam elit reprehenderit in. Aute quis Lorem quis pariatur Lorem incididunt nulla magna adipisicing. Et id occaecat labore officia occaecat occaecat adipisicing.\r\n","registered":"2018-04-17T05:18:51 -02:00","latitude":-36.435177,"longitude":-127.552573,"tags":["bug","wontfix"]}
{"id":46,"isActive":true,"balance":"$1,348.04","picture":"http://placehold.it/32x32","age":34,"color":"Green","name":"Lawson Curtis","gender":"male","email":"lawsoncurtis@chorizon.com","phone":"+1 (896) 532-2172","address":"942 Gerritsen Avenue, Southmont, Kansas, 8915","about":"Amet consectetur minim aute nostrud excepteur sint labore in culpa. Mollit qui quis ea amet sint ex incididunt nulla. Elit id esse ea consectetur laborum consequat occaecat aute consectetur ex. Commodo duis aute elit occaecat cupidatat non consequat ad officia qui dolore nostrud reprehenderit. Occaecat velit velit adipisicing exercitation consectetur. Incididunt et amet nostrud tempor do esse ullamco est Lorem irure. Eu aliqua eu exercitation sint.\r\n","registered":"2016-08-23T01:41:09 -02:00","latitude":-48.783539,"longitude":20.492944,"tags":[]}
{"id":47,"isActive":true,"balance":"$1,132.41","picture":"http://placehold.it/32x32","age":38,"color":"Green","name":"Goff May","gender":"male","email":"goffmay@chorizon.com","phone":"+1 (859) 453-3415","address":"225 Rutledge Street, Boonville, Massachusetts, 4081","about":"Sint occaecat velit anim sint reprehenderit est. Adipisicing ea pariatur amet id non ex. Aute id laborum tempor aliquip magna ex eu incididunt aliquip eiusmod elit quis dolor. Anim est minim deserunt amet exercitation nulla elit nulla nulla culpa ullamco. Velit consectetur ipsum amet proident labore excepteur ut id excepteur voluptate commodo. Exercitation et laboris labore esse est laboris consectetur et sint.\r\n","registered":"2014-10-25T07:32:30 -02:00","latitude":13.079225,"longitude":76.215086,"tags":["bug"]}
{"id":48,"isActive":true,"balance":"$1,201.87","picture":"http://placehold.it/32x32","age":38,"color":"Green","name":"Goodman Becker","gender":"male","email":"goodmanbecker@chorizon.com","phone":"+1 (825) 470-3437","address":"388 Seigel Street, Sisquoc, Kentucky, 8231","about":"Velit excepteur aute esse fugiat laboris aliqua magna. Est ex sit do labore ullamco aliquip. Duis ea commodo nostrud in fugiat. Aliqua consequat mollit dolore excepteur nisi ullamco commodo ea nostrud ea minim. Minim occaecat ut laboris ea consectetur veniam ipsum qui sit tempor incididunt anim amet eu. Velit sint incididunt eu adipisicing ipsum qui labore. Anim commodo labore reprehenderit aliquip labore elit minim deserunt amet exercitation officia non ea consectetur.\r\n","registered":"2019-09-05T04:49:03 -02:00","latitude":-23.792094,"longitude":-13.621221,"tags":["bug","bug","wontfix","new issue"]}
{"id":49,"isActive":true,"balance":"$1,476.39","picture":"http://placehold.it/32x32","age":28,"color":"brown","name":"Maureen Dale","gender":"female","email":"maureendale@chorizon.com","phone":"+1 (984) 538-3684","address":"817 Newton Street, Bannock, Wyoming, 1468","about":"Tempor mollit exercitation excepteur cupidatat reprehenderit ad ex. Nulla laborum proident incididunt quis. Esse laborum deserunt qui anim. Sunt incididunt pariatur cillum anim proident eu ullamco dolor excepteur. Ullamco amet culpa nostrud adipisicing duis aliqua consequat duis non eu id mollit velit. Deserunt ullamco amet in occaecat.\r\n","registered":"2018-04-26T06:04:40 -02:00","latitude":-64.196802,"longitude":-117.396238,"tags":["wontfix"]}
{"id":50,"isActive":true,"balance":"$1,947.08","picture":"http://placehold.it/32x32","age":21,"color":"Green","name":"Guerra Mcintyre","gender":"male","email":"guerramcintyre@chorizon.com","phone":"+1 (951) 536-2043","address":"423 Lombardy Street, Stewart, West Virginia, 908","about":"Sunt proident proident deserunt exercitation consectetur deserunt labore non commodo amet. Duis aute aliqua amet deserunt consectetur velit. Quis Lorem dolore occaecat deserunt reprehenderit non esse ullamco nostrud enim sunt ea fugiat. Elit amet veniam eu magna tempor. Mollit cupidatat laboris ex deserunt et labore sit tempor nostrud anim. Tempor aliqua occaecat voluptate reprehenderit eiusmod aliqua incididunt officia.\r\n","registered":"2015-07-16T05:11:42 -02:00","latitude":79.733743,"longitude":-20.602356,"tags":["bug","good first issue","good first issue"]}
{"id":51,"isActive":true,"balance":"$2,960.90","picture":"http://placehold.it/32x32","age":23,"color":"blue","name":"Key Cervantes","gender":"male","email":"keycervantes@chorizon.com","phone":"+1 (931) 474-3865","address":"410 Barbey Street, Vernon, Oregon, 2328","about":"Duis amet minim eu consectetur laborum ad exercitation eiusmod nulla velit cillum consectetur. Nostrud aliqua cillum minim veniam quis do cupidatat mollit laborum. Culpa fugiat consectetur cillum non occaecat tempor non fugiat esse pariatur in ullamco. Occaecat amet officia et culpa officia deserunt in qui magna aute consequat eiusmod.\r\n","registered":"2019-12-15T12:13:35 -01:00","latitude":47.627647,"longitude":117.049918,"tags":["new issue"]}
{"id":52,"isActive":false,"balance":"$1,884.02","picture":"http://placehold.it/32x32","age":35,"color":"blue","name":"Karen Nelson","gender":"female","email":"karennelson@chorizon.com","phone":"+1 (993) 528-3607","address":"930 Frank Court, Dunbar, New York, 8810","about":"Occaecat officia veniam consectetur aliqua laboris dolor irure nulla. Lorem ipsum sit nisi veniam mollit ea sint nisi irure. Eiusmod officia do laboris nostrud enim ullamco nulla officia in Lorem qui. Sint sunt incididunt quis reprehenderit incididunt. Sit dolore nulla consequat ea magna.\r\n","registered":"2014-06-23T09:21:44 -02:00","latitude":-59.059033,"longitude":76.565373,"tags":["new issue","bug"]}
{"id":53,"isActive":true,"balance":"$3,559.55","picture":"http://placehold.it/32x32","age":32,"color":"brown","name":"Caitlin Burnett","gender":"female","email":"caitlinburnett@chorizon.com","phone":"+1 (945) 480-2796","address":"516 Senator Street, Emory, Iowa, 4145","about":"In aliqua ea esse in. Magna aute cupidatat culpa enim proident ad adipisicing laborum consequat exercitation nisi. Qui esse aliqua duis anim nulla esse enim nostrud ipsum tempor. Lorem deserunt ullamco do mollit culpa ipsum duis Lorem velit duis occaecat.\r\n","registered":"2019-01-09T02:26:31 -01:00","latitude":-82.774237,"longitude":42.316194,"tags":["bug","good first issue"]}
{"id":54,"isActive":true,"balance":"$2,113.29","picture":"http://placehold.it/32x32","age":28,"color":"Green","name":"Richards Walls","gender":"male","email":"richardswalls@chorizon.com","phone":"+1 (865) 517-2982","address":"959 Brightwater Avenue, Stevens, Nevada, 2968","about":"Ad aute Lorem non pariatur anim ullamco ad amet eiusmod tempor velit. Mollit et tempor nisi aute adipisicing exercitation mollit do amet amet est fugiat enim. Ex voluptate nulla id tempor officia ullamco cillum dolor irure irure mollit et magna nisi. Pariatur voluptate qui laboris dolor id. Eu ipsum nulla dolore aute voluptate deserunt anim aliqua. Ut enim enim velit officia est nisi. Duis amet ut veniam aliquip minim tempor Lorem amet Lorem dolor duis.\r\n","registered":"2014-09-25T06:51:22 -02:00","latitude":80.09202,"longitude":87.49759,"tags":["wontfix","wontfix","bug"]}
{"id":55,"isActive":true,"balance":"$1,977.66","picture":"http://placehold.it/32x32","age":36,"color":"brown","name":"Combs Stanley","gender":"male","email":"combsstanley@chorizon.com","phone":"+1 (827) 419-2053","address":"153 Beverley Road, Siglerville, South Carolina, 3666","about":"Commodo ullamco consequat eu ipsum eiusmod aute voluptate in. Ea laboris id deserunt nostrud pariatur et laboris minim tempor quis qui consequat non esse. Magna elit commodo mollit veniam Lorem enim nisi pariatur. Nisi non nisi adipisicing ea ipsum laborum dolore cillum. Amet do nisi esse laboris ipsum proident non veniam ullamco ea cupidatat sunt. Aliquip aute cillum quis laboris consectetur enim eiusmod nisi non id ullamco cupidatat sunt.\r\n","registered":"2019-08-22T07:53:15 -02:00","latitude":78.386181,"longitude":143.661058,"tags":[]}
{"id":56,"isActive":false,"balance":"$3,886.12","picture":"http://placehold.it/32x32","age":23,"color":"brown","name":"Tucker Barry","gender":"male","email":"tuckerbarry@chorizon.com","phone":"+1 (808) 544-3433","address":"805 Jamaica Avenue, Cornfields, Minnesota, 3689","about":"Enim est sunt ullamco nulla aliqua commodo. Enim minim veniam non fugiat id tempor ad velit quis velit ad sunt consectetur laborum. Cillum deserunt tempor est adipisicing Lorem esse qui. Magna quis sunt cillum ea officia adipisicing eiusmod eu et nisi consectetur.\r\n","registered":"2016-08-29T07:28:00 -02:00","latitude":71.701551,"longitude":9.903068,"tags":[]}
{"id":57,"isActive":false,"balance":"$1,844.56","picture":"http://placehold.it/32x32","age":20,"color":"Green","name":"Kaitlin Conner","gender":"female","email":"kaitlinconner@chorizon.com","phone":"+1 (862) 467-2666","address":"501 Knight Court, Joppa, Rhode Island, 274","about":"Occaecat id reprehenderit pariatur ea. Incididunt laborum reprehenderit ipsum velit labore excepteur nostrud voluptate officia ut culpa. Sint sunt in qui duis cillum aliqua do ullamco. Non do aute excepteur non labore sint consectetur tempor ad ea fugiat commodo labore. Dolor tempor culpa Lorem voluptate esse nostrud anim tempor irure reprehenderit. Deserunt ipsum cillum fugiat ut labore labore anim. In aliqua sunt dolore irure reprehenderit voluptate commodo consequat mollit amet laboris sit anim.\r\n","registered":"2019-05-30T06:38:24 -02:00","latitude":15.613464,"longitude":171.965629,"tags":[]}
{"id":58,"isActive":true,"balance":"$2,876.10","picture":"http://placehold.it/32x32","age":38,"color":"Green","name":"Mamie Fischer","gender":"female","email":"mamiefischer@chorizon.com","phone":"+1 (948) 545-3901","address":"599 Hunterfly Place, Haena, Georgia, 6005","about":"Cillum eu aliquip ipsum anim in dolore labore ea. Laboris velit esse ea ea aute do adipisicing ullamco elit laborum aute tempor. Esse consectetur quis irure occaecat nisi cillum et consectetur cillum cillum quis quis commodo.\r\n","registered":"2019-05-27T05:07:10 -02:00","latitude":70.915079,"longitude":-48.813584,"tags":["bug","wontfix","wontfix","good first issue"]}
{"id":59,"isActive":true,"balance":"$1,921.58","picture":"http://placehold.it/32x32","age":31,"color":"Green","name":"Harper Carson","gender":"male","email":"harpercarson@chorizon.com","phone":"+1 (912) 430-3243","address":"883 Dennett Place, Knowlton, New Mexico, 9219","about":"Exercitation minim esse proident cillum velit et deserunt incididunt adipisicing minim. Cillum Lorem consectetur laborum id consequat exercitation velit. Magna dolor excepteur sunt deserunt dolor ullamco non sint proident ipsum. Reprehenderit voluptate sit veniam consectetur ea sunt duis labore deserunt ipsum aute. Eiusmod aliqua anim voluptate id duis tempor aliqua commodo sunt. Do officia ea consectetur nostrud eiusmod laborum.\r\n","registered":"2019-12-07T07:33:15 -01:00","latitude":-60.812605,"longitude":-27.129016,"tags":["bug","new issue"]}
{"id":60,"isActive":true,"balance":"$1,770.93","picture":"http://placehold.it/32x32","age":23,"color":"brown","name":"Jody Herrera","gender":"female","email":"jodyherrera@chorizon.com","phone":"+1 (890) 583-3222","address":"261 Jay Street, Strykersville, Ohio, 9248","about":"Sit adipisicing pariatur irure non sint cupidatat ex ipsum pariatur exercitation ea. Enim consequat enim eu eu sint eu elit ex esse aliquip. Pariatur ipsum dolore veniam nisi id tempor elit exercitation dolore ad fugiat labore velit.\r\n","registered":"2016-05-21T01:00:02 -02:00","latitude":-36.846586,"longitude":131.156223,"tags":[]}
{"id":61,"isActive":false,"balance":"$2,813.41","picture":"http://placehold.it/32x32","age":37,"color":"Green","name":"Charles Castillo","gender":"male","email":"charlescastillo@chorizon.com","phone":"+1 (934) 467-2108","address":"675 Morton Street, Rew, Pennsylvania, 137","about":"Velit amet laborum amet sunt sint sit cupidatat deserunt dolor laborum consectetur veniam. Minim cupidatat amet exercitation nostrud ex deserunt ad Lorem amet aute consectetur labore reprehenderit. Minim mollit aliqua et deserunt ex nisi. Id irure dolor labore consequat ipsum consectetur.\r\n","registered":"2019-06-10T02:54:22 -02:00","latitude":-16.423202,"longitude":-146.293752,"tags":["new issue","new issue"]}
{"id":62,"isActive":true,"balance":"$3,341.35","picture":"http://placehold.it/32x32","age":33,"color":"blue","name":"Estelle Ramirez","gender":"female","email":"estelleramirez@chorizon.com","phone":"+1 (816) 459-2073","address":"636 Nolans Lane, Camptown, California, 7794","about":"Dolor proident incididunt ex labore quis ullamco duis. Sit esse laboris nisi eu voluptate nulla cupidatat nulla fugiat veniam. Culpa cillum est esse dolor consequat. Pariatur ex sit irure qui do fugiat. Fugiat culpa veniam est nisi excepteur quis cupidatat et minim in esse minim dolor et. Anim aliquip labore dolor occaecat nisi sunt dolore pariatur veniam nostrud est ut.\r\n","registered":"2015-02-14T01:05:50 -01:00","latitude":-46.591249,"longitude":-83.385587,"tags":["good first issue","bug"]}
{"id":63,"isActive":true,"balance":"$2,478.30","picture":"http://placehold.it/32x32","age":21,"color":"blue","name":"Knowles Hebert","gender":"male","email":"knowleshebert@chorizon.com","phone":"+1 (819) 409-2308","address":"361 Kathleen Court, Gratton, Connecticut, 7254","about":"Esse mollit nulla eiusmod esse duis non proident excepteur labore. Nisi ex culpa do mollit dolor ea deserunt elit anim ipsum nostrud. Cupidatat nostrud duis ipsum dolore amet et. Veniam in cillum ea cillum deserunt excepteur officia laboris nulla. Commodo incididunt aliquip qui sunt dolore occaecat labore do laborum irure. Labore culpa duis pariatur reprehenderit ad laboris occaecat anim cillum et fugiat ea.\r\n","registered":"2016-03-08T08:34:52 -01:00","latitude":71.042482,"longitude":152.460406,"tags":["good first issue","wontfix"]}
{"id":64,"isActive":false,"balance":"$2,559.09","picture":"http://placehold.it/32x32","age":28,"color":"brown","name":"Thelma Mckenzie","gender":"female","email":"thelmamckenzie@chorizon.com","phone":"+1 (941) 596-2777","address":"202 Leonard Street, Riverton, Illinois, 8577","about":"Non ad ipsum elit commodo fugiat Lorem ipsum reprehenderit. Commodo incididunt officia cillum eiusmod officia proident ea incididunt ullamco magna commodo consectetur dolor. Nostrud esse nisi ea laboris. Veniam et dolore nulla excepteur pariatur laborum non. Eiusmod reprehenderit do tempor esse eu eu aliquip. Magna quis consectetur ipsum adipisicing mollit elit ad elit.\r\n","registered":"2020-04-14T12:43:06 -02:00","latitude":16.026129,"longitude":105.464476,"tags":[]}
{"id":65,"isActive":true,"balance":"$1,025.08","picture":"http://placehold.it/32x32","age":34,"color":"blue","name":"Carole Rowland","gender":"female","email":"carolerowland@chorizon.com","phone":"+1 (862) 558-3448","address":"941 Melba Court, Bluetown, Florida, 9555","about":"Ullamco occaecat ipsum aliqua sit proident eu. Occaecat ut consectetur proident culpa aliqua excepteur quis qui anim irure sit proident mollit irure. Proident cupidatat deserunt dolor adipisicing.\r\n","registered":"2014-12-01T05:55:35 -01:00","latitude":-0.191998,"longitude":43.389652,"tags":["wontfix"]}
{"id":66,"isActive":true,"balance":"$1,061.49","picture":"http://placehold.it/32x32","age":35,"color":"brown","name":"Higgins Aguilar","gender":"male","email":"higginsaguilar@chorizon.com","phone":"+1 (911) 540-3791","address":"132 Sackman Street, Layhill, Guam, 8729","about":"Anim ea dolore exercitation minim. Proident cillum non deserunt cupidatat veniam non occaecat aute ullamco irure velit laboris ex aliquip. Voluptate incididunt non ex nulla est ipsum. Amet anim do velit sunt irure sint minim nisi occaecat proident tempor elit exercitation nostrud.\r\n","registered":"2015-04-05T02:10:07 -02:00","latitude":74.702813,"longitude":151.314972,"tags":["bug"]}
{"id":67,"isActive":true,"balance":"$3,510.14","picture":"http://placehold.it/32x32","age":28,"color":"brown","name":"Ilene Gillespie","gender":"female","email":"ilenegillespie@chorizon.com","phone":"+1 (937) 575-2676","address":"835 Lake Street, Naomi, Alabama, 4131","about":"Quis laborum consequat id cupidatat exercitation aute ad ex nulla dolore velit qui proident minim. Et do consequat nisi eiusmod exercitation exercitation enim voluptate elit ullamco. Cupidatat ut adipisicing consequat aute est voluptate sit ipsum culpa ullamco. Ex pariatur ex qui quis qui.\r\n","registered":"2015-06-28T09:41:45 -02:00","latitude":71.573342,"longitude":-95.295989,"tags":["wontfix","wontfix"]}
{"id":68,"isActive":false,"balance":"$1,539.98","picture":"http://placehold.it/32x32","age":24,"color":"Green","name":"Angelina Dyer","gender":"female","email":"angelinadyer@chorizon.com","phone":"+1 (948) 574-3949","address":"575 Division Place, Gorham, Louisiana, 3458","about":"Cillum magna eu est veniam incididunt laboris laborum elit mollit incididunt proident non mollit. Dolor mollit culpa ullamco dolore aliqua adipisicing culpa officia. Reprehenderit minim nisi fugiat consectetur dolore.\r\n","registered":"2014-07-08T06:34:36 -02:00","latitude":-85.649593,"longitude":66.126018,"tags":["good first issue"]}
{"id":69,"isActive":true,"balance":"$3,367.69","picture":"http://placehold.it/32x32","age":30,"color":"brown","name":"Marks Burt","gender":"male","email":"marksburt@chorizon.com","phone":"+1 (895) 497-3138","address":"819 Village Road, Wadsworth, Delaware, 6099","about":"Fugiat tempor aute voluptate proident exercitation tempor esse dolor id. Duis aliquip exercitation Lorem elit magna sint sit. Culpa adipisicing occaecat aliqua officia reprehenderit laboris sint aliquip. Magna do sunt consequat excepteur nisi do commodo non. Cillum officia nostrud consequat excepteur elit proident in. Tempor ipsum in ut qui cupidatat exercitation est nulla exercitation voluptate.\r\n","registered":"2014-08-31T06:12:18 -02:00","latitude":26.854112,"longitude":-143.313948,"tags":["good first issue"]}
{"id":70,"isActive":false,"balance":"$3,755.72","picture":"http://placehold.it/32x32","age":23,"color":"blue","name":"Glass Perkins","gender":"male","email":"glassperkins@chorizon.com","phone":"+1 (923) 486-3725","address":"899 Roosevelt Court, Belleview, Idaho, 1737","about":"Esse magna id labore sunt qui eu enim esse cillum consequat enim eu culpa enim. Duis veniam cupidatat deserunt sunt irure ad Lorem proident aliqua mollit. Laborum mollit aute nulla est. Sunt id proident incididunt ipsum et dolor consectetur laborum enim dolor officia dolore laborum. Est commodo duis et ea consequat labore id id eu aliqua. Qui veniam sit eu aliquip ad sit dolor ullamco et laborum voluptate quis fugiat ex. Exercitation dolore cillum amet ad nisi consectetur occaecat sit aliqua laborum qui proident aliqua exercitation.\r\n","registered":"2015-05-22T05:44:33 -02:00","latitude":54.27147,"longitude":-65.065604,"tags":["wontfix"]}
{"id":71,"isActive":true,"balance":"$3,381.63","picture":"http://placehold.it/32x32","age":38,"color":"Green","name":"Candace Sawyer","gender":"female","email":"candacesawyer@chorizon.com","phone":"+1 (830) 404-2636","address":"334 Arkansas Drive, Bordelonville, Tennessee, 8449","about":"Et aliqua elit incididunt et aliqua. Deserunt ut elit proident ullamco ut. Ex exercitation amet non eu reprehenderit ea voluptate qui sit reprehenderit ad sint excepteur.\r\n","registered":"2014-04-04T08:45:00 -02:00","latitude":6.484262,"longitude":-37.054928,"tags":["new issue","new issue"]}
{"id":72,"isActive":true,"balance":"$1,640.98","picture":"http://placehold.it/32x32","age":27,"color":"Green","name":"Hendricks Martinez","gender":"male","email":"hendricksmartinez@chorizon.com","phone":"+1 (857) 566-3245","address":"636 Agate Court, Newry, Utah, 3304","about":"Do sit culpa amet incididunt officia enim occaecat incididunt excepteur enim tempor deserunt qui. Excepteur adipisicing anim consectetur adipisicing proident anim laborum qui. Aliquip nostrud cupidatat sit ullamco.\r\n","registered":"2018-06-15T10:36:11 -02:00","latitude":86.746034,"longitude":10.347893,"tags":["new issue"]}
{"id":73,"isActive":false,"balance":"$1,239.74","picture":"http://placehold.it/32x32","age":38,"color":"blue","name":"Eleanor Shepherd","gender":"female","email":"eleanorshepherd@chorizon.com","phone":"+1 (894) 567-2617","address":"670 Lafayette Walk, Darlington, Palau, 8803","about":"Adipisicing ad incididunt id veniam magna cupidatat et labore eu deserunt mollit. Lorem voluptate exercitation elit eu aliquip cupidatat occaecat anim excepteur reprehenderit est est. Ipsum excepteur ea mollit qui nisi laboris ex qui. Cillum velit culpa culpa commodo laboris nisi Lorem non elit deserunt incididunt. Officia quis velit nulla sint incididunt duis mollit tempor adipisicing qui officia eu nisi Lorem. Do proident pariatur ex enim nostrud eu aute esse deserunt eu velit quis culpa exercitation. Occaecat ad cupidatat ullamco consequat duis anim deserunt occaecat aliqua sunt consectetur ipsum magna.\r\n","registered":"2020-02-29T12:15:28 -01:00","latitude":35.749621,"longitude":-94.40842,"tags":["good first issue","new issue","new issue","bug"]}
{"id":74,"isActive":true,"balance":"$1,180.90","picture":"http://placehold.it/32x32","age":36,"color":"Green","name":"Stark Wong","gender":"male","email":"starkwong@chorizon.com","phone":"+1 (805) 575-3055","address":"522 Bond Street, Bawcomville, Wisconsin, 324","about":"Aute qui sit incididunt eu adipisicing exercitation sunt nostrud. Id laborum incididunt proident ipsum est cillum esse. Officia ullamco eu ut Lorem do minim ea dolor consequat sit eu est voluptate. Id commodo cillum enim culpa aliquip ullamco nisi Lorem cillum ipsum cupidatat anim officia eu. Dolore sint elit labore pariatur. Officia duis nulla voluptate et nulla ut voluptate laboris eu commodo veniam qui veniam.\r\n","registered":"2020-01-25T10:47:48 -01:00","latitude":-80.452139,"longitude":160.72546,"tags":["wontfix"]}
{"id":75,"isActive":false,"balance":"$1,913.42","picture":"http://placehold.it/32x32","age":24,"color":"Green","name":"Emma Jacobs","gender":"female","email":"emmajacobs@chorizon.com","phone":"+1 (899) 554-3847","address":"173 Tapscott Street, Esmont, Maine, 7450","about":"Laboris consequat consectetur tempor labore ullamco ullamco voluptate quis quis duis ut ad. In est irure quis amet sunt nulla ad ut sit labore ut eu quis duis. Nostrud cupidatat aliqua sunt occaecat minim id consequat officia deserunt laborum. Ea dolor reprehenderit laborum veniam exercitation est nostrud excepteur laborum minim id qui et.\r\n","registered":"2019-03-29T06:24:13 -01:00","latitude":-35.53722,"longitude":155.703874,"tags":[]}
{"id":76,"isActive":false,"balance":"$1,274.29","picture":"http://placehold.it/32x32","age":25,"color":"Green","name":"Clarice Gardner","gender":"female","email":"claricegardner@chorizon.com","phone":"+1 (810) 407-3258","address":"894 Brooklyn Road, Utting, New Hampshire, 6404","about":"Elit occaecat aute ea adipisicing mollit cupidatat aliquip excepteur veniam minim. Sunt quis dolore in commodo aute esse quis. Lorem in cillum commodo eu anim commodo mollit. Adipisicing enim sunt adipisicing cupidatat adipisicing eiusmod eu do sit nisi.\r\n","registered":"2014-10-20T10:13:32 -02:00","latitude":17.11935,"longitude":65.38197,"tags":["new issue","wontfix"]}


@@ -0,0 +1,59 @@
{
"rankingRules": [
"typo",
"words",
"proximity",
"attribute",
"wordsPosition",
"exactness"
],
"distinctAttribute": "email",
"searchableAttributes": [
"balance",
"picture",
"age",
"color",
"name",
"gender",
"email",
"phone",
"address",
"about",
"registered",
"latitude",
"longitude",
"tags"
],
"displayedAttributes": [
"id",
"isActive",
"balance",
"picture",
"age",
"color",
"name",
"gender",
"email",
"phone",
"address",
"about",
"registered",
"latitude",
"longitude",
"tags"
],
"stopWords": [
"in",
"ad"
],
"synonyms": {
"wolverine": ["xmen", "logan"],
"logan": ["wolverine", "xmen"]
},
"filterableAttributes": [
"gender",
"color",
"tags",
"isActive"
]
}
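Note that this fixture appears to use an older Meilisearch settings format (see `wordsPosition` in the ranking rules). The HTTP tests elsewhere in this commit apply settings through the current route instead; a minimal sketch, assuming the `Server` test helpers shown later in this diff:

    let mut server = Server::new_auth().await;
    server.use_api_key("MASTER_KEY");
    let index = server.index("test");
    // update_settings issues a PATCH to /indexes/test/settings
    let (response, code) = index
        .update_settings(json!({ "distinctAttribute": "email", "stopWords": ["in", "ad"] }))
        .await;
    assert_eq!(202, code, "{:?}", &response);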


@@ -0,0 +1,2 @@
{"status": "processed","updateId": 0,"type": {"name":"Settings","settings":{"ranking_rules":{"Update":["Typo","Words","Proximity","Attribute","WordsPosition","Exactness"]},"distinct_attribute":"Nothing","primary_key":"Nothing","searchable_attributes":{"Update":["balance","picture","age","color","name","gender","email","phone","address","about","registered","latitude","longitude","tags"]},"displayed_attributes":{"Update":["about","address","age","balance","color","email","gender","id","isActive","latitude","longitude","name","phone","picture","registered","tags"]},"stop_words":"Nothing","synonyms":"Nothing","filterable_attributes":"Nothing"}}}
{"status": "processed", "updateId": 1, "type": { "name": "DocumentsAddition"}}

File diff suppressed because it is too large.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large.


@@ -0,0 +1,693 @@
use std::collections::{HashMap, HashSet};
use ::time::format_description::well_known::Rfc3339;
use maplit::{hashmap, hashset};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use time::{Duration, OffsetDateTime};
use crate::common::Server;
pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
Lazy::new(|| {
let mut authorizations = hashmap! {
("POST", "/indexes/products/search") => hashset!{"search", "*"},
("GET", "/indexes/products/search") => hashset!{"search", "*"},
("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
("GET", "/tasks/0") => hashset!{"tasks.get", "tasks.*", "*"},
("PATCH", "/indexes/products/") => hashset!{"indexes.update", "indexes.*", "*"},
("GET", "/indexes/products/") => hashset!{"indexes.get", "indexes.*", "*"},
("DELETE", "/indexes/products/") => hashset!{"indexes.delete", "indexes.*", "*"},
("POST", "/indexes") => hashset!{"indexes.create", "indexes.*", "*"},
("GET", "/indexes") => hashset!{"indexes.get", "indexes.*", "*"},
("POST", "/swap-indexes") => hashset!{"indexes.swap", "indexes.*", "*"},
("GET", "/indexes/products/settings") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/ranking-rules") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/stop-words") => hashset!{"settings.get", "settings.*", "*"},
("GET", "/indexes/products/settings/synonyms") => hashset!{"settings.get", "settings.*", "*"},
("DELETE", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings") => hashset!{"settings.update", "settings.*", "*"},
("PATCH", "/indexes/products/settings/typo-tolerance") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/ranking-rules") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/stop-words") => hashset!{"settings.update", "settings.*", "*"},
("PUT", "/indexes/products/settings/synonyms") => hashset!{"settings.update", "settings.*", "*"},
("GET", "/indexes/products/stats") => hashset!{"stats.get", "stats.*", "*"},
("GET", "/stats") => hashset!{"stats.get", "stats.*", "*"},
("POST", "/dumps") => hashset!{"dumps.create", "dumps.*", "*"},
("GET", "/version") => hashset!{"version", "*"},
("PATCH", "/keys/mykey/") => hashset!{"keys.update", "*"},
("GET", "/keys/mykey/") => hashset!{"keys.get", "*"},
("DELETE", "/keys/mykey/") => hashset!{"keys.delete", "*"},
("POST", "/keys") => hashset!{"keys.create", "*"},
("GET", "/keys") => hashset!{"keys.get", "*"},
};
if cfg!(feature = "metrics") {
authorizations.insert(("GET", "/metrics"), hashset! {"metrics.get", "metrics.*", "*"});
}
authorizations
});
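/// Union of every action that appears in `AUTHORIZATIONS`; the tests below use
/// it to build keys that hold, or deliberately lack, a given permission.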
pub static ALL_ACTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| {
AUTHORIZATIONS.values().cloned().reduce(|l, r| l.union(&r).cloned().collect()).unwrap()
});
static INVALID_RESPONSE: Lazy<Value> = Lazy::new(|| {
json!({"message": "The provided API key is invalid.",
"code": "invalid_api_key",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#invalid_api_key"
})
});
#[actix_rt::test]
async fn error_access_expired_key() {
use std::{thread, time};
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["products"],
"actions": ALL_ACTIONS.clone(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::seconds(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
// wait until the key has expired.
thread::sleep(time::Duration::new(1, 0));
for (method, route) in AUTHORIZATIONS.keys() {
let (response, code) = server.dummy_request(method, route).await;
assert_eq!(response, INVALID_RESPONSE.clone(), "on route: {:?} - {:?}", method, route);
assert_eq!(403, code, "{:?}", &response);
}
}
#[actix_rt::test]
async fn error_access_unauthorized_index() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["sales"],
"actions": ALL_ACTIONS.clone(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
for (method, route) in AUTHORIZATIONS
.keys()
// keep only the `products` index routes
.filter(|(_, route)| route.starts_with("/indexes/products"))
{
let (response, code) = server.dummy_request(method, route).await;
assert_eq!(response, INVALID_RESPONSE.clone(), "on route: {:?} - {:?}", method, route);
assert_eq!(403, code, "{:?}", &response);
}
}
#[actix_rt::test]
async fn error_access_unauthorized_action() {
let mut server = Server::new_auth().await;
for ((method, route), action) in AUTHORIZATIONS.iter() {
// create a new API key that lacks the action required by this route.
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["products"],
"actions": ALL_ACTIONS.difference(action).collect::<Vec<_>>(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.dummy_request(method, route).await;
assert_eq!(response, INVALID_RESPONSE.clone(), "on route: {:?} - {:?}", method, route);
assert_eq!(403, code, "{:?}", &response);
}
}
#[actix_rt::test]
async fn access_authorized_master_key() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
// master key must have access to all routes.
for ((method, route), _) in AUTHORIZATIONS.iter() {
let (response, code) = server.dummy_request(method, route).await;
assert_ne!(response, INVALID_RESPONSE.clone(), "on route: {:?} - {:?}", method, route);
assert_ne!(code, 403);
}
}
#[actix_rt::test]
async fn access_authorized_restricted_index() {
let mut server = Server::new_auth().await;
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for action in actions {
// create a new API key allowing only the needed action.
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["products"],
"actions": [action],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.dummy_request(method, route).await;
assert_ne!(
response,
INVALID_RESPONSE.clone(),
"on route: {:?} - {:?} with action: {:?}",
method,
route,
action
);
assert_ne!(code, 403);
}
}
}
#[actix_rt::test]
async fn access_authorized_no_index_restriction() {
let mut server = Server::new_auth().await;
for ((method, route), actions) in AUTHORIZATIONS.iter() {
for action in actions {
// create a new API key allowing only the needed action.
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["*"],
"actions": [action],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.dummy_request(method, route).await;
assert_ne!(
response,
INVALID_RESPONSE.clone(),
"on route: {:?} - {:?} with action: {:?}",
method,
route,
action
);
assert_ne!(code, 403);
}
}
}
#[actix_rt::test]
async fn access_authorized_stats_restricted_index() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to the `products` index only.
let content = json!({
"indexes": ["products"],
"actions": ["stats.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.stats().await;
assert_eq!(200, code, "{:?}", &response);
// the key should have access to the `products` index.
assert!(response["indexes"].get("products").is_some());
// the key should not have access to the `test` index.
assert!(response["indexes"].get("test").is_none());
}
#[actix_rt::test]
async fn access_authorized_stats_no_index_restriction() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to all indexes.
let content = json!({
"indexes": ["*"],
"actions": ["stats.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.stats().await;
assert_eq!(200, code, "{:?}", &response);
// the key should have access to the `products` index.
assert!(response["indexes"].get("products").is_some());
// the key should have access to the `test` index.
assert!(response["indexes"].get("test").is_some());
}
#[actix_rt::test]
async fn list_authorized_indexes_restricted_index() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to the `products` index only.
let content = json!({
"indexes": ["products"],
"actions": ["indexes.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(200, code, "{:?}", &response);
let response = response["results"].as_array().unwrap();
// the key should have access to the `products` index.
assert!(response.iter().any(|index| index["uid"] == "products"));
// the key should not have access to the `test` index.
assert!(!response.iter().any(|index| index["uid"] == "test"));
}
#[actix_rt::test]
async fn list_authorized_indexes_no_index_restriction() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to all indexes.
let content = json!({
"indexes": ["*"],
"actions": ["indexes.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(200, code, "{:?}", &response);
let response = response["results"].as_array().unwrap();
// the key should have access to the `products` index.
assert!(response.iter().any(|index| index["uid"] == "products"));
// the key should have access to the `test` index.
assert!(response.iter().any(|index| index["uid"] == "test"));
}
#[actix_rt::test]
async fn list_authorized_tasks_restricted_index() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to the `products` index only.
let content = json!({
"indexes": ["products"],
"actions": ["tasks.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.service.get("/tasks").await;
assert_eq!(200, code, "{:?}", &response);
println!("{}", response);
let response = response["results"].as_array().unwrap();
// the key should have access to the `products` index.
assert!(response.iter().any(|task| task["indexUid"] == "products"));
// the key should not have access to the `test` index.
assert!(!response.iter().any(|task| task["indexUid"] == "test"));
}
#[actix_rt::test]
async fn list_authorized_tasks_no_index_restriction() {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
// create index `test`
let index = server.index("test");
let (response, code) = index.create(Some("id")).await;
assert_eq!(202, code, "{:?}", &response);
// create index `products`
let index = server.index("products");
let (response, code) = index.create(Some("product_id")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
// create a key with access to all indexes.
let content = json!({
"indexes": ["*"],
"actions": ["tasks.get"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let (response, code) = server.service.get("/tasks").await;
assert_eq!(200, code, "{:?}", &response);
let response = response["results"].as_array().unwrap();
// the key should have access to the `products` index.
assert!(response.iter().any(|task| task["indexUid"] == "products"));
// the key should have access to the `test` index.
assert!(response.iter().any(|task| task["indexUid"] == "test"));
}
#[actix_rt::test]
async fn error_creating_index_without_action() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
// create a key with access to all indexes.
let content = json!({
"indexes": ["*"],
// Give every action except the ones that allow creating an index.
"actions": ALL_ACTIONS.iter().cloned().filter(|a| !AUTHORIZATIONS.get(&("POST","/indexes")).unwrap().contains(a)).collect::<Vec<_>>(),
"expiresAt": "2050-11-13T00:00:00Z"
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
let expected_error = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
// try to create an index via the add-documents route
let index = server.index("test");
let documents = json!([
{
"id": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
let response = index.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
// try to create an index via the update-settings route
let settings = json!({ "distinctAttribute": "test"});
let (response, code) = index.update_settings(settings).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
let response = index.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
// try to create an index via a specialized settings route
let (response, code) = index.update_distinct_attribute(json!("test")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
let response = index.wait_task(task_id).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_error.clone());
}
#[actix_rt::test]
async fn lazy_create_index() {
let mut server = Server::new_auth().await;
// create keys with access to all indexes.
let contents = vec![
json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
json!({
"indexes": ["*"],
"actions": ["indexes.*", "documents.*", "settings.*", "tasks.*"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
json!({
"indexes": ["*"],
"actions": ["indexes.create", "documents.add", "settings.update", "tasks.get"],
"expiresAt": "2050-11-13T00:00:00Z"
}),
];
for content in contents {
server.use_api_key("MASTER_KEY");
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
// try to create an index via the add-documents route
let index = server.index("test");
let documents = json!([
{
"id": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
// try to create an index via the update-settings route
let index = server.index("test1");
let settings = json!({ "distinctAttribute": "test"});
let (response, code) = index.update_settings(settings).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
// try to create an index via a specialized settings route
let index = server.index("test2");
let (response, code) = index.update_distinct_attribute(json!("test")).await;
assert_eq!(202, code, "{:?}", &response);
let task_id = response["taskUid"].as_u64().unwrap();
index.wait_task(task_id).await;
let (response, code) = index.get_task(task_id).await;
assert_eq!(200, code, "{:?}", &response);
assert_eq!(response["status"], "succeeded");
}
}
#[actix_rt::test]
async fn error_creating_index_without_index() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
// create a key restricted to the `unexpected` index only.
let content = json!({
"indexes": ["unexpected"],
"actions": ["*"],
"expiresAt": "2050-11-13T00:00:00Z"
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(201, code, "{:?}", &response);
assert!(response["key"].is_string());
// use created key.
let key = response["key"].as_str().unwrap();
server.use_api_key(key);
// try to create an index via the add-documents route
let index = server.index("test");
let documents = json!([
{
"id": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(403, code, "{:?}", &response);
// try to create an index via the update-settings route
let index = server.index("test1");
let settings = json!({ "distinctAttribute": "test"});
let (response, code) = index.update_settings(settings).await;
assert_eq!(403, code, "{:?}", &response);
// try to create an index via a specialized settings route
let index = server.index("test2");
let (response, code) = index.update_distinct_attribute(json!("test")).await;
assert_eq!(403, code, "{:?}", &response);
// try to create an index via the create-index route
let index = server.index("test3");
let (response, code) = index.create(None).await;
assert_eq!(403, code, "{:?}", &response);
}


@@ -0,0 +1,64 @@
mod api_keys;
mod authorization;
mod payload;
mod tenant_token;
use actix_web::http::StatusCode;
use serde_json::{json, Value};
use crate::common::Server;
impl Server {
pub fn use_api_key(&mut self, api_key: impl AsRef<str>) {
self.service.api_key = Some(api_key.as_ref().to_string());
}
/// Fetch and use the default admin key for subsequent HTTP requests.
pub async fn use_admin_key(&mut self, master_key: impl AsRef<str>) {
self.use_api_key(master_key);
let (response, code) = self.list_api_keys().await;
assert_eq!(200, code, "{:?}", response);
let admin_key = &response["results"][1]["key"];
self.use_api_key(admin_key.as_str().unwrap());
}
pub async fn add_api_key(&self, content: Value) -> (Value, StatusCode) {
let url = "/keys";
self.service.post(url, content).await
}
pub async fn get_api_key(&self, key: impl AsRef<str>) -> (Value, StatusCode) {
let url = format!("/keys/{}", key.as_ref());
self.service.get(url).await
}
pub async fn patch_api_key(&self, key: impl AsRef<str>, content: Value) -> (Value, StatusCode) {
let url = format!("/keys/{}", key.as_ref());
self.service.patch(url, content).await
}
pub async fn list_api_keys(&self) -> (Value, StatusCode) {
let url = "/keys";
self.service.get(url).await
}
pub async fn delete_api_key(&self, key: impl AsRef<str>) -> (Value, StatusCode) {
let url = format!("/keys/{}", key.as_ref());
self.service.delete(url).await
}
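/// Send a request with the given HTTP verb to `url`, with an empty JSON body
/// for POST/PUT/PATCH and no body for GET/DELETE, e.g.
/// `server.dummy_request("GET", "/tasks").await`; the authorization tests use
/// it to probe every guarded route.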
pub async fn dummy_request(
&self,
method: impl AsRef<str>,
url: impl AsRef<str>,
) -> (Value, StatusCode) {
match method.as_ref() {
"POST" => self.service.post(url, json!({})).await,
"PUT" => self.service.put(url, json!({})).await,
"PATCH" => self.service.patch(url, json!({})).await,
"GET" => self.service.get(url).await,
"DELETE" => self.service.delete(url).await,
_ => unreachable!(),
}
}
}


@@ -0,0 +1,275 @@
use actix_web::test;
use serde_json::{json, Value};
use crate::common::Server;
#[actix_rt::test]
async fn error_api_key_bad_content_types() {
let content = json!({
"indexes": ["products"],
"actions": [
"documents.add"
],
"expiresAt": "2050-11-13T00:00:00Z"
});
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/keys")
.set_payload(content.to_string())
.insert_header(("content-type", "text/plain"))
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `text/plain` is invalid. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
// patch
let req = test::TestRequest::patch()
.uri("/keys/d0552b41536279a0ad88bd595327b96f01176a60c2243e906c52ac02375f9bc4")
.set_payload(content.to_string())
.insert_header(("content-type", "text/plain"))
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `text/plain` is invalid. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
}
#[actix_rt::test]
async fn error_api_key_empty_content_types() {
let content = json!({
"indexes": ["products"],
"actions": [
"documents.add"
],
"expiresAt": "2050-11-13T00:00:00Z"
});
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/keys")
.set_payload(content.to_string())
.insert_header(("content-type", ""))
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `` is invalid. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
// patch
let req = test::TestRequest::patch()
.uri("/keys/d0552b41536279a0ad88bd595327b96f01176a60c2243e906c52ac02375f9bc4")
.set_payload(content.to_string())
.insert_header(("content-type", ""))
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `` is invalid. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
}
#[actix_rt::test]
async fn error_api_key_missing_content_types() {
let content = json!({
"indexes": ["products"],
"actions": [
"documents.add"
],
"expiresAt": "2050-11-13T00:00:00Z"
});
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/keys")
.set_payload(content.to_string())
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"A Content-Type header is missing. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "missing_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#missing_content_type");
// patch
let req = test::TestRequest::patch()
.uri("/keys/d0552b41536279a0ad88bd595327b96f01176a60c2243e906c52ac02375f9bc4")
.set_payload(content.to_string())
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"A Content-Type header is missing. Accepted values for the Content-Type header are: `application/json`"#
)
);
assert_eq!(response["code"], "missing_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#missing_content_type");
}
#[actix_rt::test]
async fn error_api_key_empty_payload() {
let content = "";
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/keys")
.set_payload(content)
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
assert_eq!(response["message"], json!(r#"A json payload is missing."#));
// patch
let req = test::TestRequest::patch()
.uri("/keys/d0552b41536279a0ad88bd595327b96f01176a60c2243e906c52ac02375f9bc4")
.set_payload(content)
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
assert_eq!(response["message"], json!(r#"A json payload is missing."#));
}
#[actix_rt::test]
async fn error_api_key_malformed_payload() {
let content = r#"{"malormed": "payload""#;
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/keys")
.set_payload(content)
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
assert_eq!(
response["message"],
json!(
r#"The json payload provided is malformed. `EOF while parsing an object at line 1 column 22`."#
)
);
// patch
let req = test::TestRequest::patch()
.uri("/keys/d0552b41536279a0ad88bd595327b96f01176a60c2243e906c52ac02375f9bc4")
.set_payload(content)
.insert_header(("Authorization", "Bearer MASTER_KEY"))
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
assert_eq!(
response["message"],
json!(
r#"The json payload provided is malformed. `EOF while parsing an object at line 1 column 22`."#
)
);
}


@@ -0,0 +1,565 @@
use std::collections::HashMap;
use ::time::format_description::well_known::Rfc3339;
use maplit::hashmap;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use time::{Duration, OffsetDateTime};
use super::authorization::{ALL_ACTIONS, AUTHORIZATIONS};
use crate::common::Server;
fn generate_tenant_token(
parent_uid: impl AsRef<str>,
parent_key: impl AsRef<str>,
mut body: HashMap<&str, Value>,
) -> String {
use jsonwebtoken::{encode, EncodingKey, Header};
let parent_uid = parent_uid.as_ref();
body.insert("apiKeyUid", json!(parent_uid));
encode(&Header::default(), &body, &EncodingKey::from_secret(parent_key.as_ref().as_bytes()))
.unwrap()
}
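// For example, as the tests and macros below do, sign a tenant token with the
// uid and secret of a parent key returned by `POST /keys`:
//
//     let token = generate_tenant_token(&uid, &key, hashmap! {
//         "searchRules" => json!(["sales"]),
//         "exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp()),
//     });
//     server.use_api_key(&token);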
static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"title": "Shazam!",
"id": "287947",
"color": ["green", "blue"]
},
{
"title": "Captain Marvel",
"id": "299537",
"color": ["yellow", "blue"]
},
{
"title": "Escape Room",
"id": "522681",
"color": ["yellow", "red"]
},
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428",
"color": ["green", "red"]
},
{
"title": "Glass",
"id": "450465",
"color": ["blue", "red"]
}
])
});
static INVALID_RESPONSE: Lazy<Value> = Lazy::new(|| {
json!({"message": "The provided API key is invalid.",
"code": "invalid_api_key",
"type": "auth",
"link": "https://docs.meilisearch.com/errors#invalid_api_key"
})
});
static ACCEPTED_KEYS: Lazy<Vec<Value>> = Lazy::new(|| {
vec![
json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
json!({
"indexes": ["*"],
"actions": ["search"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
json!({
"indexes": ["sales"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
json!({
"indexes": ["sales"],
"actions": ["search"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
]
});
static REFUSED_KEYS: Lazy<Vec<Value>> = Lazy::new(|| {
vec![
// no search action
json!({
"indexes": ["*"],
"actions": ALL_ACTIONS.iter().cloned().filter(|a| *a != "search" && *a != "*").collect::<Vec<_>>(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
json!({
"indexes": ["sales"],
"actions": ALL_ACTIONS.iter().cloned().filter(|a| *a != "search" && *a != "*").collect::<Vec<_>>(),
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
// bad index
json!({
"indexes": ["products"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
json!({
"indexes": ["products"],
"actions": ["search"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::days(1)).format(&Rfc3339).unwrap()
}),
]
});
macro_rules! compute_authorized_search {
($tenant_tokens:expr, $filter:expr, $expected_count:expr) => {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
let index = server.index("sales");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.update_settings(json!({"filterableAttributes": ["color"]}))
.await;
index.wait_task(1).await;
drop(index);
for key_content in ACCEPTED_KEYS.iter() {
server.use_api_key("MASTER_KEY");
let (response, code) = server.add_api_key(key_content.clone()).await;
assert_eq!(code, 201);
let key = response["key"].as_str().unwrap();
let uid = response["uid"].as_str().unwrap();
for tenant_token in $tenant_tokens.iter() {
let web_token = generate_tenant_token(&uid, &key, tenant_token.clone());
server.use_api_key(&web_token);
let index = server.index("sales");
index
.search(json!({ "filter": $filter }), |response, code| {
assert_eq!(
code, 200,
"{} using tenant_token: {:?} generated with parent_key: {:?}",
response, tenant_token, key_content
);
assert_eq!(
response["hits"].as_array().unwrap().len(),
$expected_count,
"{} using tenant_token: {:?} generated with parent_key: {:?}",
response,
tenant_token,
key_content
);
})
.await;
}
}
};
}
macro_rules! compute_forbidden_search {
($tenant_tokens:expr, $parent_keys:expr) => {
let mut server = Server::new_auth().await;
server.use_admin_key("MASTER_KEY").await;
let index = server.index("sales");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
drop(index);
for key_content in $parent_keys.iter() {
server.use_api_key("MASTER_KEY");
let (response, code) = server.add_api_key(key_content.clone()).await;
assert_eq!(code, 201, "{:?}", response);
let key = response["key"].as_str().unwrap();
let uid = response["uid"].as_str().unwrap();
for tenant_token in $tenant_tokens.iter() {
let web_token = generate_tenant_token(&uid, &key, tenant_token.clone());
server.use_api_key(&web_token);
let index = server.index("sales");
index
.search(json!({}), |response, code| {
assert_eq!(
response,
INVALID_RESPONSE.clone(),
"{} using tenant_token: {:?} generated with parent_key: {:?}",
response,
tenant_token,
key_content
);
assert_eq!(
code, 403,
"{} using tenant_token: {:?} generated with parent_key: {:?}",
response, tenant_token, key_content
);
})
.await;
}
}
};
}
#[actix_rt::test]
async fn search_authorized_simple_token() {
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"*": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"sales": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => Value::Null
},
];
compute_authorized_search!(tenant_tokens, {}, 5);
}
#[actix_rt::test]
async fn search_authorized_filter_token() {
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": {"filter": ["color = blue"]}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {"filter": ["color = blue"]}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
// filter on sales should override filters on *
hashmap! {
"searchRules" => json!({
"*": {"filter": "color = green"},
"sales": {"filter": "color = blue"}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {},
"sales": {"filter": "color = blue"}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {"filter": "color = green"},
"sales": {"filter": ["color = blue"]}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {},
"sales": {"filter": ["color = blue"]}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
];
compute_authorized_search!(tenant_tokens, {}, 3);
}
#[actix_rt::test]
async fn filter_search_authorized_filter_token() {
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {"filter": "color = blue"}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": {"filter": ["color = blue"]}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {"filter": ["color = blue"]}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
// filter on sales should override filters on *
hashmap! {
"searchRules" => json!({
"*": {"filter": "color = green"},
"sales": {"filter": "color = blue"}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {},
"sales": {"filter": "color = blue"}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {"filter": "color = green"},
"sales": {"filter": ["color = blue"]}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({
"*": {},
"sales": {"filter": ["color = blue"]}
}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
];
compute_authorized_search!(tenant_tokens, "color = yellow", 1);
}
#[actix_rt::test]
async fn error_search_token_forbidden_parent_key() {
let tenant_tokens = vec![
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
];
compute_forbidden_search!(tenant_tokens, REFUSED_KEYS);
}
#[actix_rt::test]
async fn error_search_forbidden_token() {
let tenant_tokens = vec![
// bad index
hashmap! {
"searchRules" => json!({"products": {}}),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["products"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"products": {}}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!({"products": Value::Null}),
"exp" => Value::Null
},
hashmap! {
"searchRules" => json!(["products"]),
"exp" => Value::Null
},
// expired token
hashmap! {
"searchRules" => json!({"*": {}}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"*": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": {}}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!({"sales": Value::Null}),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
hashmap! {
"searchRules" => json!(["sales"]),
"exp" => json!((OffsetDateTime::now_utc() - Duration::hours(1)).unix_timestamp())
},
];
compute_forbidden_search!(tenant_tokens, ACCEPTED_KEYS);
}
#[actix_rt::test]
async fn error_access_forbidden_routes() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(code, 201);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
let uid = response["uid"].as_str().unwrap();
let tenant_token = hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
};
let web_token = generate_tenant_token(uid, key, tenant_token);
server.use_api_key(&web_token);
for ((method, route), actions) in AUTHORIZATIONS.iter() {
if !actions.contains("search") {
let (response, code) = server.dummy_request(method, route).await;
assert_eq!(response, INVALID_RESPONSE.clone());
assert_eq!(code, 403);
}
}
}
#[actix_rt::test]
async fn error_access_expired_parent_key() {
use std::{thread, time};
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::seconds(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(code, 201);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
let uid = response["uid"].as_str().unwrap();
let tenant_token = hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
};
let web_token = generate_tenant_token(uid, key, tenant_token);
server.use_api_key(&web_token);
// test a search request while the parent key is not yet expired
let (response, code) = server.dummy_request("POST", "/indexes/products/search").await;
assert_ne!(response, INVALID_RESPONSE.clone());
assert_ne!(code, 403);
// wait until the key is expired.
thread::sleep(time::Duration::new(1, 0));
let (response, code) = server.dummy_request("POST", "/indexes/products/search").await;
assert_eq!(response, INVALID_RESPONSE.clone());
assert_eq!(code, 403);
}
#[actix_rt::test]
async fn error_access_modified_token() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
let content = json!({
"indexes": ["*"],
"actions": ["*"],
"expiresAt": (OffsetDateTime::now_utc() + Duration::hours(1)).format(&Rfc3339).unwrap(),
});
let (response, code) = server.add_api_key(content).await;
assert_eq!(code, 201);
assert!(response["key"].is_string());
let key = response["key"].as_str().unwrap();
let uid = response["uid"].as_str().unwrap();
let tenant_token = hashmap! {
"searchRules" => json!(["products"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
};
let web_token = generate_tenant_token(uid, key, tenant_token);
server.use_api_key(&web_token);
// test a search request while the web token is valid
let (response, code) = server.dummy_request("POST", "/indexes/products/search").await;
assert_ne!(response, INVALID_RESPONSE.clone());
assert_ne!(code, 403);
let tenant_token = hashmap! {
"searchRules" => json!(["*"]),
"exp" => json!((OffsetDateTime::now_utc() + Duration::hours(1)).unix_timestamp())
};
let alt = generate_tenant_token(uid, key, tenant_token);
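// Splice the payload of the second token between the header and signature of the first:
// the signature no longer matches the payload, so the altered token must be rejected.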
let altered_token = [
web_token.split('.').next().unwrap(),
alt.split('.').nth(1).unwrap(),
web_token.split('.').nth(2).unwrap(),
]
.join(".");
server.use_api_key(&altered_token);
let (response, code) = server.dummy_request("POST", "/indexes/products/search").await;
assert_eq!(response, INVALID_RESPONSE.clone());
assert_eq!(code, 403);
}


@ -0,0 +1,78 @@
use std::io::{Read, Write};
use actix_http::header::TryIntoHeaderPair;
use bytes::Bytes;
use flate2::read::{GzDecoder, ZlibDecoder};
use flate2::write::{GzEncoder, ZlibEncoder};
use flate2::Compression;
#[derive(Clone, Copy)]
pub enum Encoder {
Plain,
Gzip,
Deflate,
Brotli,
}
impl Encoder {
pub fn encode(&self, body: impl Into<Bytes>) -> impl Into<Bytes> {
match self {
Self::Gzip => {
let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
encoder.write_all(&body.into()).expect("Failed to encode request body");
encoder.finish().expect("Failed to encode request body")
}
Self::Deflate => {
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
encoder.write_all(&body.into()).expect("Failed to encode request body");
encoder.finish().expect("Failed to encode request body")
}
Self::Plain => Vec::from(body.into()),
Self::Brotli => {
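// 32 KiB internal buffer, compression quality 3, and a 22-bit window.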
let mut encoder = brotli::CompressorWriter::new(Vec::new(), 32 * 1024, 3, 22);
encoder.write_all(&body.into()).expect("Failed to encode request body");
encoder.flush().expect("Failed to encode request body");
encoder.into_inner()
}
}
}
pub fn decode(&self, bytes: impl Into<Bytes>) -> impl Into<Bytes> {
let mut buffer = Vec::new();
let input = bytes.into();
match self {
Self::Gzip => {
GzDecoder::new(input.as_ref())
.read_to_end(&mut buffer)
.expect("Invalid gzip stream");
}
Self::Deflate => {
ZlibDecoder::new(input.as_ref())
.read_to_end(&mut buffer)
.expect("Invalid zlib stream");
}
Self::Plain => {
buffer.write_all(input.as_ref()).expect("Unexpected memory copying issue");
}
Self::Brotli => {
brotli::Decompressor::new(input.as_ref(), 4096)
.read_to_end(&mut buffer)
.expect("Invalid brotli stream");
}
};
buffer
}
pub fn header(&self) -> Option<impl TryIntoHeaderPair> {
match self {
Self::Plain => None,
Self::Gzip => Some(("Content-Encoding", "gzip")),
Self::Deflate => Some(("Content-Encoding", "deflate")),
Self::Brotli => Some(("Content-Encoding", "br")),
}
}
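// All supported encodings, so tests can exercise every Content-Encoding.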
pub fn iterator() -> impl Iterator<Item = Self> {
[Self::Plain, Self::Gzip, Self::Deflate, Self::Brotli].iter().copied()
}
}


@ -0,0 +1,241 @@
use std::fmt::Write;
use std::panic::{catch_unwind, resume_unwind, UnwindSafe};
use std::time::Duration;
use actix_web::http::StatusCode;
use serde_json::{json, Value};
use tokio::time::sleep;
use urlencoding::encode as urlencode;
use super::encoder::Encoder;
use super::service::Service;
pub struct Index<'a> {
pub uid: String,
pub service: &'a Service,
pub encoder: Encoder,
}
#[allow(dead_code)]
impl Index<'_> {
pub async fn get(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.get(url).await
}
pub async fn load_test_set(&self) -> u64 {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
let (response, code) =
self.service.post_str(url, include_str!("../assets/test_set.json")).await;
assert_eq!(code, 202);
let update_id = response["taskUid"].as_i64().unwrap();
self.wait_task(update_id as u64).await;
update_id as u64
}
pub async fn create(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let body = json!({
"uid": self.uid,
"primaryKey": primary_key,
});
self.service.post_encoded("/indexes", body, self.encoder).await
}
pub async fn update(&self, primary_key: Option<&str>) -> (Value, StatusCode) {
let body = json!({
"primaryKey": primary_key,
});
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.patch_encoded(url, body, self.encoder).await
}
pub async fn delete(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}", urlencode(self.uid.as_ref()));
self.service.delete(url).await
}
pub async fn add_documents(
&self,
documents: Value,
primary_key: Option<&str>,
) -> (Value, StatusCode) {
let url = match primary_key {
Some(key) => {
format!("/indexes/{}/documents?primaryKey={}", urlencode(self.uid.as_ref()), key)
}
None => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
};
self.service.post_encoded(url, documents, self.encoder).await
}
pub async fn update_documents(
&self,
documents: Value,
primary_key: Option<&str>,
) -> (Value, StatusCode) {
let url = match primary_key {
Some(key) => {
format!("/indexes/{}/documents?primaryKey={}", urlencode(self.uid.as_ref()), key)
}
None => format!("/indexes/{}/documents", urlencode(self.uid.as_ref())),
};
self.service.put_encoded(url, documents, self.encoder).await
}
pub async fn wait_task(&self, update_id: u64) -> Value {
// try several times to get status, or panic to not wait forever
let url = format!("/tasks/{}", update_id);
for _ in 0..100 {
let (response, status_code) = self.service.get(&url).await;
assert_eq!(200, status_code, "response: {}", response);
if response["status"] == "succeeded" || response["status"] == "failed" {
return response;
}
// wait 0.5 seconds.
sleep(Duration::from_millis(500)).await;
}
panic!("Timeout waiting for update id");
}
pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) {
let url = format!("/tasks/{}", update_id);
self.service.get(url).await
}
pub async fn list_tasks(&self) -> (Value, StatusCode) {
let url = format!("/tasks?indexUids={}", self.uid);
self.service.get(url).await
}
pub async fn filtered_tasks(&self, types: &[&str], statuses: &[&str]) -> (Value, StatusCode) {
let mut url = format!("/tasks?indexUids={}", self.uid);
if !types.is_empty() {
let _ = write!(url, "&types={}", types.join(","));
}
if !statuses.is_empty() {
let _ = write!(url, "&statuses={}", statuses.join(","));
}
self.service.get(url).await
}
pub async fn get_document(
&self,
id: u64,
options: Option<GetDocumentOptions>,
) -> (Value, StatusCode) {
let mut url = format!("/indexes/{}/documents/{}", urlencode(self.uid.as_ref()), id);
if let Some(fields) = options.and_then(|o| o.fields) {
let _ = write!(url, "?fields={}", fields.join(","));
}
self.service.get(url).await
}
pub async fn get_all_documents(&self, options: GetAllDocumentsOptions) -> (Value, StatusCode) {
let mut url = format!("/indexes/{}/documents?", urlencode(self.uid.as_ref()));
if let Some(limit) = options.limit {
let _ = write!(url, "limit={}&", limit);
}
if let Some(offset) = options.offset {
let _ = write!(url, "offset={}&", offset);
}
if let Some(attributes_to_retrieve) = options.attributes_to_retrieve {
let _ = write!(url, "fields={}&", attributes_to_retrieve.join(","));
}
self.service.get(url).await
}
pub async fn delete_document(&self, id: u64) -> (Value, StatusCode) {
let url = format!("/indexes/{}/documents/{}", urlencode(self.uid.as_ref()), id);
self.service.delete(url).await
}
pub async fn clear_all_documents(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/documents", urlencode(self.uid.as_ref()));
self.service.delete(url).await
}
pub async fn delete_batch(&self, ids: Vec<u64>) -> (Value, StatusCode) {
let url = format!("/indexes/{}/documents/delete-batch", urlencode(self.uid.as_ref()));
self.service.post_encoded(url, serde_json::to_value(&ids).unwrap(), self.encoder).await
}
pub async fn settings(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/settings", urlencode(self.uid.as_ref()));
self.service.get(url).await
}
pub async fn update_settings(&self, settings: Value) -> (Value, StatusCode) {
let url = format!("/indexes/{}/settings", urlencode(self.uid.as_ref()));
self.service.patch_encoded(url, settings, self.encoder).await
}
pub async fn delete_settings(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/settings", urlencode(self.uid.as_ref()));
self.service.delete(url).await
}
pub async fn stats(&self) -> (Value, StatusCode) {
let url = format!("/indexes/{}/stats", urlencode(self.uid.as_ref()));
self.service.get(url).await
}
/// Performs both GET and POST search queries
pub async fn search(
&self,
query: Value,
test: impl Fn(Value, StatusCode) + UnwindSafe + Clone,
) {
let (response, code) = self.search_post(query.clone()).await;
let t = test.clone();
if let Err(e) = catch_unwind(move || t(response, code)) {
eprintln!("Error with post search");
resume_unwind(e);
}
let (response, code) = self.search_get(query).await;
if let Err(e) = catch_unwind(move || test(response, code)) {
eprintln!("Error with get search");
resume_unwind(e);
}
}
pub async fn search_post(&self, query: Value) -> (Value, StatusCode) {
let url = format!("/indexes/{}/search", urlencode(self.uid.as_ref()));
self.service.post_encoded(url, query, self.encoder).await
}
pub async fn search_get(&self, query: Value) -> (Value, StatusCode) {
let params = yaup::to_string(&query).unwrap();
let url = format!("/indexes/{}/search?{}", urlencode(self.uid.as_ref()), params);
self.service.get(url).await
}
pub async fn update_distinct_attribute(&self, value: Value) -> (Value, StatusCode) {
let url =
format!("/indexes/{}/settings/{}", urlencode(self.uid.as_ref()), "distinct-attribute");
self.service.put_encoded(url, value, self.encoder).await
}
pub async fn get_distinct_attribute(&self) -> (Value, StatusCode) {
let url =
format!("/indexes/{}/settings/{}", urlencode(self.uid.as_ref()), "distinct-attribute");
self.service.get(url).await
}
}
pub struct GetDocumentOptions {
pub fields: Option<Vec<&'static str>>,
}
#[derive(Debug, Default)]
pub struct GetAllDocumentsOptions {
pub limit: Option<usize>,
pub offset: Option<usize>,
pub attributes_to_retrieve: Option<Vec<&'static str>>,
}
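// A minimal usage sketch (values are illustrative):
// let (response, code) = index
//     .get_all_documents(GetAllDocumentsOptions {
//         limit: Some(10),
//         attributes_to_retrieve: Some(vec!["title"]),
//         ..Default::default()
//     })
//     .await;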


@ -0,0 +1,24 @@
pub mod encoder;
pub mod index;
pub mod server;
pub mod service;
pub use index::{GetAllDocumentsOptions, GetDocumentOptions};
pub use server::{default_settings, Server};
/// Performs a search test on both post and get routes
#[macro_export]
macro_rules! test_post_get_search {
($server:expr, $query:expr, |$response:ident, $status_code:ident | $block:expr) => {
let post_query: meilisearch::routes::search::SearchQueryPost =
serde_json::from_str(&$query.clone().to_string()).unwrap();
let get_query: meilisearch::routes::search::SearchQuery = post_query.into();
let get_query = ::serde_url_params::to_string(&get_query).unwrap();
let ($response, $status_code) = $server.search_get(&get_query).await;
let _ = ::std::panic::catch_unwind(|| $block)
.map_err(|e| panic!("panic in get route: {:?}", e.downcast_ref::<&str>().unwrap()));
let ($response, $status_code) = $server.search_post($query).await;
let _ = ::std::panic::catch_unwind(|| $block)
.map_err(|e| panic!("panic in post route: {:?}", e.downcast_ref::<&str>().unwrap()));
};
}


@ -0,0 +1,204 @@
#![allow(dead_code)]
use std::path::Path;
use std::time::Duration;
use actix_http::body::MessageBody;
use actix_web::dev::ServiceResponse;
use actix_web::http::StatusCode;
use byte_unit::{Byte, ByteUnit};
use clap::Parser;
use meilisearch::option::{IndexerOpts, MaxMemory, Opt};
use meilisearch::{analytics, create_app, setup_meilisearch};
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use tempfile::TempDir;
use tokio::time::sleep;
use super::index::Index;
use super::service::Service;
use crate::common::encoder::Encoder;
pub struct Server {
pub service: Service,
// hold ownership of the tempdir while we use the server instance.
_dir: Option<TempDir>,
}
pub static TEST_TEMP_DIR: Lazy<TempDir> = Lazy::new(|| TempDir::new().unwrap());
impl Server {
pub async fn new() -> Self {
let dir = TempDir::new().unwrap();
if cfg!(windows) {
std::env::set_var("TMP", TEST_TEMP_DIR.path());
} else {
std::env::set_var("TMPDIR", TEST_TEMP_DIR.path());
}
let options = default_settings(dir.path());
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir) }
}
pub async fn new_auth_with_options(mut options: Opt, dir: TempDir) -> Self {
if cfg!(windows) {
std::env::set_var("TMP", TEST_TEMP_DIR.path());
} else {
std::env::set_var("TMPDIR", TEST_TEMP_DIR.path());
}
options.master_key = Some("MASTER_KEY".to_string());
let (index_scheduler, auth) = setup_meilisearch(&options).unwrap();
let service = Service { index_scheduler, auth, options, api_key: None };
Server { service, _dir: Some(dir) }
}
pub async fn new_auth() -> Self {
let dir = TempDir::new().unwrap();
let options = default_settings(dir.path());
Self::new_auth_with_options(options, dir).await
}
pub async fn new_with_options(options: Opt) -> Result<Self, anyhow::Error> {
let (index_scheduler, auth) = setup_meilisearch(&options)?;
let service = Service { index_scheduler, auth, options, api_key: None };
Ok(Server { service, _dir: None })
}
pub async fn init_web_app(
&self,
) -> impl actix_web::dev::Service<
actix_http::Request,
Response = ServiceResponse<impl MessageBody>,
Error = actix_web::Error,
> {
actix_web::test::init_service(create_app(
self.service.index_scheduler.clone().into(),
self.service.auth.clone(),
self.service.options.clone(),
analytics::MockAnalytics::new(&self.service.options),
true,
))
.await
}
/// Returns a view of an index. There is no guarantee that the index exists.
pub fn index(&self, uid: impl AsRef<str>) -> Index<'_> {
self.index_with_encoder(uid, Encoder::Plain)
}
pub fn index_with_encoder(&self, uid: impl AsRef<str>, encoder: Encoder) -> Index<'_> {
Index { uid: uid.as_ref().to_string(), service: &self.service, encoder }
}
pub async fn list_indexes(
&self,
offset: Option<usize>,
limit: Option<usize>,
) -> (Value, StatusCode) {
let (offset, limit) = (
offset.map(|offset| format!("offset={offset}")),
limit.map(|limit| format!("limit={limit}")),
);
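// Join both query parameters when both are set; otherwise keep whichever one is present.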
let query_parameter = offset
.as_ref()
.zip(limit.as_ref())
.map(|(offset, limit)| format!("{offset}&{limit}"))
.or_else(|| offset.xor(limit));
if let Some(query_parameter) = query_parameter {
self.service.get(format!("/indexes?{query_parameter}")).await
} else {
self.service.get("/indexes").await
}
}
pub async fn version(&self) -> (Value, StatusCode) {
self.service.get("/version").await
}
pub async fn stats(&self) -> (Value, StatusCode) {
self.service.get("/stats").await
}
pub async fn tasks(&self) -> (Value, StatusCode) {
self.service.get("/tasks").await
}
pub async fn tasks_filter(&self, filter: Value) -> (Value, StatusCode) {
self.service.get(format!("/tasks?{}", yaup::to_string(&filter).unwrap())).await
}
pub async fn get_dump_status(&self, uid: &str) -> (Value, StatusCode) {
self.service.get(format!("/dumps/{}/status", uid)).await
}
pub async fn create_dump(&self) -> (Value, StatusCode) {
self.service.post("/dumps", json!(null)).await
}
pub async fn index_swap(&self, value: Value) -> (Value, StatusCode) {
self.service.post("/swap-indexes", value).await
}
pub async fn cancel_tasks(&self, value: Value) -> (Value, StatusCode) {
self.service
.post(format!("/tasks/cancel?{}", yaup::to_string(&value).unwrap()), json!(null))
.await
}
pub async fn delete_tasks(&self, value: Value) -> (Value, StatusCode) {
self.service.delete(format!("/tasks?{}", yaup::to_string(&value).unwrap())).await
}
pub async fn wait_task(&self, update_id: u64) -> Value {
// try several times to get status, or panic to not wait forever
let url = format!("/tasks/{}", update_id);
for _ in 0..100 {
let (response, status_code) = self.service.get(&url).await;
assert_eq!(200, status_code, "response: {}", response);
if response["status"] == "succeeded" || response["status"] == "failed" {
return response;
}
// wait 0.5 seconds.
sleep(Duration::from_millis(500)).await;
}
panic!("Timeout waiting for update id");
}
pub async fn get_task(&self, update_id: u64) -> (Value, StatusCode) {
let url = format!("/tasks/{}", update_id);
self.service.get(url).await
}
}
pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
Opt {
db_path: dir.as_ref().join("db"),
dump_dir: dir.as_ref().join("dumps"),
env: "development".to_owned(),
#[cfg(all(not(debug_assertions), feature = "analytics"))]
no_analytics: true,
max_index_size: Byte::from_unit(100.0, ByteUnit::MiB).unwrap(),
max_task_db_size: Byte::from_unit(1.0, ByteUnit::GiB).unwrap(),
http_payload_size_limit: Byte::from_unit(10.0, ByteUnit::MiB).unwrap(),
snapshot_dir: ".".into(),
indexer_options: IndexerOpts {
// memory has to be unlimited because several Meilisearch instances run in the test context.
max_indexing_memory: MaxMemory::unlimited(),
..Parser::parse_from(None as Option<&str>)
},
#[cfg(feature = "metrics")]
enable_metrics_route: true,
..Parser::parse_from(None as Option<&str>)
}
}


@ -0,0 +1,123 @@
use std::sync::Arc;
use actix_web::http::header::ContentType;
use actix_web::http::StatusCode;
use actix_web::test;
use actix_web::test::TestRequest;
use index_scheduler::IndexScheduler;
use meilisearch::{analytics, create_app, Opt};
use meilisearch_auth::AuthController;
use serde_json::Value;
use crate::common::encoder::Encoder;
pub struct Service {
pub index_scheduler: Arc<IndexScheduler>,
pub auth: AuthController,
pub options: Opt,
pub api_key: Option<String>,
}
impl Service {
pub async fn post(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
self.post_encoded(url, body, Encoder::Plain).await
}
pub async fn post_encoded(
&self,
url: impl AsRef<str>,
body: Value,
encoder: Encoder,
) -> (Value, StatusCode) {
let mut req = test::TestRequest::post().uri(url.as_ref());
req = self.encode(req, body, encoder);
self.request(req).await
}
/// Sends a test POST request with a text body and a `content-type: application/json` header.
pub async fn post_str(
&self,
url: impl AsRef<str>,
body: impl AsRef<str>,
) -> (Value, StatusCode) {
let req = test::TestRequest::post()
.uri(url.as_ref())
.set_payload(body.as_ref().to_string())
.insert_header(("content-type", "application/json"));
self.request(req).await
}
pub async fn get(&self, url: impl AsRef<str>) -> (Value, StatusCode) {
let req = test::TestRequest::get().uri(url.as_ref());
self.request(req).await
}
pub async fn put(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
self.put_encoded(url, body, Encoder::Plain).await
}
pub async fn put_encoded(
&self,
url: impl AsRef<str>,
body: Value,
encoder: Encoder,
) -> (Value, StatusCode) {
let mut req = test::TestRequest::put().uri(url.as_ref());
req = self.encode(req, body, encoder);
self.request(req).await
}
pub async fn patch(&self, url: impl AsRef<str>, body: Value) -> (Value, StatusCode) {
self.patch_encoded(url, body, Encoder::Plain).await
}
pub async fn patch_encoded(
&self,
url: impl AsRef<str>,
body: Value,
encoder: Encoder,
) -> (Value, StatusCode) {
let mut req = test::TestRequest::patch().uri(url.as_ref());
req = self.encode(req, body, encoder);
self.request(req).await
}
pub async fn delete(&self, url: impl AsRef<str>) -> (Value, StatusCode) {
let req = test::TestRequest::delete().uri(url.as_ref());
self.request(req).await
}
pub async fn request(&self, mut req: test::TestRequest) -> (Value, StatusCode) {
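// Every request builds a fresh test app sharing this service's scheduler, auth controller, and options.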
let app = test::init_service(create_app(
self.index_scheduler.clone().into(),
self.auth.clone(),
self.options.clone(),
analytics::MockAnalytics::new(&self.options),
true,
))
.await;
if let Some(api_key) = &self.api_key {
req = req.insert_header(("Authorization", ["Bearer ", api_key].concat()));
}
let req = req.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response = serde_json::from_slice(&body).unwrap_or_default();
(response, status_code)
}
fn encode(&self, req: TestRequest, body: Value, encoder: Encoder) -> TestRequest {
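// Serialize the body to JSON, compress it with the selected encoder, and attach the
// matching Content-Encoding header when one applies.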
let bytes = serde_json::to_string(&body).expect("Failed to serialize test data to json");
let encoded_body = encoder.encode(bytes);
let header = encoder.header();
match header {
Some(header) => req.insert_header(header),
None => req,
}
.set_payload(encoded_body)
.insert_header(ContentType::json())
}
}


@ -0,0 +1,158 @@
#![allow(dead_code)]
mod common;
use actix_web::test;
use serde_json::{json, Value};
use crate::common::Server;
enum HttpVerb {
Put,
Patch,
Post,
Get,
Delete,
}
impl HttpVerb {
fn test_request(&self) -> test::TestRequest {
match self {
HttpVerb::Put => test::TestRequest::put(),
HttpVerb::Patch => test::TestRequest::patch(),
HttpVerb::Post => test::TestRequest::post(),
HttpVerb::Get => test::TestRequest::get(),
HttpVerb::Delete => test::TestRequest::delete(),
}
}
}
#[actix_rt::test]
async fn error_json_bad_content_type() {
use HttpVerb::{Patch, Post, Put};
let routes = [
// all the routes except the dumps, which can be created without any body or content-type,
// and the search, which is not strict json
(Post, "/indexes"),
(Post, "/indexes/doggo/documents/delete-batch"),
(Post, "/indexes/doggo/search"),
(Patch, "/indexes/doggo/settings"),
(Put, "/indexes/doggo/settings/displayed-attributes"),
(Put, "/indexes/doggo/settings/distinct-attribute"),
(Put, "/indexes/doggo/settings/filterable-attributes"),
(Put, "/indexes/doggo/settings/ranking-rules"),
(Put, "/indexes/doggo/settings/searchable-attributes"),
(Put, "/indexes/doggo/settings/sortable-attributes"),
(Put, "/indexes/doggo/settings/stop-words"),
(Put, "/indexes/doggo/settings/synonyms"),
];
let bad_content_types = [
"application/csv",
"application/x-ndjson",
"application/x-www-form-urlencoded",
"text/plain",
"json",
"application",
"json/application",
];
let document = "{}";
let server = Server::new().await;
let app = server.init_web_app().await;
for (verb, route) in routes {
// Good content-type; we probably get an error since we didn't send anything in the json,
// so we only ensure we didn't get a bad media type error.
let req = verb
.test_request()
.uri(route)
.set_payload(document)
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
assert_ne!(status_code, 415,
"calling the route `{}` with a content-type of json isn't supposed to throw a bad media type error", route);
// No content-type.
let req = verb.test_request().uri(route).set_payload(document).to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415, "calling the route `{}` without content-type is supposed to throw a bad media type error", route);
assert_eq!(
response,
json!({
"message": r#"A Content-Type header is missing. Accepted values for the Content-Type header are: `application/json`"#,
"code": "missing_content_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#missing_content_type",
}),
"when calling the route `{}` with no content-type",
route,
);
for bad_content_type in bad_content_types {
// Always bad content-type
let req = verb
.test_request()
.uri(route)
.set_payload(document.to_string())
.insert_header(("content-type", bad_content_type))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
let expected_error_message = format!(
r#"The Content-Type `{}` is invalid. Accepted values for the Content-Type header are: `application/json`"#,
bad_content_type
);
assert_eq!(
response,
json!({
"message": expected_error_message,
"code": "invalid_content_type",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_content_type",
}),
"when calling the route `{}` with a content-type of `{}`",
route,
bad_content_type,
);
}
}
}
#[actix_rt::test]
async fn extract_actual_content_type() {
let route = "/indexes/doggo/documents";
let documents = "[{}]";
let server = Server::new().await;
let app = server.init_web_app().await;
// Good content-type; we probably get an error since we didn't send anything in the json,
// so we only ensure we didn't get a bad media type error.
let req = test::TestRequest::post()
.uri(route)
.set_payload(documents)
.insert_header(("content-type", "application/json; charset=utf-8"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
assert_ne!(status_code, 415,
"calling the route `{}` with a content-type of json isn't supposed to throw a bad media type error", route);
let req = test::TestRequest::put()
.uri(route)
.set_payload(documents)
.insert_header(("content-type", "application/json; charset=latin-1"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
assert_ne!(status_code, 415,
"calling the route `{}` with a content-type of json isn't supposed to throw a bad media type error", route);
}


@ -0,0 +1,25 @@
use crate::common::Server;
#[cfg(feature = "mini-dashboard")]
#[actix_rt::test]
async fn dashboard_assets_load() {
let server = Server::new().await;
mod generated {
include!(concat!(env!("OUT_DIR"), "/generated.rs"));
}
let generated = generated::generate();
for (path, _) in generated.into_iter() {
let path = if path == "index.html" {
// "index.html" redirects to "/"
"/".to_owned()
} else {
"/".to_owned() + path
};
let (_, status_code) = server.service.get(&path).await;
assert_eq!(status_code, 200);
}
}


@ -0,0 +1,988 @@
use actix_web::test;
use serde_json::{json, Value};
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, Server};
/// This is the basic usage of our API, and every other test uses the content-type application/json
#[actix_rt::test]
async fn add_documents_test_json_content_types() {
let document = json!([
{
"id": 1,
"content": "Bouvier Bernois",
}
]);
// this is what is expected and should work
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 0);
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 1);
}
/// Here we try to send a single document instead of an array with a single document inside.
#[actix_rt::test]
async fn add_single_document_test_json_content_types() {
let document = json!({
"id": 1,
"content": "Bouvier Bernois",
});
// this is what is expected and should work
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 0);
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 1);
}
/// Here we try sending an encoded (compressed) document request
#[actix_rt::test]
async fn add_single_document_gzip_encoded() {
let document = json!({
"id": 1,
"content": "Bouvier Bernois",
});
// this is what is expected and should work
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let document = serde_json::to_string(&document).unwrap();
let encoder = Encoder::Gzip;
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(encoder.encode(document.clone()))
.insert_header(("content-type", "application/json"))
.insert_header(encoder.header().unwrap())
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 0);
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(encoder.encode(document))
.insert_header(("content-type", "application/json"))
.insert_header(encoder.header().unwrap())
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], 1);
}
/// Here we try a document request with every supported encoding
#[actix_rt::test]
async fn add_single_document_with_every_encoding() {
let document = json!({
"id": 1,
"content": "Bouvier Bernois",
});
// this is what is expected and should work
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let document = serde_json::to_string(&document).unwrap();
for (task_uid, encoder) in Encoder::iterator().enumerate() {
let mut req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(encoder.encode(document.clone()))
.insert_header(("content-type", "application/json"));
req = match encoder.header() {
Some(header) => req.insert_header(header),
None => req,
};
let req = req.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 202);
assert_eq!(response["taskUid"], task_uid);
}
}
/// any other content-type must be refused
#[actix_rt::test]
async fn error_add_documents_test_bad_content_types() {
let document = json!([
{
"id": 1,
"content": "Leonberg",
}
]);
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/plain"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `text/plain` is invalid. Accepted values for the Content-Type header are: `application/json`, `application/x-ndjson`, `text/csv`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/plain"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"The Content-Type `text/plain` is invalid. Accepted values for the Content-Type header are: `application/json`, `application/x-ndjson`, `text/csv`"#
)
);
assert_eq!(response["code"], "invalid_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#invalid_content_type");
}
/// a missing content-type must be refused
#[actix_rt::test]
async fn error_add_documents_test_no_content_type() {
let document = json!([
{
"id": 1,
"content": "Leonberg",
}
]);
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"A Content-Type header is missing. Accepted values for the Content-Type header are: `application/json`, `application/x-ndjson`, `text/csv`"#
)
);
assert_eq!(response["code"], "missing_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#missing_content_type");
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 415);
assert_eq!(
response["message"],
json!(
r#"A Content-Type header is missing. Accepted values for the Content-Type header are: `application/json`, `application/x-ndjson`, `text/csv`"#
)
);
assert_eq!(response["code"], "missing_content_type");
assert_eq!(response["type"], "invalid_request");
assert_eq!(response["link"], "https://docs.meilisearch.com/errors#missing_content_type");
}
#[actix_rt::test]
async fn error_add_malformed_csv_documents() {
let document = "id, content\n1234, hello, world\n12, hello world";
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/csv"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `csv` payload provided is malformed: `CSV error: record 1 (line: 2, byte: 12): found record with 3 fields, but the previous record has 2 fields`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/csv"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `csv` payload provided is malformed: `CSV error: record 1 (line: 2, byte: 12): found record with 3 fields, but the previous record has 2 fields`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
}
#[actix_rt::test]
async fn error_add_malformed_json_documents() {
let document = r#"[{"id": 1}, {id: 2}]"#;
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `json` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 1 column 14`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `json` payload provided is malformed. `Couldn't serialize document value: key must be a string at line 1 column 14`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
// truncate
// length = 100
let long = "0123456789".repeat(10);
let document = format!("\"{}\"", long);
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document)
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `json` payload provided is malformed. `Couldn't serialize document value: data are neither an object nor a list of objects`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
// add one more char to the long string to check that truncation works.
let document = format!("\"{}m\"", long);
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document)
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!("The `json` payload provided is malformed. `Couldn't serialize document value: data are neither an object nor a list of objects`.")
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
}
#[actix_rt::test]
async fn error_add_malformed_ndjson_documents() {
let document = "{\"id\": 1}\n{id: 2}";
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/x-ndjson"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!(
r#"The `ndjson` payload provided is malformed. `Couldn't serialize document value: trailing characters at line 2 column 1`."#
)
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/x-ndjson"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(
response["message"],
json!("The `ndjson` payload provided is malformed. `Couldn't serialize document value: trailing characters at line 2 column 1`.")
);
assert_eq!(response["code"], json!("malformed_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#malformed_payload"));
}
#[actix_rt::test]
async fn error_add_missing_payload_csv_documents() {
let document = "";
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/csv"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A csv payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "text/csv"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A csv payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
}
#[actix_rt::test]
async fn error_add_missing_payload_json_documents() {
let document = "";
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A json payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/json"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A json payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
}
#[actix_rt::test]
async fn error_add_missing_payload_ndjson_documents() {
let document = "";
let server = Server::new().await;
let app = server.init_web_app().await;
// post
let req = test::TestRequest::post()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/x-ndjson"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A ndjson payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
// put
let req = test::TestRequest::put()
.uri("/indexes/dog/documents")
.set_payload(document.to_string())
.insert_header(("content-type", "application/x-ndjson"))
.to_request();
let res = test::call_service(&app, req).await;
let status_code = res.status();
let body = test::read_body(res).await;
let response: Value = serde_json::from_slice(&body).unwrap_or_default();
assert_eq!(status_code, 400);
assert_eq!(response["message"], json!(r#"A ndjson payload is missing."#));
assert_eq!(response["code"], json!("missing_payload"));
assert_eq!(response["type"], json!("invalid_request"));
assert_eq!(response["link"], json!("https://docs.meilisearch.com/errors#missing_payload"));
}
#[actix_rt::test]
async fn add_documents_no_index_creation() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"id": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 0);
/*
* currently we don't check these fields to stay iso with Meilisearch
* assert_eq!(response["status"], "pending");
* assert_eq!(response["meta"]["type"], "DocumentsAddition");
* assert_eq!(response["meta"]["format"], "Json");
* assert_eq!(response["meta"]["primaryKey"], Value::Null);
* assert!(response.get("enqueuedAt").is_some());
*/
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
assert_eq!(response["uid"], 0);
assert_eq!(response["type"], "documentAdditionOrUpdate");
assert_eq!(response["details"]["receivedDocuments"], 1);
assert_eq!(response["details"]["indexedDocuments"], 1);
let processed_at =
OffsetDateTime::parse(response["finishedAt"].as_str().unwrap(), &Rfc3339).unwrap();
let enqueued_at =
OffsetDateTime::parse(response["enqueuedAt"].as_str().unwrap(), &Rfc3339).unwrap();
assert!(processed_at > enqueued_at);
// index was created, and primary key was inferred.
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["primaryKey"], "id");
}
#[actix_rt::test]
async fn error_document_add_create_index_bad_uid() {
let server = Server::new().await;
let index = server.index("883 fj!");
let (response, code) = index.add_documents(json!([{"id": 1}]), None).await;
let expected_response = json!({
"message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
});
assert_eq!(code, 400);
assert_eq!(response, expected_response);
}
#[actix_rt::test]
async fn document_addition_with_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"primary": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, Some("primary")).await;
assert_eq!(code, 202, "response: {}", response);
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
assert_eq!(response["uid"], 0);
assert_eq!(response["type"], "documentAdditionOrUpdate");
assert_eq!(response["details"]["receivedDocuments"], 1);
assert_eq!(response["details"]["indexedDocuments"], 1);
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["primaryKey"], "primary");
}
#[actix_rt::test]
async fn replace_document() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"doc_id": 1,
"content": "foo",
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202, "response: {}", response);
index.wait_task(0).await;
let documents = json!([
{
"doc_id": 1,
"other": "bar",
}
]);
let (_response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
assert_eq!(response.to_string(), r##"{"doc_id":1,"other":"bar"}"##);
}
#[actix_rt::test]
async fn add_no_documents() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.add_documents(json!([]), None).await;
assert_eq!(code, 202);
}
#[actix_rt::test]
async fn add_larger_dataset() {
let server = Server::new().await;
let index = server.index("test");
let update_id = index.load_test_set().await;
let (response, code) = index.get_task(update_id).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "documentAdditionOrUpdate");
assert_eq!(response["details"]["indexedDocuments"], 77);
assert_eq!(response["details"]["receivedDocuments"], 77);
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions { limit: Some(1000), ..Default::default() })
.await;
assert_eq!(code, 200, "failed with `{}`", response);
assert_eq!(response["results"].as_array().unwrap().len(), 77);
}
#[actix_rt::test]
async fn error_add_documents_bad_document_id() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("docid")).await;
let documents = json!([
{
"docid": "foo & bar",
"content": "foobar"
}
]);
index.add_documents(documents, None).await;
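// the index creation above was task 0, so this document addition is task 1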
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], json!("failed"));
assert_eq!(
response["error"]["message"],
json!(
r#"Document identifier `"foo & bar"` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_)."#
)
);
assert_eq!(response["error"]["code"], json!("invalid_document_id"));
assert_eq!(response["error"]["type"], json!("invalid_request"));
assert_eq!(
response["error"]["link"],
json!("https://docs.meilisearch.com/errors#invalid_document_id")
);
}
#[actix_rt::test]
async fn error_add_documents_missing_document_id() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("docid")).await;
let documents = json!([
{
"id": "11",
"content": "foobar"
}
]);
index.add_documents(documents, None).await;
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "failed");
assert_eq!(
response["error"]["message"],
json!(r#"Document doesn't have a `docid` attribute: `{"id":"11","content":"foobar"}`."#)
);
assert_eq!(response["error"]["code"], json!("missing_document_id"));
assert_eq!(response["error"]["type"], json!("invalid_request"));
assert_eq!(
response["error"]["link"],
json!("https://docs.meilisearch.com/errors#missing_document_id")
);
}
#[actix_rt::test]
#[ignore] // TODO: fix in another PR: this does not trigger any error.
async fn error_document_field_limit_reached() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("id")).await;
let mut big_object = std::collections::HashMap::new();
big_object.insert("id".to_owned(), "wow");
for i in 0..65535 {
let key = i.to_string();
big_object.insert(key, "I am a text!");
}
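// the document now holds 65,536 fields (`id` plus 65,535 generated ones), which should exceed the 65,535-field limit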
let documents = json!([big_object]);
let (_response, code) = index.update_documents(documents, Some("id")).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
// the addition is expected to fail because the document exceeds the field limit.
assert_eq!(response["status"], "failed");
let expected_error = json!({
"message": "A document cannot contain more than 65,535 fields.",
"code": "document_fields_limit_reached",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#document_fields_limit_reached"
});
assert_eq!(response["error"], expected_error);
}
#[actix_rt::test]
async fn add_documents_invalid_geo_field() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("id")).await;
index.update_settings(json!({"sortableAttributes": ["_geo"]})).await;
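// tasks so far: 0 = index creation, 1 = settings update; the addition below is task 2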
let documents = json!([
{
"id": "11",
"_geo": "foobar"
}
]);
index.add_documents(documents, None).await;
index.wait_task(2).await;
let (response, code) = index.get_task(2).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn error_add_documents_payload_size() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("id")).await;
let document = json!(
{
"id": "11",
"content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec metus erat, consequat in blandit venenatis, ultricies eu ipsum. Etiam luctus elit et mollis ultrices. Nam turpis risus, dictum non eros in, eleifend feugiat elit. Morbi non dolor pulvinar, sagittis mi sed, ultricies lorem. Nulla ultricies sem metus. Donec at suscipit quam, sed elementum mi. Suspendisse potenti. Fusce pharetra turpis tortor, sed eleifend odio dapibus ut. Nulla facilisi. Suspendisse elementum, dui eget aliquet dignissim, ex tellus aliquam nisl, at eleifend nisl metus tempus diam. Mauris fermentum sollicitudin efficitur. Donec dignissim est vitae elit finibus faucibus"
}
);
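// duplicate the document enough times for the total payload to exceed the size limit configured for the test server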
let documents: Vec<_> = (0..16000).map(|_| document.clone()).collect();
let documents = json!(documents);
let (response, code) = index.add_documents(documents, None).await;
let expected_response = json!({
"message": "The provided payload reached the size limit.",
"code": "payload_too_large",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#payload_too_large"
});
assert_eq!(response, expected_response);
assert_eq!(code, 413);
}
#[actix_rt::test]
async fn error_primary_key_inference() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"title": "11",
"desc": "foobar"
}
]);
index.add_documents(documents, None).await;
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "failed");
let expected_error = json!({
"message": r#"The primary key inference process failed because the engine did not find any fields containing `id` substring in their name. If your document identifier does not contain any `id` substring, you can set the primary key of the index."#,
"code": "primary_key_inference_failed",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#primary_key_inference_failed"
});
assert_eq!(response["error"], expected_error);
}
#[actix_rt::test]
async fn add_documents_with_primary_key_twice() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"title": "11",
"desc": "foobar"
}
]);
index.add_documents(documents.clone(), Some("title")).await;
index.wait_task(0).await;
let (response, _code) = index.get_task(0).await;
assert_eq!(response["status"], "succeeded");
index.add_documents(documents, Some("title")).await;
index.wait_task(1).await;
let (response, _code) = index.get_task(1).await;
assert_eq!(response["status"], "succeeded");
}
#[actix_rt::test]
async fn batch_several_documents_addition() {
let server = Server::new().await;
let index = server.index("test");
let mut documents: Vec<_> = (0..150usize)
.map(|id| {
json!(
{
"id": id,
"title": "foo",
"desc": "bar"
}
)
})
.collect();
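// poison document 100: without the `id` primary key, the whole 30-document chunk containing it fails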
documents[100] = json!({"title": "error", "desc": "error"});
// enqueue the documents in chunks of 30
let mut waiter = Vec::new();
for chunk in documents.chunks(30) {
waiter.push(index.add_documents(json!(chunk), Some("id")));
}
// wait for the first batch of tasks to finish
futures::future::join_all(waiter).await;
index.wait_task(4).await;
// run a second batch in which most chunks fail
documents[40] = json!({"title": "error", "desc": "error"});
documents[70] = json!({"title": "error", "desc": "error"});
documents[130] = json!({"title": "error", "desc": "error"});
let mut waiter = Vec::new();
for chunk in documents.chunks(30) {
waiter.push(index.add_documents(json!(chunk), Some("id")));
}
// wait for the second batch of tasks to finish
futures::future::join_all(waiter).await;
index.wait_task(9).await;
let (response, _code) = index.filtered_tasks(&[], &["failed"]).await;
// Check that exactly 5 tasks failed: 1 from the first batch and 4 from the second
println!("{}", &response);
assert_eq!(response["results"].as_array().unwrap().len(), 5);
// Check that there are exactly 120 documents (150 - 30) in the index
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions { limit: Some(200), ..Default::default() })
.await;
assert_eq!(code, 200, "failed with `{}`", response);
assert_eq!(response["results"].as_array().unwrap().len(), 120);
}

View file

@ -0,0 +1,137 @@
use serde_json::json;
use crate::common::{GetAllDocumentsOptions, Server};
#[actix_rt::test]
async fn delete_one_document_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.delete_document(0).await;
assert_eq!(code, 202);
let response = index.wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn delete_one_unexisting_document() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let (response, code) = index.delete_document(0).await;
assert_eq!(code, 202, "{}", response);
let update = index.wait_task(0).await;
assert_eq!(update["status"], "succeeded");
}
#[actix_rt::test]
async fn delete_one_document() {
let server = Server::new().await;
let index = server.index("test");
index.add_documents(json!([{ "id": 0, "content": "foobar" }]), None).await;
index.wait_task(0).await;
let (_response, code) = server.index("test").delete_document(0).await;
assert_eq!(code, 202);
index.wait_task(1).await;
let (_response, code) = index.get_document(0, None).await;
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn clear_all_documents_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let response = index.wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn clear_all_documents() {
let server = Server::new().await;
let index = server.index("test");
index
.add_documents(
json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }]),
None,
)
.await;
index.wait_task(0).await;
let (_response, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(1).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
}
#[actix_rt::test]
async fn clear_all_documents_empty_index() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let (_response, code) = index.clear_all_documents().await;
assert_eq!(code, 202);
let _update = index.wait_task(0).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
}
#[actix_rt::test]
async fn error_delete_batch_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.delete_batch(vec![]).await;
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
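// the deletion itself is accepted (202); the `index_not_found` error only surfaces on the task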
assert_eq!(code, 202);
let response = index.wait_task(0).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn delete_batch() {
let server = Server::new().await;
let index = server.index("test");
index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(0).await;
let (_response, code) = index.delete_batch(vec![1, 0]).await;
assert_eq!(code, 202);
let _update = index.wait_task(1).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 1);
assert_eq!(response["results"][0]["id"], json!(3));
}
#[actix_rt::test]
async fn delete_no_document_batch() {
let server = Server::new().await;
let index = server.index("test");
index.add_documents(json!([{ "id": 1, "content": "foobar" }, { "id": 0, "content": "foobar" }, { "id": 3, "content": "foobar" }]), Some("id")).await;
index.wait_task(0).await;
let (_response, code) = index.delete_batch(vec![]).await;
assert_eq!(code, 202, "{}", _response);
let _update = index.wait_task(1).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 3);
}

View file

@ -0,0 +1,380 @@
use actix_web::test;
use http::header::ACCEPT_ENCODING;
use serde_json::{json, Value};
use urlencoding::encode as urlencode;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, GetDocumentOptions, Server};
// TODO: partial test since we are testing errors, and errors are not yet fully implemented in
// transplant
#[actix_rt::test]
async fn get_unexisting_index_single_document() {
let server = Server::new().await;
let (_response, code) = server.index("test").get_document(1, None).await;
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn error_get_unexisting_document() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
index.wait_task(0).await;
let (response, code) = index.get_document(1, None).await;
let expected_response = json!({
"message": "Document `1` not found.",
"code": "document_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#document_not_found"
});
assert_eq!(response, expected_response);
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn get_document() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let documents = serde_json::json!([
{
"id": 0,
"nested": { "content": "foobar" },
}
]);
let (_, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
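// task 0 was the index creation; the addition just enqueued is task 1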
index.wait_task(1).await;
let (response, code) = index.get_document(0, None).await;
assert_eq!(code, 200);
assert_eq!(
response,
serde_json::json!({
"id": 0,
"nested": { "content": "foobar" },
})
);
let (response, code) =
index.get_document(0, Some(GetDocumentOptions { fields: Some(vec!["id"]) })).await;
assert_eq!(code, 200);
assert_eq!(
response,
serde_json::json!({
"id": 0,
})
);
let (response, code) = index
.get_document(0, Some(GetDocumentOptions { fields: Some(vec!["nested.content"]) }))
.await;
assert_eq!(code, 200);
assert_eq!(
response,
serde_json::json!({
"nested": { "content": "foobar" },
})
);
}
#[actix_rt::test]
async fn error_get_unexisting_index_all_documents() {
let server = Server::new().await;
let (response, code) =
server.index("test").get_all_documents(GetAllDocumentsOptions::default()).await;
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(response, expected_response);
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn get_no_document() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert!(response["results"].as_array().unwrap().is_empty());
}
#[actix_rt::test]
async fn get_all_documents_no_options() {
let server = Server::new().await;
let index = server.index("test");
index.load_test_set().await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
let first = serde_json::json!({
"id":0,
"isActive":false,
"balance":"$2,668.55",
"picture":"http://placehold.it/32x32",
"age":36,
"color":"Green",
"name":"Lucas Hess",
"gender":"male",
"email":"lucashess@chorizon.com",
"phone":"+1 (998) 478-2597",
"address":"412 Losee Terrace, Blairstown, Georgia, 2825",
"about":"Mollit ad in exercitation quis. Anim est ut consequat fugiat duis magna aliquip velit nisi. Commodo eiusmod est consequat proident consectetur aliqua enim fugiat. Aliqua adipisicing laboris elit proident enim veniam laboris mollit. Incididunt fugiat minim ad nostrud deserunt tempor in. Id irure officia labore qui est labore nulla nisi. Magna sit quis tempor esse consectetur amet labore duis aliqua consequat.\r\n",
"registered":"2016-06-21T09:30:25 -02:00",
"latitude":-44.174957,
"longitude":-145.725388,
"tags":["bug"
,"bug"]});
assert_eq!(first, arr[0]);
}
#[actix_rt::test]
async fn get_all_documents_no_options_with_response_compression() {
let server = Server::new().await;
let index_uid = "test";
let index = server.index(index_uid);
index.load_test_set().await;
let app = server.init_web_app().await;
let req = test::TestRequest::get()
.uri(&format!("/indexes/{}/documents?", urlencode(index_uid)))
.insert_header((ACCEPT_ENCODING, "gzip"))
.to_request();
let res = test::call_service(&app, req).await;
assert_eq!(res.status(), 200);
let bytes = test::read_body(res).await;
let decoded = Encoder::Gzip.decode(bytes);
let parsed_response =
serde_json::from_slice::<Value>(decoded.into().as_ref()).expect("Expecting valid json");
let arr = parsed_response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
}
#[actix_rt::test]
async fn test_get_all_documents_limit() {
let server = Server::new().await;
let index = server.index("test");
index.load_test_set().await;
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions { limit: Some(5), ..Default::default() })
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 5);
assert_eq!(response["results"][0]["id"], json!(0));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["limit"], json!(5));
assert_eq!(response["total"], json!(77));
}
#[actix_rt::test]
async fn test_get_all_documents_offset() {
let server = Server::new().await;
let index = server.index("test");
index.load_test_set().await;
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions { offset: Some(5), ..Default::default() })
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
assert_eq!(response["results"][0]["id"], json!(5));
assert_eq!(response["offset"], json!(5));
assert_eq!(response["limit"], json!(20));
assert_eq!(response["total"], json!(77));
}
#[actix_rt::test]
async fn test_get_all_documents_attributes_to_retrieve() {
let server = Server::new().await;
let index = server.index("test");
index.load_test_set().await;
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec!["name"]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 1);
assert!(results["name"] != json!(null));
}
assert_eq!(response["offset"], json!(0));
assert_eq!(response["limit"], json!(20));
assert_eq!(response["total"], json!(77));
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec![]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 0);
}
assert_eq!(response["offset"], json!(0));
assert_eq!(response["limit"], json!(20));
assert_eq!(response["total"], json!(77));
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec!["wrong"]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 0);
}
assert_eq!(response["offset"], json!(0));
assert_eq!(response["limit"], json!(20));
assert_eq!(response["total"], json!(77));
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec!["name", "tags"]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 2);
assert!(results["name"] != json!(null));
assert!(results["tags"] != json!(null));
}
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec!["*"]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 16);
}
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions {
attributes_to_retrieve: Some(vec!["*", "wrong"]),
..Default::default()
})
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
for results in response["results"].as_array().unwrap() {
assert_eq!(results.as_object().unwrap().keys().count(), 16);
}
}
#[actix_rt::test]
async fn get_document_s_nested_attributes_to_retrieve() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let documents = json!([
{
"id": 0,
"content.truc": "foobar",
},
{
"id": 1,
"content": {
"truc": "foobar",
"machin": "bidule",
},
},
]);
let (_, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
index.wait_task(1).await;
let (response, code) =
index.get_document(0, Some(GetDocumentOptions { fields: Some(vec!["content"]) })).await;
assert_eq!(code, 200);
assert_eq!(response, json!({}));
let (response, code) =
index.get_document(1, Some(GetDocumentOptions { fields: Some(vec!["content"]) })).await;
assert_eq!(code, 200);
assert_eq!(
response,
json!({
"content": {
"truc": "foobar",
"machin": "bidule",
},
})
);
let (response, code) = index
.get_document(0, Some(GetDocumentOptions { fields: Some(vec!["content.truc"]) }))
.await;
assert_eq!(code, 200);
assert_eq!(
response,
json!({
"content.truc": "foobar",
})
);
let (response, code) = index
.get_document(1, Some(GetDocumentOptions { fields: Some(vec!["content.truc"]) }))
.await;
assert_eq!(code, 200);
assert_eq!(
response,
json!({
"content": {
"truc": "foobar",
},
})
);
}
#[actix_rt::test]
async fn get_documents_displayed_attributes_is_ignored() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"displayedAttributes": ["gender"]})).await;
index.load_test_set().await;
let (response, code) = index.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 20);
assert_eq!(response["results"][0].as_object().unwrap().keys().count(), 16);
assert!(response["results"][0]["gender"] != json!(null));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["limit"], json!(20));
assert_eq!(response["total"], json!(77));
let (response, code) = index.get_document(0, None).await;
assert_eq!(code, 200);
assert_eq!(response.as_object().unwrap().keys().count(), 16);
assert!(response.as_object().unwrap().get("gender").is_some());
}

View file

@ -0,0 +1,4 @@
mod add_documents;
mod delete_documents;
mod get_documents;
mod update_documents;

View file

@ -0,0 +1,198 @@
use serde_json::json;
use crate::common::encoder::Encoder;
use crate::common::{GetAllDocumentsOptions, Server};
#[actix_rt::test]
async fn error_document_update_create_index_bad_uid() {
let server = Server::new().await;
let index = server.index("883 fj!");
let (response, code) = index.update_documents(json!([{"id": 1}]), None).await;
let expected_response = json!({
"message": "`883 fj!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
});
assert_eq!(code, 400);
assert_eq!(response, expected_response);
}
#[actix_rt::test]
async fn document_update_with_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"primary": 1,
"content": "foo",
}
]);
let (_response, code) = index.update_documents(documents, Some("primary")).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
assert_eq!(response["uid"], 0);
assert_eq!(response["type"], "documentAdditionOrUpdate");
assert_eq!(response["details"]["indexedDocuments"], 1);
assert_eq!(response["details"]["receivedDocuments"], 1);
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["primaryKey"], "primary");
}
#[actix_rt::test]
async fn update_document() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"doc_id": 1,
"content": "foo",
}
]);
let (_response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let documents = json!([
{
"doc_id": 1,
"other": "bar",
}
]);
let (response, code) = index.update_documents(documents, None).await;
assert_eq!(code, 202, "response: {}", response);
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
assert_eq!(response.to_string(), r##"{"doc_id":1,"content":"foo","other":"bar"}"##);
}
#[actix_rt::test]
async fn update_document_gzip_encoded() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Gzip);
let documents = json!([
{
"doc_id": 1,
"content": "foo",
}
]);
let (_response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let documents = json!([
{
"doc_id": 1,
"other": "bar",
}
]);
let (response, code) = index.update_documents(documents, None).await;
assert_eq!(code, 202, "response: {}", response);
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "succeeded");
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
assert_eq!(response.to_string(), r##"{"doc_id":1,"content":"foo","other":"bar"}"##);
}
#[actix_rt::test]
async fn update_larger_dataset() {
let server = Server::new().await;
let index = server.index("test");
let documents = serde_json::from_str(include_str!("../assets/test_set.json")).unwrap();
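// the bundled test set contains 77 documents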
index.update_documents(documents, None).await;
index.wait_task(0).await;
let (response, code) = index.get_task(0).await;
assert_eq!(code, 200);
assert_eq!(response["type"], "documentAdditionOrUpdate");
assert_eq!(response["details"]["indexedDocuments"], 77);
let (response, code) = index
.get_all_documents(GetAllDocumentsOptions { limit: Some(1000), ..Default::default() })
.await;
assert_eq!(code, 200);
assert_eq!(response["results"].as_array().unwrap().len(), 77);
}
#[actix_rt::test]
async fn error_update_documents_bad_document_id() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("docid")).await;
let documents = json!([
{
"docid": "foo & bar",
"content": "foobar"
}
]);
index.update_documents(documents, None).await;
let response = index.wait_task(1).await;
assert_eq!(response["status"], json!("failed"));
assert_eq!(
response["error"]["message"],
json!(
r#"Document identifier `"foo & bar"` is invalid. A document identifier can be of type integer or string, only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and underscores (_)."#
)
);
assert_eq!(response["error"]["code"], json!("invalid_document_id"));
assert_eq!(response["error"]["type"], json!("invalid_request"));
assert_eq!(
response["error"]["link"],
json!("https://docs.meilisearch.com/errors#invalid_document_id")
);
}
#[actix_rt::test]
async fn error_update_documents_missing_document_id() {
let server = Server::new().await;
let index = server.index("test");
index.create(Some("docid")).await;
let documents = json!([
{
"id": "11",
"content": "foobar"
}
]);
index.update_documents(documents, None).await;
let response = index.wait_task(1).await;
assert_eq!(response["status"], "failed");
assert_eq!(
response["error"]["message"],
r#"Document doesn't have a `docid` attribute: `{"id":"11","content":"foobar"}`."#
);
assert_eq!(response["error"]["code"], "missing_document_id");
assert_eq!(response["error"]["type"], "invalid_request");
assert_eq!(
response["error"]["link"],
"https://docs.meilisearch.com/errors#missing_document_id"
);
}

View file

@ -0,0 +1,73 @@
use std::path::PathBuf;
use manifest_dir_macros::exist_relative_path;
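// Bundled dump fixtures, one per dump format version, used by the import tests.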
pub enum GetDump {
MoviesRawV1,
MoviesWithSettingsV1,
RubyGemsWithSettingsV1,
MoviesRawV2,
MoviesWithSettingsV2,
RubyGemsWithSettingsV2,
MoviesRawV3,
MoviesWithSettingsV3,
RubyGemsWithSettingsV3,
MoviesRawV4,
MoviesWithSettingsV4,
RubyGemsWithSettingsV4,
TestV5,
}
impl GetDump {
pub fn path(&self) -> PathBuf {
match self {
GetDump::MoviesRawV1 => {
exist_relative_path!("tests/assets/v1_v0.20.0_movies.dump").into()
}
GetDump::MoviesWithSettingsV1 => {
exist_relative_path!("tests/assets/v1_v0.20.0_movies_with_settings.dump").into()
}
GetDump::RubyGemsWithSettingsV1 => {
exist_relative_path!("tests/assets/v1_v0.20.0_rubygems_with_settings.dump").into()
}
GetDump::MoviesRawV2 => {
exist_relative_path!("tests/assets/v2_v0.21.1_movies.dump").into()
}
GetDump::MoviesWithSettingsV2 => {
exist_relative_path!("tests/assets/v2_v0.21.1_movies_with_settings.dump").into()
}
GetDump::RubyGemsWithSettingsV2 => {
exist_relative_path!("tests/assets/v2_v0.21.1_rubygems_with_settings.dump").into()
}
GetDump::MoviesRawV3 => {
exist_relative_path!("tests/assets/v3_v0.24.0_movies.dump").into()
}
GetDump::MoviesWithSettingsV3 => {
exist_relative_path!("tests/assets/v3_v0.24.0_movies_with_settings.dump").into()
}
GetDump::RubyGemsWithSettingsV3 => {
exist_relative_path!("tests/assets/v3_v0.24.0_rubygems_with_settings.dump").into()
}
GetDump::MoviesRawV4 => {
exist_relative_path!("tests/assets/v4_v0.25.2_movies.dump").into()
}
GetDump::MoviesWithSettingsV4 => {
exist_relative_path!("tests/assets/v4_v0.25.2_movies_with_settings.dump").into()
}
GetDump::RubyGemsWithSettingsV4 => {
exist_relative_path!("tests/assets/v4_v0.25.2_rubygems_with_settings.dump").into()
}
GetDump::TestV5 => {
exist_relative_path!("tests/assets/v5_v0.28.0_test_dump.dump").into()
}
}
}
}

View file

@ -0,0 +1,819 @@
mod data;
use meilisearch::Opt;
use serde_json::json;
use self::data::GetDump;
use crate::common::{default_settings, GetAllDocumentsOptions, Server};
// all the following tests are ignored on Windows. See #2364
#[actix_rt::test]
#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v1_movie_raw() {
let temp = tempfile::tempdir().unwrap();
let path = GetDump::MoviesRawV1.path();
let options = Opt { import_dump: Some(path), ..default_settings(temp.path()) };
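// the dump is imported while the server boots, so the data is immediately queryable below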
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["*"], "searchableAttributes": ["*"], "filterableAttributes": [], "sortableAttributes": [], "rankingRules": ["typo", "words", "proximity", "attribute", "exactness"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{"uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31968 }, "error": null, "duration": "PT9.317060500S", "enqueuedAt": "2021-09-08T09:08:45.153219Z", "startedAt": "2021-09-08T09:08:45.3961665Z", "finishedAt": "2021-09-08T09:08:54.713227Z" }], "limit": 20, "from": 0, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 100, "title": "Lock, Stock and Two Smoking Barrels", "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "genres": ["Comedy", "Crime"], "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000})
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 500, "title": "Reservoir Dogs", "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "genres": ["Crime", "Thriller"], "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 10006, "title": "Wild Seven", "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "genres": ["Action", "Crime", "Drama"], "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v1_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
let path = GetDump::MoviesWithSettingsV1.path();
let options = Opt { import_dump: Some(path), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["genres", "id", "overview", "poster", "release_date", "title"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "sortableAttributes": [], "rankingRules": ["typo", "words", "proximity", "attribute", "exactness"], "stopWords": ["of", "the"], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": { "oneTypo": 5, "twoTypos": 9 }, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{ "uid": 1, "indexUid": "indexUID", "status": "succeeded", "type": "settingsUpdate", "canceledBy": null, "details": { "displayedAttributes": ["genres", "id", "overview", "poster", "release_date", "title"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "stopWords": ["of", "the"] }, "error": null, "duration": "PT7.288826907S", "enqueuedAt": "2021-09-08T09:34:40.882977Z", "startedAt": "2021-09-08T09:34:40.883073093Z", "finishedAt": "2021-09-08T09:34:48.1719Z"}, { "uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31968 }, "error": null, "duration": "PT9.090735774S", "enqueuedAt": "2021-09-08T09:34:16.036101Z", "startedAt": "2021-09-08T09:34:16.261191226Z", "finishedAt": "2021-09-08T09:34:25.351927Z" }], "limit": 20, "from": 1, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 100, "title": "Lock, Stock and Two Smoking Barrels", "genres": ["Comedy", "Crime"], "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000 })
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 500, "title": "Reservoir Dogs", "genres": ["Crime", "Thriller"], "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 10006, "title": "Wild Seven", "genres": ["Action", "Crime", "Drama"], "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v1_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
let path = GetDump::RubyGemsWithSettingsV1.path();
let options = Opt { import_dump: Some(path), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"description": 53, "id": 53, "name": 53, "summary": 53, "total_downloads": 53, "version": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["description", "id", "name", "summary", "total_downloads", "version"], "searchableAttributes": ["name", "summary"], "filterableAttributes": ["version"], "sortableAttributes": [], "rankingRules": ["typo", "words", "fame:desc", "proximity", "attribute", "exactness", "total_downloads:desc"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 }})
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks["results"][0],
json!({"uid": 92, "indexUid": "rubygems", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": {"receivedDocuments": 0, "indexedDocuments": 1042}, "error": null, "duration": "PT1.487793839S", "enqueuedAt": "2021-09-08T09:27:01.465296Z", "startedAt": "2021-09-08T09:28:44.882177161Z", "finishedAt": "2021-09-08T09:28:46.369971Z"})
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(188040, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "meilisearch", "summary": "An easy-to-use ruby client for Meilisearch API", "description": "An easy-to-use ruby client for Meilisearch API. See https://github.com/meilisearch/MeiliSearch", "id": "188040", "version": "0.15.2", "total_downloads": "7465"})
);
let (document, code) = index.get_document(191940, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "doggo", "summary": "RSpec 3 formatter - documentation, with progress indication", "description": "Similar to \"rspec -f d\", but also indicates progress by showing the current test number and total test count on each line.", "id": "191940", "version": "1.1.0", "total_downloads": "9394"})
);
let (document, code) = index.get_document(159227, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "vortex-of-agony", "summary": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "description": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "id": "159227", "version": "0.1.0", "total_downloads": "1007"})
);
}
#[actix_rt::test]
async fn import_dump_v2_movie_raw() {
let temp = tempfile::tempdir().unwrap();
let options =
Opt { import_dump: Some(GetDump::MoviesRawV2.path()), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["*"], "searchableAttributes": ["*"], "filterableAttributes": [], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{"uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT41.751156S", "enqueuedAt": "2021-09-08T08:30:30.550282Z", "startedAt": "2021-09-08T08:30:30.553012Z", "finishedAt": "2021-09-08T08:31:12.304168Z" }], "limit": 20, "from": 0, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 100, "title": "Lock, Stock and Two Smoking Barrels", "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "genres": ["Comedy", "Crime"], "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000})
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 500, "title": "Reservoir Dogs", "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "genres": ["Crime", "Thriller"], "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 10006, "title": "Wild Seven", "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "genres": ["Action", "Crime", "Drama"], "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v2_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::MoviesWithSettingsV2.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": ["of", "the"], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": { "oneTypo": 5, "twoTypos": 9 }, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{ "uid": 1, "indexUid": "indexUID", "status": "succeeded", "type": "settingsUpdate", "canceledBy": null, "details": { "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "stopWords": ["of", "the"] }, "error": null, "duration": "PT37.488777S", "enqueuedAt": "2021-09-08T08:24:02.323444Z", "startedAt": "2021-09-08T08:24:02.324145Z", "finishedAt": "2021-09-08T08:24:39.812922Z" }, { "uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT39.941318S", "enqueuedAt": "2021-09-08T08:21:14.742672Z", "startedAt": "2021-09-08T08:21:14.750166Z", "finishedAt": "2021-09-08T08:21:54.691484Z" }], "limit": 20, "from": 1, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 100, "title": "Lock, Stock and Two Smoking Barrels", "genres": ["Comedy", "Crime"], "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000 })
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 500, "title": "Reservoir Dogs", "genres": ["Crime", "Thriller"], "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 10006, "title": "Wild Seven", "genres": ["Action", "Crime", "Drama"], "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v2_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::RubyGemsWithSettingsV2.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"description": 53, "id": 53, "name": 53, "summary": 53, "total_downloads": 53, "version": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["name", "summary", "description", "version", "total_downloads"], "searchableAttributes": ["name", "summary"], "filterableAttributes": ["version"], "sortableAttributes": [], "rankingRules": ["typo", "words", "fame:desc", "proximity", "attribute", "exactness", "total_downloads:desc"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 }})
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks["results"][0],
json!({"uid": 92, "indexUid": "rubygems", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": {"receivedDocuments": 0, "indexedDocuments": 1042}, "error": null, "duration": "PT14.034672S", "enqueuedAt": "2021-09-08T08:40:31.390775Z", "startedAt": "2021-09-08T08:51:39.060642Z", "finishedAt": "2021-09-08T08:51:53.095314Z"})
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(188040, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "meilisearch", "summary": "An easy-to-use ruby client for Meilisearch API", "description": "An easy-to-use ruby client for Meilisearch API. See https://github.com/meilisearch/MeiliSearch", "id": "188040", "version": "0.15.2", "total_downloads": "7465"})
);
let (document, code) = index.get_document(191940, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "doggo", "summary": "RSpec 3 formatter - documentation, with progress indication", "description": "Similar to \"rspec -f d\", but also indicates progress by showing the current test number and total test count on each line.", "id": "191940", "version": "1.1.0", "total_downloads": "9394"})
);
let (document, code) = index.get_document(159227, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "vortex-of-agony", "summary": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "description": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "id": "159227", "version": "0.1.0", "total_downloads": "1007"})
);
}
#[actix_rt::test]
async fn import_dump_v3_movie_raw() {
let temp = tempfile::tempdir().unwrap();
let options =
Opt { import_dump: Some(GetDump::MoviesRawV3.path()), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["*"], "searchableAttributes": ["*"], "filterableAttributes": [], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{"uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT41.751156S", "enqueuedAt": "2021-09-08T08:30:30.550282Z", "startedAt": "2021-09-08T08:30:30.553012Z", "finishedAt": "2021-09-08T08:31:12.304168Z" }], "limit": 20, "from": 0, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 100, "title": "Lock, Stock and Two Smoking Barrels", "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "genres": ["Comedy", "Crime"], "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000})
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 500, "title": "Reservoir Dogs", "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "genres": ["Crime", "Thriller"], "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({"id": 10006, "title": "Wild Seven", "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "genres": ["Action", "Crime", "Drama"], "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v3_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::MoviesWithSettingsV3.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": ["of", "the"], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": { "oneTypo": 5, "twoTypos": 9 }, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{ "uid": 1, "indexUid": "indexUID", "status": "succeeded", "type": "settingsUpdate", "canceledBy": null, "details": { "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "stopWords": ["of", "the"] }, "error": null, "duration": "PT37.488777S", "enqueuedAt": "2021-09-08T08:24:02.323444Z", "startedAt": "2021-09-08T08:24:02.324145Z", "finishedAt": "2021-09-08T08:24:39.812922Z" }, { "uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT39.941318S", "enqueuedAt": "2021-09-08T08:21:14.742672Z", "startedAt": "2021-09-08T08:21:14.750166Z", "finishedAt": "2021-09-08T08:21:54.691484Z" }], "limit": 20, "from": 1, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 100, "title": "Lock, Stock and Two Smoking Barrels", "genres": ["Comedy", "Crime"], "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000 })
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 500, "title": "Reservoir Dogs", "genres": ["Crime", "Thriller"], "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 10006, "title": "Wild Seven", "genres": ["Action", "Crime", "Drama"], "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v3_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::RubyGemsWithSettingsV3.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"description": 53, "id": 53, "name": 53, "summary": 53, "total_downloads": 53, "version": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({"displayedAttributes": ["name", "summary", "description", "version", "total_downloads"], "searchableAttributes": ["name", "summary"], "filterableAttributes": ["version"], "sortableAttributes": [], "rankingRules": ["typo", "words", "fame:desc", "proximity", "attribute", "exactness", "total_downloads:desc"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks["results"][0],
json!({"uid": 92, "indexUid": "rubygems", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": {"receivedDocuments": 0, "indexedDocuments": 1042}, "error": null, "duration": "PT14.034672S", "enqueuedAt": "2021-09-08T08:40:31.390775Z", "startedAt": "2021-09-08T08:51:39.060642Z", "finishedAt": "2021-09-08T08:51:53.095314Z"})
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(188040, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "meilisearch", "summary": "An easy-to-use ruby client for Meilisearch API", "description": "An easy-to-use ruby client for Meilisearch API. See https://github.com/meilisearch/MeiliSearch", "id": "188040", "version": "0.15.2", "total_downloads": "7465"})
);
let (document, code) = index.get_document(191940, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "doggo", "summary": "RSpec 3 formatter - documentation, with progress indication", "description": "Similar to \"rspec -f d\", but also indicates progress by showing the current test number and total test count on each line.", "id": "191940", "version": "1.1.0", "total_downloads": "9394"})
);
let (document, code) = index.get_document(159227, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "vortex-of-agony", "summary": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "description": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "id": "159227", "version": "0.1.0", "total_downloads": "1007"})
);
}
#[actix_rt::test]
async fn import_dump_v4_movie_raw() {
let temp = tempfile::tempdir().unwrap();
let options =
Opt { import_dump: Some(GetDump::MoviesRawV4.path()), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["*"], "searchableAttributes": ["*"], "filterableAttributes": [], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{"uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT41.751156S", "enqueuedAt": "2021-09-08T08:30:30.550282Z", "startedAt": "2021-09-08T08:30:30.553012Z", "finishedAt": "2021-09-08T08:31:12.304168Z" }], "limit" : 20, "from": 0, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 100, "title": "Lock, Stock and Two Smoking Barrels", "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "genres": ["Comedy", "Crime"], "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000})
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 500, "title": "Reservoir Dogs", "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "genres": ["Crime", "Thriller"], "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 10006, "title": "Wild Seven", "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "genres": ["Action", "Crime", "Drama"], "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v4_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::MoviesWithSettingsV4.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"genres": 53, "id": 53, "overview": 53, "poster": 53, "release_date": 53, "title": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "sortableAttributes": [], "rankingRules": ["words", "typo", "proximity", "attribute", "exactness"], "stopWords": ["of", "the"], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": { "oneTypo": 5, "twoTypos": 9 }, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks,
json!({ "results": [{ "uid": 1, "indexUid": "indexUID", "status": "succeeded", "type": "settingsUpdate", "canceledBy": null, "details": { "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "stopWords": ["of", "the"] }, "error": null, "duration": "PT37.488777S", "enqueuedAt": "2021-09-08T08:24:02.323444Z", "startedAt": "2021-09-08T08:24:02.324145Z", "finishedAt": "2021-09-08T08:24:39.812922Z" }, { "uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "error": null, "duration": "PT39.941318S", "enqueuedAt": "2021-09-08T08:21:14.742672Z", "startedAt": "2021-09-08T08:21:14.750166Z", "finishedAt": "2021-09-08T08:21:54.691484Z" }], "limit": 20, "from": 1, "next": null })
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 100, "title": "Lock, Stock and Two Smoking Barrels", "genres": ["Comedy", "Crime"], "overview": "A card shark and his unwillingly-enlisted friends need to make a lot of cash quick after losing a sketchy poker match. To do this they decide to pull a heist on a small-time gang who happen to be operating out of the flat next door.", "poster": "https://image.tmdb.org/t/p/w500/8kSerJrhrJWKLk1LViesGcnrUPE.jpg", "release_date": 889056000 })
);
let (document, code) = index.get_document(500, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 500, "title": "Reservoir Dogs", "genres": ["Crime", "Thriller"], "overview": "A botched robbery indicates a police informant, and the pressure mounts in the aftermath at a warehouse. Crime begets violence as the survivors -- veteran Mr. White, newcomer Mr. Orange, psychopathic parolee Mr. Blonde, bickering weasel Mr. Pink and Nice Guy Eddie -- unravel.", "poster": "https://image.tmdb.org/t/p/w500/AjTtJNumZyUDz33VtMlF1K8JPsE.jpg", "release_date": 715392000})
);
let (document, code) = index.get_document(10006, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "id": 10006, "title": "Wild Seven", "genres": ["Action", "Crime", "Drama"], "overview": "In this darkly karmic vision of Arizona, a man who breathes nothing but ill will begins a noxious domino effect as quickly as an uncontrollable virus kills. As he exits Arizona State Penn after twenty-one long years, Wilson has only one thing on the brain, leveling the score with career criminal, Mackey Willis.", "poster": "https://image.tmdb.org/t/p/w500/y114dTPoqn8k2Txps4P2tI95YCS.jpg", "release_date": 1136073600})
);
}
#[actix_rt::test]
async fn import_dump_v4_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
let options = Opt {
import_dump: Some(GetDump::RubyGemsWithSettingsV4.path()),
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
let (stats, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(
stats,
json!({ "numberOfDocuments": 53, "isIndexing": false, "fieldDistribution": {"description": 53, "id": 53, "name": 53, "summary": 53, "total_downloads": 53, "version": 53 }})
);
let (settings, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(
settings,
json!({ "displayedAttributes": ["name", "summary", "description", "version", "total_downloads"], "searchableAttributes": ["name", "summary"], "filterableAttributes": ["version"], "sortableAttributes": [], "rankingRules": ["typo", "words", "fame:desc", "proximity", "attribute", "exactness", "total_downloads:desc"], "stopWords": [], "synonyms": {}, "distinctAttribute": null, "typoTolerance": {"enabled": true, "minWordSizeForTypos": {"oneTypo": 5, "twoTypos": 9}, "disableOnWords": [], "disableOnAttributes": [] }, "faceting": { "maxValuesPerFacet": 100 }, "pagination": { "maxTotalHits": 1000 } })
);
let (tasks, code) = index.list_tasks().await;
assert_eq!(code, 200);
assert_eq!(
tasks["results"][0],
json!({ "uid": 92, "indexUid": "rubygems", "status": "succeeded", "type": "documentAdditionOrUpdate", "canceledBy": null, "details": {"receivedDocuments": 0, "indexedDocuments": 1042}, "error": null, "duration": "PT14.034672S", "enqueuedAt": "2021-09-08T08:40:31.390775Z", "startedAt": "2021-09-08T08:51:39.060642Z", "finishedAt": "2021-09-08T08:51:53.095314Z"})
);
// finally we're just going to check that we can still get a few documents by id
let (document, code) = index.get_document(188040, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "meilisearch", "summary": "An easy-to-use ruby client for Meilisearch API", "description": "An easy-to-use ruby client for Meilisearch API. See https://github.com/meilisearch/MeiliSearch", "id": "188040", "version": "0.15.2", "total_downloads": "7465"})
);
let (document, code) = index.get_document(191940, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "doggo", "summary": "RSpec 3 formatter - documentation, with progress indication", "description": "Similar to \"rspec -f d\", but also indicates progress by showing the current test number and total test count on each line.", "id": "191940", "version": "1.1.0", "total_downloads": "9394"})
);
let (document, code) = index.get_document(159227, None).await;
assert_eq!(code, 200);
assert_eq!(
document,
json!({ "name": "vortex-of-agony", "summary": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "description": "You dont need to use nodejs or go, just install this plugin. It will crash your application at random", "id": "159227", "version": "0.1.0", "total_downloads": "1007"})
);
}
#[actix_rt::test]
async fn import_dump_v5() {
let temp = tempfile::tempdir().unwrap();
let options =
Opt { import_dump: Some(GetDump::TestV5.path()), ..default_settings(temp.path()) };
let mut server = Server::new_auth_with_options(options, temp).await;
server.use_api_key("MASTER_KEY");
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200, "{indexes}");
assert_eq!(indexes["results"].as_array().unwrap().len(), 2);
assert_eq!(indexes["results"][0]["uid"], json!("test"));
assert_eq!(indexes["results"][1]["uid"], json!("test2"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let expected_stats = json!({
"numberOfDocuments": 10,
"isIndexing": false,
"fieldDistribution": {
"cast": 10,
"director": 10,
"genres": 10,
"id": 10,
"overview": 10,
"popularity": 10,
"poster_path": 10,
"producer": 10,
"production_companies": 10,
"release_date": 10,
"tagline": 10,
"title": 10,
"vote_average": 10,
"vote_count": 10
}
});
let index1 = server.index("test");
let index2 = server.index("test2");
let (stats, code) = index1.stats().await;
assert_eq!(code, 200);
assert_eq!(stats, expected_stats);
let (docs, code) = index2.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(docs["results"].as_array().unwrap().len(), 10);
let (docs, code) = index1.get_all_documents(GetAllDocumentsOptions::default()).await;
assert_eq!(code, 200);
assert_eq!(docs["results"].as_array().unwrap().len(), 10);
let (stats, code) = index2.stats().await;
assert_eq!(code, 200);
assert_eq!(stats, expected_stats);
let (keys, code) = server.list_api_keys().await;
assert_eq!(code, 200);
let key = &keys["results"][0];
assert_eq!(key["name"], "my key");
}
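// Every dump-import test above follows the same skeleton: boot a server from a
// dump fixture, then assert on the indexes, stats, settings, tasks, and a few
// documents. A minimal sketch of how the shared opening steps could be factored
// out; `verify_single_index_dump` is hypothetical, while `Server`, `Opt`,
// `GetDump`, and `default_settings` are the harness items used above.
async fn verify_single_index_dump(
dump: GetDump,
uid: &str,
primary_key: &str,
) -> (Server, tempfile::TempDir) {
let temp = tempfile::tempdir().unwrap();
let options = Opt { import_dump: Some(dump.path()), ..default_settings(temp.path()) };
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!(uid));
assert_eq!(indexes["results"][0]["primaryKey"], json!(primary_key));
// Per-fixture expectations (stats, settings, tasks, documents) differ
// between dumps, so callers would still assert those themselves. The
// TempDir is returned so the data directory outlives the server handle.
(server, temp)
}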

@ -0,0 +1,200 @@
use actix_web::http::header::ContentType;
use actix_web::test;
use http::header::ACCEPT_ENCODING;
use serde_json::{json, Value};
use crate::common::encoder::Encoder;
use crate::common::Server;
#[actix_rt::test]
async fn create_index_no_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.create(None).await;
assert_eq!(code, 202);
assert_eq!(response["status"], "enqueued");
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
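// `wait_task` is provided by the shared test harness and not shown in this
// diff. A plausible sketch of such a helper, assuming the harness's `Index`
// type and a `get_task` method wrapping `GET /tasks/{uid}`: poll until the
// task leaves the enqueued/processing states, with a crude retry cap so a
// stuck task fails the test quickly instead of hanging CI.
async fn wait_task_sketch(index: &Index<'_>, uid: u64) -> Value {
for _ in 0..100 {
let (response, _code) = index.get_task(uid).await;
match response["status"].as_str() {
// still queued or running: back off briefly and poll again
Some("enqueued") | Some("processing") => {
tokio::time::sleep(std::time::Duration::from_millis(50)).await
}
// terminal states: succeeded, failed, or canceled
_ => return response,
}
}
panic!("task {uid} never reached a terminal status");
}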
#[actix_rt::test]
async fn create_index_with_gzip_encoded_request() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Gzip);
let (response, code) = index.create(None).await;
assert_eq!(code, 202);
assert_eq!(response["status"], "enqueued");
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
async fn create_index_with_gzip_encoded_request_and_receiving_brotli_encoded_response() {
let server = Server::new().await;
let app = server.init_web_app().await;
let body = serde_json::to_string(&json!({
"uid": "test",
"primaryKey": None::<&str>,
}))
.unwrap();
let req = test::TestRequest::post()
.uri("/indexes")
.insert_header(Encoder::Gzip.header().unwrap())
.insert_header((ACCEPT_ENCODING, "br"))
.insert_header(ContentType::json())
.set_payload(Encoder::Gzip.encode(body))
.to_request();
let res = test::call_service(&app, req).await;
assert_eq!(res.status(), 202);
let bytes = test::read_body(res).await;
let decoded = Encoder::Brotli.decode(bytes);
let parsed_response =
serde_json::from_slice::<Value>(decoded.into().as_ref()).expect("Expecting valid json");
assert_eq!(parsed_response["taskUid"], 0);
assert_eq!(parsed_response["indexUid"], "test");
}
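// `Encoder` comes from the shared `common::encoder` module, which this diff
// does not include. A rough sketch of the gzip half of such a helper, assuming
// the `flate2` crate already present in the dependency tree; the real module
// also covers brotli and deflate and exposes the matching `Content-Encoding`
// header used with `insert_header` above.
fn gzip_encode_sketch(body: impl AsRef<[u8]>) -> Vec<u8> {
use std::io::Write;
use flate2::write::GzEncoder;
use flate2::Compression;
let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
encoder.write_all(body.as_ref()).expect("failed to gzip the request payload");
encoder.finish().expect("failed to finalize the gzip stream")
}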
#[actix_rt::test]
async fn create_index_with_zlib_encoded_request() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Deflate);
let (response, code) = index.create(None).await;
assert_eq!(code, 202);
assert_eq!(response["status"], "enqueued");
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
async fn create_index_with_brotli_encoded_request() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Brotli);
let (response, code) = index.create(None).await;
assert_eq!(code, 202);
assert_eq!(response["status"], "enqueued");
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], Value::Null);
}
#[actix_rt::test]
async fn create_index_with_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.create(Some("primary")).await;
assert_eq!(code, 202);
assert_eq!(response["status"], "enqueued");
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
assert_eq!(response["type"], "indexCreation");
assert_eq!(response["details"]["primaryKey"], "primary");
}
#[actix_rt::test]
async fn create_index_with_invalid_primary_key() {
let document = json!([ { "id": 2, "title": "Pride and Prejudice" } ]);
let server = Server::new().await;
let index = server.index("movies");
let (_response, code) = index.add_documents(document, Some("title")).await;
assert_eq!(code, 202);
index.wait_task(0).await;
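// The task fails because "Pride and Prejudice" is not a valid document id
// (ids may only contain alphanumerics, `-`, and `_`), so no primary key is
// ever registered on the index.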
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["primaryKey"], Value::Null);
}
#[actix_rt::test]
async fn test_create_multiple_indexes() {
let server = Server::new().await;
let index1 = server.index("test1");
let index2 = server.index("test2");
let index3 = server.index("test3");
let index4 = server.index("test4");
index1.create(None).await;
index2.create(None).await;
index3.create(None).await;
index1.wait_task(0).await;
index1.wait_task(1).await;
index1.wait_task(2).await;
assert_eq!(index1.get().await.1, 200);
assert_eq!(index2.get().await.1, 200);
assert_eq!(index3.get().await.1, 200);
assert_eq!(index4.get().await.1, 404);
}
#[actix_rt::test]
async fn error_create_existing_index() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(Some("primary")).await;
assert_eq!(code, 202);
index.create(Some("primary")).await;
let response = index.wait_task(1).await;
let expected_response = json!({
"message": "Index `test` already exists.",
"code": "index_already_exists",
"type": "invalid_request",
"link":"https://docs.meilisearch.com/errors#index_already_exists"
});
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn error_create_with_invalid_index_uid() {
let server = Server::new().await;
let index = server.index("test test#!");
let (response, code) = index.create(None).await;
let expected_response = json!({
"message": "`test test#!` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"
});
assert_eq!(response, expected_response);
assert_eq!(code, 400);
}

@ -0,0 +1,65 @@
use serde_json::json;
use crate::common::Server;
#[actix_rt::test]
async fn create_and_delete_index() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
assert_eq!(index.get().await.1, 200);
let (_response, code) = index.delete().await;
assert_eq!(code, 202);
index.wait_task(1).await;
assert_eq!(index.get().await.1, 404);
}
#[actix_rt::test]
async fn error_delete_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.delete().await;
assert_eq!(code, 202);
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
let response = index.wait_task(0).await;
assert_eq!(response["status"], "failed");
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn loop_delete_add_documents() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([{"id": 1, "field1": "hello"}]);
let mut tasks = Vec::new();
for _ in 0..50 {
let (response, code) = index.add_documents(documents.clone(), None).await;
tasks.push(response["taskUid"].as_u64().unwrap());
assert_eq!(code, 202, "{}", response);
let (response, code) = index.delete().await;
tasks.push(response["taskUid"].as_u64().unwrap());
assert_eq!(code, 202, "{}", response);
}
for task in tasks {
let response = index.wait_task(task).await;
assert_eq!(response["status"], "succeeded", "{}", response);
}
}

@ -0,0 +1,196 @@
use serde_json::{json, Value};
use crate::common::Server;
#[actix_rt::test]
async fn create_and_get_index() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
assert_eq!(response["createdAt"], response["updatedAt"]);
assert_eq!(response["primaryKey"], Value::Null);
assert_eq!(response.as_object().unwrap().len(), 4);
}
#[actix_rt::test]
async fn error_get_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.get().await;
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(response, expected_response);
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn no_index_return_empty_list() {
let server = Server::new().await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert!(response["results"].is_array());
assert!(response["results"].as_array().unwrap().is_empty());
}
#[actix_rt::test]
async fn list_multiple_indexes() {
let server = Server::new().await;
server.index("test").create(None).await;
server.index("test1").create(Some("key")).await;
server.index("test").wait_task(1).await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 2);
assert!(arr.iter().any(|entry| entry["uid"] == "test" && entry["primaryKey"] == Value::Null));
assert!(arr.iter().any(|entry| entry["uid"] == "test1" && entry["primaryKey"] == "key"));
}
#[actix_rt::test]
async fn get_and_paginate_indexes() {
let server = Server::new().await;
const NB_INDEXES: usize = 50;
for i in 0..NB_INDEXES {
server.index(&format!("test_{i:02}")).create(None).await;
server.index(&format!("test_{i:02}")).wait_task(i as u64).await;
}
// basic
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
// ensure we get all the indexes in alphabetical order
assert!((0..20)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with an offset
let (response, code) = server.list_indexes(Some(15), None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(15));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
assert!((15..35)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with an offset and not enough elements
let (response, code) = server.list_indexes(Some(45), None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(45));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 5);
assert!((45..50)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit lower than the default
let (response, code) = server.list_indexes(None, Some(5)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(5));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 5);
assert!((0..5)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit higher than the default
let (response, code) = server.list_indexes(None, Some(40)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(40));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 40);
assert!((0..40)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit higher than the total number of indexes
let (response, code) = server.list_indexes(None, Some(80)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(80));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 50);
assert!((0..50)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit and an offset
let (response, code) = server.list_indexes(Some(20), Some(10)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(10));
assert_eq!(response["offset"], json!(20));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 10);
assert!((20..30)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
}
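// Every length assertion above reduces to the same rule: a page is the slice
// `[offset, min(offset + limit, total))`, so its length is
// `min(limit, total - offset)`, clamped at zero. A small sketch of that rule:
fn expected_page_len(total: usize, offset: usize, limit: usize) -> usize {
limit.min(total.saturating_sub(offset))
}
// e.g. expected_page_len(50, 45, 20) == 5 and expected_page_len(50, 0, 80) == 50,
// matching the "offset and not enough elements" and oversized-limit cases above.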
#[actix_rt::test]
async fn get_invalid_index_uid() {
let server = Server::new().await;
let index = server.index("this is not a valid index name");
let (response, code) = index.get().await;
assert_eq!(code, 404);
assert_eq!(
response,
json!(
{
"message": "Index `this is not a valid index name` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
})
);
}

@ -0,0 +1,5 @@
mod create_index;
mod delete_index;
mod get_index;
mod stats;
mod update_index;

@ -0,0 +1,63 @@
use serde_json::json;
use crate::common::Server;
#[actix_rt::test]
async fn stats() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(Some("id")).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(response["numberOfDocuments"], 0);
assert!(response["isIndexing"] == false);
assert!(response["fieldDistribution"].as_object().unwrap().is_empty());
let documents = json!([
{
"id": 1,
"name": "Alexey",
},
{
"id": 2,
"age": 45,
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 1);
index.wait_task(1).await;
let (response, code) = index.stats().await;
assert_eq!(code, 200);
assert_eq!(response["numberOfDocuments"], 2);
assert!(response["isIndexing"] == false);
assert_eq!(response["fieldDistribution"]["id"], 2);
assert_eq!(response["fieldDistribution"]["name"], 1);
assert_eq!(response["fieldDistribution"]["age"], 1);
}
#[actix_rt::test]
async fn error_get_stats_unexisting_index() {
let server = Server::new().await;
let (response, code) = server.index("test").stats().await;
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(response, expected_response);
assert_eq!(code, 404);
}
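// `fieldDistribution` counts, per top-level field, how many documents contain
// it: above, `id` is present in both documents while `name` and `age` appear
// once each. A minimal sketch of that computation over serde_json documents
// (illustrative only, not Meilisearch's actual indexing code):
fn field_distribution_sketch(documents: &[serde_json::Value]) -> std::collections::BTreeMap<String, u64> {
let mut distribution = std::collections::BTreeMap::new();
for document in documents {
// Only objects carry fields; anything else is ignored in this sketch.
for field in document.as_object().into_iter().flat_map(|object| object.keys()) {
*distribution.entry(field.clone()).or_insert(0) += 1;
}
}
distribution
}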

@ -0,0 +1,124 @@
use serde_json::json;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::encoder::Encoder;
use crate::common::Server;
#[actix_rt::test]
async fn update_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
assert_eq!(code, 202);
index.update(Some("primary")).await;
let response = index.wait_task(1).await;
assert_eq!(response["status"], "succeeded");
let (response, code) = index.get().await;
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
let created_at =
OffsetDateTime::parse(response["createdAt"].as_str().unwrap(), &Rfc3339).unwrap();
let updated_at =
OffsetDateTime::parse(response["updatedAt"].as_str().unwrap(), &Rfc3339).unwrap();
assert!(created_at < updated_at);
assert_eq!(response["primaryKey"], "primary");
assert_eq!(response.as_object().unwrap().len(), 4);
}
#[actix_rt::test]
async fn create_and_update_with_different_encoding() {
let server = Server::new().await;
let index = server.index_with_encoder("test", Encoder::Gzip);
let (_, code) = index.create(None).await;
assert_eq!(code, 202);
let index = server.index_with_encoder("test", Encoder::Brotli);
index.update(Some("primary")).await;
let response = index.wait_task(1).await;
assert_eq!(response["status"], "succeeded");
}
#[actix_rt::test]
async fn update_nothing() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(None).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (_, code) = index.update(None).await;
assert_eq!(code, 202);
let response = index.wait_task(1).await;
assert_eq!(response["status"], "succeeded");
}
#[actix_rt::test]
async fn error_update_existing_primary_key() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.create(Some("id")).await;
assert_eq!(code, 202);
let documents = json!([
{
"id": "11",
"content": "foobar"
}
]);
index.add_documents(documents, None).await;
let (_, code) = index.update(Some("primary")).await;
assert_eq!(code, 202);
let response = index.wait_task(2).await;
let expected_response = json!({
"message": "Index already has a primary key: `id`.",
"code": "index_primary_key_already_exists",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_primary_key_already_exists"
});
assert_eq!(response["error"], expected_response);
}
#[actix_rt::test]
async fn error_update_unexisting_index() {
let server = Server::new().await;
let (_, code) = server.index("test").update(None).await;
assert_eq!(code, 202);
let response = server.index("test").wait_task(0).await;
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
assert_eq!(response["error"], expected_response);
}

@ -0,0 +1,17 @@
mod auth;
mod common;
mod dashboard;
mod documents;
mod dumps;
mod index;
mod search;
mod settings;
mod snapshot;
mod stats;
mod tasks;
// Tests are isolated by features in different modules to allow better readability, test
// targetability, and improved incremental compilation times.
//
// All the integration tests live in the same root module so only one test executable is generated,
// thus improving linking time.

@ -0,0 +1,438 @@
use serde_json::json;
use super::DOCUMENTS;
use crate::common::Server;
#[actix_rt::test]
async fn search_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let expected_response = json!({
"message": "Index `test` not found.",
"code": "index_not_found",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#index_not_found"
});
index
.search(json!({"q": "hello"}), |response, code| {
assert_eq!(code, 404);
assert_eq!(response, expected_response);
})
.await;
}
#[actix_rt::test]
async fn search_unexisting_parameter() {
let server = Server::new().await;
let index = server.index("test");
index
.search(json!({"marin": "hello"}), |response, code| {
assert_eq!(code, 400, "{}", response);
assert_eq!(response["code"], "bad_request");
})
.await;
}
#[actix_rt::test]
async fn search_invalid_highlight_and_crop_tags() {
let server = Server::new().await;
let index = server.index("test");
let fields = &["cropMarker", "highlightPreTag", "highlightPostTag"];
for field in fields {
// object
let (response, code) =
index.search_post(json!({field.to_string(): {"marker": "<crop>"}})).await;
assert_eq!(code, 400, "field {} passing object: {}", &field, response);
assert_eq!(response["code"], "bad_request");
// array
let (response, code) =
index.search_post(json!({field.to_string(): ["marker", "<crop>"]})).await;
assert_eq!(code, 400, "field {} passing array: {}", &field, response);
assert_eq!(response["code"], "bad_request");
}
}
#[actix_rt::test]
async fn filter_invalid_syntax_object() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": "title & Glass"}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_invalid_syntax_array() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `title & Glass`.\n1:14 title & Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": ["title & Glass"]}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_invalid_syntax_string() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Found unexpected characters at the end of the filter: `XOR title = Glass`. You probably forgot an `OR` or an `AND` rule.\n15:32 title = Glass XOR title = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": "title = Glass XOR title = Glass"}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_invalid_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Attribute `many` is not filterable. Available filterable attributes are: `title`.\n1:5 many = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": ["many = Glass"]}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_invalid_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Attribute `many` is not filterable. Available filterable attributes are: `title`.\n1:5 many = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": "many = Glass"}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_reserved_geo_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the _geoRadius(latitude, longitude, distance) built-in rule to filter on _geo field coordinates.\n1:5 _geo = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": ["_geo = Glass"]}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_reserved_geo_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geo` is a reserved keyword and thus can't be used as a filter expression. Use the _geoRadius(latitude, longitude, distance) built-in rule to filter on _geo field coordinates.\n1:5 _geo = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": "_geo = Glass"}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_reserved_attribute_array() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression.\n1:13 _geoDistance = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": ["_geoDistance = Glass"]}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn filter_reserved_attribute_string() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geoDistance` is a reserved keyword and thus can't be used as a filter expression.\n1:13 _geoDistance = Glass",
"code": "invalid_filter",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_filter"
});
index
.search(json!({"filter": "_geoDistance = Glass"}), |response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
})
.await;
}
#[actix_rt::test]
async fn sort_geo_reserved_attribute() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geo` is a reserved keyword and thus can't be used as a sort expression. Use the _geoPoint(latitude, longitude) built-in rule to sort on _geo field coordinates.",
"code": "invalid_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_sort"
});
index
.search(
json!({
"sort": ["_geo:asc"]
}),
|response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
},
)
.await;
}
#[actix_rt::test]
async fn sort_reserved_attribute() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "`_geoDistance` is a reserved keyword and thus can't be used as a sort expression.",
"code": "invalid_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_sort"
});
index
.search(
json!({
"sort": ["_geoDistance:asc"]
}),
|response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
},
)
.await;
}
#[actix_rt::test]
async fn sort_unsortable_attribute() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Attribute `title` is not sortable. Available sortable attributes are: `id`.",
"code": "invalid_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_sort"
});
index
.search(
json!({
"sort": ["title:asc"]
}),
|response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
},
)
.await;
}
#[actix_rt::test]
async fn sort_invalid_syntax() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "Invalid syntax for the sort parameter: expected expression ending by `:asc` or `:desc`, found `title`.",
"code": "invalid_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_sort"
});
index
.search(
json!({
"sort": ["title"]
}),
|response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
},
)
.await;
}
#[actix_rt::test]
async fn sort_unset_ranking_rule() {
let server = Server::new().await;
let index = server.index("test");
index
.update_settings(
json!({"sortableAttributes": ["title"], "rankingRules": ["proximity", "exactness"]}),
)
.await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let expected_response = json!({
"message": "The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.",
"code": "invalid_sort",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_sort"
});
index
.search(
json!({
"sort": ["title:asc"]
}),
|response, code| {
assert_eq!(response, expected_response);
assert_eq!(code, 400);
},
)
.await;
}
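// The error messages above spell out the accepted filter operations. For
// contrast, a few expressions that do satisfy that grammar (a sketch; whether
// they are accepted at search time still depends on the index's filterable
// attributes):
#[allow(dead_code)]
const WELL_FORMED_FILTERS: &[&str] = &[
"title = Glass",
"title = Glass OR title = Mask", // clauses combine with AND / OR, never XOR or `&`
"release_date 1990 TO 2000",     // ranges use TO
"title IN [Glass, Mask]",        // set membership uses IN
"_geoRadius(45.0, 6.0, 2000)",   // the only `_geo*` form allowed in a filter
];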

@ -0,0 +1,444 @@
use serde_json::json;
use super::*;
use crate::common::Server;
#[actix_rt::test]
async fn formatted_contain_wildcard() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({ "displayedAttributes": ["id", "cattos"] })).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index.search(json!({ "q": "pesti", "attributesToRetrieve": ["father", "mother"], "attributesToHighlight": ["father", "mother", "*"], "attributesToCrop": ["doggos"], "showMatchesPosition": true }),
|response, code|
{
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"id": "852",
"cattos": "<em>pesti</em>",
},
"_matchesPosition": {"cattos": [{"start": 0, "length": 5}]},
})
);
}
)
.await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": ["*"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"cattos": "pesti",
})
);
})
.await;
index
.search(
json!({ "q": "pesti", "attributesToRetrieve": ["*"], "attributesToHighlight": ["id"], "showMatchesPosition": true }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"cattos": "pesti",
"_formatted": {
"id": "852",
"cattos": "pesti",
},
"_matchesPosition": {"cattos": [{"start": 0, "length": 5}]},
})
);
}
)
.await;
index
.search(
json!({ "q": "pesti", "attributesToRetrieve": ["*"], "attributesToCrop": ["*"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"cattos": "pesti",
"_formatted": {
"id": "852",
"cattos": "pesti",
}
})
);
},
)
.await;
index
.search(json!({ "q": "pesti", "attributesToCrop": ["*"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"cattos": "pesti",
"_formatted": {
"id": "852",
"cattos": "pesti",
}
})
);
})
.await;
}
#[actix_rt::test]
async fn format_nested() {
let server = Server::new().await;
let index = server.index("test");
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": ["doggos"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"doggos": [
{
"name": "bobby",
"age": 2,
},
{
"name": "buddy",
"age": 4,
},
],
})
);
})
.await;
index
.search(
json!({ "q": "pesti", "attributesToRetrieve": ["doggos.name"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"doggos": [
{
"name": "bobby",
},
{
"name": "buddy",
},
],
})
);
},
)
.await;
index
.search(
json!({ "q": "bobby", "attributesToRetrieve": ["doggos.name"], "showMatchesPosition": true }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"doggos": [
{
"name": "bobby",
},
{
"name": "buddy",
},
],
"_matchesPosition": {"doggos.name": [{"start": 0, "length": 5}]},
})
);
}
)
.await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": [], "attributesToHighlight": ["doggos.name"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"doggos": [
{
"name": "bobby",
},
{
"name": "buddy",
},
],
},
})
);
})
.await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": [], "attributesToCrop": ["doggos.name"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"doggos": [
{
"name": "bobby",
},
{
"name": "buddy",
},
],
},
})
);
})
.await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": ["doggos.name"], "attributesToHighlight": ["doggos.age"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"doggos": [
{
"name": "bobby",
},
{
"name": "buddy",
},
],
"_formatted": {
"doggos": [
{
"name": "bobby",
"age": "2",
},
{
"name": "buddy",
"age": "4",
},
],
},
})
);
})
.await;
index
.search(json!({ "q": "pesti", "attributesToRetrieve": [], "attributesToHighlight": ["doggos.age"], "attributesToCrop": ["doggos.name"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"doggos": [
{
"name": "bobby",
"age": "2",
},
{
"name": "buddy",
"age": "4",
},
],
},
})
);
}
)
.await;
}
#[actix_rt::test]
async fn displayedattr_2_smol() {
let server = Server::new().await;
let index = server.index("test");
// `displayedAttributes` is restricted to `id`, so attributes requested through the other search settings below are never returned
index.update_settings(json!({ "displayedAttributes": ["id"] })).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(json!({ "attributesToRetrieve": ["father", "id"], "attributesToHighlight": ["mother"], "attributesToCrop": ["cattos"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
})
);
})
.await;
index
.search(json!({ "attributesToRetrieve": ["id"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
})
);
})
.await;
index
.search(json!({ "attributesToHighlight": ["id"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"_formatted": {
"id": "852",
}
})
);
})
.await;
index
.search(json!({ "attributesToCrop": ["id"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"_formatted": {
"id": "852",
}
})
);
})
.await;
index
.search(
json!({ "attributesToHighlight": ["id"], "attributesToCrop": ["id"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
"_formatted": {
"id": "852",
}
})
);
},
)
.await;
index
.search(json!({ "attributesToHighlight": ["cattos"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
})
);
})
.await;
index
.search(json!({ "attributesToCrop": ["cattos"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"id": 852,
})
);
})
.await;
index
.search(json!({ "attributesToRetrieve": ["cattos"] }), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"][0], json!({}));
})
.await;
index
.search(
json!({ "attributesToRetrieve": ["cattos"], "attributesToHighlight": ["cattos"], "attributesToCrop": ["cattos"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"][0], json!({}));
}
)
.await;
index
.search(
json!({ "attributesToRetrieve": ["cattos"], "attributesToHighlight": ["id"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"id": "852",
}
})
);
},
)
.await;
index
.search(
json!({ "attributesToRetrieve": ["cattos"], "attributesToCrop": ["id"] }),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(
response["hits"][0],
json!({
"_formatted": {
"id": "852",
}
})
);
},
)
.await;
}

@@ -0,0 +1,676 @@
// This module contains all the tests concerning search. Each search feature
// should be tested in its own module to isolate tests and keep them readable.
mod errors;
mod formatted;
mod pagination;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::Server;
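// Shared fixtures: a flat movie dataset (`DOCUMENTS`) and a nested-document
// dataset (`NESTED_DOCUMENTS`) reused across the search test modules.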
pub(self) static DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"title": "Shazam!",
"id": "287947",
},
{
"title": "Captain Marvel",
"id": "299537",
},
{
"title": "Escape Room",
"id": "522681",
},
{
"title": "How to Train Your Dragon: The Hidden World",
"id": "166428",
},
{
"title": "Glass",
"id": "450465",
}
])
});
pub(self) static NESTED_DOCUMENTS: Lazy<Value> = Lazy::new(|| {
json!([
{
"id": 852,
"father": "jean",
"mother": "michelle",
"doggos": [
{
"name": "bobby",
"age": 2,
},
{
"name": "buddy",
"age": 4,
},
],
"cattos": "pesti",
},
{
"id": 654,
"father": "pierre",
"mother": "sabine",
"doggos": [
{
"name": "gros bill",
"age": 8,
},
],
"cattos": ["simba", "pestiféré"],
},
{
"id": 750,
"father": "romain",
"mother": "michelle",
"cattos": ["enigma"],
},
{
"id": 951,
"father": "jean-baptiste",
"mother": "sophie",
"doggos": [
{
"name": "turbo",
"age": 5,
},
{
"name": "fast",
"age": 6,
},
],
"cattos": ["moumoute", "gomez"],
},
])
});
#[actix_rt::test]
async fn simple_placeholder_search() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
})
.await;
let index = server.index("nested");
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
})
.await;
}
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({"q": "glass"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
})
.await;
let index = server.index("nested");
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(json!({"q": "pesti"}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
})
.await;
}
#[actix_rt::test]
async fn search_multiple_params() {
let server = Server::new().await;
let index = server.index("test");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(
json!({
"q": "glass",
"attributesToCrop": ["title:2"],
"attributesToHighlight": ["title"],
"limit": 1,
"offset": 0,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
},
)
.await;
let index = server.index("nested");
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(
json!({
"q": "pesti",
"attributesToCrop": ["catto:2"],
"attributesToHighlight": ["catto"],
"limit": 2,
"offset": 0,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
},
)
.await;
}
#[actix_rt::test]
async fn search_with_filter_string_notation() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(
json!({
"filter": "title = Glass"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
},
)
.await;
let index = server.index("nested");
index.update_settings(json!({"filterableAttributes": ["cattos", "doggos.age"]})).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(3).await;
index
.search(
json!({
"filter": "cattos = pesti"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
assert_eq!(response["hits"][0]["id"], json!(852));
},
)
.await;
index
.search(
json!({
"filter": "doggos.age > 5"
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 2);
assert_eq!(response["hits"][0]["id"], json!(654));
assert_eq!(response["hits"][1]["id"], json!(951));
},
)
.await;
}
#[actix_rt::test]
async fn search_with_filter_array_notation() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let (response, code) = index
.search_post(json!({
"filter": ["title = Glass"]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
let (response, code) = index
.search_post(json!({
"filter": [["title = Glass", "title = \"Shazam!\"", "title = \"Escape Room\""]]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 3);
}
#[actix_rt::test]
async fn search_with_sort_on_numbers() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(
json!({
"sort": ["id:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
},
)
.await;
let index = server.index("nested");
index.update_settings(json!({"sortableAttributes": ["doggos.age"]})).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(3).await;
index
.search(
json!({
"sort": ["doggos.age:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
},
)
.await;
}
#[actix_rt::test]
async fn search_with_sort_on_strings() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(
json!({
"sort": ["title:desc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
},
)
.await;
let index = server.index("nested");
index.update_settings(json!({"sortableAttributes": ["doggos.name"]})).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(3).await;
index
.search(
json!({
"sort": ["doggos.name:asc"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 4);
},
)
.await;
}
#[actix_rt::test]
async fn search_with_multiple_sort() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"sortableAttributes": ["id", "title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let (response, code) = index
.search_post(json!({
"sort": ["id:asc", "title:desc"]
}))
.await;
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
}
#[actix_rt::test]
async fn search_facet_distribution() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({"filterableAttributes": ["title"]})).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
index
.search(
json!({
"facets": ["title"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert!(dist.get("title").is_some());
},
)
.await;
let index = server.index("nested");
index.update_settings(json!({"filterableAttributes": ["father", "doggos.name"]})).await;
let documents = NESTED_DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(3).await;
// TODO: TAMO: fix the test
index
.search(
json!({
// "facets": ["father", "doggos.name"]
"facets": ["father"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert_eq!(
dist["father"],
json!({ "jean": 1, "pierre": 1, "romain": 1, "jean-baptiste": 1})
);
/*
assert_eq!(
dist["doggos.name"],
json!({ "bobby": 1, "buddy": 1, "gros bill": 1, "turbo": 1, "fast": 1})
);
*/
},
)
.await;
index.update_settings(json!({"filterableAttributes": ["doggos"]})).await;
index.wait_task(4).await;
index
.search(
json!({
"facets": ["doggos.name"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 1);
assert_eq!(
dist["doggos.name"],
json!({ "bobby": 1, "buddy": 1, "gros bill": 1, "turbo": 1, "fast": 1})
);
},
)
.await;
index
.search(
json!({
"facets": ["doggos"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let dist = response["facetDistribution"].as_object().unwrap();
assert_eq!(dist.len(), 3);
assert_eq!(
dist["doggos.name"],
json!({ "bobby": 1, "buddy": 1, "gros bill": 1, "turbo": 1, "fast": 1})
);
assert_eq!(dist["doggos.age"], json!({ "2": 1, "4": 1, "5": 1, "6": 1, "8": 1}));
},
)
.await;
}
#[actix_rt::test]
async fn displayed_attributes() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({ "displayedAttributes": ["title"] })).await;
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(1).await;
let (response, code) =
index.search_post(json!({ "attributesToRetrieve": ["title", "id"] })).await;
assert_eq!(code, 200, "{}", response);
assert!(response["hits"][0].get("title").is_some());
}
#[actix_rt::test]
async fn placeholder_search_is_hard_limited() {
let server = Server::new().await;
let index = server.index("test");
let documents: Vec<_> = (0..1200).map(|i| json!({ "id": i, "text": "I am unique!" })).collect();
index.add_documents(documents.into(), None).await;
index.wait_task(0).await;
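// With the default `maxTotalHits` of 1000, results are capped regardless of the requested `limit`/`offset`.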
index
.search(
json!({
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1000);
},
)
.await;
index
.search(
json!({
"offset": 800,
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
.await;
index.update_settings(json!({ "pagination": { "maxTotalHits": 10_000 } })).await;
index.wait_task(1).await;
index
.search(
json!({
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1200);
},
)
.await;
index
.search(
json!({
"offset": 1000,
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
.await;
}
#[actix_rt::test]
async fn search_is_hard_limited() {
let server = Server::new().await;
let index = server.index("test");
let documents: Vec<_> = (0..1200).map(|i| json!({ "id": i, "text": "I am unique!" })).collect();
index.add_documents(documents.into(), None).await;
index.wait_task(0).await;
index
.search(
json!({
"q": "unique",
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1000);
},
)
.await;
index
.search(
json!({
"q": "unique",
"offset": 800,
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
.await;
index.update_settings(json!({ "pagination": { "maxTotalHits": 10_000 } })).await;
index.wait_task(1).await;
index
.search(
json!({
"q": "unique",
"limit": 1500,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1200);
},
)
.await;
index
.search(
json!({
"q": "unique",
"offset": 1000,
"limit": 400,
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 200);
},
)
.await;
}
#[actix_rt::test]
async fn faceting_max_values_per_facet() {
let server = Server::new().await;
let index = server.index("test");
index.update_settings(json!({ "filterableAttributes": ["number"] })).await;
let documents: Vec<_> = (0..10_000).map(|id| json!({ "id": id, "number": id * 10 })).collect();
index.add_documents(json!(documents), None).await;
index.wait_task(1).await;
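// The default `maxValuesPerFacet` is 100, so only 100 of the 10_000 distinct facet values are returned.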
index
.search(
json!({
"facets": ["number"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let numbers = response["facetDistribution"]["number"].as_object().unwrap();
assert_eq!(numbers.len(), 100);
},
)
.await;
index.update_settings(json!({ "faceting": { "maxValuesPerFacet": 10_000 } })).await;
index.wait_task(2).await;
index
.search(
json!({
"facets": ["number"]
}),
|response, code| {
assert_eq!(code, 200, "{}", response);
let numbers = &response["facetDistribution"]["number"].as_object().unwrap();
assert_eq!(numbers.len(), 10_000);
},
)
.await;
}

@@ -0,0 +1,113 @@
use serde_json::json;
use crate::common::Server;
use crate::search::DOCUMENTS;
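// Pagination tests: the exhaustive `totalHits`/`page`/`totalPages` fields are only
// returned when `page` or `hitsPerPage` is set; plain searches get the estimated variants.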
#[actix_rt::test]
async fn default_search_should_return_estimated_total_hit() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert!(response.get("estimatedTotalHits").is_some());
assert!(response.get("limit").is_some());
assert!(response.get("offset").is_some());
// these fields shouldn't be present
assert!(response.get("totalHits").is_none());
assert!(response.get("page").is_none());
assert!(response.get("totalPages").is_none());
})
.await;
}
#[actix_rt::test]
async fn simple_search() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({"page": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 5);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 1);
assert_eq!(response["totalPages"], 1);
// these fields shouldn't be present
assert!(response.get("estimatedTotalHits").is_none());
assert!(response.get("limit").is_none());
assert!(response.get("offset").is_none());
})
.await;
}
#[actix_rt::test]
async fn page_zero_should_not_return_any_result() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({"page": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert!(response.get("totalHits").is_some());
assert_eq!(response["page"], 0);
assert_eq!(response["totalPages"], 1);
})
.await;
}
#[actix_rt::test]
async fn hits_per_page_1() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({"hitsPerPage": 1}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 1);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
assert_eq!(response["totalPages"], 5);
})
.await;
}
#[actix_rt::test]
async fn hits_per_page_0_should_not_return_any_result() {
let server = Server::new().await;
let index = server.index("basic");
let documents = DOCUMENTS.clone();
index.add_documents(documents, None).await;
index.wait_task(0).await;
index
.search(json!({"hitsPerPage": 0}), |response, code| {
assert_eq!(code, 200, "{}", response);
assert_eq!(response["hits"].as_array().unwrap().len(), 0);
assert_eq!(response["totalHits"], 5);
assert_eq!(response["page"], 1);
assert_eq!(response["totalPages"], 0);
})
.await;
}

@@ -0,0 +1,45 @@
use serde_json::json;
use crate::common::Server;
#[actix_rt::test]
async fn set_and_reset_distinct_attribute() {
let server = Server::new().await;
let index = server.index("test");
let (_response, _code) = index.update_settings(json!({ "distinctAttribute": "test"})).await;
index.wait_task(0).await;
let (response, _) = index.settings().await;
assert_eq!(response["distinctAttribute"], "test");
index.update_settings(json!({ "distinctAttribute": null })).await;
index.wait_task(1).await;
let (response, _) = index.settings().await;
assert_eq!(response["distinctAttribute"], json!(null));
}
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let (_response, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(0).await;
let (response, _) = index.get_distinct_attribute().await;
assert_eq!(response, "test");
index.update_distinct_attribute(json!(null)).await;
index.wait_task(1).await;
let (response, _) = index.get_distinct_attribute().await;
assert_eq!(response, json!(null));
}

@@ -0,0 +1,318 @@
use std::collections::HashMap;
use once_cell::sync::Lazy;
use serde_json::{json, Value};
use crate::common::Server;
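// Expected default value for every settings sub-route, keyed by the snake_case
// setting name used by the `test_setting_routes!` macro below.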
static DEFAULT_SETTINGS_VALUES: Lazy<HashMap<&'static str, Value>> = Lazy::new(|| {
let mut map = HashMap::new();
map.insert("displayed_attributes", json!(["*"]));
map.insert("searchable_attributes", json!(["*"]));
map.insert("filterable_attributes", json!([]));
map.insert("distinct_attribute", json!(Value::Null));
map.insert(
"ranking_rules",
json!(["words", "typo", "proximity", "attribute", "sort", "exactness"]),
);
map.insert("stop_words", json!([]));
map.insert("synonyms", json!({}));
map.insert(
"faceting",
json!({
"maxValuesPerFacet": json!(100),
}),
);
map.insert(
"pagination",
json!({
"maxTotalHits": json!(1000),
}),
);
map
});
#[actix_rt::test]
async fn get_settings_unexisting_index() {
let server = Server::new().await;
let (response, code) = server.index("test").settings().await;
assert_eq!(code, 404, "{}", response)
}
#[actix_rt::test]
async fn get_settings() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
index.wait_task(0).await;
let (response, code) = index.settings().await;
assert_eq!(code, 200);
let settings = response.as_object().unwrap();
assert_eq!(settings.keys().len(), 11);
assert_eq!(settings["displayedAttributes"], json!(["*"]));
assert_eq!(settings["searchableAttributes"], json!(["*"]));
assert_eq!(settings["filterableAttributes"], json!([]));
assert_eq!(settings["sortableAttributes"], json!([]));
assert_eq!(settings["distinctAttribute"], json!(null));
assert_eq!(
settings["rankingRules"],
json!(["words", "typo", "proximity", "attribute", "sort", "exactness"])
);
assert_eq!(settings["stopWords"], json!([]));
assert_eq!(
settings["faceting"],
json!({
"maxValuesPerFacet": 100,
})
);
assert_eq!(
settings["pagination"],
json!({
"maxTotalHits": 1000,
})
);
}
#[actix_rt::test]
async fn error_update_settings_unknown_field() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.update_settings(json!({"foo": 12})).await;
assert_eq!(code, 400);
}
#[actix_rt::test]
async fn test_partial_update() {
let server = Server::new().await;
let index = server.index("test");
let (_response, _code) = index.update_settings(json!({"displayedAttributes": ["foo"]})).await;
index.wait_task(0).await;
let (response, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(response["displayedAttributes"], json!(["foo"]));
assert_eq!(response["searchableAttributes"], json!(["*"]));
let (_response, _) = index.update_settings(json!({"searchableAttributes": ["bar"]})).await;
index.wait_task(1).await;
let (response, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(response["displayedAttributes"], json!(["foo"]));
assert_eq!(response["searchableAttributes"], json!(["bar"]));
}
#[actix_rt::test]
async fn error_delete_settings_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.delete_settings().await;
assert_eq!(code, 202);
let response = index.wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn reset_all_settings() {
let server = Server::new().await;
let index = server.index("test");
let documents = json!([
{
"id": 1,
"name": "curqui",
"age": 99
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202);
assert_eq!(response["taskUid"], 0);
index.wait_task(0).await;
index
.update_settings(json!({"displayedAttributes": ["name", "age"], "searchableAttributes": ["name"], "stopWords": ["the"], "filterableAttributes": ["age"], "synonyms": {"puppy": ["dog", "doggo", "potat"] }}))
.await;
index.wait_task(1).await;
let (response, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(response["displayedAttributes"], json!(["name", "age"]));
assert_eq!(response["searchableAttributes"], json!(["name"]));
assert_eq!(response["stopWords"], json!(["the"]));
assert_eq!(response["synonyms"], json!({"puppy": ["dog", "doggo", "potat"] }));
assert_eq!(response["filterableAttributes"], json!(["age"]));
index.delete_settings().await;
index.wait_task(2).await;
let (response, code) = index.settings().await;
assert_eq!(code, 200);
assert_eq!(response["displayedAttributes"], json!(["*"]));
assert_eq!(response["searchableAttributes"], json!(["*"]));
assert_eq!(response["stopWords"], json!([]));
assert_eq!(response["filterableAttributes"], json!([]));
assert_eq!(response["synonyms"], json!({}));
let (response, code) = index.get_document(1, None).await;
assert_eq!(code, 200);
assert!(response.as_object().unwrap().get("age").is_some());
}
#[actix_rt::test]
async fn update_setting_unexisting_index() {
let server = Server::new().await;
let index = server.index("test");
let (_response, code) = index.update_settings(json!({})).await;
assert_eq!(code, 202);
let response = index.wait_task(0).await;
assert_eq!(response["status"], "succeeded");
let (_response, code) = index.get().await;
assert_eq!(code, 200);
index.delete_settings().await;
let response = index.wait_task(1).await;
assert_eq!(response["status"], "succeeded");
}
#[actix_rt::test]
async fn error_update_setting_unexisting_index_invalid_uid() {
let server = Server::new().await;
let index = server.index("test##! ");
let (response, code) = index.update_settings(json!({})).await;
assert_eq!(code, 400);
let expected = json!({
"message": "`test##! ` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).",
"code": "invalid_index_uid",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_index_uid"});
assert_eq!(response, expected);
}
macro_rules! test_setting_routes {
($($setting:ident $write_method:ident), *) => {
$(
mod $setting {
use crate::common::Server;
use super::DEFAULT_SETTINGS_VALUES;
#[actix_rt::test]
async fn get_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_response, code) = server.service.get(url).await;
assert_eq!(code, 404);
}
#[actix_rt::test]
async fn update_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.$write_method(url, serde_json::Value::Null).await;
assert_eq!(code, 202, "{}", response);
server.index("").wait_task(0).await;
let (response, code) = server.index("test").get().await;
assert_eq!(code, 200, "{}", response);
}
#[actix_rt::test]
async fn delete_unexisting_index() {
let server = Server::new().await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (_, code) = server.service.delete(url).await;
assert_eq!(code, 202);
let response = server.index("").wait_task(0).await;
assert_eq!(response["status"], "failed");
}
#[actix_rt::test]
async fn get_default() {
let server = Server::new().await;
let index = server.index("test");
let (response, code) = index.create(None).await;
assert_eq!(code, 202, "{}", response);
index.wait_task(0).await;
let url = format!("/indexes/test/settings/{}",
stringify!($setting)
.chars()
.map(|c| if c == '_' { '-' } else { c })
.collect::<String>());
let (response, code) = server.service.get(url).await;
assert_eq!(code, 200, "{}", response);
let expected = DEFAULT_SETTINGS_VALUES.get(stringify!($setting)).unwrap();
assert_eq!(expected, &response);
}
}
)*
};
}
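// Instantiate the generated test module for every settings sub-route, together
// with the HTTP verb its update endpoint expects.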
test_setting_routes!(
filterable_attributes put,
displayed_attributes put,
searchable_attributes put,
distinct_attribute put,
stop_words put,
ranking_rules put,
synonyms put,
pagination patch,
faceting patch
);
#[actix_rt::test]
async fn error_set_invalid_ranking_rules() {
let server = Server::new().await;
let index = server.index("test");
index.create(None).await;
let (_response, _code) =
index.update_settings(json!({ "rankingRules": [ "manyTheFish"]})).await;
index.wait_task(1).await;
let (response, code) = index.get_task(1).await;
assert_eq!(code, 200);
assert_eq!(response["status"], "failed");
let expected_error = json!({
"message": r#"`manyTheFish` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules."#,
"code": "invalid_ranking_rule",
"type": "invalid_request",
"link": "https://docs.meilisearch.com/errors#invalid_ranking_rule"
});
assert_eq!(response["error"], expected_error);
}
#[actix_rt::test]
async fn set_and_reset_distinct_attribute_with_dedicated_route() {
let server = Server::new().await;
let index = server.index("test");
let (_response, _code) = index.update_distinct_attribute(json!("test")).await;
index.wait_task(0).await;
let (response, _) = index.get_distinct_attribute().await;
assert_eq!(response, "test");
index.update_distinct_attribute(json!(null)).await;
index.wait_task(1).await;
let (response, _) = index.get_distinct_attribute().await;
assert_eq!(response, json!(null));
}

@@ -0,0 +1,2 @@
mod distinct;
mod get_settings;

@@ -0,0 +1,80 @@
use std::time::Duration;
use meilisearch::Opt;
use tokio::time::sleep;
use crate::common::server::default_settings;
use crate::common::{GetAllDocumentsOptions, Server};
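// Runs each request against both the original server and the one restored from
// its snapshot, and asserts that the two responses are identical.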
macro_rules! verify_snapshot {
(
$orig:expr,
$snapshot: expr,
|$server:ident| =>
$($e:expr,)+) => {
use std::sync::Arc;
let snapshot = Arc::new($snapshot);
let orig = Arc::new($orig);
$(
{
let test = |$server: Arc<Server>| async move {
$e.await
};
let (snapshot, _) = test(snapshot.clone()).await;
let (orig, _) = test(orig.clone()).await;
assert_eq!(snapshot, orig);
}
)*
};
}
#[actix_rt::test]
#[ignore] // TODO: unignore
async fn perform_snapshot() {
let temp = tempfile::tempdir().unwrap();
let snapshot_dir = tempfile::tempdir().unwrap();
let options = Opt {
snapshot_dir: snapshot_dir.path().to_owned(),
snapshot_interval_sec: 1,
schedule_snapshot: true,
..default_settings(temp.path())
};
let server = Server::new_with_options(options).await.unwrap();
let index = server.index("test");
index
.update_settings(serde_json::json!({
"searchableAttributes": [],
}))
.await;
index.load_test_set().await;
server.index("test1").create(Some("prim")).await;
index.wait_task(2).await;
sleep(Duration::from_secs(2)).await;
let temp = tempfile::tempdir().unwrap();
let snapshot_path = snapshot_dir.path().to_owned().join("db.snapshot");
let options = Opt { import_snapshot: Some(snapshot_path), ..default_settings(temp.path()) };
let snapshot_server = Server::new_with_options(options).await.unwrap();
verify_snapshot!(server, snapshot_server, |server| =>
server.list_indexes(None, None),
// for some reason the db sizes differ. this may be due to the compaction options we have
// set when performing the snapshot
//server.stats(),
server.tasks(),
server.index("test").get_all_documents(GetAllDocumentsOptions::default()),
server.index("test").settings(),
server.index("test1").get_all_documents(GetAllDocumentsOptions::default()),
server.index("test1").settings(),
);
}

@@ -0,0 +1,76 @@
use serde_json::json;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
use crate::common::Server;
#[actix_rt::test]
async fn get_version() {
let server = Server::new().await;
let (response, code) = server.version().await;
assert_eq!(code, 200);
let version = response.as_object().unwrap();
assert!(version.get("commitSha").is_some());
assert!(version.get("commitDate").is_some());
assert!(version.get("pkgVersion").is_some());
}
#[actix_rt::test]
async fn test_healthyness() {
let server = Server::new().await;
let (response, status_code) = server.service.get("/health").await;
assert_eq!(status_code, 200);
assert_eq!(response["status"], "available");
}
#[actix_rt::test]
async fn stats() {
let server = Server::new().await;
let index = server.index("test");
let (_, code) = index.create(Some("id")).await;
assert_eq!(code, 202);
index.wait_task(0).await;
let (response, code) = server.stats().await;
assert_eq!(code, 200);
assert!(response.get("databaseSize").is_some());
assert!(response.get("lastUpdate").is_some());
assert!(response["indexes"].get("test").is_some());
assert_eq!(response["indexes"]["test"]["numberOfDocuments"], 0);
assert_eq!(response["indexes"]["test"]["isIndexing"], false);
let documents = json!([
{
"id": 1,
"name": "Alexey",
},
{
"id": 2,
"age": 45,
}
]);
let (response, code) = index.add_documents(documents, None).await;
assert_eq!(code, 202, "{}", response);
assert_eq!(response["taskUid"], 1);
index.wait_task(1).await;
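// `lastUpdate` should reflect the indexing task that just finished, hence the timestamp comparison below.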
let timestamp = OffsetDateTime::now_utc();
let (response, code) = server.stats().await;
assert_eq!(code, 200);
assert!(response["databaseSize"].as_u64().unwrap() > 0);
let last_update =
OffsetDateTime::parse(response["lastUpdate"].as_str().unwrap(), &Rfc3339).unwrap();
assert!(last_update - timestamp < time::Duration::SECOND);
assert_eq!(response["indexes"]["test"]["numberOfDocuments"], 2);
assert_eq!(response["indexes"]["test"]["isIndexing"], false);
assert_eq!(response["indexes"]["test"]["fieldDistribution"]["id"], 2);
assert_eq!(response["indexes"]["test"]["fieldDistribution"]["name"], 1);
assert_eq!(response["indexes"]["test"]["fieldDistribution"]["age"], 1);
}

File diff suppressed because it is too large