mirror of
https://github.com/meilisearch/MeiliSearch
synced 2024-12-26 14:40:05 +01:00
Merge #3417
3417: Allow multiple searches in a single request r=irevoire a=dureuill

# Pull Request

## Related issue
Fixes #3427

## What does this PR do?

### User standpoint

- Adds a new `/multi-search` entry point (not to be confused with the existing `/{index_uid}/search` entry points) that accepts a POST whose body is an object containing an array of queries.
- Each query must specify the index it acts on by providing its `indexUid`. The other parameters are identical to the ones in the existing search routes (`q`, `limit`, etc.).
- The response is a JSON object containing an array with the results of each search query, as if each had been performed using the `/{index_uid}/search` route.

### Implementation standpoint

- Refactor the authentication module:
  - Allow a tenant token to be checked even without an index in the URL.
  - Add `meilisearch-auth` as a dependency of `index-scheduler`, so that it has a working method for checking whether indexes are authorized that takes both the API key and the tenant token into account (the existing method relied on a behavior that returned the allowed indexes from the API key only as long as there was no tenant token).
  - Make `AuthFilter` an object with invariants; its fields are now private.
  - Use the methods of `AuthFilter` to know whether an index is authorized, rather than relying on its internal search rules.
  - Make the tenant token search rules optional: they are `None` when the `AuthFilter` was not built from a tenant token.
- Add a new `routes::multi_search` module containing a POST handler that performs the same work as the existing `routes::indexes::search` POST handler, but in a loop (a simplified sketch of this flow appears after this description).
- Add various tests.
- Add an authentication test suite.

### Sample request

<details>
<summary>Click to see request/response</summary>

```json
~/datasets ❯ curl \
  -X POST 'http://localhost:7700/multi-search' \
  -H 'Content-Type: application/json' \
  --data-binary '{"queries": [{ "indexUid": "index-0", "q": "toto", "limit": 1 }, {"indexUid": "index-1", "q": "titi", "limit": 1}]}' | jsonxf
{
  "results": [
    {
      "indexUid": "index-0",
      "hits": [
        {
          "id": 20480,
          "title": "Toto - 25th Anniversary - Live in Amsterdam",
          "overview": "Filmed in High Definition in Amsterdam on Toto's 25th Anniversary Tour in 2003, this stunning concert captures the band at their very best, reunited with original vocalist Bobby Kimball. The set combines all their hits with tracks from their latest album \"Through the Looking Glass\" and other live favorites, performed in front of a wildly enthusastic sell-out crowd. Extras include 35 minute behind-the-scenes film following the band through various stages of their world tour including footage from Japan, Thailand, South Korea, and France. Toto celebrate their 25th anniversary with this blistering live concert, filmed in Amsterdam on February 25th, 2003. Proving they've still got exactly what it takes to move a crowd, the band perform a mixture of medley's, solo spots, and huge hits. Tracks include \"Rosanna,\" \"Africa,\" \"Hold The Line,\" a cover of the Beatles' \"While My Guitar Gently Weeps,\" and many more.",
          "genres": [
            "Music"
          ],
          "poster": "https://image.tmdb.org/t/p/w500/7SCbUPwoB8Z7VUIA1Rn1WWwjNiT.jpg",
          "release_date": 1064275200
        }
      ],
      "query": "toto",
      "processingTimeMs": 1,
      "limit": 1,
      "offset": 0,
      "estimatedTotalHits": 17
    },
    {
      "indexUid": "index-1",
      "hits": [
        {
          "id": 41212,
          "title": "Titicut Follies",
          "overview": "The film is a stark and graphic portrayal of the conditions that existed at the State Prison for the Criminally Insane at Bridgewater, Massachusetts. TITICUT FOLLIES documents the various ways the inmates are treated by the guards, social workers and psychiatrists.",
          "genres": [
            "Documentary"
          ],
          "poster": "https://image.tmdb.org/t/p/w500/2Ju5hn1ofOPeP1eRJtQWakiHuhW.jpg",
          "release_date": -70934400
        }
      ],
      "query": "titi",
      "processingTimeMs": 0,
      "limit": 1,
      "offset": 0,
      "estimatedTotalHits": 7
    }
  ]
}
```

</details>

## PR checklist
Please check if your PR fulfills the following requirements:
- [ ] Does this PR fix an existing issue, or have you listed the changes applied in the PR description (and why they are needed)?
- [ ] Have you read the contributing guidelines?
- [ ] Have you made sure that the title is accurate and descriptive of the changes?

Thank you so much for contributing to Meilisearch!

Co-authored-by: Louis Dureuil <louis@meilisearch.com>
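To make the per-query authorization flow described above concrete, here is a minimal, standalone Rust sketch. The types `AuthFilter`, `SearchQuery`, `run_search`, and `multi_search` below are simplified stand-ins for illustration, not the actual Meilisearch implementation:

```rust
use std::collections::HashSet;

/// Simplified stand-in for the real `AuthFilter`: `None` means every index is authorized.
struct AuthFilter {
    allowed: Option<HashSet<String>>,
}

impl AuthFilter {
    fn is_index_authorized(&self, index: &str) -> bool {
        self.allowed.as_ref().map_or(true, |set| set.contains(index))
    }
}

struct SearchQuery {
    index_uid: String,
    q: String,
}

/// Stand-in for running one search on one index.
fn run_search(query: &SearchQuery) -> String {
    format!("results for `{}` on `{}`", query.q, query.index_uid)
}

/// Each query is authorized and executed individually, mirroring the handler's loop;
/// the first failure aborts the whole request and reports the offending query's position.
fn multi_search(filter: &AuthFilter, queries: Vec<SearchQuery>) -> Result<Vec<String>, String> {
    let mut results = Vec::with_capacity(queries.len());
    for (query_index, query) in queries.iter().enumerate() {
        if !filter.is_index_authorized(&query.index_uid) {
            return Err(format!("Inside `.queries[{query_index}]`: invalid API key"));
        }
        results.push(run_search(query));
    }
    Ok(results)
}

fn main() {
    let filter = AuthFilter { allowed: Some(["index-0".into()].into_iter().collect()) };
    let queries = vec![
        SearchQuery { index_uid: "index-0".into(), q: "toto".into() },
        SearchQuery { index_uid: "index-1".into(), q: "titi".into() },
    ];
    // index-1 is not authorized here, so the whole request fails with that query's position.
    println!("{:?}", multi_search(&filter, queries));
}
```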
This commit is contained in:
commit 6eeba3a8ab

Cargo.lock (generated)
@ -1904,6 +1904,7 @@ dependencies = [
  "insta",
  "log",
  "meili-snap",
+ "meilisearch-auth",
  "meilisearch-types",
  "nelson",
  "page_size 0.5.0",
@ -19,6 +19,7 @@ dump = { path = "../dump" }
 enum-iterator = "1.1.3"
 file-store = { path = "../file-store" }
 log = "0.4.14"
+meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
 page_size = "0.5.0"
 roaring = { version = "0.10.0", features = ["serde"] }
@ -43,7 +43,6 @@ use file_store::FileStore;
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::heed::types::{OwnedType, SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{self, Database, Env, RoTxn};
-use meilisearch_types::index_uid_pattern::IndexUidPattern;
 use meilisearch_types::milli;
 use meilisearch_types::milli::documents::DocumentsBatchBuilder;
 use meilisearch_types::milli::update::IndexerConfig;
@ -630,13 +629,13 @@ impl IndexScheduler {
         &self,
         rtxn: &RoTxn,
         query: &Query,
-        authorized_indexes: &Option<Vec<IndexUidPattern>>,
+        filters: &meilisearch_auth::AuthFilter,
     ) -> Result<RoaringBitmap> {
         let mut tasks = self.get_task_ids(rtxn, query)?;

         // If the query contains a list of index uids or there is a finite list of authorized indexes,
         // then we must exclude all the kinds that aren't associated to one and only one index.
-        if query.index_uids.is_some() || authorized_indexes.is_some() {
+        if query.index_uids.is_some() || !filters.all_indexes_authorized() {
             for kind in enum_iterator::all::<Kind>().filter(|kind| !kind.related_to_one_index()) {
                 tasks -= self.get_kind(rtxn, kind)?;
             }
@ -644,11 +643,11 @@ impl IndexScheduler {

         // Any task that is internally associated with a non-authorized index
         // must be discarded.
-        if let Some(authorized_indexes) = authorized_indexes {
+        if !filters.all_indexes_authorized() {
             let all_indexes_iter = self.index_tasks.iter(rtxn)?;
             for result in all_indexes_iter {
                 let (index, index_tasks) = result?;
-                if !authorized_indexes.iter().any(|p| p.matches_str(index)) {
+                if !filters.is_index_authorized(index) {
                     tasks -= index_tasks;
                 }
             }
@ -668,12 +667,11 @@ impl IndexScheduler {
     pub fn get_tasks_from_authorized_indexes(
         &self,
         query: Query,
-        authorized_indexes: Option<Vec<IndexUidPattern>>,
+        filters: &meilisearch_auth::AuthFilter,
     ) -> Result<Vec<Task>> {
         let rtxn = self.env.read_txn()?;

-        let tasks =
-            self.get_task_ids_from_authorized_indexes(&rtxn, &query, &authorized_indexes)?;
+        let tasks = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;

         let tasks = self.get_existing_tasks(
             &rtxn,
@ -1120,7 +1118,9 @@ mod tests {
     use crossbeam::channel::RecvTimeoutError;
     use file_store::File;
     use meili_snap::snapshot;
+    use meilisearch_auth::AuthFilter;
     use meilisearch_types::document_formats::DocumentFormatError;
+    use meilisearch_types::index_uid_pattern::IndexUidPattern;
     use meilisearch_types::milli::obkv_to_json;
     use meilisearch_types::milli::update::IndexDocumentsMethod::{
         ReplaceDocuments, UpdateDocuments,
@ -2371,38 +2371,45 @@ mod tests {

        let rtxn = index_scheduler.env.read_txn().unwrap();
        let query = Query { limit: Some(0), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[]");

        let query = Query { limit: Some(1), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");

        let query = Query { limit: Some(2), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");

        let query = Query { from: Some(1), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");

        let query = Query { from: Some(2), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");

        let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[1,]");

        let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");
    }

@ -2427,21 +2434,24 @@ mod tests {
        let rtxn = index_scheduler.env.read_txn().unwrap();

        let query = Query { statuses: Some(vec![Status::Processing]), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[0,]"); // only the processing tasks in the first tick

        let query = Query { statuses: Some(vec![Status::Enqueued]), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[1,2,]"); // only the enqueued tasks in the first tick

        let query = Query {
            statuses: Some(vec![Status::Enqueued, Status::Processing]),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]"); // both enqueued and processing tasks in the first tick

        let query = Query {
@ -2449,8 +2459,9 @@ mod tests {
            after_started_at: Some(start_time),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both enqueued and processing tasks in the first tick, but limited to those with a started_at
        // that comes after the start of the test, which should exclude the enqueued tasks
        snapshot!(snapshot_bitmap(&tasks), @"[0,]");
@ -2460,8 +2471,9 @@ mod tests {
            before_started_at: Some(start_time),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both enqueued and processing tasks in the first tick, but limited to those with a started_at
        // that comes before the start of the test, which should exclude all of them
        snapshot!(snapshot_bitmap(&tasks), @"[]");
@ -2472,8 +2484,9 @@ mod tests {
            before_started_at: Some(start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both enqueued and processing tasks in the first tick, but limited to those with a started_at
        // that comes after the start of the test and before one minute after the start of the test,
        // which should exclude the enqueued tasks and include the only processing task
@ -2498,8 +2511,9 @@ mod tests {
            before_started_at: Some(start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both succeeded and processing tasks in the first tick, but limited to those with a started_at
        // that comes after the start of the test and before one minute after the start of the test,
        // which should include all tasks
@ -2510,8 +2524,9 @@ mod tests {
            before_started_at: Some(start_time),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both succeeded and processing tasks in the first tick, but limited to those with a started_at
        // that comes before the start of the test, which should exclude all tasks
        snapshot!(snapshot_bitmap(&tasks), @"[]");
@ -2522,8 +2537,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // both succeeded and processing tasks in the first tick, but limited to those with a started_at
        // that comes after the start of the second part of the test and before one minute after the
        // second start of the test, which should exclude all tasks
@ -2541,8 +2557,9 @@ mod tests {

        let rtxn = index_scheduler.env.read_txn().unwrap();

-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // we run the same query to verify that, and indeed find that the last task is matched
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@ -2552,8 +2569,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // enqueued, succeeded, or processing tasks started after the second part of the test, should
        // again only return the last task
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");
@ -2563,8 +2581,9 @@ mod tests {

        // now the last task should have failed
        snapshot!(snapshot_index_scheduler(&index_scheduler), name: "end");
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // so running the last query should return nothing
        snapshot!(snapshot_bitmap(&tasks), @"[]");

@ -2574,8 +2593,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // but the same query on failed tasks should return the last task
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@ -2585,8 +2605,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // but the same query on failed tasks should return the last task
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@ -2597,8 +2618,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // same query but with an invalid uid
        snapshot!(snapshot_bitmap(&tasks), @"[]");

@ -2609,8 +2631,9 @@ mod tests {
            before_started_at: Some(second_start_time + Duration::minutes(1)),
            ..Default::default()
        };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // same query but with a valid uid
        snapshot!(snapshot_bitmap(&tasks), @"[2,]");
    }

@ -2640,8 +2663,9 @@ mod tests {
        let rtxn = index_scheduler.env.read_txn().unwrap();

        let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // only the first task associated with catto is returned, the indexSwap tasks are excluded!
        snapshot!(snapshot_bitmap(&tasks), @"[0,]");

@ -2650,7 +2674,9 @@ mod tests {
            .get_task_ids_from_authorized_indexes(
                &rtxn,
                &query,
-                &Some(vec![IndexUidPattern::new_unchecked("doggo")]),
+                &AuthFilter::with_allowed_indexes(
+                    vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
+                ),
            )
            .unwrap();
        // we have asked for only the tasks associated with catto, but are only authorized to retrieve the tasks
@ -2662,7 +2688,9 @@ mod tests {
            .get_task_ids_from_authorized_indexes(
                &rtxn,
                &query,
-                &Some(vec![IndexUidPattern::new_unchecked("doggo")]),
+                &AuthFilter::with_allowed_indexes(
+                    vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
+                ),
            )
            .unwrap();
        // we asked for all the tasks, but we are only authorized to retrieve the doggo tasks
@ -2674,10 +2702,14 @@ mod tests {
            .get_task_ids_from_authorized_indexes(
                &rtxn,
                &query,
-                &Some(vec![
-                    IndexUidPattern::new_unchecked("catto"),
-                    IndexUidPattern::new_unchecked("doggo"),
-                ]),
+                &AuthFilter::with_allowed_indexes(
+                    vec![
+                        IndexUidPattern::new_unchecked("catto"),
+                        IndexUidPattern::new_unchecked("doggo"),
+                    ]
+                    .into_iter()
+                    .collect(),
+                ),
            )
            .unwrap();
        // we asked for all the tasks, but we are only authorized to retrieve the doggo and catto tasks
@ -2685,8 +2717,9 @@ mod tests {
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");

        let query = Query::default();
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // we asked for all the tasks with all index authorized -> all tasks returned
        snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,3,]");
    }

@ -2717,8 +2750,9 @@ mod tests {

        let rtxn = index_scheduler.read_txn().unwrap();
        let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
-        let tasks =
-            index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
+        let tasks = index_scheduler
+            .get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
+            .unwrap();
        // 0 is not returned because it was not canceled, 3 is not returned because it is the uid of the
        // taskCancelation itself
        snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");

@ -2728,7 +2762,9 @@ mod tests {
            .get_task_ids_from_authorized_indexes(
                &rtxn,
                &query,
-                &Some(vec![IndexUidPattern::new_unchecked("doggo")]),
+                &AuthFilter::with_allowed_indexes(
+                    vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
+                ),
            )
            .unwrap();
        // Return only 1 because the user is not authorized to see task 2

@ -85,17 +85,13 @@ impl AuthController {
        uid: Uuid,
        search_rules: Option<SearchRules>,
    ) -> Result<AuthFilter> {
-        let mut filters = AuthFilter::default();
        let key = self.get_key(uid)?;

-        filters.search_rules = match search_rules {
-            Some(search_rules) => search_rules,
-            None => SearchRules::Set(key.indexes.into_iter().collect()),
-        };
+        let key_authorized_indexes = SearchRules::Set(key.indexes.into_iter().collect());

-        filters.allow_index_creation = self.is_key_authorized(uid, Action::IndexesAdd, None)?;
+        let allow_index_creation = self.is_key_authorized(uid, Action::IndexesAdd, None)?;

-        Ok(filters)
+        Ok(AuthFilter { search_rules, key_authorized_indexes, allow_index_creation })
    }

    pub fn list_keys(&self) -> Result<Vec<Key>> {
@ -160,13 +156,59 @@ impl AuthController {
 }

 pub struct AuthFilter {
-    pub search_rules: SearchRules,
-    pub allow_index_creation: bool,
+    search_rules: Option<SearchRules>,
+    key_authorized_indexes: SearchRules,
+    allow_index_creation: bool,
 }

 impl Default for AuthFilter {
     fn default() -> Self {
-        Self { search_rules: SearchRules::default(), allow_index_creation: true }
+        Self {
+            search_rules: None,
+            key_authorized_indexes: SearchRules::default(),
+            allow_index_creation: true,
+        }
     }
 }

+impl AuthFilter {
+    #[inline]
+    pub fn allow_index_creation(&self, index: &str) -> bool {
+        self.allow_index_creation && self.is_index_authorized(index)
+    }
+
+    pub fn with_allowed_indexes(allowed_indexes: HashSet<IndexUidPattern>) -> Self {
+        Self {
+            search_rules: None,
+            key_authorized_indexes: SearchRules::Set(allowed_indexes),
+            allow_index_creation: false,
+        }
+    }
+
+    pub fn all_indexes_authorized(&self) -> bool {
+        self.key_authorized_indexes.all_indexes_authorized()
+            && self
+                .search_rules
+                .as_ref()
+                .map(|search_rules| search_rules.all_indexes_authorized())
+                .unwrap_or(true)
+    }
+
+    pub fn is_index_authorized(&self, index: &str) -> bool {
+        self.key_authorized_indexes.is_index_authorized(index)
+            && self
+                .search_rules
+                .as_ref()
+                .map(|search_rules| search_rules.is_index_authorized(index))
+                .unwrap_or(true)
+    }
+
+    pub fn get_index_search_rules(&self, index: &str) -> Option<IndexSearchRules> {
+        if !self.is_index_authorized(index) {
+            return None;
+        }
+        let search_rules = self.search_rules.as_ref().unwrap_or(&self.key_authorized_indexes);
+        search_rules.get_index_search_rules(index)
+    }
+}
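The invariant encoded above: an index is authorized only when both layers agree, i.e. the API key allows it and, when a tenant token is present, the token's search rules allow it too. A simplified standalone model of that conjunction (made-up types for illustration, not the crate's actual API):

```rust
use std::collections::HashSet;

// Simplified model: `key_authorized_indexes` comes from the API key,
// `tenant_rules` is `None` when no tenant token was used.
struct Filter {
    key_authorized_indexes: HashSet<String>,
    tenant_rules: Option<HashSet<String>>,
}

impl Filter {
    fn is_index_authorized(&self, index: &str) -> bool {
        self.key_authorized_indexes.contains(index)
            && self.tenant_rules.as_ref().map_or(true, |rules| rules.contains(index))
    }
}

fn main() {
    let filter = Filter {
        key_authorized_indexes: ["movies".into(), "books".into()].into_iter().collect(),
        tenant_rules: Some(["movies".into()].into_iter().collect()),
    };
    assert!(filter.is_index_authorized("movies"));
    // The key allows "books", but the tenant token does not, so it is rejected.
    assert!(!filter.is_index_authorized("books"));
}
```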
@ -185,7 +227,7 @@ impl Default for SearchRules {
 }

 impl SearchRules {
-    pub fn is_index_authorized(&self, index: &str) -> bool {
+    fn is_index_authorized(&self, index: &str) -> bool {
         match self {
             Self::Set(set) => {
                 set.contains("*")
@ -200,7 +242,7 @@ impl SearchRules {
         }
     }

-    pub fn get_index_search_rules(&self, index: &str) -> Option<IndexSearchRules> {
+    fn get_index_search_rules(&self, index: &str) -> Option<IndexSearchRules> {
         match self {
             Self::Set(_) => {
                 if self.is_index_authorized(index) {
@ -219,24 +261,10 @@ impl SearchRules {
         }
     }

-    /// Return the list of indexes such that `self.is_index_authorized(index) == true`,
-    /// or `None` if all indexes satisfy this condition.
-    pub fn authorized_indexes(&self) -> Option<Vec<IndexUidPattern>> {
+    fn all_indexes_authorized(&self) -> bool {
         match self {
-            SearchRules::Set(set) => {
-                if set.contains("*") {
-                    None
-                } else {
-                    Some(set.iter().cloned().collect())
-                }
-            }
-            SearchRules::Map(map) => {
-                if map.contains_key("*") {
-                    None
-                } else {
-                    Some(map.keys().cloned().collect())
-                }
-            }
+            SearchRules::Set(set) => set.contains("*"),
+            SearchRules::Map(map) => map.contains_key("*"),
         }
     }
 }
@ -11,8 +11,8 @@ use serde::{Deserialize, Serialize};
 #[serde(rename_all = "camelCase")]
 pub struct ResponseError {
     #[serde(skip)]
-    code: StatusCode,
-    message: String,
+    pub code: StatusCode,
+    pub message: String,
     #[serde(rename = "code")]
     error_code: String,
     #[serde(rename = "type")]
@ -26,6 +26,18 @@ impl SearchAggregator {
     pub fn succeed(&mut self, _: &dyn Any) {}
 }

+#[derive(Default)]
+pub struct MultiSearchAggregator;
+
+#[allow(dead_code)]
+impl MultiSearchAggregator {
+    pub fn from_queries(_: &dyn Any, _: &dyn Any) -> Self {
+        Self::default()
+    }
+
+    pub fn succeed(&mut self) {}
+}
+
 impl MockAnalytics {
     #[allow(clippy::new_ret_no_self)]
     pub fn new(opt: &Opt) -> Arc<dyn Analytics> {
@ -43,6 +55,7 @@ impl Analytics for MockAnalytics {
     fn publish(&self, _event_name: String, _send: Value, _request: Option<&HttpRequest>) {}
     fn get_search(&self, _aggregate: super::SearchAggregator) {}
     fn post_search(&self, _aggregate: super::SearchAggregator) {}
+    fn post_multi_search(&self, _aggregate: super::MultiSearchAggregator) {}
     fn add_documents(
         &self,
         _documents_query: &UpdateDocumentsQuery,
@ -23,6 +23,8 @@ use crate::routes::tasks::TasksFilterQuery;
 pub type SegmentAnalytics = mock_analytics::MockAnalytics;
 #[cfg(any(debug_assertions, not(feature = "analytics")))]
 pub type SearchAggregator = mock_analytics::SearchAggregator;
+#[cfg(any(debug_assertions, not(feature = "analytics")))]
+pub type MultiSearchAggregator = mock_analytics::MultiSearchAggregator;

 // if we are in release mode and the feature analytics was enabled
 // we use the real analytics
@ -30,6 +32,8 @@ pub type SearchAggregator = mock_analytics::SearchAggregator;
 pub type SegmentAnalytics = segment_analytics::SegmentAnalytics;
 #[cfg(all(not(debug_assertions), feature = "analytics"))]
 pub type SearchAggregator = segment_analytics::SearchAggregator;
+#[cfg(all(not(debug_assertions), feature = "analytics"))]
+pub type MultiSearchAggregator = segment_analytics::MultiSearchAggregator;

 /// The Meilisearch config dir:
 /// `~/.config/Meilisearch` on *NIX or *BSD.
@ -74,6 +78,9 @@ pub trait Analytics: Sync + Send {
     /// This method should be called to aggregate a post search
     fn post_search(&self, aggregate: SearchAggregator);

+    /// This method should be called to aggregate a post array of searches
+    fn post_multi_search(&self, aggregate: MultiSearchAggregator);
+
     // this method should be called to aggregate an add documents request
     fn add_documents(
         &self,
@ -9,7 +9,7 @@ use actix_web::HttpRequest;
 use byte_unit::Byte;
 use http::header::CONTENT_TYPE;
 use index_scheduler::IndexScheduler;
-use meilisearch_auth::{AuthController, SearchRules};
+use meilisearch_auth::{AuthController, AuthFilter};
 use meilisearch_types::InstanceUid;
 use once_cell::sync::Lazy;
 use regex::Regex;
@ -30,7 +30,7 @@ use crate::routes::indexes::documents::UpdateDocumentsQuery;
 use crate::routes::tasks::TasksFilterQuery;
 use crate::routes::{create_all_stats, Stats};
 use crate::search::{
-    SearchQuery, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
+    SearchQuery, SearchQueryWithIndex, SearchResult, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
     DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
 };
 use crate::Opt;
@ -68,6 +68,7 @@ pub enum AnalyticsMsg {
     BatchMessage(Track),
     AggregateGetSearch(SearchAggregator),
     AggregatePostSearch(SearchAggregator),
+    AggregatePostMultiSearch(MultiSearchAggregator),
     AggregateAddDocuments(DocumentsAggregator),
     AggregateDeleteDocuments(DocumentsDeletionAggregator),
     AggregateUpdateDocuments(DocumentsAggregator),
@ -133,6 +134,7 @@ impl SegmentAnalytics {
             opt: opt.clone(),
             batcher,
             post_search_aggregator: SearchAggregator::default(),
+            post_multi_search_aggregator: MultiSearchAggregator::default(),
             get_search_aggregator: SearchAggregator::default(),
             add_documents_aggregator: DocumentsAggregator::default(),
             delete_documents_aggregator: DocumentsDeletionAggregator::default(),
@ -174,6 +176,10 @@ impl super::Analytics for SegmentAnalytics {
         let _ = self.sender.try_send(AnalyticsMsg::AggregatePostSearch(aggregate));
     }

+    fn post_multi_search(&self, aggregate: MultiSearchAggregator) {
+        let _ = self.sender.try_send(AnalyticsMsg::AggregatePostMultiSearch(aggregate));
+    }
+
     fn add_documents(
         &self,
         documents_query: &UpdateDocumentsQuery,
@ -326,6 +332,7 @@ pub struct Segment {
     batcher: AutoBatcher,
     get_search_aggregator: SearchAggregator,
     post_search_aggregator: SearchAggregator,
+    post_multi_search_aggregator: MultiSearchAggregator,
     add_documents_aggregator: DocumentsAggregator,
     delete_documents_aggregator: DocumentsDeletionAggregator,
     update_documents_aggregator: DocumentsAggregator,
@ -383,6 +390,7 @@ impl Segment {
                    Some(AnalyticsMsg::BatchMessage(msg)) => drop(self.batcher.push(msg).await),
                    Some(AnalyticsMsg::AggregateGetSearch(agreg)) => self.get_search_aggregator.aggregate(agreg),
                    Some(AnalyticsMsg::AggregatePostSearch(agreg)) => self.post_search_aggregator.aggregate(agreg),
+                   Some(AnalyticsMsg::AggregatePostMultiSearch(agreg)) => self.post_multi_search_aggregator.aggregate(agreg),
                    Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg),
                    Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg),
                    Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg),
@ -401,7 +409,7 @@ impl Segment {
         auth_controller: AuthController,
     ) {
         if let Ok(stats) =
-            create_all_stats(index_scheduler.into(), auth_controller, &SearchRules::default())
+            create_all_stats(index_scheduler.into(), auth_controller, &AuthFilter::default())
         {
             // Replace the version number with the prototype name if any.
             let version = if let Some(prototype) = crate::prototype_name() {
@ -428,6 +436,8 @@ impl Segment {
             .into_event(&self.user, "Documents Searched GET");
         let post_search = std::mem::take(&mut self.post_search_aggregator)
             .into_event(&self.user, "Documents Searched POST");
+        let post_multi_search = std::mem::take(&mut self.post_multi_search_aggregator)
+            .into_event(&self.user, "Documents Searched by Multi-Search POST");
         let add_documents = std::mem::take(&mut self.add_documents_aggregator)
             .into_event(&self.user, "Documents Added");
         let delete_documents = std::mem::take(&mut self.delete_documents_aggregator)
@ -445,6 +455,9 @@ impl Segment {
         if let Some(post_search) = post_search {
             let _ = self.batcher.push(post_search).await;
         }
+        if let Some(post_multi_search) = post_multi_search {
+            let _ = self.batcher.push(post_multi_search).await;
+        }
         if let Some(add_documents) = add_documents {
             let _ = self.batcher.push(add_documents).await;
         }
@ -718,6 +731,118 @@ impl SearchAggregator {
     }
 }

+#[derive(Default)]
+pub struct MultiSearchAggregator {
+    timestamp: Option<OffsetDateTime>,
+
+    // requests
+    total_received: usize,
+    total_succeeded: usize,
+
+    // sum of the number of distinct indexes in each single request, use with total_received to compute an avg
+    total_distinct_index_count: usize,
+    // number of queries with a single index, use with total_received to compute a proportion
+    total_single_index: usize,
+
+    // sum of the number of search queries in the requests, use with total_received to compute an average
+    total_search_count: usize,
+
+    // context
+    user_agents: HashSet<String>,
+}
+
+impl MultiSearchAggregator {
+    pub fn from_queries(query: &[SearchQueryWithIndex], request: &HttpRequest) -> Self {
+        let timestamp = Some(OffsetDateTime::now_utc());
+
+        let user_agents = extract_user_agents(request).into_iter().collect();
+
+        let distinct_indexes: HashSet<_> =
+            query.iter().map(|query| query.index_uid.as_str()).collect();
+
+        Self {
+            timestamp,
+            total_received: 1,
+            total_succeeded: 0,
+            total_distinct_index_count: distinct_indexes.len(),
+            total_single_index: if distinct_indexes.len() == 1 { 1 } else { 0 },
+            total_search_count: query.len(),
+            user_agents,
+        }
+    }
+
+    pub fn succeed(&mut self) {
+        self.total_succeeded = self.total_succeeded.saturating_add(1);
+    }
+
+    pub fn aggregate(&mut self, other: Self) {
+        // write the aggregate in a way that will cause a compilation error if a field is added.
+
+        // get ownership of self, replacing it by a default value.
+        let this = std::mem::take(self);
+
+        let timestamp = this.timestamp.or(other.timestamp);
+        let total_received = this.total_received.saturating_add(other.total_received);
+        let total_succeeded = this.total_succeeded.saturating_add(other.total_succeeded);
+        let total_distinct_index_count =
+            this.total_distinct_index_count.saturating_add(other.total_distinct_index_count);
+        let total_single_index = this.total_single_index.saturating_add(other.total_single_index);
+        let total_search_count = this.total_search_count.saturating_add(other.total_search_count);
+        let mut user_agents = this.user_agents;
+
+        for user_agent in other.user_agents.into_iter() {
+            user_agents.insert(user_agent);
+        }
+
+        // need all fields or compile error
+        let mut aggregated = Self {
+            timestamp,
+            total_received,
+            total_succeeded,
+            total_distinct_index_count,
+            total_single_index,
+            total_search_count,
+            user_agents,
+            // do not add _ or ..Default::default() here
+        };
+
+        // replace the default self with the aggregated value
+        std::mem::swap(self, &mut aggregated);
+    }
+
+    pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
+        if self.total_received == 0 {
+            None
+        } else {
+            let properties = json!({
+                "user-agent": self.user_agents,
+                "requests": {
+                    "total_succeeded": self.total_succeeded,
+                    "total_failed": self.total_received.saturating_sub(self.total_succeeded), // just to be sure we never panic
+                    "total_received": self.total_received,
+                },
+                "indexes": {
+                    "total_single_index": self.total_single_index,
+                    "total_distinct_index_count": self.total_distinct_index_count,
+                    "avg_distinct_index_count": (self.total_distinct_index_count as f64) / (self.total_received as f64), // not 0 else returned early
+                },
+                "searches": {
+                    "total_search_count": self.total_search_count,
+                    "avg_search_count": (self.total_search_count as f64) / (self.total_received as f64),
+                }
+            });
+
+            Some(Track {
+                timestamp: self.timestamp,
+                user: user.clone(),
+                event: event_name.to_string(),
+                properties,
+                ..Default::default()
+            })
+        }
+    }
+}
+
 #[derive(Default)]
 pub struct DocumentsAggregator {
     timestamp: Option<OffsetDateTime>,
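For illustration only, with made-up counts (three requests received, one of which failed, five distinct indexes and seven queries in total), the `properties` object assembled by `into_event` above would serialize along these lines:

```json
{
  "user-agent": ["curl/7.86.0"],
  "requests": {
    "total_succeeded": 2,
    "total_failed": 1,
    "total_received": 3
  },
  "indexes": {
    "total_single_index": 1,
    "total_distinct_index_count": 5,
    "avg_distinct_index_count": 1.6666666666666667
  },
  "searches": {
    "total_search_count": 7,
    "avg_search_count": 2.3333333333333335
  }
}
```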
@ -136,6 +136,13 @@ pub mod policies {

    use crate::extractors::authentication::Policy;

+    enum TenantTokenOutcome {
+        NotATenantToken,
+        Invalid,
+        Expired,
+        Valid(Uuid, SearchRules),
+    }
+
    fn tenant_token_validation() -> Validation {
        let mut validation = Validation::default();
        validation.validate_exp = false;
@ -164,29 +171,42 @@ pub mod policies {
    pub struct ActionPolicy<const A: u8>;

    impl<const A: u8> Policy for ActionPolicy<A> {
+        /// Attempts to grant authentication from a bearer token (that can be a tenant token or an API key), the requested Action,
+        /// and a list of requested indexes.
+        ///
+        /// If the bearer token is not allowed for the specified indexes and action, returns `None`.
+        /// Otherwise, returns an object containing the generated permissions: the search filters to add to a search, and the list of allowed indexes
+        /// (that may contain more indexes than requested).
        fn authenticate(
            auth: AuthController,
            token: &str,
            index: Option<&str>,
        ) -> Option<AuthFilter> {
            // authenticate if token is the master key.
-            // master key can only have access to keys routes.
-            // if master key is None only keys routes are inaccessible.
+            // Without a master key, all routes are accessible except the key-related routes.
            if auth.get_master_key().map_or_else(|| !is_keys_action(A), |mk| mk == token) {
                return Some(AuthFilter::default());
            }

-            // Tenant token
-            if let Some(filters) = ActionPolicy::<A>::authenticate_tenant_token(&auth, token, index)
-            {
-                return Some(filters);
-            } else if let Some(action) = Action::from_repr(A) {
-                // API key
-                if let Ok(Some(uid)) = auth.get_optional_uid_from_encoded_key(token.as_bytes()) {
-                    if let Ok(true) = auth.is_key_authorized(uid, action, index) {
-                        return auth.get_key_filters(uid, None).ok();
-                    }
-                }
-            }
+            let (key_uuid, search_rules) =
+                match ActionPolicy::<A>::authenticate_tenant_token(&auth, token) {
+                    TenantTokenOutcome::Valid(key_uuid, search_rules) => {
+                        (key_uuid, Some(search_rules))
+                    }
+                    TenantTokenOutcome::Expired => return None,
+                    TenantTokenOutcome::Invalid => return None,
+                    TenantTokenOutcome::NotATenantToken => {
+                        (auth.get_optional_uid_from_encoded_key(token.as_bytes()).ok()??, None)
+                    }
+                };
+
+            // check that the indexes are allowed
+            let action = Action::from_repr(A)?;
+            let auth_filter = auth.get_key_filters(key_uuid, search_rules).ok()?;
+            if auth.is_key_authorized(key_uuid, action, index).unwrap_or(false)
+                && index.map(|index| auth_filter.is_index_authorized(index)).unwrap_or(true)
+            {
+                return Some(auth_filter);
+            }

            None
@ -194,50 +214,43 @@ pub mod policies {
    }

    impl<const A: u8> ActionPolicy<A> {
-        fn authenticate_tenant_token(
-            auth: &AuthController,
-            token: &str,
-            index: Option<&str>,
-        ) -> Option<AuthFilter> {
-            // A tenant token only has access to the search route which always defines an index.
-            let index = index?;
-
+        fn authenticate_tenant_token(auth: &AuthController, token: &str) -> TenantTokenOutcome {
            // Only search action can be accessed by a tenant token.
            if A != actions::SEARCH {
-                return None;
+                return TenantTokenOutcome::NotATenantToken;
            }

-            let uid = extract_key_id(token)?;
-            // check if parent key is authorized to do the action.
-            if auth.is_key_authorized(uid, Action::Search, Some(index)).ok()? {
-                // Check if tenant token is valid.
-                let key = auth.generate_key(uid)?;
-                let data = decode::<Claims>(
-                    token,
-                    &DecodingKey::from_secret(key.as_bytes()),
-                    &tenant_token_validation(),
-                )
-                .ok()?;
-
-                // Check index access if an index restriction is provided.
-                if !data.claims.search_rules.is_index_authorized(index) {
-                    return None;
-                }
-
-                // Check if token is expired.
-                if let Some(exp) = data.claims.exp {
-                    if OffsetDateTime::now_utc().unix_timestamp() > exp {
-                        return None;
-                    }
-                }
-
-                return match auth.get_key_filters(uid, Some(data.claims.search_rules)) {
-                    Ok(auth) if auth.search_rules.is_index_authorized(index) => Some(auth),
-                    _ => None,
-                };
-            }
-
-            None
+            let uid = if let Some(uid) = extract_key_id(token) {
+                uid
+            } else {
+                return TenantTokenOutcome::NotATenantToken;
+            };
+
+            // Check if tenant token is valid.
+            let key = if let Some(key) = auth.generate_key(uid) {
+                key
+            } else {
+                return TenantTokenOutcome::Invalid;
+            };
+
+            let data = if let Ok(data) = decode::<Claims>(
+                token,
+                &DecodingKey::from_secret(key.as_bytes()),
+                &tenant_token_validation(),
+            ) {
+                data
+            } else {
+                return TenantTokenOutcome::Invalid;
+            };
+
+            // Check if token is expired.
+            if let Some(exp) = data.claims.exp {
+                if OffsetDateTime::now_utc().unix_timestamp() > exp {
+                    return TenantTokenOutcome::Expired;
+                }
+            }
+
+            TenantTokenOutcome::Valid(uid, data.claims.search_rules)
        }
    }

@ -192,7 +192,7 @@ pub async fn replace_documents(

    analytics.add_documents(&params, index_scheduler.index(&index_uid).is_err(), &req);

-    let allow_index_creation = index_scheduler.filters().allow_index_creation;
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
    let task = document_addition(
        extract_mime_type(&req)?,
        index_scheduler,
@ -223,7 +223,7 @@ pub async fn update_documents(

    analytics.update_documents(&params, index_scheduler.index(&index_uid).is_err(), &req);

-    let allow_index_creation = index_scheduler.filters().allow_index_creation;
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
    let task = document_addition(
        extract_mime_type(&req)?,
        index_scheduler,
@ -89,11 +89,11 @@ pub async fn list_indexes(
    index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, Data<IndexScheduler>>,
    paginate: AwebQueryParameter<ListIndexes, DeserrQueryParamError>,
 ) -> Result<HttpResponse, ResponseError> {
-    let search_rules = &index_scheduler.filters().search_rules;
+    let filters = index_scheduler.filters();
    let indexes: Vec<_> = index_scheduler.indexes()?;
    let indexes = indexes
        .into_iter()
-        .filter(|(name, _)| search_rules.is_index_authorized(name))
+        .filter(|(name, _)| filters.is_index_authorized(name))
        .map(|(name, index)| IndexView::new(name, &index))
        .collect::<Result<Vec<_>, _>>()?;

@ -120,7 +120,7 @@ pub async fn create_index(
 ) -> Result<HttpResponse, ResponseError> {
    let IndexCreateRequest { primary_key, uid } = body.into_inner();

-    let allow_index_creation = index_scheduler.filters().search_rules.is_index_authorized(&uid);
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&uid);
    if allow_index_creation {
        analytics.publish(
            "Index Created".to_string(),
@ -3,7 +3,6 @@ use actix_web::{web, HttpRequest, HttpResponse};
 use deserr::actix_web::{AwebJson, AwebQueryParameter};
 use index_scheduler::IndexScheduler;
 use log::debug;
-use meilisearch_auth::IndexSearchRules;
 use meilisearch_types::deserr::query_params::Param;
 use meilisearch_types::deserr::{DeserrJsonError, DeserrQueryParamError};
 use meilisearch_types::error::deserr_codes::*;
@ -17,9 +16,9 @@ use crate::extractors::authentication::policies::*;
 use crate::extractors::authentication::GuardedData;
 use crate::extractors::sequential_extractor::SeqHandler;
 use crate::search::{
-    perform_search, MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
-    DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
-    DEFAULT_SEARCH_OFFSET,
+    add_search_rules, perform_search, MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH,
+    DEFAULT_CROP_MARKER, DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG,
+    DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_OFFSET,
 };

 pub fn configure(cfg: &mut web::ServiceConfig) {
@ -101,26 +100,6 @@ impl From<SearchQueryGet> for SearchQuery {
     }
 }

-/// Incorporate search rules in search query
-fn add_search_rules(query: &mut SearchQuery, rules: IndexSearchRules) {
-    query.filter = match (query.filter.take(), rules.filter) {
-        (None, rules_filter) => rules_filter,
-        (filter, None) => filter,
-        (Some(filter), Some(rules_filter)) => {
-            let filter = match filter {
-                Value::Array(filter) => filter,
-                filter => vec![filter],
-            };
-            let rules_filter = match rules_filter {
-                Value::Array(rules_filter) => rules_filter,
-                rules_filter => vec![rules_filter],
-            };
-
-            Some(Value::Array([filter, rules_filter].concat()))
-        }
-    }
-}
-
 // TODO: TAMO: split on :asc, and :desc, instead of doing some weird things

 /// Transform the sort query parameter into something that matches the post expected format.
@ -159,9 +138,7 @@ pub async fn search_with_url_query(
    let mut query: SearchQuery = params.into_inner().into();

    // Tenant token search_rules.
-    if let Some(search_rules) =
-        index_scheduler.filters().search_rules.get_index_search_rules(&index_uid)
-    {
+    if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
        add_search_rules(&mut query, search_rules);
    }

@ -193,9 +170,7 @@ pub async fn search_with_post(
    debug!("search called with params: {:?}", query);

    // Tenant token search_rules.
-    if let Some(search_rules) =
-        index_scheduler.filters().search_rules.get_index_search_rules(&index_uid)
-    {
+    if let Some(search_rules) = index_scheduler.filters().get_index_search_rules(&index_uid) {
        add_search_rules(&mut query, search_rules);
    }

@ -45,7 +45,8 @@ macro_rules! make_setting_route {

                let new_settings = Settings { $attr: Setting::Reset.into(), ..Default::default() };

-                let allow_index_creation = index_scheduler.filters().allow_index_creation;
+                let allow_index_creation =
+                    index_scheduler.filters().allow_index_creation(&index_uid);

                let task = KindWithContent::SettingsUpdate {
                    index_uid: index_uid.to_string(),
@ -86,7 +87,8 @@ macro_rules! make_setting_route {
                    ..Default::default()
                };

-                let allow_index_creation = index_scheduler.filters().allow_index_creation;
+                let allow_index_creation =
+                    index_scheduler.filters().allow_index_creation(&index_uid);

                let task = KindWithContent::SettingsUpdate {
                    index_uid: index_uid.to_string(),
@ -560,7 +562,7 @@ pub async fn update_all(
        Some(&req),
    );

-    let allow_index_creation = index_scheduler.filters().allow_index_creation;
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
    let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
    let task = KindWithContent::SettingsUpdate {
        index_uid,
@ -596,7 +598,7 @@ pub async fn delete_all(

    let new_settings = Settings::cleared().into_unchecked();

-    let allow_index_creation = index_scheduler.filters().allow_index_creation;
+    let allow_index_creation = index_scheduler.filters().allow_index_creation(&index_uid);
    let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
    let task = KindWithContent::SettingsUpdate {
        index_uid,
@ -22,6 +22,7 @@ const PAGINATION_DEFAULT_LIMIT: usize = 20;
 mod api_key;
 mod dump;
 pub mod indexes;
+mod multi_search;
 mod swap_indexes;
 pub mod tasks;

@ -33,6 +34,7 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
        .service(web::resource("/stats").route(web::get().to(get_stats)))
        .service(web::resource("/version").route(web::get().to(get_version)))
        .service(web::scope("/indexes").configure(indexes::configure))
+        .service(web::scope("/multi-search").configure(multi_search::configure))
        .service(web::scope("/swap-indexes").configure(swap_indexes::configure));
 }

@ -237,10 +239,9 @@ async fn get_stats(
    analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
    analytics.publish("Stats Seen".to_string(), json!({ "per_index_uid": false }), Some(&req));
-    let search_rules = &index_scheduler.filters().search_rules;
+    let filters = index_scheduler.filters();

-    let stats =
-        create_all_stats((*index_scheduler).clone(), (*auth_controller).clone(), search_rules)?;
+    let stats = create_all_stats((*index_scheduler).clone(), (*auth_controller).clone(), filters)?;

    debug!("returns: {:?}", stats);
    Ok(HttpResponse::Ok().json(stats))
@ -249,19 +250,19 @@ async fn get_stats(
 pub fn create_all_stats(
    index_scheduler: Data<IndexScheduler>,
    auth_controller: AuthController,
-    search_rules: &meilisearch_auth::SearchRules,
+    filters: &meilisearch_auth::AuthFilter,
 ) -> Result<Stats, ResponseError> {
    let mut last_task: Option<OffsetDateTime> = None;
    let mut indexes = BTreeMap::new();
    let mut database_size = 0;
    let processing_task = index_scheduler.get_tasks_from_authorized_indexes(
        Query { statuses: Some(vec![Status::Processing]), limit: Some(1), ..Query::default() },
-        search_rules.authorized_indexes(),
+        filters,
    )?;
    // accumulate the size of each indexes
    let processing_index = processing_task.first().and_then(|task| task.index_uid());
    for (name, index) in index_scheduler.indexes()? {
-        if !search_rules.is_index_authorized(&name) {
+        if !filters.is_index_authorized(&name) {
            continue;
        }

meilisearch/src/routes/multi_search.rs (new file, 122 lines)
@ -0,0 +1,122 @@
use actix_http::StatusCode;
|
||||
use actix_web::web::{self, Data};
|
||||
use actix_web::{HttpRequest, HttpResponse};
|
||||
use deserr::actix_web::AwebJson;
|
||||
use index_scheduler::IndexScheduler;
|
||||
use log::debug;
|
||||
use meilisearch_types::deserr::DeserrJsonError;
|
||||
use meilisearch_types::error::ResponseError;
|
||||
use meilisearch_types::keys::actions;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::analytics::{Analytics, MultiSearchAggregator};
|
||||
use crate::extractors::authentication::policies::ActionPolicy;
|
||||
use crate::extractors::authentication::{AuthenticationError, GuardedData};
|
||||
use crate::extractors::sequential_extractor::SeqHandler;
|
||||
use crate::search::{
|
||||
add_search_rules, perform_search, SearchQueryWithIndex, SearchResultWithIndex,
|
||||
};
|
||||
|
||||
pub fn configure(cfg: &mut web::ServiceConfig) {
|
||||
cfg.service(web::resource("").route(web::post().to(SeqHandler(multi_search_with_post))));
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct SearchResults {
|
||||
results: Vec<SearchResultWithIndex>,
|
||||
}
|
||||
|
||||
#[derive(Debug, deserr::Deserr)]
|
||||
#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
|
||||
pub struct SearchQueries {
|
||||
queries: Vec<SearchQueryWithIndex>,
|
||||
}
|
||||
|
||||
pub async fn multi_search_with_post(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::SEARCH }>, Data<IndexScheduler>>,
|
||||
params: AwebJson<SearchQueries, DeserrJsonError>,
|
||||
req: HttpRequest,
|
||||
analytics: web::Data<dyn Analytics>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let queries = params.into_inner().queries;
|
||||
|
||||
let mut multi_aggregate = MultiSearchAggregator::from_queries(&queries, &req);
|
||||
|
||||
// Explicitly expect a `(ResponseError, usize)` for the error type rather than `ResponseError` only,
|
||||
// so that `?` doesn't work if it doesn't use `with_index`, ensuring that it is not forgotten in case of code
|
||||
// changes.
|
||||
let search_results: Result<_, (ResponseError, usize)> = (|| {
|
||||
async {
|
||||
let mut search_results = Vec::with_capacity(queries.len());
|
||||
for (query_index, (index_uid, mut query)) in
|
||||
queries.into_iter().map(SearchQueryWithIndex::into_index_query).enumerate()
|
||||
{
|
||||
debug!("multi-search #{query_index}: called with params: {:?}", query);
|
||||
|
||||
// Check index from API key
|
||||
if !index_scheduler.filters().is_index_authorized(&index_uid) {
|
||||
return Err(AuthenticationError::InvalidToken).with_index(query_index);
|
||||
}
|
||||
// Apply search rules from tenant token
|
||||
if let Some(search_rules) =
|
||||
index_scheduler.filters().get_index_search_rules(&index_uid)
|
||||
{
|
||||
add_search_rules(&mut query, search_rules);
|
||||
}
|
||||
|
||||
let index = index_scheduler
|
||||
.index(&index_uid)
|
||||
.map_err(|err| {
|
||||
let mut err = ResponseError::from(err);
|
||||
// Patch the HTTP status code to 400 as it defaults to 404 for `index_not_found`, but
|
||||
// here the resource not found is not part of the URL.
|
||||
err.code = StatusCode::BAD_REQUEST;
|
||||
err
|
||||
})
|
||||
.with_index(query_index)?;
|
||||
let search_result =
|
||||
tokio::task::spawn_blocking(move || perform_search(&index, query))
|
||||
.await
|
||||
.with_index(query_index)?;
|
||||
|
||||
search_results.push(SearchResultWithIndex {
|
||||
index_uid: index_uid.into_inner(),
|
||||
result: search_result.with_index(query_index)?,
|
||||
});
|
||||
}
|
||||
Ok(search_results)
|
||||
}
|
||||
})()
|
||||
.await;
|
||||
|
||||
if search_results.is_ok() {
|
||||
multi_aggregate.succeed();
|
||||
}
|
||||
analytics.post_multi_search(multi_aggregate);
|
||||
|
||||
let search_results = search_results.map_err(|(mut err, query_index)| {
|
||||
// Add the query index that failed as context for the error message.
|
||||
// We're doing it only here and not directly in the `WithIndex` trait so that the `with_index` function returns a different type
|
||||
// of result and we can benefit from static typing.
|
||||
err.message = format!("Inside `.queries[{query_index}]`: {}", err.message);
|
||||
err
|
||||
})?;
|
||||
|
||||
debug!("returns: {:?}", search_results);
|
||||
|
||||
Ok(HttpResponse::Ok().json(SearchResults { results: search_results }))
|
||||
}
|
||||
|
||||
/// Local `Result` extension trait to avoid `map_err` boilerplate.
|
||||
trait WithIndex {
|
||||
type T;
|
||||
/// convert the error type inside of the `Result` to a `ResponseError`, and return a couple of it + the usize.
|
||||
fn with_index(self, index: usize) -> Result<Self::T, (ResponseError, usize)>;
|
||||
}
|
||||
|
||||
impl<T, E: Into<ResponseError>> WithIndex for Result<T, E> {
|
||||
type T = T;
|
||||
fn with_index(self, index: usize) -> Result<T, (ResponseError, usize)> {
|
||||
self.map_err(|err| (err.into(), index))
|
||||
}
|
||||
}
|
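The `WithIndex` trait at the bottom of this new file is the mechanism behind the `(ResponseError, usize)` comment: since the loop's error type is a tuple, a bare `?` fails to type-check until the error is routed through `with_index`. A self-contained sketch of the same pattern with simplified stand-in types (`String` instead of `ResponseError`):

```rust
// The pattern in miniature: an extension trait that forces every error to
// carry the position of the query it came from.
trait WithIndex {
    type T;
    fn with_index(self, index: usize) -> Result<Self::T, (String, usize)>;
}

impl<T, E: ToString> WithIndex for Result<T, E> {
    type T = T;
    fn with_index(self, index: usize) -> Result<T, (String, usize)> {
        self.map_err(|err| (err.to_string(), index))
    }
}

fn main() {
    let queries = ["1", "two", "3"];
    let parsed: Result<Vec<i32>, (String, usize)> = queries
        .iter()
        .enumerate()
        .map(|(i, q)| q.parse::<i32>().with_index(i))
        .collect();
    // The failing query's position travels with the error:
    assert_eq!(parsed.unwrap_err().1, 1);
}
```

Because the outer error type is a tuple, a bare `?` on a `ParseIntError` here (or on a `ResponseError` in the real handler) does not compile until it is tagged with `with_index`, which is exactly the invariant the handler's comment describes.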
@@ -42,7 +42,7 @@ pub async fn swap_indexes(
         }),
         Some(&req),
     );
-    let search_rules = &index_scheduler.filters().search_rules;
+    let filters = index_scheduler.filters();
 
     let mut swaps = vec![];
     for SwapIndexesPayload { indexes } in params.into_iter() {
@@ -53,7 +53,7 @@ pub async fn swap_indexes(
             return Err(MeilisearchHttpError::SwapIndexPayloadWrongLength(indexes).into());
         }
     };
-    if !search_rules.is_index_authorized(lhs) || !search_rules.is_index_authorized(rhs) {
+    if !filters.is_index_authorized(lhs) || !filters.is_index_authorized(rhs) {
         return Err(AuthenticationError::InvalidToken.into());
     }
     swaps.push(IndexSwap { indexes: (lhs.to_string(), rhs.to_string()) });
@@ -319,7 +319,7 @@ async fn cancel_tasks(
     let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
         &index_scheduler.read_txn()?,
         &query,
-        &index_scheduler.filters().search_rules.authorized_indexes(),
+        index_scheduler.filters(),
     )?;
     let task_cancelation =
         KindWithContent::TaskCancelation { query: format!("?{}", req.query_string()), tasks };
@@ -364,7 +364,7 @@ async fn delete_tasks(
    let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
        &index_scheduler.read_txn()?,
        &query,
-        &index_scheduler.filters().search_rules.authorized_indexes(),
+        index_scheduler.filters(),
    )?;
    let task_deletion =
        KindWithContent::TaskDeletion { query: format!("?{}", req.query_string()), tasks };
@@ -398,10 +398,7 @@ async fn get_tasks(
     let query = params.into_query();
 
     let mut tasks_results: Vec<TaskView> = index_scheduler
-        .get_tasks_from_authorized_indexes(
-            query,
-            index_scheduler.filters().search_rules.authorized_indexes(),
-        )?
+        .get_tasks_from_authorized_indexes(query, index_scheduler.filters())?
         .into_iter()
         .map(|t| TaskView::from_task(&t))
         .collect();
@@ -439,12 +436,8 @@ async fn get_task(
 
     let query = index_scheduler::Query { uids: Some(vec![task_uid]), ..Query::default() };
 
-    if let Some(task) = index_scheduler
-        .get_tasks_from_authorized_indexes(
-            query,
-            index_scheduler.filters().search_rules.authorized_indexes(),
-        )?
-        .first()
+    if let Some(task) =
+        index_scheduler.get_tasks_from_authorized_indexes(query, index_scheduler.filters())?.first()
     {
         let task_view = TaskView::from_task(task);
         Ok(HttpResponse::Ok().json(task_view))
@@ -5,8 +5,10 @@ use std::time::Instant;
 
 use deserr::Deserr;
 use either::Either;
+use meilisearch_auth::IndexSearchRules;
 use meilisearch_types::deserr::DeserrJsonError;
 use meilisearch_types::error::deserr_codes::*;
+use meilisearch_types::index_uid::IndexUid;
 use meilisearch_types::settings::DEFAULT_PAGINATION_MAX_TOTAL_HITS;
 use meilisearch_types::{milli, Document};
 use milli::tokenizer::TokenizerBuilder;
@@ -74,6 +76,100 @@ impl SearchQuery {
     }
 }
 
+/// A `SearchQuery` + an index UID.
+// This struct contains the fields of `SearchQuery` inline.
+// This is because neither deserr nor serde supports `flatten` when using `deny_unknown_fields`.
+// The exhaustive destructuring in `into_index_query` ensures both structs remain up to date.
+#[derive(Debug, Clone, PartialEq, Eq, Deserr)]
+#[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
+pub struct SearchQueryWithIndex {
+    #[deserr(error = DeserrJsonError<InvalidIndexUid>, missing_field_error = DeserrJsonError::missing_index_uid)]
+    pub index_uid: IndexUid,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchQ>)]
+    pub q: Option<String>,
+    #[deserr(default = DEFAULT_SEARCH_OFFSET(), error = DeserrJsonError<InvalidSearchOffset>)]
+    pub offset: usize,
+    #[deserr(default = DEFAULT_SEARCH_LIMIT(), error = DeserrJsonError<InvalidSearchLimit>)]
+    pub limit: usize,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchPage>)]
+    pub page: Option<usize>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchHitsPerPage>)]
+    pub hits_per_page: Option<usize>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchAttributesToRetrieve>)]
+    pub attributes_to_retrieve: Option<BTreeSet<String>>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchAttributesToCrop>)]
+    pub attributes_to_crop: Option<Vec<String>>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchCropLength>, default = DEFAULT_CROP_LENGTH())]
+    pub crop_length: usize,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchAttributesToHighlight>)]
+    pub attributes_to_highlight: Option<HashSet<String>>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchShowMatchesPosition>, default)]
+    pub show_matches_position: bool,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchFilter>)]
+    pub filter: Option<Value>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchSort>)]
+    pub sort: Option<Vec<String>>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchFacets>)]
+    pub facets: Option<Vec<String>>,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchHighlightPreTag>, default = DEFAULT_HIGHLIGHT_PRE_TAG())]
+    pub highlight_pre_tag: String,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchHighlightPostTag>, default = DEFAULT_HIGHLIGHT_POST_TAG())]
+    pub highlight_post_tag: String,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchCropMarker>, default = DEFAULT_CROP_MARKER())]
+    pub crop_marker: String,
+    #[deserr(default, error = DeserrJsonError<InvalidSearchMatchingStrategy>, default)]
+    pub matching_strategy: MatchingStrategy,
+}
+
+impl SearchQueryWithIndex {
+    pub fn into_index_query(self) -> (IndexUid, SearchQuery) {
+        let SearchQueryWithIndex {
+            index_uid,
+            q,
+            offset,
+            limit,
+            page,
+            hits_per_page,
+            attributes_to_retrieve,
+            attributes_to_crop,
+            crop_length,
+            attributes_to_highlight,
+            show_matches_position,
+            filter,
+            sort,
+            facets,
+            highlight_pre_tag,
+            highlight_post_tag,
+            crop_marker,
+            matching_strategy,
+        } = self;
+        (
+            index_uid,
+            SearchQuery {
+                q,
+                offset,
+                limit,
+                page,
+                hits_per_page,
+                attributes_to_retrieve,
+                attributes_to_crop,
+                crop_length,
+                attributes_to_highlight,
+                show_matches_position,
+                filter,
+                sort,
+                facets,
+                highlight_pre_tag,
+                highlight_post_tag,
+                crop_marker,
+                matching_strategy,
+                // Do not use `..Default::default()` here; rather, add any missing field
+                // from `SearchQuery` to `SearchQueryWithIndex`.
+            },
+        )
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Deserr)]
 #[deserr(rename_all = camelCase)]
 pub enum MatchingStrategy {
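A note on the comment above: the exhaustive destructuring in `into_index_query` is what enforces it. Because the pattern has no `..` rest, adding a field to `SearchQueryWithIndex` without also threading it into `SearchQuery` fails to compile. A tiny sketch of the trick with stand-in structs (not the Meilisearch types):

```rust
// Stand-in structs; the point is the exhaustive pattern, not the field list.
struct WithId {
    id: u32,
    q: Option<String>,
    limit: usize,
}
struct Plain {
    q: Option<String>,
    limit: usize,
}

fn split(w: WithId) -> (u32, Plain) {
    // No `..` here: a new field on `WithId` that is not moved into `Plain`
    // (or explicitly dropped) fails to compile instead of being lost silently.
    let WithId { id, q, limit } = w;
    (id, Plain { q, limit })
}

fn main() {
    let (id, rest) = split(WithId { id: 7, q: Some("glass".into()), limit: 20 });
    assert_eq!((id, rest.limit), (7, 20));
    assert_eq!(rest.q.as_deref(), Some("glass"));
}
```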
@@ -122,6 +218,14 @@ pub struct SearchResult {
     pub facet_stats: Option<BTreeMap<String, FacetStats>>,
 }
 
+#[derive(Serialize, Debug, Clone, PartialEq)]
+#[serde(rename_all = "camelCase")]
+pub struct SearchResultWithIndex {
+    pub index_uid: String,
+    #[serde(flatten)]
+    pub result: SearchResult,
+}
+
 #[derive(Serialize, Debug, Clone, PartialEq, Eq)]
 #[serde(untagged)]
 pub enum HitsInfo {
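The `#[serde(flatten)]` attribute is why each entry of `results` serializes as a plain search result with one extra `indexUid` key rather than a nested object. A minimal sketch of that behaviour, using stand-in structs and assuming `serde`/`serde_json` as dependencies:

```rust
use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Inner {
    query: String,
    limit: usize,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct WithIndexUid {
    index_uid: String,
    #[serde(flatten)]
    result: Inner, // fields of `result` are inlined next to `indexUid`
}

fn main() {
    let entry = WithIndexUid {
        index_uid: "test".into(),
        result: Inner { query: "glass".into(), limit: 20 },
    };
    let v = serde_json::to_value(&entry).unwrap();
    assert_eq!(v, json!({ "indexUid": "test", "query": "glass", "limit": 20 }));
}
```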
@@ -137,6 +241,26 @@ pub struct FacetStats {
     pub max: f64,
 }
 
+/// Incorporate the search rules into the search query.
+pub fn add_search_rules(query: &mut SearchQuery, rules: IndexSearchRules) {
+    query.filter = match (query.filter.take(), rules.filter) {
+        (None, rules_filter) => rules_filter,
+        (filter, None) => filter,
+        (Some(filter), Some(rules_filter)) => {
+            let filter = match filter {
+                Value::Array(filter) => filter,
+                filter => vec![filter],
+            };
+            let rules_filter = match rules_filter {
+                Value::Array(rules_filter) => rules_filter,
+                rules_filter => vec![rules_filter],
+            };
+
+            Some(Value::Array([filter, rules_filter].concat()))
+        }
+    }
+}
+
 pub fn perform_search(
     index: &Index,
     query: SearchQuery,
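`add_search_rules` merges the user's filter with the tenant token's filter by normalizing both to arrays and concatenating them into one outer array; since Meilisearch ANDs the elements of an outer filter array, the token's rule can only narrow the results, never widen them. A sketch of the merge on plain `serde_json` values (the filter strings are illustrative, not from the source):

```rust
use serde_json::{json, Value};

// Same shape as the `(Some(filter), Some(rules_filter))` arm above.
fn merge(query_filter: Value, rules_filter: Value) -> Value {
    let to_vec = |v: Value| match v {
        Value::Array(a) => a,
        v => vec![v],
    };
    Value::Array([to_vec(query_filter), to_vec(rules_filter)].concat())
}

fn main() {
    let merged = merge(json!("genres = Music"), json!(["user_id = 42"]));
    // Both constraints end up in one outer (ANDed) array:
    assert_eq!(merged, json!(["genres = Music", "user_id = 42"]));
}
```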
@@ -11,6 +11,7 @@ use crate::common::Server;
 
 pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
     Lazy::new(|| {
         let mut authorizations = hashmap! {
+            ("POST", "/multi-search") => hashset!{"search", "*"},
             ("POST", "/indexes/products/search") => hashset!{"search", "*"},
             ("GET", "/indexes/products/search") => hashset!{"search", "*"},
             ("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
@@ -4,6 +4,8 @@ mod errors;
 mod payload;
 mod tenant_token;
+mod tenant_token_multi_search;
 
 use actix_web::http::StatusCode;
 use serde_json::{json, Value};
meilisearch/tests/auth/tenant_token_multi_search.rs (new file, 1141 lines)
File diff suppressed because it is too large.
@@ -103,6 +103,10 @@ impl Server {
         Index { uid: uid.as_ref().to_string(), service: &self.service, encoder }
     }
 
+    pub async fn multi_search(&self, queries: Value) -> (Value, StatusCode) {
+        self.service.post("/multi-search", queries).await
+    }
+
     pub async fn list_indexes_raw(&self, parameters: &str) -> (Value, StatusCode) {
         self.service.get(format!("/indexes{parameters}")).await
     }
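A hedged usage sketch of the new helper, in the style of the test suite further below; the test name is hypothetical, and the empty-queries expectation mirrors the `search_empty_list` test:

```rust
// Sketch only: relies on the test-harness items (`Server`, `json!`) visible in
// the surrounding diff.
#[actix_rt::test]
async fn multi_search_smoke() {
    let server = Server::new().await;
    let (response, code) = server.multi_search(json!({ "queries": [] })).await;
    assert_eq!(code, 200); // actix's StatusCode compares against u16
    assert_eq!(response["results"], json!([]));
}
```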
@@ -3,6 +3,7 @@
 
 mod errors;
 mod formatted;
+mod multi;
 mod pagination;
 
 use once_cell::sync::Lazy;
meilisearch/tests/search/multi.rs (new file, 343 lines)
@@ -0,0 +1,343 @@
+use meili_snap::{json_string, snapshot};
+use serde_json::json;
+
+use super::{DOCUMENTS, NESTED_DOCUMENTS};
+use crate::common::Server;
+
+#[actix_rt::test]
+async fn search_empty_list() {
+    let server = Server::new().await;
+
+    let (response, code) = server.multi_search(json!({"queries": []})).await;
+    snapshot!(code, @"200 OK");
+    snapshot!(json_string!(response), @r###"
+    {
+      "results": []
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_json_object() {
+    let server = Server::new().await;
+
+    let (response, code) = server.multi_search(json!({})).await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Missing field `queries`",
+      "code": "bad_request",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#bad_request"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_json_array() {
+    let server = Server::new().await;
+
+    let (response, code) = server.multi_search(json!([])).await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Invalid value type: expected an object, but found an array: `[]`",
+      "code": "bad_request",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#bad_request"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn simple_search_single_index() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass"},
+        {"indexUid": "test", "q": "captain"},
+        ]}))
+        .await;
+    snapshot!(code, @"200 OK");
+    insta::assert_json_snapshot!(response["results"], { "[].processingTimeMs" => "[time]" }, @r###"
+    [
+      {
+        "indexUid": "test",
+        "hits": [
+          {
+            "title": "Glass",
+            "id": "450465"
+          }
+        ],
+        "query": "glass",
+        "processingTimeMs": "[time]",
+        "limit": 20,
+        "offset": 0,
+        "estimatedTotalHits": 1
+      },
+      {
+        "indexUid": "test",
+        "hits": [
+          {
+            "title": "Captain Marvel",
+            "id": "299537"
+          }
+        ],
+        "query": "captain",
+        "processingTimeMs": "[time]",
+        "limit": 20,
+        "offset": 0,
+        "estimatedTotalHits": 1
+      }
+    ]
+    "###);
+}
+
+#[actix_rt::test]
+async fn simple_search_missing_index_uid() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"q": "glass"},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    insta::assert_json_snapshot!(response, @r###"
+    {
+      "message": "Missing field `indexUid` inside `.queries[0]`",
+      "code": "missing_index_uid",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#missing_index_uid"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn simple_search_illegal_index_uid() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid": "hé", "q": "glass"},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    insta::assert_json_snapshot!(response, @r###"
+    {
+      "message": "Invalid value at `.queries[0].indexUid`: `hé` is not a valid index uid. Index uid can be an integer or a string containing only alphanumeric characters, hyphens (-) and underscores (_).",
+      "code": "invalid_index_uid",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_index_uid"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn simple_search_two_indexes() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let index = server.index("nested");
+    let documents = NESTED_DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(1).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass"},
+        {"indexUid": "nested", "q": "pesti"},
+        ]}))
+        .await;
+    snapshot!(code, @"200 OK");
+    insta::assert_json_snapshot!(response["results"], { "[].processingTimeMs" => "[time]" }, @r###"
+    [
+      {
+        "indexUid": "test",
+        "hits": [
+          {
+            "title": "Glass",
+            "id": "450465"
+          }
+        ],
+        "query": "glass",
+        "processingTimeMs": "[time]",
+        "limit": 20,
+        "offset": 0,
+        "estimatedTotalHits": 1
+      },
+      {
+        "indexUid": "nested",
+        "hits": [
+          {
+            "id": 852,
+            "father": "jean",
+            "mother": "michelle",
+            "doggos": [
+              {
+                "name": "bobby",
+                "age": 2
+              },
+              {
+                "name": "buddy",
+                "age": 4
+              }
+            ],
+            "cattos": "pesti"
+          },
+          {
+            "id": 654,
+            "father": "pierre",
+            "mother": "sabine",
+            "doggos": [
+              {
+                "name": "gros bill",
+                "age": 8
+              }
+            ],
+            "cattos": [
+              "simba",
+              "pestiféré"
+            ]
+          }
+        ],
+        "query": "pesti",
+        "processingTimeMs": "[time]",
+        "limit": 20,
+        "offset": 0,
+        "estimatedTotalHits": 2
+      }
+    ]
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_one_index_doesnt_exist() {
+    let server = Server::new().await;
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass"},
+        {"indexUid": "nested", "q": "pesti"},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Inside `.queries[1]`: Index `nested` not found.",
+      "code": "index_not_found",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#index_not_found"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_multiple_indexes_dont_exist() {
+    let server = Server::new().await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass"},
+        {"indexUid": "nested", "q": "pesti"},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Inside `.queries[0]`: Index `test` not found.",
+      "code": "index_not_found",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#index_not_found"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_one_query_error() {
+    let server = Server::new().await;
+
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let index = server.index("nested");
+    let documents = NESTED_DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(1).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass", "facets": ["title"]},
+        {"indexUid": "nested", "q": "pesti"},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Inside `.queries[0]`: Invalid facet distribution, this index does not have configured filterable attributes.",
+      "code": "invalid_search_facets",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_search_facets"
+    }
+    "###);
+}
+
+#[actix_rt::test]
+async fn search_multiple_query_errors() {
+    let server = Server::new().await;
+
+    let index = server.index("test");
+
+    let documents = DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(0).await;
+
+    let index = server.index("nested");
+    let documents = NESTED_DOCUMENTS.clone();
+    index.add_documents(documents, None).await;
+    index.wait_task(1).await;
+
+    let (response, code) = server
+        .multi_search(json!({"queries": [
+        {"indexUid" : "test", "q": "glass", "facets": ["title"]},
+        {"indexUid": "nested", "q": "pesti", "facets": ["doggos"]},
+        ]}))
+        .await;
+    snapshot!(code, @"400 Bad Request");
+    snapshot!(json_string!(response), @r###"
+    {
+      "message": "Inside `.queries[0]`: Invalid facet distribution, this index does not have configured filterable attributes.",
+      "code": "invalid_search_facets",
+      "type": "invalid_request",
+      "link": "https://docs.meilisearch.com/errors#invalid_search_facets"
+    }
+    "###);
+}
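`search_multiple_query_errors` shows a consequence of the handler's loop: it stops at the first failing query, so even with two invalid facet requests only `.queries[0]` is reported. A stand-alone model of that fail-fast tagging (stand-in types, not the Meilisearch ones):

```rust
// Each result is tagged with its position; `?` bails on the first error.
fn run_all(queries: &[Result<i32, &str>]) -> Result<Vec<i32>, (String, usize)> {
    let mut results = Vec::with_capacity(queries.len());
    for (i, q) in queries.iter().enumerate() {
        results.push(q.map_err(|e| (e.to_string(), i))?);
    }
    Ok(results)
}

fn main() {
    let out = run_all(&[Err("bad facet"), Err("also bad")]);
    // Only the first failure is surfaced, tagged with its position:
    assert_eq!(out.unwrap_err(), ("bad facet".to_string(), 0));
}
```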