Merge branch 'main' into index-swap-error-handling

Tamo 2022-10-27 18:06:45 +02:00 committed by GitHub
commit c9f89d38e3
25 changed files with 475 additions and 169 deletions

View file

@@ -4,7 +4,7 @@ description = "Meilisearch HTTP server"
edition = "2021"
license = "MIT"
name = "meilisearch-http"
-version = "0.29.1"
+version = "0.30.0"
[[bin]]
name = "meilisearch"

View file

@@ -1,11 +1,11 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
-use index_scheduler::{IndexScheduler, Query};
+use index_scheduler::IndexScheduler;
use log::debug;
use meilisearch_types::error::ResponseError;
use meilisearch_types::index_uid::IndexUid;
use meilisearch_types::milli::{self, FieldDistribution, Index};
-use meilisearch_types::tasks::{KindWithContent, Status};
+use meilisearch_types::tasks::KindWithContent;
use serde::{Deserialize, Serialize};
use serde_json::json;
use time::OffsetDateTime;
@@ -202,14 +202,7 @@ impl IndexStats {
index_uid: String,
) -> Result<Self, ResponseError> {
// we check if there is currently a task processing associated with this index.
-let processing_task = index_scheduler.get_tasks(Query {
-status: Some(vec![Status::Processing]),
-index_uid: Some(vec![index_uid.clone()]),
-limit: Some(1),
-..Query::default()
-})?;
-let is_processing = !processing_task.is_empty();
+let is_processing = index_scheduler.is_index_processing(&index_uid)?;
let index = index_scheduler.index(&index_uid)?;
let rtxn = index.read_txn()?;
Ok(IndexStats {
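
Note on the hunk above: the handler used to build a Query limited to Status::Processing for the one index and test whether any task came back; the new `is_index_processing` call asks the scheduler the same question directly. A standalone sketch of that check, using hypothetical minimal types rather than the project's `IndexScheduler`/`Task` types:

```rust
// Standalone sketch with hypothetical minimal types (not the project's API):
// "is any currently processing task targeting this index?", which is the
// question the removed Status::Processing query answered.
#[derive(PartialEq)]
enum Status {
    Processing,
    Succeeded,
}

struct Task {
    index_uid: Option<String>,
    status: Status,
}

fn is_index_processing(tasks: &[Task], index_uid: &str) -> bool {
    tasks
        .iter()
        .any(|t| t.status == Status::Processing && t.index_uid.as_deref() == Some(index_uid))
}

fn main() {
    let tasks = vec![
        Task { index_uid: Some("movies".into()), status: Status::Processing },
        Task { index_uid: Some("books".into()), status: Status::Succeeded },
    ];
    assert!(is_index_processing(&tasks, "movies"));
    assert!(!is_index_processing(&tasks, "books"));
}
```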

View file

@@ -153,7 +153,7 @@ pub async fn search_with_url_query(
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
-let search_result = perform_search(&index, query);
+let search_result = tokio::task::spawn_blocking(move || perform_search(&index, query)).await?;
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
@@ -185,7 +185,7 @@ pub async fn search_with_post(
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
-let search_result = perform_search(&index, query);
+let search_result = tokio::task::spawn_blocking(move || perform_search(&index, query)).await?;
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
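
Both search handlers above now run `perform_search` through `tokio::task::spawn_blocking`, so the CPU-bound search executes on Tokio's blocking thread pool instead of stalling the async worker that accepted the request; the outer `?` propagates a failed join, while the inner `Result` is still inspected to feed the analytics aggregator. A self-contained sketch of the pattern, with a stand-in for the heavy call and assuming a tokio dependency with the `macros` and `rt-multi-thread` features:

```rust
use tokio::task;

// Stand-in for a CPU-bound call such as `perform_search`.
fn heavy_search(n: u64) -> u64 {
    (0..n).fold(0u64, |acc, x| acc.wrapping_add(x))
}

#[tokio::main]
async fn main() -> Result<(), task::JoinError> {
    // spawn_blocking runs the closure on a dedicated blocking thread;
    // `.await?` surfaces a JoinError if the task panicked or was cancelled,
    // and otherwise yields the closure's return value.
    let result = task::spawn_blocking(move || heavy_search(10_000_000)).await?;
    println!("sum = {result}");
    Ok(())
}
```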

View file

@@ -270,11 +270,10 @@ pub fn create_all_stats(
let mut last_task: Option<OffsetDateTime> = None;
let mut indexes = BTreeMap::new();
let mut database_size = 0;
-let processing_task = index_scheduler.get_tasks(Query {
-status: Some(vec![Status::Processing]),
-limit: Some(1),
-..Query::default()
-})?;
+let processing_task = index_scheduler.get_tasks_from_authorized_indexes(
+Query { status: Some(vec![Status::Processing]), limit: Some(1), ..Query::default() },
+search_rules.authorized_indexes(),
+)?;
let processing_index = processing_task.first().and_then(|task| task.index_uid());
for (name, index) in index_scheduler.indexes()? {
if !search_rules.is_index_authorized(&name) {

View file

@@ -291,8 +291,11 @@ async fn cancel_tasks(
return Err(index_scheduler::Error::TaskCancelationWithEmptyQuery.into());
}
-let filtered_query = filter_out_inaccessible_indexes_from_query(&index_scheduler, &query);
-let tasks = index_scheduler.get_task_ids(&filtered_query)?;
+let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
+&index_scheduler.read_txn()?,
+&query,
+&index_scheduler.filters().search_rules.authorized_indexes(),
+)?;
let task_cancelation =
KindWithContent::TaskCancelation { query: req.query_string().to_string(), tasks };
@@ -348,8 +351,11 @@ async fn delete_tasks(
return Err(index_scheduler::Error::TaskDeletionWithEmptyQuery.into());
}
-let filtered_query = filter_out_inaccessible_indexes_from_query(&index_scheduler, &query);
-let tasks = index_scheduler.get_task_ids(&filtered_query)?;
+let tasks = index_scheduler.get_task_ids_from_authorized_indexes(
+&index_scheduler.read_txn()?,
+&query,
+&index_scheduler.filters().search_rules.authorized_indexes(),
+)?;
let task_deletion =
KindWithContent::TaskDeletion { query: req.query_string().to_string(), tasks };
@@ -425,10 +431,15 @@ async fn get_tasks(
before_finished_at,
after_finished_at,
};
-let query = filter_out_inaccessible_indexes_from_query(&index_scheduler, &query);
-let mut tasks_results: Vec<TaskView> =
-index_scheduler.get_tasks(query)?.into_iter().map(|t| TaskView::from_task(&t)).collect();
+let mut tasks_results: Vec<TaskView> = index_scheduler
+.get_tasks_from_authorized_indexes(
+query,
+index_scheduler.filters().search_rules.authorized_indexes(),
+)?
+.into_iter()
+.map(|t| TaskView::from_task(&t))
+.collect();
// If we were able to fetch the number +1 tasks we asked
// it means that there is more to come.
@@ -454,17 +465,15 @@ async fn get_task(
analytics.publish("Tasks Seen".to_string(), json!({ "per_task_uid": true }), Some(&req));
-let search_rules = &index_scheduler.filters().search_rules;
-let mut filters = index_scheduler::Query::default();
-if !search_rules.is_index_authorized("*") {
-for (index, _policy) in search_rules.clone() {
-filters = filters.with_index(index);
-}
-}
+let query = index_scheduler::Query { uid: Some(vec![task_id]), ..Query::default() };
-filters.uid = Some(vec![task_id]);
-if let Some(task) = index_scheduler.get_tasks(filters)?.first() {
+if let Some(task) = index_scheduler
+.get_tasks_from_authorized_indexes(
+query,
+index_scheduler.filters().search_rules.authorized_indexes(),
+)?
+.first()
+{
let task_view = TaskView::from_task(task);
Ok(HttpResponse::Ok().json(task_view))
} else {
@@ -472,39 +481,6 @@ async fn get_task(
}
}
-fn filter_out_inaccessible_indexes_from_query<const ACTION: u8>(
-index_scheduler: &GuardedData<ActionPolicy<ACTION>, Data<IndexScheduler>>,
-query: &Query,
-) -> Query {
-let mut query = query.clone();
-// First remove all indexes from the query, we will add them back later
-let indexes = query.index_uid.take();
-let search_rules = &index_scheduler.filters().search_rules;
-// We filter on potential indexes and make sure that the search filter
-// restrictions are also applied.
-match indexes {
-Some(indexes) => {
-for name in indexes.iter() {
-if search_rules.is_index_authorized(name) {
-query = query.with_index(name.to_string());
-}
-}
-}
-None => {
-if !search_rules.is_index_authorized("*") {
-for (index, _policy) in search_rules.clone() {
-query = query.with_index(index.to_string());
-}
-}
-}
-};
-query
-}
pub(crate) mod date_deserializer {
use time::format_description::well_known::Rfc3339;
use time::macros::format_description;
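
With `filter_out_inaccessible_indexes_from_query` removed, the task routes above no longer rewrite the query's index list in the HTTP layer; they pass the original query together with `search_rules.authorized_indexes()` and let the scheduler apply the restriction. A standalone sketch of that restriction over plain `String` lists (assumed types, not the project's API), mirroring the removed helper's intent:

```rust
// Standalone sketch with assumed plain types (the project passes its own
// query/search-rules types): keep only the requested indexes the key may see,
// or fall back to the full allow-list when no index filter was given.
fn restrict_indexes(requested: Option<Vec<String>>, authorized: &[String]) -> Option<Vec<String>> {
    // A "*" entry in the allow-list means the key can access every index.
    let all_allowed = authorized.iter().any(|a| a == "*");
    match requested {
        Some(requested) if all_allowed => Some(requested),
        Some(requested) => {
            Some(requested.into_iter().filter(|name| authorized.contains(name)).collect())
        }
        None if all_allowed => None, // no restriction at all
        None => Some(authorized.to_vec()),
    }
}

fn main() {
    let authorized = vec!["movies".to_string(), "books".to_string()];
    let requested = Some(vec!["movies".to_string(), "secret".to_string()]);
    assert_eq!(restrict_indexes(requested, &authorized), Some(vec!["movies".to_string()]));
    assert_eq!(restrict_indexes(None, &authorized), Some(authorized.clone()));
}
```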

View file

@@ -203,7 +203,6 @@ macro_rules! compute_forbidden_search {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn search_authorized_simple_token() {
let tenant_tokens = vec![
hashmap! {
@@ -252,7 +251,6 @@ async fn search_authorized_simple_token() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn search_authorized_filter_token() {
let tenant_tokens = vec![
hashmap! {
@@ -306,7 +304,6 @@ async fn search_authorized_filter_token() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn filter_search_authorized_filter_token() {
let tenant_tokens = vec![
hashmap! {
@@ -360,7 +357,6 @@ async fn filter_search_authorized_filter_token() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn error_search_token_forbidden_parent_key() {
let tenant_tokens = vec![
hashmap! {
@@ -393,7 +389,6 @@ async fn error_search_token_forbidden_parent_key() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn error_search_forbidden_token() {
let tenant_tokens = vec![
// bad index
@@ -448,7 +443,6 @@ async fn error_search_forbidden_token() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn error_access_forbidden_routes() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");
@@ -483,7 +477,6 @@ async fn error_access_forbidden_routes() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn error_access_expired_parent_key() {
use std::{thread, time};
let mut server = Server::new_auth().await;
@@ -523,7 +516,6 @@ async fn error_access_expired_parent_key() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn error_access_modified_token() {
let mut server = Server::new_auth().await;
server.use_api_key("MASTER_KEY");

View file

@@ -25,7 +25,6 @@ async fn import_dump_v1() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v2_movie_raw() {
let temp = tempfile::tempdir().unwrap();
@@ -87,7 +86,6 @@ async fn import_dump_v2_movie_raw() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v2_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -151,7 +149,6 @@ async fn import_dump_v2_movie_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v2_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -215,7 +212,6 @@ async fn import_dump_v2_rubygems_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v3_movie_raw() {
let temp = tempfile::tempdir().unwrap();
@@ -277,7 +273,6 @@ async fn import_dump_v3_movie_raw() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v3_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -341,7 +336,6 @@ async fn import_dump_v3_movie_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v3_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -405,7 +399,6 @@ async fn import_dump_v3_rubygems_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v4_movie_raw() {
let temp = tempfile::tempdir().unwrap();
@@ -467,7 +460,6 @@ async fn import_dump_v4_movie_raw() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v4_movie_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -531,7 +523,6 @@ async fn import_dump_v4_movie_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v4_rubygems_with_settings() {
let temp = tempfile::tempdir().unwrap();
@@ -595,7 +586,6 @@ async fn import_dump_v4_rubygems_with_settings() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn import_dump_v5() {
let temp = tempfile::tempdir().unwrap();

View file

@@ -44,7 +44,6 @@ async fn error_delete_unexisting_index() {
}
#[actix_rt::test]
-#[cfg_attr(target_os = "windows", ignore)]
async fn loop_delete_add_documents() {
let server = Server::new().await;
let index = server.index("test");