Mirror of https://github.com/meilisearch/MeiliSearch (synced 2024-11-22 21:04:27 +01:00)
Commit 8bb0fcd144 (parent 9522b75454): Finish first draft of the DELETE /tasks route
@@ -20,6 +20,7 @@ use meilisearch_types::{
 use roaring::RoaringBitmap;
 use uuid::Uuid;
 
+#[derive(Debug)]
 pub(crate) enum Batch {
     Cancel(Task),
     TaskDeletion(Task),
@@ -42,6 +43,7 @@ pub(crate) enum Batch {
     },
 }
 
+#[derive(Debug)]
 pub(crate) enum IndexOperation {
     DocumentImport {
         index_uid: String,
@@ -381,7 +383,7 @@ impl IndexScheduler {
         }
 
         // 2. we get the next task to delete
-        let to_delete = self.get_kind(rtxn, Kind::TaskDeletion)?;
+        let to_delete = self.get_kind(rtxn, Kind::TaskDeletion)? & enqueued;
         if let Some(task_id) = to_delete.min() {
             let task = self
                 .get_task(rtxn, task_id)?
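The `& enqueued` intersection narrows the candidates for the next deletion batch to task-deletion tasks that are still enqueued, so tasks that were already processed are never picked up again. A minimal sketch of the bitmap operation using the roaring crate, with made-up task ids:

```rust
use roaring::RoaringBitmap;

fn main() {
    // Hypothetical task ids: tasks 3 and 9 are `taskDeletion` tasks,
    // but only task 9 is still enqueued.
    let task_deletions = RoaringBitmap::from_iter([3u32, 9]);
    let enqueued = RoaringBitmap::from_iter([5u32, 9, 12]);

    // Mirrors the patched line: `&` on bitmaps is set intersection.
    let to_delete = task_deletions & enqueued;
    assert_eq!(to_delete.min(), Some(9));
}
```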
@@ -30,13 +30,10 @@ use meilisearch_types::milli::{Index, RoaringBitmapCodec, BEU32};
 
 use crate::index_mapper::IndexMapper;
 
-const DEFAULT_LIMIT: fn() -> u32 = || 20;
-
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct Query {
-    #[serde(default = "DEFAULT_LIMIT")]
-    pub limit: u32,
+    pub limit: Option<u32>,
     pub from: Option<u32>,
     pub status: Option<Vec<Status>>,
     #[serde(rename = "type")]
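Dropping the serde default turns `limit` from "defaults to 20" into "optional", which makes an unbounded query representable: GET /tasks keeps its own `DEFAULT_LIMIT` at the HTTP layer (see below), while DELETE /tasks can pass `None` to target every matching task. A minimal sketch of the two deserialization behaviours (assuming serde and serde_json as dependencies; the struct names are illustrative):

```rust
use serde::Deserialize;

// Before: a missing `limit` silently became 20, so "all tasks" was not
// expressible through the query.
const DEFAULT_LIMIT: fn() -> u32 = || 20;

#[derive(Debug, Deserialize)]
struct QueryBefore {
    #[serde(default = "DEFAULT_LIMIT")]
    limit: u32,
}

// After: a missing `limit` deserializes to `None`, which callers such as
// the DELETE /tasks handler can read as "unbounded".
#[derive(Debug, Deserialize)]
struct QueryAfter {
    limit: Option<u32>,
}

fn main() {
    let before: QueryBefore = serde_json::from_str("{}").unwrap();
    let after: QueryAfter = serde_json::from_str("{}").unwrap();
    assert_eq!(before.limit, 20);
    assert_eq!(after.limit, None);
}
```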
@@ -48,7 +45,7 @@ pub struct Query {
 impl Default for Query {
     fn default() -> Self {
         Self {
-            limit: DEFAULT_LIMIT(),
+            limit: None,
             from: None,
             status: None,
             kind: None,
@@ -96,7 +93,10 @@ impl Query {
     }
 
     pub fn with_limit(self, limit: u32) -> Self {
-        Self { limit, ..self }
+        Self {
+            limit: Some(limit),
+            ..self
+        }
     }
 }
 
@@ -245,13 +245,20 @@ impl IndexScheduler {
     /// Return the task ids corresponding to the query
     pub fn get_task_ids(&self, query: &Query) -> Result<RoaringBitmap> {
         let rtxn = self.env.read_txn()?;
-        let last_task_id = match self.last_task_id(&rtxn)? {
-            Some(tid) => query.from.map(|from| from.min(tid)).unwrap_or(tid),
-            None => return Ok(RoaringBitmap::new()),
-        };
 
         // This is the list of all the tasks.
-        let mut tasks = RoaringBitmap::from_sorted_iter(0..last_task_id).unwrap();
+        let mut tasks = {
+            let mut all_tasks = RoaringBitmap::new();
+            for status in [
+                Status::Enqueued,
+                Status::Processing,
+                Status::Succeeded,
+                Status::Failed,
+            ] {
+                all_tasks |= self.get_status(&rtxn, status)?;
+            }
+            all_tasks
+        };
 
         if let Some(uids) = &query.uid {
             tasks &= RoaringBitmap::from_iter(uids);
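Instead of synthesizing the full task set as the contiguous range `0..last_task_id`, the set is now built as the union of the four per-status bitmaps, so ids that no longer exist (for example tasks removed by an earlier task deletion) are excluded. A sketch of the union with the roaring crate, where the hypothetical per-status bitmaps stand in for `self.get_status(&rtxn, status)?`:

```rust
use roaring::RoaringBitmap;

fn main() {
    // Made-up per-status task ids.
    let enqueued = RoaringBitmap::from_iter([7u32, 8]);
    let processing = RoaringBitmap::from_iter([6u32]);
    let succeeded = RoaringBitmap::from_iter([0u32, 2, 4]);
    let failed = RoaringBitmap::from_iter([3u32]);

    let mut all_tasks = RoaringBitmap::new();
    for status_bitmap in [enqueued, processing, succeeded, failed] {
        all_tasks |= status_bitmap; // in-place set union
    }

    // Ids 1 and 5 (say, deleted earlier) are absent, unlike with the
    // old `0..last_task_id` range.
    assert!(!all_tasks.contains(1));
    assert_eq!(all_tasks.len(), 7);
}
```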
@@ -289,8 +296,14 @@ impl IndexScheduler {
         let rtxn = self.env.read_txn()?;
         let tasks = self.get_task_ids(&query)?;
 
-        let tasks =
-            self.get_existing_tasks(&rtxn, tasks.into_iter().rev().take(query.limit as usize))?;
+        let tasks = self.get_existing_tasks(
+            &rtxn,
+            tasks
+                .into_iter()
+                .rev()
+                .take(query.limit.unwrap_or(u32::MAX) as usize),
+        )?;
 
         let (started_at, processing) = self
             .processing_tasks
             .read()
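Since `limit` is now an `Option`, `query.limit.unwrap_or(u32::MAX)` makes `None` behave as "no limit": the `take` bound is simply never reached by a realistic task set. A small illustration of the iterator shape (plain std, illustrative ids):

```rust
fn main() {
    let ids = vec![1u32, 2, 3, 4, 5];

    // Mirrors `tasks.into_iter().rev().take(limit.unwrap_or(u32::MAX) as usize)`.
    let take_n = |limit: Option<u32>| -> Vec<u32> {
        ids.iter()
            .copied()
            .rev() // newest tasks first
            .take(limit.unwrap_or(u32::MAX) as usize)
            .collect()
    };

    assert_eq!(take_n(Some(2)), vec![5, 4]);
    assert_eq!(take_n(None), vec![5, 4, 3, 2, 1]); // None == unbounded
}
```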
@@ -21,9 +21,12 @@ use super::fold_star_or;
 const DEFAULT_LIMIT: fn() -> u32 = || 20;
 
 pub fn configure(cfg: &mut web::ServiceConfig) {
-    cfg.service(web::resource("").route(web::get().to(SeqHandler(get_tasks))))
-        .service(web::resource("/{task_id}").route(web::get().to(SeqHandler(get_task))))
-        .service(web::resource("").route(web::delete().to(SeqHandler(delete_tasks))));
+    cfg.service(
+        web::resource("")
+            .route(web::get().to(SeqHandler(get_tasks)))
+            .route(web::delete().to(SeqHandler(delete_tasks))),
+    )
+    .service(web::resource("/{task_id}").route(web::get().to(SeqHandler(get_task))));
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize)]
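The old wiring registered `web::resource("")` twice, once with the GET route and again with the DELETE route; since the first matching resource wins, the DELETE route was likely unreachable (the first resource would answer 405 for non-GET methods rather than fall through). The fix hangs both methods off a single resource. A minimal self-contained sketch of the corrected layout (assuming actix-web 4; the handler bodies are stand-ins):

```rust
use actix_web::{web, App, HttpResponse, HttpServer, Responder};

async fn get_tasks() -> impl Responder {
    HttpResponse::Ok().body("list tasks")
}

async fn delete_tasks() -> impl Responder {
    HttpResponse::Ok().body("delete tasks")
}

async fn get_task(path: web::Path<u32>) -> impl Responder {
    HttpResponse::Ok().body(format!("task {}", path.into_inner()))
}

fn configure(cfg: &mut web::ServiceConfig) {
    // One resource carries both methods; a second resource on the same
    // path would be shadowed by the first.
    cfg.service(
        web::resource("")
            .route(web::get().to(get_tasks))
            .route(web::delete().to(delete_tasks)),
    )
    .service(web::resource("/{task_id}").route(web::get().to(get_task)));
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(web::scope("/tasks").configure(configure)))
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}
```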
@@ -63,8 +66,8 @@ pub struct TaskView {
     pub finished_at: Option<OffsetDateTime>,
 }
 
-impl From<Task> for TaskView {
-    fn from(task: Task) -> Self {
+impl TaskView {
+    fn from_task(task: &Task) -> TaskView {
         TaskView {
             uid: task.uid,
             index_uid: task
@@ -72,7 +75,7 @@ impl From<Task> for TaskView {
                 .and_then(|vec| vec.first().map(|i| i.to_string())),
             status: task.status,
             kind: task.kind.as_kind(),
-            details: task.details.map(DetailsView::from),
+            details: task.details.clone().map(DetailsView::from),
             error: task.error.clone(),
             duration: task
                 .started_at
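Together with the previous hunk, the conversion changes from a consuming `From<Task>` to a borrowing `TaskView::from_task(&Task)`, which is why owned fields now need `.clone()` (and why `Clone` is derived on `Task` and `KindWithContent` at the end of this commit). A simplified sketch of the trade-off, with stand-in types in place of the real ones:

```rust
#[derive(Clone, Debug)]
struct Details(String);

struct Task {
    uid: u32,
    details: Option<Details>,
}

struct TaskView {
    uid: u32,
    details: Option<Details>,
}

impl TaskView {
    // Borrowing lets the caller keep using `task` after building the
    // view, at the cost of cloning owned fields out of the reference.
    fn from_task(task: &Task) -> TaskView {
        TaskView {
            uid: task.uid,                 // Copy types come out for free
            details: task.details.clone(), // owned data must be cloned
        }
    }
}

fn main() {
    let task = Task { uid: 1, details: Some(Details("deletion".into())) };
    let view = TaskView::from_task(&task);
    let _again = TaskView::from_task(&task); // `task` is still usable
    assert_eq!(view.uid, 1);
}
```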
@@ -172,38 +175,44 @@ pub struct TasksFilterQuery {
     from: Option<TaskId>,
 }
 
-#[rustfmt::skip]
-fn task_type_matches_content(type_: &TaskType, content: &TaskContent) -> bool {
-    matches!((type_, content),
-        (TaskType::IndexCreation, TaskContent::IndexCreation { .. })
-        | (TaskType::IndexUpdate, TaskContent::IndexUpdate { .. })
-        | (TaskType::IndexDeletion, TaskContent::IndexDeletion { .. })
-        | (TaskType::DocumentAdditionOrUpdate, TaskContent::DocumentAddition { .. })
-        | (TaskType::DocumentDeletion, TaskContent::DocumentDeletion{ .. })
-        | (TaskType::SettingsUpdate, TaskContent::SettingsUpdate { .. })
-        | (TaskType::DumpCreation, TaskContent::Dump { .. })
-    )
-}
-
-#[rustfmt::skip]
-fn task_status_matches_events(status: &TaskStatus, events: &[TaskEvent]) -> bool {
-    events.last().map_or(false, |event| {
-        matches!((status, event),
-            (TaskStatus::Enqueued, TaskEvent::Created(_))
-            | (TaskStatus::Processing, TaskEvent::Processing(_) | TaskEvent::Batched { .. })
-            | (TaskStatus::Succeeded, TaskEvent::Succeeded { .. })
-            | (TaskStatus::Failed, TaskEvent::Failed { .. }),
-        )
-    })
+#[derive(Deserialize, Debug)]
+#[serde(rename_all = "camelCase", deny_unknown_fields)]
+pub struct TaskDeletionQuery {
+    #[serde(rename = "type")]
+    type_: Option<CS<Kind>>,
+    uid: Option<CS<u32>>,
+    status: Option<CS<Status>>,
+    index_uid: Option<CS<IndexUid>>,
 }
 
 async fn delete_tasks(
     index_scheduler: GuardedData<ActionPolicy<{ actions::TASKS_DELETE }>, Data<IndexScheduler>>,
-    params: web::Query<Query>,
+    params: web::Query<TaskDeletionQuery>,
     _req: HttpRequest,
     _analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
-    let query = params.into_inner();
+    let TaskDeletionQuery {
+        type_,
+        uid,
+        status,
+        index_uid,
+    } = params.into_inner();
+
+    let kind: Option<Vec<_>> = type_.map(|x| x.into_iter().collect());
+    let uid: Option<Vec<_>> = uid.map(|x| x.into_iter().collect());
+    let status: Option<Vec<_>> = status.map(|x| x.into_iter().collect());
+    let index_uid: Option<Vec<_>> =
+        index_uid.map(|x| x.into_iter().map(|x| x.to_string()).collect());
+
+    let query = Query {
+        limit: None,
+        from: None,
+        status,
+        kind,
+        index_uid,
+        uid,
+    };
 
     let filtered_query = filter_out_inaccessible_indexes_from_query(&index_scheduler, &query);
 
     let tasks = index_scheduler.get_task_ids(&filtered_query)?;
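The new `TaskDeletionQuery` accepts comma-separated filters (`?uid=1,2&type=...`) via the `CS<T>` wrapper already used by the task routes, then lowers them into the scheduler's `Query` with `limit: None` so the deletion targets every match rather than just a first page. Below is a self-contained, simplified stand-in for `CS` (the real helper comes from the serde-cs crate; this sketch assumes serde and serde_json as dependencies and uses JSON in place of the query-string deserializer):

```rust
use std::str::FromStr;

use serde::{Deserialize, Deserializer};

// Simplified stand-in: deserialize one "a,b,c" string into a Vec<T>.
#[derive(Debug)]
struct CS<T>(Vec<T>);

impl<'de, T: FromStr> Deserialize<'de> for CS<T>
where
    T::Err: std::fmt::Display,
{
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let s = String::deserialize(deserializer)?;
        s.split(',')
            .map(|part| part.trim().parse::<T>().map_err(serde::de::Error::custom))
            .collect::<Result<Vec<T>, D::Error>>()
            .map(CS)
    }
}

#[derive(Debug, Deserialize)]
struct TaskDeletionQuery {
    uid: Option<CS<u32>>,
}

fn main() {
    // `?uid=1,2,3` arrives as one string and fans out into a list,
    // mirroring `uid.map(|x| x.into_iter().collect())` in the handler.
    let q: TaskDeletionQuery = serde_json::from_str(r#"{ "uid": "1,2,3" }"#).unwrap();
    let uid: Option<Vec<u32>> = q.uid.map(|cs| cs.0);
    assert_eq!(uid, Some(vec![1, 2, 3]));
}
```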
@@ -215,7 +224,7 @@ async fn delete_tasks(
     // TODO: Lo: analytics
     let task = index_scheduler.register(task_deletion)?;
 
-    let task_view: TaskView = task.into();
+    let task_view = TaskView::from_task(&task);
 
     Ok(HttpResponse::Ok().json(task_view))
 }
@@ -288,9 +297,13 @@ async fn get_tasks(
     filters.from = from;
     // We +1 just to know if there is more after this "page" or not.
     let limit = limit.saturating_add(1);
-    filters.limit = limit;
+    filters.limit = Some(limit);
 
-    let mut tasks_results: Vec<_> = index_scheduler.get_tasks(filters)?.into_iter().collect();
+    let mut tasks_results: Vec<TaskView> = index_scheduler
+        .get_tasks(filters)?
+        .into_iter()
+        .map(|t| TaskView::from_task(&t))
+        .collect();
 
     // If we were able to fetch the number +1 tasks we asked
     // it means that there is more to come.
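`saturating_add(1)` implements the usual "limit + 1" pagination probe: fetch one row more than the page size, and if it comes back there is a next page; the sentinel row is dropped before responding. A small illustration with made-up task ids:

```rust
fn main() {
    // All matching task ids, newest first, standing in for the scheduler.
    let all_tasks: Vec<u32> = vec![50, 42, 37, 20, 11];

    let page_limit: u32 = 3;
    // Ask for limit + 1, exactly like `limit.saturating_add(1)` above.
    let mut results: Vec<u32> = all_tasks
        .iter()
        .copied()
        .take(page_limit as usize + 1)
        .collect();

    // Getting limit + 1 rows back proves there is a next page; pop the
    // sentinel so the response still contains `page_limit` items.
    let has_next_page = results.len() == page_limit as usize + 1;
    if has_next_page {
        results.pop();
    }

    assert_eq!(results, vec![50, 42, 37]);
    assert!(has_next_page);
}
```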
@@ -338,7 +351,8 @@ async fn get_task(
     filters.uid = Some(vec![task_id]);
 
     if let Some(task) = index_scheduler.get_tasks(filters)?.first() {
-        Ok(HttpResponse::Ok().json(task))
+        let task_view = TaskView::from_task(&task);
+        Ok(HttpResponse::Ok().json(task_view))
     } else {
         Err(index_scheduler::Error::TaskNotFound(task_id).into())
     }
@@ -355,8 +369,6 @@ fn filter_out_inaccessible_indexes_from_query<const ACTION: u8>(
 
     let search_rules = &index_scheduler.filters().search_rules;
 
-    let mut query = index_scheduler::Query::default();
-
     // We filter on potential indexes and make sure that the search filter
     // restrictions are also applied.
     match indexes {
@@ -16,6 +16,7 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
         ("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
         ("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
         ("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
+        ("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
         ("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
         ("GET", "/tasks/0") => hashset!{"tasks.get", "tasks.*", "*"},
         ("PATCH", "/indexes/products/") => hashset!{"indexes.update", "indexes.*", "*"},
@@ -16,7 +16,7 @@ use crate::{
 
 pub type TaskId = u32;
 
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Task {
     pub uid: TaskId,
@@ -73,7 +73,7 @@ impl Task {
     }
 }
 
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum KindWithContent {
     DocumentImport {
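These `Clone` derives are what let `TaskView::from_task(&Task)` clone `details` and `error` out of a borrowed task: `#[derive(Clone)]` on `Task` only compiles if every field type, including `KindWithContent`, is itself `Clone`. A minimal sketch with stand-in field types:

```rust
// `Error` and `KindWithContent` stand in for the real field types.
#[derive(Clone)]
struct Error(String);

#[derive(Clone)]
enum KindWithContent {
    TaskDeletion { query: String },
}

// Deriving `Clone` here requires the derive on `KindWithContent` too.
#[derive(Clone)]
struct Task {
    uid: u32,
    error: Option<Error>,
    kind: KindWithContent,
}

fn main() {
    let task = Task {
        uid: 0,
        error: None,
        kind: KindWithContent::TaskDeletion { query: "status=failed".into() },
    };
    // `task.error.clone()` in `TaskView::from_task` relies on this derive.
    let _copy = task.clone();
    let _err = task.error.clone();
}
```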