3673: Handle the task queue being full r=irevoire a=dureuill

# Pull Request

## Related issue
Fixes a remaining issue with #3659, where it was not always possible to send tasks again even after deleting some tasks when prompted.

## Tests

- See the integration test.
- Also manually tested with a 1MiB task queue: becoming unblocked was not possible before this PR and is possible now.

## What does this PR do?
- Use the `non_free_pages_size` method to compute the space occupied by the task db, instead of `real_disk_size`, which is not always affected by task deletion (see the sketch below this list).
- Expand the test so that it adds a task after the deletion. The expanded test fails before this PR and succeeds with it.
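
For intuition, here is a minimal, self-contained Rust sketch of the gate (my own illustration, not code from this PR): it uses plain integers in place of a heed `Env`, with `used_bytes` standing in for `non_free_pages_size()`. The point is that LMDB does not shrink its data file when entries are deleted, so a check based on `real_disk_size` can stay above 50% forever, while one based on the non-free pages drops as soon as tasks are actually deleted.

```rust
/// Standalone sketch (hypothetical helper, not Meilisearch code): mimics the
/// 50% gate shown in the diff below.
///
/// `used_bytes` stands for `Env::non_free_pages_size()` (bytes in pages that
/// actually hold data). Using the on-disk file size instead would keep the
/// queue "full" forever, because LMDB does not shrink its file after deletions.
fn can_enqueue(is_nonempty_task_deletion: bool, used_bytes: u64, map_size: u64) -> bool {
    // A task deletion that deletes at least one task is always accepted,
    // otherwise a full queue could never be unblocked.
    if is_nonempty_task_deletion {
        return true;
    }
    // Any other task is refused once more than 50% of the map is occupied.
    (used_bytes * 100) / map_size <= 50
}

fn main() {
    let map_size = 1024 * 1024; // a 1 MiB task queue, as in the manual test

    // Queue is more than half full: regular tasks are refused…
    assert!(!can_enqueue(false, 900 * 1024, map_size));
    // …but a non-empty task deletion still goes through…
    assert!(can_enqueue(true, 900 * 1024, map_size));
    // …and once the used pages drop, regular tasks are accepted again,
    // even though the file on disk may still be 1 MiB.
    assert!(can_enqueue(false, 100 * 1024, map_size));
}
```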

Co-authored-by: Louis Dureuil <louis@meilisearch.com>
Committed by bors[bot] on 2023-04-13 16:24:16 +00:00 via GitHub as commit 4b953d62fb.
2 changed files with 18 additions and 2 deletions

@@ -822,7 +822,7 @@ impl IndexScheduler {
         // if the task doesn't delete anything and 50% of the task queue is full, we must refuse to enqueue the incomming task
         if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } if !tasks.is_empty())
-            && (self.env.real_disk_size()? * 100) / self.env.map_size()? as u64 > 50
+            && (self.env.non_free_pages_size()? * 100) / self.env.map_size()? as u64 > 50
         {
             return Err(Error::NoSpaceLeftInTaskQueue);
         }

@@ -1050,7 +1050,7 @@ async fn test_task_queue_is_full() {
     "###);
     // But we should still be able to register tasks deletion IF they delete something
-    let (result, code) = server.delete_tasks("uids=0").await;
+    let (result, code) = server.delete_tasks("uids=*").await;
     snapshot!(code, @"200 OK");
     snapshot!(json_string!(result, { ".enqueuedAt" => "[date]", ".taskUid" => "uid" }), @r###"
     {
@@ -1062,6 +1062,22 @@ async fn test_task_queue_is_full() {
     }
     "###);
+    let result = server.wait_task(result["taskUid"].as_u64().unwrap()).await;
+    snapshot!(json_string!(result["status"]), @r###""succeeded""###);
+    // Now we should be able to register tasks again
+    let (result, code) = server.create_index(json!({ "uid": "doggo" })).await;
+    snapshot!(code, @"202 Accepted");
+    snapshot!(json_string!(result, { ".enqueuedAt" => "[date]", ".taskUid" => "uid" }), @r###"
+    {
+      "taskUid": "uid",
+      "indexUid": "doggo",
+      "status": "enqueued",
+      "type": "indexCreation",
+      "enqueuedAt": "[date]"
+    }
+    "###);
     // we're going to fill up the queue once again
     loop {
         let (res, code) = server.delete_tasks("uids=0").await;