diff --git a/meilisearch-http/src/lib.rs b/meilisearch-http/src/lib.rs
index 858f49924..fcf07587f 100644
--- a/meilisearch-http/src/lib.rs
+++ b/meilisearch-http/src/lib.rs
@@ -31,7 +31,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<MeiliSearch> {
 
     // disable autobatching?
     AUTOBATCHING_ENABLED.store(
-        opt.scheduler_options.disable_auto_batching,
+        !opt.scheduler_options.disable_auto_batching,
         std::sync::atomic::Ordering::Relaxed,
     );
 
diff --git a/meilisearch-http/src/task.rs b/meilisearch-http/src/task.rs
index 06bba1f76..08009f7da 100644
--- a/meilisearch-http/src/task.rs
+++ b/meilisearch-http/src/task.rs
@@ -231,7 +231,7 @@ pub struct TaskView {
     #[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
     finished_at: Option<OffsetDateTime>,
     #[serde(skip_serializing_if = "Option::is_none")]
-    batch_uid: Option<Option<BatchId>>,
+    batch_uid: Option<BatchId>,
 }
 
 impl From<Task> for TaskView {
@@ -380,15 +380,15 @@ impl From<Task> for TaskView {
 
         let duration = finished_at.zip(started_at).map(|(tf, ts)| (tf - ts));
 
-        let batch_uid = if AUTOBATCHING_ENABLED.load(std::sync::atomic::Ordering::Relaxed) {
-            let id = events.iter().find_map(|e| match e {
-                TaskEvent::Batched { batch_id, .. } => Some(*batch_id),
-                _ => None,
-            });
-            Some(id)
-        } else {
-            None
-        };
+        let batch_uid = AUTOBATCHING_ENABLED
+            .load(std::sync::atomic::Ordering::Relaxed)
+            .then(|| {
+                events.iter().find_map(|e| match e {
+                    TaskEvent::Batched { batch_id, .. } => Some(*batch_id),
+                    _ => None,
+                })
+            })
+            .flatten();
 
         Self {
             uid: id,
diff --git a/meilisearch-http/tests/documents/add_documents.rs b/meilisearch-http/tests/documents/add_documents.rs
index 9ccd69e63..dae03d435 100644
--- a/meilisearch-http/tests/documents/add_documents.rs
+++ b/meilisearch-http/tests/documents/add_documents.rs
@@ -1126,10 +1126,10 @@ async fn batch_several_documents_addition() {
     index.wait_task(4).await;
 
     // run a second completely failing batch
+    documents[40] = json!({"title": "error", "desc": "error"});
+    documents[70] = json!({"title": "error", "desc": "error"});
+    documents[130] = json!({"title": "error", "desc": "error"});
     for chunk in documents.chunks(30) {
-        let mut chunk = chunk.to_vec();
-        chunk[0] = json!({"title": "error", "desc": "error"});
-
         index.add_documents(json!(chunk), Some("id")).await;
     }
     // wait second batch of documents to finish
@@ -1144,16 +1144,23 @@ async fn batch_several_documents_addition() {
         json!(
            {
                "results": [
-                    {"uid": 9, "status": "failed"},
-                    {"uid": 8, "status": "failed"},
-                    {"uid": 7, "status": "failed"},
-                    {"uid": 6, "status": "failed"},
-                    {"uid": 5, "status": "failed"},
-                    {"uid": 4, "status": "succeeded"},
-                    {"uid": 3, "status": "failed"},
-                    {"uid": 2, "status": "succeeded"},
-                    {"uid": 1, "status": "succeeded"},
-                    {"uid": 0, "status": "succeeded"},
+                    // Completely failing batch
+                    {"uid": 9, "status": "failed", "batchUid": 6},
+                    {"uid": 8, "status": "failed", "batchUid": 6},
+                    {"uid": 7, "status": "failed", "batchUid": 6},
+                    {"uid": 6, "status": "failed", "batchUid": 6},
+
+                    // Inter-batch
+                    {"uid": 5, "status": "succeeded", "batchUid": 5},
+
+                    // 1 failure in a succeeded batch
+                    {"uid": 4, "status": "succeeded", "batchUid": 1},
+                    {"uid": 3, "status": "failed", "batchUid": 1},
+                    {"uid": 2, "status": "succeeded", "batchUid": 1},
+                    {"uid": 1, "status": "succeeded", "batchUid": 1},
+
+                    // Inter-batch
+                    {"uid": 0, "status": "succeeded", "batchUid": 0},
                ]
            }
        )
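
Note on the `task.rs` change: `bool::then` returns `Some(closure result)` when the receiver is `true` and `None` otherwise, so chaining `.flatten()` collapses the nested `Option<Option<BatchId>>` the old `if`/`else` produced into the single `Option<BatchId>` the new field type expects. A minimal standalone sketch of the idiom (the `batch_uid` helper and `u32` id type here are illustrative stand-ins, not part of the patch):

    fn batch_uid(enabled: bool, batch_ids: &[Option<u32>]) -> Option<u32> {
        // `then` yields Some(..) only when `enabled` is true; `flatten`
        // merges the nested Option left when `find_map` finds nothing.
        enabled
            .then(|| batch_ids.iter().copied().find_map(|id| id))
            .flatten()
    }

    fn main() {
        assert_eq!(batch_uid(false, &[Some(6)]), None); // autobatching disabled
        assert_eq!(batch_uid(true, &[None, Some(6)]), Some(6)); // first batch id wins
        assert_eq!(batch_uid(true, &[None]), None); // enabled, but never batched
    }

One observable consequence of the flattened type: with `skip_serializing_if = "Option::is_none"`, a task that is enabled for autobatching but has no `Batched` event now omits `batchUid` entirely, where the old `Some(None)` serialized as `"batchUid": null`.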