Add the payload size to the parameters

Kerollmops, 2025-06-25 18:24:50 +02:00, committed by Clément Renault
parent a743da3061
commit 63031219c5
GPG key ID: F250A4C4E3AE5F5F (no known key found for this signature in database)
13 changed files with 130 additions and 41 deletions


@@ -212,20 +212,23 @@ impl<'a> Dump<'a> {
                         KindWithContent::DumpCreation { keys, instance_uid }
                     }
                     KindDump::SnapshotCreation => KindWithContent::SnapshotCreation,
-                    KindDump::Export { url, indexes, api_key } => KindWithContent::Export {
-                        url,
-                        api_key,
-                        indexes: indexes
-                            .into_iter()
-                            .map(|(pattern, settings)| {
-                                Ok((
-                                    IndexUidPattern::try_from(pattern)
-                                        .map_err(|_| Error::CorruptedDump)?,
-                                    settings,
-                                ))
-                            })
-                            .collect::<Result<_, Error>>()?,
-                    },
+                    KindDump::Export { url, api_key, payload_size, indexes } => {
+                        KindWithContent::Export {
+                            url,
+                            api_key,
+                            payload_size,
+                            indexes: indexes
+                                .into_iter()
+                                .map(|(pattern, settings)| {
+                                    Ok((
+                                        IndexUidPattern::try_from(pattern)
+                                            .map_err(|_| Error::CorruptedDump)?,
+                                        settings,
+                                    ))
+                                })
+                                .collect::<Result<_, Error>>()?,
+                        }
+                    }
                     KindDump::UpgradeDatabase { from } => KindWithContent::UpgradeDatabase { from },
                 },
             };
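
For orientation, a minimal sketch of what the dump-side KindDump::Export variant plausibly looks like after this change; the field types and the ExportIndexSettings stub are assumptions inferred from the hunks in this commit, not code taken from it:

    use std::collections::BTreeMap;

    use byte_unit::Byte;

    // Placeholder for the real per-index export settings type.
    pub struct ExportIndexSettings;

    // Hypothetical shape of the dump-side Export task. Index patterns are
    // stored as raw strings in the dump and only validated on import, which
    // is why the hunk above runs IndexUidPattern::try_from on each key and
    // maps failures to Error::CorruptedDump.
    pub enum KindDump {
        Export {
            url: String,
            api_key: Option<String>,
            payload_size: Option<Byte>,
            indexes: BTreeMap<String, ExportIndexSettings>,
        },
        // ...other task kinds elided
    }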


@@ -289,8 +289,8 @@ fn snapshot_details(d: &Details) -> String {
         Details::IndexSwap { swaps } => {
             format!("{{ swaps: {swaps:?} }}")
         }
-        Details::Export { url, api_key, indexes } => {
-            format!("{{ url: {url:?}, api_key: {api_key:?}, indexes: {indexes:?} }}")
+        Details::Export { url, api_key, payload_size, indexes } => {
+            format!("{{ url: {url:?}, api_key: {api_key:?}, payload_size: {payload_size:?}, indexes: {indexes:?} }}")
         }
         Details::UpgradeDatabase { from, to } => {
             format!("{{ from: {from:?}, to: {to:?} }}")


@@ -362,12 +362,19 @@ impl IndexScheduler {
                 Ok((vec![task], ProcessBatchInfo::default()))
             }
             Batch::Export { mut task } => {
-                let KindWithContent::Export { url, indexes, api_key } = &task.kind else {
+                let KindWithContent::Export { url, api_key, payload_size, indexes } = &task.kind
+                else {
                     unreachable!()
                 };
 
                 let ret = catch_unwind(AssertUnwindSafe(|| {
-                    self.process_export(url, indexes, api_key.as_deref(), progress)
+                    self.process_export(
+                        url,
+                        api_key.as_deref(),
+                        payload_size.as_ref(),
+                        indexes,
+                        progress,
+                    )
                 }));
 
                 match ret {
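
The catch_unwind(AssertUnwindSafe(..)) wrapper above keeps a panicking export from unwinding into the scheduler loop. A self-contained sketch of that pattern, with hypothetical function names standing in for the real ones:

    use std::panic::{catch_unwind, AssertUnwindSafe};

    // Stand-in for the real export body; it may panic deep inside.
    fn do_export() -> Result<(), String> {
        Ok(())
    }

    fn run_export() -> Result<(), String> {
        // AssertUnwindSafe asserts the captured state is safe to reuse after
        // a panic; catch_unwind converts the panic into an Err variant so the
        // task can be marked failed instead of crashing the scheduler.
        match catch_unwind(AssertUnwindSafe(|| do_export())) {
            Ok(res) => res,
            Err(_panic) => Err("export panicked".into()),
        }
    }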


@@ -4,6 +4,7 @@ use std::sync::atomic;
 use std::time::Duration;
 
 use backoff::ExponentialBackoff;
 use byte_unit::Byte;
+use flate2::write::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::index_uid_pattern::IndexUidPattern;
@@ -25,8 +26,9 @@ impl IndexScheduler {
     pub(super) fn process_export(
         &self,
         base_url: &str,
-        indexes: &BTreeMap<IndexUidPattern, ExportIndexSettings>,
         api_key: Option<&str>,
+        payload_size: Option<&Byte>,
+        indexes: &BTreeMap<IndexUidPattern, ExportIndexSettings>,
         progress: Progress,
     ) -> Result<()> {
         #[cfg(test)]
@@ -122,7 +124,7 @@ impl IndexScheduler {
         let (step, progress_step) = AtomicDocumentStep::new(total_documents);
         progress.update_progress(progress_step);
 
-        let limit = 50 * 1024 * 1024; // 50 MiB
+        let limit = payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(50 * 1024 * 1024); // defaults to 50 MiB
         let documents_url = format!("{base_url}/indexes/{uid}/documents");
 
         request_threads()
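
The changed line above swaps the hard-coded 50 MiB batch limit for an optional caller-supplied size. A standalone sketch of just that computation, assuming byte_unit's Byte::from_u64 for the test values (as_u64 itself appears in the hunk):

    use byte_unit::Byte;

    /// Effective per-request payload limit, defaulting to 50 MiB.
    fn effective_limit(payload_size: Option<&Byte>) -> usize {
        payload_size.map(|ps| ps.as_u64() as usize).unwrap_or(50 * 1024 * 1024)
    }

    fn main() {
        assert_eq!(effective_limit(None), 50 * 1024 * 1024);
        let ten_mib = Byte::from_u64(10 * 1024 * 1024);
        assert_eq!(effective_limit(Some(&ten_mib)), 10 * 1024 * 1024);
    }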


@@ -601,7 +601,7 @@ impl crate::IndexScheduler {
             Details::Dump { dump_uid: _ } => {
                 assert_eq!(kind.as_kind(), Kind::DumpCreation);
             }
-            Details::Export { url: _, api_key: _, indexes: _ } => {
+            Details::Export { url: _, api_key: _, payload_size: _, indexes: _ } => {
                 assert_eq!(kind.as_kind(), Kind::Export);
             }
             Details::UpgradeDatabase { from: _, to: _ } => {
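
A design note on the test hunk above: spelling out every field with _ rather than using .. makes the assertion self-maintaining, because adding a field such as payload_size is a compile error until the pattern is updated. A minimal illustration with a hypothetical struct:

    struct ExportDetails {
        url: String,
        payload_size: Option<u64>,
    }

    fn check(d: &ExportDetails) {
        // Exhaustive field list: adding a field to ExportDetails breaks this
        // pattern at compile time, whereas `ExportDetails { .. }` would not.
        let ExportDetails { url: _, payload_size: _ } = d;
    }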