fix clippy

Irevoire 2022-10-22 16:35:42 +02:00 committed by Clément Renault
parent 874499a2d2
commit e9055f5572
31 changed files with 125 additions and 124 deletions

View file

@ -170,7 +170,7 @@ impl BatchKind {
// We don't batch any of these operations
(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap) => Break(this),
// We must not batch tasks that don't have the same index creation rights if the index doesn't already exist.
(this, kind) if index_already_exists == false && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
(this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
Break(this)
},
// The index deletion can batch with everything but must stop after
@ -443,7 +443,7 @@ mod tests {
input: impl IntoIterator<Item = KindWithContent>,
) -> Option<(BatchKind, bool)> {
autobatch(
input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind.into())).collect(),
input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind)).collect(),
index_already_exists,
)
}
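The two hunks above appear to address clippy's `bool_comparison` and `useless_conversion` lints: a boolean is tested directly instead of being compared against `false`, and a redundant `.into()` on a value that already has the target type is dropped. A minimal sketch of both patterns, using hypothetical names:

// Hypothetical illustration of the two lints fixed above.
fn should_break(index_already_exists: bool) -> bool {
    // clippy::bool_comparison: prefer `!flag` over `flag == false`.
    !index_already_exists
}

fn widen(id: usize) -> u64 {
    // clippy::useless_conversion fires when a value is converted into the
    // type it already has, e.g. `(id as u64).into()`; the cast alone is enough.
    id as u64
}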

View file

@ -242,7 +242,7 @@ impl IndexScheduler {
match task.kind {
KindWithContent::SettingsUpdate {
ref new_settings, is_deletion, ..
} => settings.push((is_deletion, new_settings.clone())),
} => settings.push((is_deletion, *new_settings.clone())),
_ => unreachable!(),
}
}
@ -424,7 +424,7 @@ impl IndexScheduler {
// AT LEAST one index. We can use either the right or the left one; it doesn't
// matter.
let index_name = task.indexes().unwrap()[0];
let index_already_exists = self.index_mapper.exists(rtxn, &index_name)?;
let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;
let index_tasks = self.index_tasks(rtxn, index_name)? & enqueued;
@ -550,7 +550,7 @@ impl IndexScheduler {
} else {
unreachable!();
};
let dump = dump::DumpWriter::new(instance_uid.clone())?;
let dump = dump::DumpWriter::new(*instance_uid)?;
// 1. dump the keys
let mut dump_keys = dump.create_keys()?;
@ -566,7 +566,7 @@ impl IndexScheduler {
for ret in self.all_tasks.iter(&rtxn)? {
let (_, mut t) = ret?;
let status = t.status;
let content_file = t.content_uuid().map(|uuid| uuid.clone());
let content_file = t.content_uuid().copied();
// In the case where we're dumping ourselves, we want to be marked as finished
// to not loop over ourselves indefinitely.
@ -745,11 +745,11 @@ impl IndexScheduler {
/// Swap the index `lhs` with the index `rhs`.
fn apply_index_swap(&self, wtxn: &mut RwTxn, task_id: u32, lhs: &str, rhs: &str) -> Result<()> {
// 1. Verify that both lhs and rhs are existing indexes
let index_lhs_exists = self.index_mapper.index_exists(&wtxn, lhs)?;
let index_lhs_exists = self.index_mapper.index_exists(wtxn, lhs)?;
if !index_lhs_exists {
return Err(Error::IndexNotFound(lhs.to_owned()));
}
let index_rhs_exists = self.index_mapper.index_exists(&wtxn, rhs)?;
let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
if !index_rhs_exists {
return Err(Error::IndexNotFound(rhs.to_owned()));
}
@ -764,7 +764,7 @@ impl IndexScheduler {
// 3. before_name -> new_name in the task's KindWithContent
for task_id in &index_lhs_task_ids | &index_rhs_task_ids {
let mut task = self.get_task(&wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
let mut task = self.get_task(wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
swap_index_uid_in_task(&mut task, (lhs, rhs));
self.all_tasks.put(wtxn, &BEU32::new(task_id), &task)?;
}
@ -934,7 +934,7 @@ impl IndexScheduler {
// TODO merge the settings to only do *one* reindexation.
for (task, (_, settings)) in tasks.iter_mut().zip(settings) {
let checked_settings = settings.clone().check();
task.details = Some(Details::SettingsUpdate { settings });
task.details = Some(Details::SettingsUpdate { settings: Box::new(settings) });
let mut builder =
milli::update::Settings::new(index_wtxn, index, indexer_config);
@ -1023,7 +1023,7 @@ impl IndexScheduler {
let enqueued_tasks = self.get_status(wtxn, Status::Enqueued)?;
let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();
let all_task_ids = self.all_task_ids(&wtxn)?;
let all_task_ids = self.all_task_ids(wtxn)?;
let mut to_delete_tasks = all_task_ids & matched_tasks;
to_delete_tasks -= processing_tasks;
to_delete_tasks -= enqueued_tasks;
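The hunks above mostly silence `clippy::needless_borrow` (passing `&x` where `x` is already a reference, as with `rtxn`, `wtxn`, and `index_name`) and what looks like `clippy::clone_on_copy` / `map_clone` (cloning `Copy` values such as `Uuid` instead of dereferencing or using `Option::copied`). A small self-contained sketch of the `copied` pattern, with a `u128` standing in for `Uuid`:

use std::collections::HashMap;

// Hypothetical task table: the u128 ids stand in for `Uuid`, which is `Copy`.
fn content_uuid(files: &HashMap<String, u128>, task: &str) -> Option<u128> {
    // clippy::clone_on_copy: instead of `.map(|uuid| uuid.clone())`
    // on the `Option<&u128>`, copy the value out with `copied()`.
    files.get(task).copied()
}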

View file

@ -4,6 +4,7 @@ use thiserror::Error;
use crate::TaskId;
#[allow(clippy::large_enum_variant)]
#[derive(Error, Debug)]
pub enum Error {
#[error("Index `{0}` not found.")]
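`clippy::large_enum_variant` warns when one variant of an enum is much larger than the rest, because every value of the enum then pays for the biggest variant. The usual fix is to box the large payload; here the lint is allowed instead, while the `Box::new(settings)` and `*new_settings.clone()` hunks above suggest the boxing fix was applied to the settings payloads. A hedged sketch of the boxing alternative, with made-up sizes:

// Hypothetical enum: one variant is far larger than the others.
enum TaskError {
    NotFound(u32),
    // Boxing the big payload keeps the enum itself small; only this
    // variant pays for the heap allocation.
    CorruptedDump(Box<[u8; 4096]>),
}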

View file

@ -39,6 +39,7 @@ pub struct IndexMapper {
}
/// Whether the index is available for use or is forbidden to be inserted back in the index map
#[allow(clippy::large_enum_variant)]
#[derive(Clone)]
pub enum IndexStatus {
/// Do not insert it back in the index map as it is currently being deleted.

View file

@ -267,6 +267,7 @@ impl IndexScheduler {
/// - `indexer_config`: configuration used during indexing for each meilisearch index
/// - `autobatching_enabled`: `true` iff the index scheduler is allowed to automatically batch tasks
/// together, to process multiple tasks at once.
#[allow(clippy::too_many_arguments)]
pub fn new(
tasks_path: PathBuf,
update_file_path: PathBuf,
@ -401,7 +402,7 @@ impl IndexScheduler {
if let Some(index) = &query.index_uid {
let mut index_tasks = RoaringBitmap::new();
for index in index {
index_tasks |= self.index_tasks(&rtxn, &index)?;
index_tasks |= self.index_tasks(&rtxn, index)?;
}
tasks &= index_tasks;
}
@ -793,7 +794,7 @@ mod tests {
primary_key: primary_key.map(ToOwned::to_owned),
method: ReplaceDocuments,
content_file: Uuid::from_u128(content_file_uuid),
documents_count: documents_count,
documents_count,
allow_index_creation: true,
}
}
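The lib.rs hunks add `#[allow(clippy::too_many_arguments)]` for a constructor that exceeds clippy's default limit of seven parameters, and use field init shorthand where a local variable and a struct field share a name (`clippy::redundant_field_names`). A tiny sketch of the shorthand, with hypothetical fields:

struct DocumentAddition {
    documents_count: u64,
    allow_index_creation: bool,
}

fn addition(documents_count: u64) -> DocumentAddition {
    // clippy::redundant_field_names: `documents_count: documents_count`
    // collapses to the field init shorthand.
    DocumentAddition { documents_count, allow_index_creation: true }
}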

View file

@ -1,3 +1,5 @@
use std::fmt::Write;
use meilisearch_types::heed::types::{OwnedType, SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{Database, RoTxn};
use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
@ -96,8 +98,8 @@ fn snapshot_bitmap(r: &RoaringBitmap) -> String {
fn snapshot_all_tasks(rtxn: &RoTxn, db: Database<OwnedType<BEU32>, SerdeJson<Task>>) -> String {
let mut snap = String::new();
let mut iter = db.iter(rtxn).unwrap();
while let Some(next) = iter.next() {
let iter = db.iter(rtxn).unwrap();
for next in iter {
let (task_id, task) = next.unwrap();
snap.push_str(&format!("{task_id} {}\n", snapshot_task(&task)));
}
@ -109,8 +111,8 @@ fn snapshot_date_db(
db: Database<OwnedType<BEI128>, CboRoaringBitmapCodec>,
) -> String {
let mut snap = String::new();
let mut iter = db.iter(rtxn).unwrap();
while let Some(next) = iter.next() {
let iter = db.iter(rtxn).unwrap();
for next in iter {
let (_timestamp, task_ids) = next.unwrap();
snap.push_str(&format!("[timestamp] {}\n", snapshot_bitmap(&task_ids)));
}
@ -191,31 +193,31 @@ fn snaphsot_details(d: &Details) -> String {
fn snapshot_status(rtxn: &RoTxn, db: Database<SerdeBincode<Status>, RoaringBitmapCodec>) -> String {
let mut snap = String::new();
let mut iter = db.iter(rtxn).unwrap();
while let Some(next) = iter.next() {
let iter = db.iter(rtxn).unwrap();
for next in iter {
let (status, task_ids) = next.unwrap();
snap.push_str(&format!("{status} {}\n", snapshot_bitmap(&task_ids)));
write!(snap, "{status} {}\n", snapshot_bitmap(&task_ids)).unwrap();
}
snap
}
fn snapshot_kind(rtxn: &RoTxn, db: Database<SerdeBincode<Kind>, RoaringBitmapCodec>) -> String {
let mut snap = String::new();
let mut iter = db.iter(rtxn).unwrap();
while let Some(next) = iter.next() {
let iter = db.iter(rtxn).unwrap();
for next in iter {
let (kind, task_ids) = next.unwrap();
let kind = serde_json::to_string(&kind).unwrap();
snap.push_str(&format!("{kind} {}\n", snapshot_bitmap(&task_ids)));
write!(snap, "{kind} {}\n", snapshot_bitmap(&task_ids)).unwrap();
}
snap
}
fn snapshot_index_tasks(rtxn: &RoTxn, db: Database<Str, RoaringBitmapCodec>) -> String {
let mut snap = String::new();
let mut iter = db.iter(rtxn).unwrap();
while let Some(next) = iter.next() {
let iter = db.iter(rtxn).unwrap();
for next in iter {
let (index, task_ids) = next.unwrap();
snap.push_str(&format!("{index} {}\n", snapshot_bitmap(&task_ids)));
write!(snap, "{index} {}\n", snapshot_bitmap(&task_ids)).unwrap();
}
snap
}
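These snapshot helpers are reworked for two lints: `clippy::while_let_on_iterator` (a `while let Some(x) = iter.next()` loop over something that is already an iterator becomes a `for` loop) and, given the new `use std::fmt::Write` import at the top of the file, what looks like `clippy::format_push_string` (building a `String` with `push_str(&format!(...))` allocates an intermediate string, while `write!` appends directly into the buffer). A self-contained sketch of both, assuming a simple id/label pairing:

use std::fmt::Write;

fn snapshot(entries: Vec<(u32, String)>) -> String {
    let mut snap = String::new();
    // A plain for loop instead of `while let Some(..) = iter.next()`.
    for (task_id, task) in entries {
        // writeln! appends straight into `snap`, avoiding the extra
        // allocation of `snap.push_str(&format!(...))`.
        writeln!(snap, "{task_id} {task}").unwrap();
    }
    snap
}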

View file

@ -13,7 +13,7 @@ use crate::{Error, IndexScheduler, Result, Task, TaskId, BEI128};
impl IndexScheduler {
pub(crate) fn all_task_ids(&self, rtxn: &RoTxn) -> Result<RoaringBitmap> {
enum_iterator::all().map(|s| self.get_status(&rtxn, s)).union()
enum_iterator::all().map(|s| self.get_status(rtxn, s)).union()
}
pub(crate) fn last_task_id(&self, rtxn: &RoTxn) -> Result<Option<TaskId>> {
@ -173,7 +173,7 @@ pub(crate) fn insert_task_datetime(
task_id: TaskId,
) -> Result<()> {
let timestamp = BEI128::new(time.unix_timestamp_nanos());
let mut task_ids = database.get(&wtxn, &timestamp)?.unwrap_or_default();
let mut task_ids = database.get(wtxn, &timestamp)?.unwrap_or_default();
task_ids.insert(task_id);
database.put(wtxn, &timestamp, &RoaringBitmap::from_iter([task_id]))?;
Ok(())
@ -186,7 +186,7 @@ pub(crate) fn remove_task_datetime(
task_id: TaskId,
) -> Result<()> {
let timestamp = BEI128::new(time.unix_timestamp_nanos());
if let Some(mut existing) = database.get(&wtxn, &timestamp)? {
if let Some(mut existing) = database.get(wtxn, &timestamp)? {
existing.remove(task_id);
if existing.is_empty() {
database.delete(wtxn, &timestamp)?;
@ -214,7 +214,7 @@ pub(crate) fn keep_tasks_within_datetimes(
let mut collected_task_ids = RoaringBitmap::new();
let start = map_bound(start, |b| BEI128::new(b.unix_timestamp_nanos()));
let end = map_bound(end, |b| BEI128::new(b.unix_timestamp_nanos()));
let iter = database.range(&rtxn, &(start, end))?;
let iter = database.range(rtxn, &(start, end))?;
for r in iter {
let (_timestamp, task_ids) = r?;
collected_task_ids |= task_ids;
@ -245,22 +245,21 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
K::IndexUpdate { index_uid, .. } => index_uids.push(index_uid),
K::IndexSwap { swaps } => {
for (lhs, rhs) in swaps.iter_mut() {
if lhs == &swap.0 || lhs == &swap.1 {
if lhs == swap.0 || lhs == swap.1 {
index_uids.push(lhs);
}
if rhs == &swap.0 || rhs == &swap.1 {
if rhs == swap.0 || rhs == swap.1 {
index_uids.push(rhs);
}
}
}
K::TaskCancelation { .. } | K::TaskDeletion { .. } | K::DumpExport { .. } | K::Snapshot => {
()
}
};
for index_uid in index_uids {
if index_uid == &swap.0 {
if index_uid == swap.0 {
*index_uid = swap.1.to_owned();
} else if index_uid == &swap.1 {
} else if index_uid == swap.1 {
*index_uid = swap.0.to_owned();
}
}
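The final hunk drops the needless `&` on the right-hand side of these comparisons (`lhs == &swap.0` becomes `lhs == swap.0`), which is what `clippy::op_ref` / `needless_borrow` flag: `String`'s `PartialEq` impls against `str` already let the comparison work without the extra reference. A minimal sketch, with hypothetical names:

fn contains_swapped(index_uids: &[String], swap: (&str, &str)) -> bool {
    // clippy flags `uid == &swap.0`: `String: PartialEq<str>` already
    // lets a String be compared against a &str, so the extra `&` is redundant.
    index_uids.iter().any(|uid| uid == swap.0 || uid == swap.1)
}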