Mirror of https://github.com/meilisearch/MeiliSearch (synced 2024-11-22 21:04:27 +01:00)
fix clippy
Commit: e9055f5572
Parent: 874499a2d2
@@ -1,3 +1,6 @@
+#![allow(clippy::type_complexity)]
+#![allow(clippy::wrong_self_convention)]
+
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::keys::Key;
 use meilisearch_types::milli::update::IndexDocumentsMethod;

@@ -99,7 +102,7 @@ pub enum KindDump {
     },
     DocumentClear,
     Settings {
-        settings: meilisearch_types::settings::Settings<Unchecked>,
+        settings: Box<meilisearch_types::settings::Settings<Unchecked>>,
         is_deletion: bool,
         allow_index_creation: bool,
     },
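Note on the `Box<...>` changes throughout this commit: clippy's `large_enum_variant` lint fires when one enum variant is much bigger than the rest, because an enum value is always as large as its largest variant. Boxing the oversized `Settings<Unchecked>` payload keeps every `KindDump` (and, further down, `KindWithContent` and `Details`) small. A minimal, self-contained sketch of the pattern; the types here are illustrative, not from the codebase:

    use std::mem::size_of;

    enum TaskBefore {
        Clear,
        Settings([u8; 1024]), // every TaskBefore is at least 1024 bytes
    }

    enum TaskAfter {
        Clear,
        Settings(Box<[u8; 1024]>), // now roughly pointer-sized plus the tag
    }

    fn main() {
        assert!(size_of::<TaskAfter>() < size_of::<TaskBefore>());
    }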
@@ -369,7 +372,7 @@ pub(crate) mod test {

     pub fn create_test_dump() -> File {
         let instance_uid = create_test_instance_uid();
-        let dump = DumpWriter::new(Some(instance_uid.clone())).unwrap();
+        let dump = DumpWriter::new(Some(instance_uid)).unwrap();

         // ========== Adding an index
         let documents = create_test_documents();

@@ -74,7 +74,7 @@ impl CompatV3ToV4 {
             .map(|index| index.uid.clone());

         let index_uid = match index_uid {
-            Some(uid) => uid.to_string(),
+            Some(uid) => uid,
             None => {
                 log::warn!(
                     "Error while importing the update {}.",

@@ -120,7 +120,7 @@ impl CompatV3ToV4 {
                     primary_key: primary_key.clone(),
                     documents_count: 0, // we don't have this info
                     allow_index_creation: true, // there was no API-key in the v3
-                    content_uuid: content_uuid.clone(),
+                    content_uuid: *content_uuid,
                 },
                 v3::Kind::Settings(settings) => {
                     v4::tasks::TaskContent::SettingsUpdate {
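The `content_uuid.clone()` → `*content_uuid` change is clippy's `clone_on_copy` lint: for `Copy` types (such as a UUID) a plain dereference already copies the value, so `.clone()` only adds noise. A small sketch with a stand-in `Copy` type:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Uuid(u128); // stand-in for the real uuid type, which is Copy

    fn main() {
        let content_uuid = &Uuid(42);
        // content_uuid.clone() here would lint as clone_on_copy;
        let id = *content_uuid; // dereferencing copies the value
        assert_eq!(id, Uuid(42));
    }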
@@ -51,7 +51,7 @@ impl CompatV5ToV6 {
     pub fn tasks(
         &mut self,
     ) -> Result<Box<dyn Iterator<Item = Result<(v6::Task, Option<Box<UpdateFile>>)>> + '_>> {
-        let instance_uid = self.instance_uid().ok().flatten().map(|uid| uid.clone());
+        let instance_uid = self.instance_uid().ok().flatten();
         let keys = self.keys()?.collect::<Result<Vec<_>>>()?;

         let tasks = match self {

@@ -59,7 +59,7 @@ impl CompatV5ToV6 {
             CompatV5ToV6::Compat(compat) => compat.tasks(),
         };
         Ok(Box::new(tasks.map(move |task| {
-            task.and_then(|(task, content_file)| {
+            task.map(|(task, content_file)| {
                 let mut task_view: v5::tasks::TaskView = task.clone().into();

                 if task_view.status == v5::Status::Processing {

@@ -75,7 +75,7 @@ impl CompatV5ToV6 {
                         v5::Status::Succeeded => v6::Status::Succeeded,
                         v5::Status::Failed => v6::Status::Failed,
                     },
-                    kind: match task.content.clone() {
+                    kind: match task.content {
                         v5::tasks::TaskContent::IndexCreation { primary_key, .. } => {
                             v6::Kind::IndexCreation { primary_key }
                         }

@@ -100,7 +100,7 @@ impl CompatV5ToV6 {
                                 v6::milli::update::IndexDocumentsMethod::UpdateDocuments
                             }
                         },
-                        allow_index_creation: allow_index_creation.clone(),
+                        allow_index_creation,
                     },
                     v5::tasks::TaskContent::DocumentDeletion { deletion, .. } => match deletion
                     {

@@ -117,13 +117,11 @@ impl CompatV5ToV6 {
                     } => v6::Kind::Settings {
                         is_deletion,
                         allow_index_creation,
-                        settings: settings.into(),
+                        settings: Box::new(settings.into()),
                     },
-                    v5::tasks::TaskContent::Dump { uid } => v6::Kind::DumpExport {
-                        dump_uid: uid,
-                        keys: keys.clone(),
-                        instance_uid: instance_uid.clone(),
-                    },
+                    v5::tasks::TaskContent::Dump { uid } => {
+                        v6::Kind::DumpExport { dump_uid: uid, keys: keys.clone(), instance_uid }
+                    }
                 },
                 canceled_by: None,
                 details: task_view.details.map(|details| match details {

@@ -134,7 +132,7 @@ impl CompatV5ToV6 {
                     }
                 }
                 v5::Details::Settings { settings } => {
-                    v6::Details::SettingsUpdate { settings: settings.into() }
+                    v6::Details::SettingsUpdate { settings: Box::new(settings.into()) }
                 }
                 v5::Details::IndexInfo { primary_key } => {
                     v6::Details::IndexInfo { primary_key }

@@ -157,7 +155,7 @@ impl CompatV5ToV6 {
                 finished_at: task_view.finished_at,
             };

-                Ok((task, content_file))
+                (task, content_file)
             })
         })))
     }
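The `task.and_then(...)` → `task.map(...)` rewrite above (and the matching `Ok((task, content_file))` → `(task, content_file)` at the end of the closure) follows from the closure no longer being fallible: `and_then` is for steps that return a `Result`, `map` for steps that return a plain value. A generic sketch:

    fn main() {
        let task: Result<i32, String> = Ok(1);

        // When the step itself can fail, and_then is the right combinator.
        let a: Result<i32, String> = task.clone().and_then(|t| Ok(t + 1));

        // When it cannot, wrapping in Ok only to satisfy and_then is
        // redundant; map says the same thing directly.
        let b: Result<i32, String> = task.map(|t| t + 1);

        assert_eq!(a, b);
    }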
@@ -134,7 +134,7 @@ impl From<CompatV4ToV5> for DumpReader {

 pub enum DumpIndexReader {
     Current(v6::V6IndexReader),
-    Compat(CompatIndexV5ToV6),
+    Compat(Box<CompatIndexV5ToV6>),
 }

 impl DumpIndexReader {

@@ -176,7 +176,7 @@ impl From<V6IndexReader> for DumpIndexReader {

 impl From<CompatIndexV5ToV6> for DumpIndexReader {
     fn from(value: CompatIndexV5ToV6) -> Self {
-        DumpIndexReader::Compat(value)
+        DumpIndexReader::Compat(Box::new(value))
     }
 }

@@ -105,10 +105,10 @@ impl V2Reader {

     pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V2IndexReader>> + '_> {
         Ok(self.index_uuid.iter().map(|index| -> Result<_> {
-            Ok(V2IndexReader::new(
+            V2IndexReader::new(
                 index.uid.clone(),
-                &self.dump.path().join("indexes").join(format!("index-{}", index.uuid.to_string())),
-            )?)
+                &self.dump.path().join("indexes").join(format!("index-{}", index.uuid)),
+            )
         }))
     }

@@ -122,7 +122,7 @@ impl V2Reader {
                 .path()
                 .join("updates")
                 .join("update_files")
-                .join(format!("update_{}", uuid.to_string()));
+                .join(format!("update_{}", uuid));
             Ok((task, Some(UpdateFile::new(&update_file_path)?)))
         } else {
             Ok((task, None))
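Dropping `uuid.to_string()` inside `format!` is what clippy's `to_string_in_format_args` lint suggests (assuming that is the lint that fired here): the `{}` placeholder already formats through `Display`, so the intermediate `String` is a wasted allocation. A sketch with a stand-in type that implements `Display`:

    use std::fmt;

    struct Uuid(u32); // stand-in with a Display impl, like the real uuid type

    impl fmt::Display for Uuid {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{:08x}", self.0)
        }
    }

    fn main() {
        let uuid = Uuid(0xdead);
        // format!("update_{}", uuid.to_string()) allocates a temporary String;
        // passing the value itself goes through Display directly.
        let name = format!("update_{}", uuid);
        assert_eq!(name, "update_0000dead");
    }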
@@ -111,10 +111,10 @@ impl V3Reader {

     pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V3IndexReader>> + '_> {
         Ok(self.index_uuid.iter().map(|index| -> Result<_> {
-            Ok(V3IndexReader::new(
+            V3IndexReader::new(
                 index.uid.clone(),
                 &self.dump.path().join("indexes").join(index.uuid.to_string()),
-            )?)
+            )
         }))
     }

@@ -160,7 +160,7 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }

-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Setting<T> {
     Set(T),
     Reset,

@@ -97,10 +97,10 @@ impl V4Reader {

     pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V4IndexReader>> + '_> {
         Ok(self.index_uuid.iter().map(|index| -> Result<_> {
-            Ok(V4IndexReader::new(
+            V4IndexReader::new(
                 index.uid.clone(),
                 &self.dump.path().join("indexes").join(index.index_meta.uuid.to_string()),
-            )?)
+            )
         }))
     }

@@ -23,15 +23,15 @@ where
         .serialize(s)
 }

-#[derive(Clone, Default, Debug, PartialEq)]
+#[derive(Clone, Default, Debug, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub struct Checked;

-#[derive(Clone, Default, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Default, Debug, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub struct Unchecked;

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -42,7 +42,7 @@ pub struct MinWordSizeTyposSetting {
     pub two_typos: Setting<u8>,
 }

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -59,7 +59,7 @@ pub struct TypoSettings {
 /// Holds all the settings for an index. `T` can either be `Checked` if they represents settings
 /// whose validity is guaranteed, or `Unchecked` if they need to be validated. In the later case, a
 /// call to `check` will return a `Settings<Checked>` from a `Settings<Unchecked>`.
-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -191,7 +191,7 @@ pub struct Facets {
     pub min_level_size: Option<NonZeroUsize>,
 }

-#[derive(Debug, Clone, PartialEq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 pub enum Setting<T> {
     Set(T),
     Reset,
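The many `PartialEq` → `PartialEq, Eq` derive changes in this commit come from clippy's `derive_partial_eq_without_eq`: when equality is total (no NaN-like values that compare unequal to themselves), also deriving `Eq` documents that fact and unlocks APIs that require it. Sketch:

    use std::collections::HashSet;

    // Eq is a marker trait on top of PartialEq promising total equality.
    #[derive(Debug, Clone, PartialEq, Eq, Hash)]
    enum Setting<T> {
        Set(T),
        Reset,
        NotSet,
    }

    fn main() {
        // HashSet needs Eq + Hash; PartialEq alone would not compile here.
        let mut seen = HashSet::new();
        seen.insert(Setting::Set(3u8));
        assert!(seen.contains(&Setting::Set(3u8)));
    }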
@@ -9,7 +9,7 @@ use super::settings::{Settings, Unchecked};
 pub type TaskId = u32;
 pub type BatchId = u32;

-#[derive(Clone, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub struct Task {
     pub id: TaskId,

@@ -18,7 +18,7 @@ pub struct Task {
     pub events: Vec<TaskEvent>,
 }

-#[derive(Clone, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[allow(clippy::large_enum_variant)]
 pub enum TaskContent {

@@ -138,10 +138,10 @@ impl V5Reader {

     pub fn indexes(&self) -> Result<impl Iterator<Item = Result<V5IndexReader>> + '_> {
         Ok(self.index_uuid.iter().map(|index| -> Result<_> {
-            Ok(V5IndexReader::new(
+            V5IndexReader::new(
                 index.uid.clone(),
                 &self.dump.path().join("indexes").join(index.index_meta.uuid.to_string()),
-            )?)
+            )
         }))
     }

@@ -12,7 +12,7 @@ pub struct Unchecked;
 /// Holds all the settings for an index. `T` can either be `Checked` if they represents settings
 /// whose validity is guaranteed, or `Unchecked` if they need to be validated. In the later case, a
 /// call to `check` will return a `Settings<Checked>` from a `Settings<Unchecked>`.
-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -47,7 +47,7 @@ pub struct Settings<T> {
     pub _kind: PhantomData<T>,
 }

-#[derive(Debug, Clone, PartialEq, Copy)]
+#[derive(Debug, Clone, PartialEq, Eq, Copy)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub enum Setting<T> {
     Set(T),

@@ -102,7 +102,7 @@ impl<'de, T: Deserialize<'de>> Deserialize<'de> for Setting<T> {
     }
 }

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -113,7 +113,7 @@ pub struct MinWordSizeTyposSetting {
     pub two_typos: Setting<u8>,
 }

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -128,7 +128,7 @@ pub struct TypoSettings {
     pub disable_on_attributes: Setting<BTreeSet<String>>,
 }

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -137,7 +137,7 @@ pub struct FacetingSettings {
     pub max_values_per_facet: Setting<usize>,
 }

-#[derive(Debug, Clone, Default, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]

@@ -9,7 +9,7 @@ use super::settings::{Settings, Unchecked};
 pub type TaskId = u32;
 pub type BatchId = u32;

-#[derive(Clone, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 pub struct Task {
     pub id: TaskId,

@@ -21,7 +21,7 @@ pub struct Task {
     pub events: Vec<TaskEvent>,
 }

-#[derive(Clone, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
 #[cfg_attr(test, derive(serde::Serialize))]
 #[allow(clippy::large_enum_variant)]
 pub enum TaskContent {

@@ -108,7 +108,7 @@ impl V6Reader {
                 .path()
                 .join("tasks")
                 .join("update_files")
-                .join(format!("{}.jsonl", task.uid.to_string()));
+                .join(format!("{}.jsonl", task.uid));

             if update_file_path.exists() {
                 Ok((

@@ -233,13 +233,13 @@ pub(crate) mod test {
         let mut ident = String::new();

         for _ in 0..depth {
-            ident.push_str(&"│");
+            ident.push('│');
             ident.push_str(&" ".repeat(4));
         }
         if idx == entries.len() - 1 {
-            ident.push_str(&"└");
+            ident.push('└');
         } else {
-            ident.push_str(&"├");
+            ident.push('├');
         }
         ident.push_str(&"-".repeat(4));

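The `push_str(&"│")` → `push('│')` edits above are clippy's `single_char_add_str` (plus a needless borrow of a string literal): appending one character is cheaper and clearer as a `char` push. Sketch:

    fn main() {
        let mut ident = String::new();
        // ident.push_str(&"│"); // single-char &str, with a needless borrow
        ident.push('│'); // push the char directly
        ident.push_str(&"-".repeat(4)); // repeat() yields a String, push_str fits
        assert_eq!(ident, "│----");
    }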
@@ -170,7 +170,7 @@ impl BatchKind {
             // We don't batch any of these operations
             (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap) => Break(this),
             // We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
-            (this, kind) if index_already_exists == false && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
+            (this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
                 Break(this)
             },
             // The index deletion can batch with everything but must stop after
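`index_already_exists == false` → `!index_already_exists` is clippy's `bool_comparison` lint: comparing a bool against a literal restates the value instead of using it. Tiny sketch:

    fn main() {
        let index_already_exists = false;
        // `if index_already_exists == false` lints as bool_comparison;
        if !index_already_exists {
            println!("the task may have to create the index");
        }
    }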
@@ -443,7 +443,7 @@ mod tests {
         input: impl IntoIterator<Item = KindWithContent>,
     ) -> Option<(BatchKind, bool)> {
         autobatch(
-            input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind.into())).collect(),
+            input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind)).collect(),
             index_already_exists,
         )
     }

@@ -242,7 +242,7 @@ impl IndexScheduler {
             match task.kind {
                 KindWithContent::SettingsUpdate {
                     ref new_settings, is_deletion, ..
-                } => settings.push((is_deletion, new_settings.clone())),
+                } => settings.push((is_deletion, *new_settings.clone())),
                 _ => unreachable!(),
             }
         }

@@ -424,7 +424,7 @@ impl IndexScheduler {
                 // AT LEAST one index. We can use the right or left one it doesn't
                 // matter.
                 let index_name = task.indexes().unwrap()[0];
-                let index_already_exists = self.index_mapper.exists(rtxn, &index_name)?;
+                let index_already_exists = self.index_mapper.exists(rtxn, index_name)?;

                 let index_tasks = self.index_tasks(rtxn, index_name)? & enqueued;

@@ -550,7 +550,7 @@ impl IndexScheduler {
                 } else {
                     unreachable!();
                 };
-                let dump = dump::DumpWriter::new(instance_uid.clone())?;
+                let dump = dump::DumpWriter::new(*instance_uid)?;

                 // 1. dump the keys
                 let mut dump_keys = dump.create_keys()?;

@@ -566,7 +566,7 @@ impl IndexScheduler {
                 for ret in self.all_tasks.iter(&rtxn)? {
                     let (_, mut t) = ret?;
                     let status = t.status;
-                    let content_file = t.content_uuid().map(|uuid| uuid.clone());
+                    let content_file = t.content_uuid().copied();

                     // In the case we're dumping ourselves we want to be marked as finished
                     // to not loop over ourselves indefinitely.

@@ -745,11 +745,11 @@ impl IndexScheduler {
     /// Swap the index `lhs` with the index `rhs`.
     fn apply_index_swap(&self, wtxn: &mut RwTxn, task_id: u32, lhs: &str, rhs: &str) -> Result<()> {
         // 1. Verify that both lhs and rhs are existing indexes
-        let index_lhs_exists = self.index_mapper.index_exists(&wtxn, lhs)?;
+        let index_lhs_exists = self.index_mapper.index_exists(wtxn, lhs)?;
         if !index_lhs_exists {
             return Err(Error::IndexNotFound(lhs.to_owned()));
         }
-        let index_rhs_exists = self.index_mapper.index_exists(&wtxn, rhs)?;
+        let index_rhs_exists = self.index_mapper.index_exists(wtxn, rhs)?;
         if !index_rhs_exists {
             return Err(Error::IndexNotFound(rhs.to_owned()));
         }

@@ -764,7 +764,7 @@ impl IndexScheduler {

         // 3. before_name -> new_name in the task's KindWithContent
         for task_id in &index_lhs_task_ids | &index_rhs_task_ids {
-            let mut task = self.get_task(&wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
+            let mut task = self.get_task(wtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
             swap_index_uid_in_task(&mut task, (lhs, rhs));
             self.all_tasks.put(wtxn, &BEU32::new(task_id), &task)?;
         }
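Several hunks here only drop an `&`: clippy's `needless_borrow` fires when the argument already coerces to the expected reference type, for instance passing `&wtxn` where `wtxn` is itself a mutable reference. A sketch with stand-in types (`u32` plays the role of the transaction):

    fn index_exists(txn: &u32, name: &str) -> bool {
        // stand-in for index_mapper.index_exists(txn, name)
        *txn > 0 && !name.is_empty()
    }

    fn apply_index_swap(wtxn: &mut u32, lhs: &str) -> bool {
        // index_exists(&wtxn, lhs) would pass a &&mut u32 that the compiler
        // immediately re-derefs; the plain form reborrows &mut u32 as &u32.
        index_exists(wtxn, lhs)
    }

    fn main() {
        let mut txn = 1;
        assert!(apply_index_swap(&mut txn, "movies"));
    }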
@@ -934,7 +934,7 @@ impl IndexScheduler {
                 // TODO merge the settings to only do *one* reindexation.
                 for (task, (_, settings)) in tasks.iter_mut().zip(settings) {
                     let checked_settings = settings.clone().check();
-                    task.details = Some(Details::SettingsUpdate { settings });
+                    task.details = Some(Details::SettingsUpdate { settings: Box::new(settings) });

                     let mut builder =
                         milli::update::Settings::new(index_wtxn, index, indexer_config);

@@ -1023,7 +1023,7 @@ impl IndexScheduler {
         let enqueued_tasks = self.get_status(wtxn, Status::Enqueued)?;
         let processing_tasks = &self.processing_tasks.read().unwrap().processing.clone();

-        let all_task_ids = self.all_task_ids(&wtxn)?;
+        let all_task_ids = self.all_task_ids(wtxn)?;
         let mut to_delete_tasks = all_task_ids & matched_tasks;
         to_delete_tasks -= processing_tasks;
         to_delete_tasks -= enqueued_tasks;

@@ -4,6 +4,7 @@ use thiserror::Error;

 use crate::TaskId;

+#[allow(clippy::large_enum_variant)]
 #[derive(Error, Debug)]
 pub enum Error {
     #[error("Index `{0}` not found.")]

@@ -39,6 +39,7 @@ pub struct IndexMapper {
 }

 /// Whether the index is available for use or is forbidden to be inserted back in the index map
+#[allow(clippy::large_enum_variant)]
 #[derive(Clone)]
 pub enum IndexStatus {
     /// Do not insert it back in the index map as it is currently being deleted.

@@ -267,6 +267,7 @@ impl IndexScheduler {
     /// - `indexer_config`: configuration used during indexing for each meilisearch index
     /// - `autobatching_enabled`: `true` iff the index scheduler is allowed to automatically batch tasks
     /// together, to process multiple tasks at once.
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         tasks_path: PathBuf,
         update_file_path: PathBuf,

@@ -401,7 +402,7 @@ impl IndexScheduler {
         if let Some(index) = &query.index_uid {
             let mut index_tasks = RoaringBitmap::new();
             for index in index {
-                index_tasks |= self.index_tasks(&rtxn, &index)?;
+                index_tasks |= self.index_tasks(&rtxn, index)?;
             }
             tasks &= index_tasks;
         }

@@ -793,7 +794,7 @@ mod tests {
             primary_key: primary_key.map(ToOwned::to_owned),
             method: ReplaceDocuments,
             content_file: Uuid::from_u128(content_file_uuid),
-            documents_count: documents_count,
+            documents_count,
             allow_index_creation: true,
         }
     }
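`documents_count: documents_count` → `documents_count` is clippy's `redundant_field_names`: when a local binding has the same name as the struct field, the field-init shorthand applies. Sketch:

    struct DocumentImport {
        documents_count: u64,
        allow_index_creation: bool,
    }

    fn main() {
        let documents_count = 12;
        let task = DocumentImport {
            documents_count, // instead of `documents_count: documents_count`
            allow_index_creation: true,
        };
        assert_eq!(task.documents_count, 12);
    }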
@@ -1,3 +1,5 @@
+use std::fmt::Write;
+
 use meilisearch_types::heed::types::{OwnedType, SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, RoTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};

@@ -96,8 +98,8 @@ fn snapshot_bitmap(r: &RoaringBitmap) -> String {

 fn snapshot_all_tasks(rtxn: &RoTxn, db: Database<OwnedType<BEU32>, SerdeJson<Task>>) -> String {
     let mut snap = String::new();
-    let mut iter = db.iter(rtxn).unwrap();
-    while let Some(next) = iter.next() {
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
         let (task_id, task) = next.unwrap();
         snap.push_str(&format!("{task_id} {}\n", snapshot_task(&task)));
     }

@@ -109,8 +111,8 @@ fn snapshot_date_db(
     db: Database<OwnedType<BEI128>, CboRoaringBitmapCodec>,
 ) -> String {
     let mut snap = String::new();
-    let mut iter = db.iter(rtxn).unwrap();
-    while let Some(next) = iter.next() {
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
         let (_timestamp, task_ids) = next.unwrap();
         snap.push_str(&format!("[timestamp] {}\n", snapshot_bitmap(&task_ids)));
     }

@@ -191,31 +193,31 @@ fn snaphsot_details(d: &Details) -> String {

 fn snapshot_status(rtxn: &RoTxn, db: Database<SerdeBincode<Status>, RoaringBitmapCodec>) -> String {
     let mut snap = String::new();
-    let mut iter = db.iter(rtxn).unwrap();
-    while let Some(next) = iter.next() {
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
         let (status, task_ids) = next.unwrap();
-        snap.push_str(&format!("{status} {}\n", snapshot_bitmap(&task_ids)));
+        write!(snap, "{status} {}\n", snapshot_bitmap(&task_ids)).unwrap();
     }
     snap
 }

 fn snapshot_kind(rtxn: &RoTxn, db: Database<SerdeBincode<Kind>, RoaringBitmapCodec>) -> String {
     let mut snap = String::new();
-    let mut iter = db.iter(rtxn).unwrap();
-    while let Some(next) = iter.next() {
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
         let (kind, task_ids) = next.unwrap();
         let kind = serde_json::to_string(&kind).unwrap();
-        snap.push_str(&format!("{kind} {}\n", snapshot_bitmap(&task_ids)));
+        write!(snap, "{kind} {}\n", snapshot_bitmap(&task_ids)).unwrap();
     }
     snap
 }

 fn snapshot_index_tasks(rtxn: &RoTxn, db: Database<Str, RoaringBitmapCodec>) -> String {
     let mut snap = String::new();
-    let mut iter = db.iter(rtxn).unwrap();
-    while let Some(next) = iter.next() {
+    let iter = db.iter(rtxn).unwrap();
+    for next in iter {
         let (index, task_ids) = next.unwrap();
-        snap.push_str(&format!("{index} {}\n", snapshot_bitmap(&task_ids)));
+        write!(snap, "{index} {}\n", snapshot_bitmap(&task_ids)).unwrap();
     }
     snap
 }
@@ -13,7 +13,7 @@ use crate::{Error, IndexScheduler, Result, Task, TaskId, BEI128};

 impl IndexScheduler {
     pub(crate) fn all_task_ids(&self, rtxn: &RoTxn) -> Result<RoaringBitmap> {
-        enum_iterator::all().map(|s| self.get_status(&rtxn, s)).union()
+        enum_iterator::all().map(|s| self.get_status(rtxn, s)).union()
     }

     pub(crate) fn last_task_id(&self, rtxn: &RoTxn) -> Result<Option<TaskId>> {

@@ -173,7 +173,7 @@ pub(crate) fn insert_task_datetime(
     task_id: TaskId,
 ) -> Result<()> {
     let timestamp = BEI128::new(time.unix_timestamp_nanos());
-    let mut task_ids = database.get(&wtxn, &timestamp)?.unwrap_or_default();
+    let mut task_ids = database.get(wtxn, &timestamp)?.unwrap_or_default();
     task_ids.insert(task_id);
     database.put(wtxn, &timestamp, &RoaringBitmap::from_iter([task_id]))?;
     Ok(())

@@ -186,7 +186,7 @@ pub(crate) fn remove_task_datetime(
     task_id: TaskId,
 ) -> Result<()> {
     let timestamp = BEI128::new(time.unix_timestamp_nanos());
-    if let Some(mut existing) = database.get(&wtxn, &timestamp)? {
+    if let Some(mut existing) = database.get(wtxn, &timestamp)? {
         existing.remove(task_id);
         if existing.is_empty() {
             database.delete(wtxn, &timestamp)?;

@@ -214,7 +214,7 @@ pub(crate) fn keep_tasks_within_datetimes(
     let mut collected_task_ids = RoaringBitmap::new();
     let start = map_bound(start, |b| BEI128::new(b.unix_timestamp_nanos()));
     let end = map_bound(end, |b| BEI128::new(b.unix_timestamp_nanos()));
-    let iter = database.range(&rtxn, &(start, end))?;
+    let iter = database.range(rtxn, &(start, end))?;
     for r in iter {
         let (_timestamp, task_ids) = r?;
         collected_task_ids |= task_ids;

@@ -245,22 +245,21 @@ pub fn swap_index_uid_in_task(task: &mut Task, swap: (&str, &str)) {
         K::IndexUpdate { index_uid, .. } => index_uids.push(index_uid),
         K::IndexSwap { swaps } => {
             for (lhs, rhs) in swaps.iter_mut() {
-                if lhs == &swap.0 || lhs == &swap.1 {
+                if lhs == swap.0 || lhs == swap.1 {
                     index_uids.push(lhs);
                 }
-                if rhs == &swap.0 || rhs == &swap.1 {
+                if rhs == swap.0 || rhs == swap.1 {
                     index_uids.push(rhs);
                 }
             }
         }
         K::TaskCancelation { .. } | K::TaskDeletion { .. } | K::DumpExport { .. } | K::Snapshot => {
-            ()
         }
     };
     for index_uid in index_uids {
-        if index_uid == &swap.0 {
+        if index_uid == swap.0 {
             *index_uid = swap.1.to_owned();
-        } else if index_uid == &swap.1 {
+        } else if index_uid == swap.1 {
             *index_uid = swap.0.to_owned();
         }
     }

@@ -6,7 +6,7 @@ use std::sync::Mutex;
 pub use insta;
 use once_cell::sync::Lazy;

-static SNAPSHOT_NAMES: Lazy<Mutex<HashMap<PathBuf, usize>>> = Lazy::new(|| Mutex::default());
+static SNAPSHOT_NAMES: Lazy<Mutex<HashMap<PathBuf, usize>>> = Lazy::new(Mutex::default);

 /// Return the md5 hash of the given string
 pub fn hash_snapshot(snap: &str) -> String {
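`Lazy::new(|| Mutex::default())` → `Lazy::new(Mutex::default)` is clippy's `redundant_closure`: a closure whose whole body is a call to one zero-argument function can be replaced by that function. A sketch that avoids the `once_cell` dependency by faking `Lazy::new` with a plain higher-order function:

    use std::sync::Mutex;

    fn make<T>(constructor: impl FnOnce() -> T) -> T {
        // stand-in for Lazy::new: accepts any zero-argument constructor
        constructor()
    }

    fn main() {
        // make(|| Mutex::default()) only forwards to Mutex::default;
        // passing the function item directly says the same thing.
        let m: Mutex<Vec<u32>> = make(Mutex::default);
        m.lock().unwrap().push(1);
        assert_eq!(m.lock().unwrap().len(), 1);
    }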
@@ -25,7 +25,7 @@ pub fn default_snapshot_settings_for_test(name: Option<&str>) -> (insta::Setting

     let test_name = std::thread::current().name().unwrap().rsplit("::").next().unwrap().to_owned();

-    let path = Path::new("snapshots").join(filename).join(&test_name).to_owned();
+    let path = Path::new("snapshots").join(filename).join(&test_name);
     settings.set_snapshot_path(path.clone());
     let snap_name = if let Some(name) = name {
         Cow::Borrowed(name)

@@ -87,21 +87,20 @@ pub fn create_app(
         .configure(|s| dashboard(s, enable_dashboard));
     #[cfg(feature = "metrics")]
     let app = app.configure(|s| configure_metrics_route(s, opt.enable_metrics_route));
-    let app = app
-        .wrap(
-            Cors::default()
-                .send_wildcard()
-                .allow_any_header()
-                .allow_any_origin()
-                .allow_any_method()
-                .max_age(86_400), // 24h
-        )
-        .wrap(middleware::Logger::default())
-        .wrap(middleware::Compress::default())
-        .wrap(middleware::NormalizePath::new(middleware::TrailingSlash::Trim));
     #[cfg(feature = "metrics")]
     let app = app.wrap(Condition::new(opt.enable_metrics_route, route_metrics::RouteMetrics));
-    app
+    app.wrap(
+        Cors::default()
+            .send_wildcard()
+            .allow_any_header()
+            .allow_any_origin()
+            .allow_any_method()
+            .max_age(86_400), // 24h
+    )
+    .wrap(middleware::Logger::default())
+    .wrap(middleware::Compress::default())
+    .wrap(middleware::NormalizePath::new(middleware::TrailingSlash::Trim))
 }

 // TODO: TAMO: Finish setting up things

@@ -148,7 +147,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
                 Ok(()) => (index_scheduler, auth_controller),
                 Err(e) => {
                     std::fs::remove_dir_all(&opt.db_path)?;
-                    return Err(e.into());
+                    return Err(e);
                 }
             }
         } else if !empty_db && !opt.ignore_dump_if_db_exists {

@@ -164,7 +163,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
                 Ok(()) => (index_scheduler, auth_controller),
                 Err(e) => {
                     std::fs::remove_dir_all(&opt.db_path)?;
-                    return Err(e.into());
+                    return Err(e);
                 }
             }
         }

@@ -232,7 +231,7 @@ fn import_dump(
         keys.push(key);
     }

-    let indexer_config = index_scheduler.indexer_config().clone();
+    let indexer_config = index_scheduler.indexer_config();

     // /!\ The tasks must be imported AFTER importing the indexes or else the scheduler might
     // try to process tasks while we're trying to import the indexes.

@@ -147,7 +147,7 @@ Anonymous telemetry:\t\"Enabled\""
     }

     if let Some(instance_uid) = analytics.instance_uid() {
-        eprintln!("Instance UID:\t\t\"{}\"", instance_uid.to_string());
+        eprintln!("Instance UID:\t\t\"{}\"", instance_uid);
     }

     eprintln!();

@@ -262,7 +262,7 @@ async fn document_addition(
         Err(index_scheduler::Error::FileStore(file_store::Error::IoError(e)))
             if e.kind() == ErrorKind::NotFound =>
         {
-            ()
+
         }
         Err(e) => {
             log::warn!("Unknown error happened while deleting a malformed update file with uuid {uuid}: {e}");

@@ -45,7 +45,7 @@ macro_rules! make_setting_route {
                 let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
                 let task = KindWithContent::SettingsUpdate {
                     index_uid,
-                    new_settings,
+                    new_settings: Box::new(new_settings),
                     is_deletion: true,
                     allow_index_creation,
                 };

@@ -84,7 +84,7 @@ macro_rules! make_setting_route {
                let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
                let task = KindWithContent::SettingsUpdate {
                    index_uid,
-                   new_settings,
+                   new_settings: Box::new(new_settings),
                    is_deletion: false,
                    allow_index_creation,
                };

@@ -443,7 +443,7 @@ pub async fn update_all(
     let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
     let task = KindWithContent::SettingsUpdate {
         index_uid,
-        new_settings,
+        new_settings: Box::new(new_settings),
         is_deletion: false,
         allow_index_creation,
     };

@@ -474,8 +474,8 @@ pub async fn delete_all(
     let allow_index_creation = index_scheduler.filters().allow_index_creation;
     let index_uid = IndexUid::try_from(index_uid.into_inner())?.into_inner();
     let task = KindWithContent::SettingsUpdate {
-        index_uid: index_uid,
-        new_settings,
+        index_uid,
+        new_settings: Box::new(new_settings),
         is_deletion: true,
         allow_index_creation,
     };

@@ -275,7 +275,7 @@ pub fn create_all_stats(
         limit: Some(1),
         ..Query::default()
     })?;
-    let processing_index = processing_task.first().and_then(|task| task.index_uid().clone());
+    let processing_index = processing_task.first().and_then(|task| task.index_uid());
     for (name, index) in index_scheduler.indexes()? {
         if !search_rules.is_index_authorized(&name) {
             continue;

@@ -286,7 +286,7 @@ pub fn create_all_stats(
         let rtxn = index.read_txn()?;
         let stats = IndexStats {
             number_of_documents: index.number_of_documents(&rtxn)?,
-            is_indexing: processing_index.as_deref().map_or(false, |index_name| name == index_name),
+            is_indexing: processing_index.map_or(false, |index_name| name == index_name),
             field_distribution: index.field_distribution(&rtxn)?,
         };

@@ -113,14 +113,14 @@ pub struct DetailsView {
     pub dump_uid: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(flatten)]
-    pub settings: Option<Settings<Unchecked>>,
+    pub settings: Option<Box<Settings<Unchecked>>>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub indexes: Option<Vec<(String, String)>>,
 }

 impl From<Details> for DetailsView {
     fn from(details: Details) -> Self {
-        match details.clone() {
+        match details {
             Details::DocumentAdditionOrUpdate { received_documents, indexed_documents } => {
                 DetailsView {
                     received_documents: Some(received_documents),

@@ -471,7 +471,7 @@ async fn get_task(
     filters.uid = Some(vec![task_id]);

     if let Some(task) = index_scheduler.get_tasks(filters)?.first() {
-        let task_view = TaskView::from_task(&task);
+        let task_view = TaskView::from_task(task);
         Ok(HttpResponse::Ok().json(task_view))
     } else {
         Err(index_scheduler::Error::TaskNotFound(task_id).into())

@@ -494,7 +494,7 @@ fn filter_out_inaccessible_indexes_from_query<const ACTION: u8>(
     match indexes {
         Some(indexes) => {
             for name in indexes.iter() {
-                if search_rules.is_index_authorized(&name) {
+                if search_rules.is_index_authorized(name) {
                     query = query.with_index(name.to_string());
                 }
             }

@@ -543,7 +543,7 @@ pub(crate) mod date_deserializer {
             DeserializeDateOption::After => {
                 let datetime = datetime
                     .checked_add(Duration::days(1))
-                    .ok_or(serde::de::Error::custom("date overflow"))?;
+                    .ok_or_else(|| serde::de::Error::custom("date overflow"))?;
                 Ok(datetime)
             }
         }
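`ok_or(...)` → `ok_or_else(|| ...)` above is clippy's `or_fun_call`: the argument of `ok_or` is evaluated eagerly, even on the `Some` path, so an error that costs something to build should be constructed lazily. Sketch:

    fn expensive_error() -> String {
        // imagine this allocates or does real work
        String::from("date overflow")
    }

    fn main() {
        let datetime: Option<u32> = Some(5);
        // datetime.ok_or(expensive_error()) builds the error even when
        // datetime is Some; ok_or_else defers it to the None case.
        let checked: Result<u32, String> = datetime.ok_or_else(expensive_error);
        assert_eq!(checked, Ok(5));
    }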
@@ -552,7 +552,7 @@ fn parse_filter(facets: &Value) -> Result<Option<Filter>, MeilisearchHttpError>
             Ok(condition)
         }
         Value::Array(arr) => parse_filter_array(arr),
-        v => Err(MeilisearchHttpError::InvalidExpression(&["Array"], v.clone()).into()),
+        v => Err(MeilisearchHttpError::InvalidExpression(&["Array"], v.clone())),
     }
 }

@@ -570,8 +570,7 @@ fn parse_filter_array(arr: &[Value]) -> Result<Option<Filter>, MeilisearchHttpEr
                     return Err(MeilisearchHttpError::InvalidExpression(
                         &["String"],
                         v.clone(),
-                    )
-                    .into())
+                    ))
                 }
             }
         }

@@ -581,8 +580,7 @@ fn parse_filter_array(arr: &[Value]) -> Result<Option<Filter>, MeilisearchHttpEr
                     return Err(MeilisearchHttpError::InvalidExpression(
                         &["String", "[String]"],
                         v.clone(),
-                    )
-                    .into())
+                    ))
                 }
             }
         }

@@ -102,7 +102,7 @@ pub enum KindWithContent {
     },
     SettingsUpdate {
         index_uid: String,
-        new_settings: Settings<Unchecked>,
+        new_settings: Box<Settings<Unchecked>>,
         is_deletion: bool,
         allow_index_creation: bool,
     },

@@ -417,14 +417,13 @@ impl FromStr for Kind {
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
-#[allow(clippy::large_enum_variant)]
 pub enum Details {
     DocumentAdditionOrUpdate {
         received_documents: u64,
         indexed_documents: Option<u64>,
     },
     SettingsUpdate {
-        settings: Settings<Unchecked>,
+        settings: Box<Settings<Unchecked>>,
     },
     IndexInfo {
         primary_key: Option<String>,