apply most of the review's style comments

Tamo 2022-10-17 16:45:00 +02:00 committed by Clément Renault
parent dd70daaae3
commit 78ce29f461
No known key found for this signature in database
GPG Key ID: 92ADA4E935E71FA4
7 changed files with 27 additions and 23 deletions

Cargo.lock (generated)

@ -1109,12 +1109,12 @@ dependencies = [
"http",
"index-scheduler",
"insta",
-"lazy_static",
"log",
"maplit",
"meili-snap",
"meilisearch-auth",
"meilisearch-types",
+"once_cell",
"regex",
"serde",
"serde_json",


@ -20,7 +20,7 @@ meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
http = "0.2.8"
regex = "1.6.0"
-lazy_static = "1.4.0"
+once_cell = "1.15.0"
roaring = { version = "0.10.0", features = ["serde"] }
[dev-dependencies]


@ -5,7 +5,7 @@ use self::compat::v4_to_v5::CompatV4ToV5;
use self::compat::v5_to_v6::{CompatIndexV5ToV6, CompatV5ToV6};
use self::v5::V5Reader;
use self::v6::{V6IndexReader, V6Reader};
-use crate::{Result, Version};
+use crate::{Error, Result, Version};
use flate2::bufread::GzDecoder;
use serde::Deserialize;
@ -46,7 +46,7 @@ impl DumpReader {
match dump_version {
// Version::V1 => Ok(Box::new(v1::Reader::open(path)?)),
-Version::V1 => todo!(),
+Version::V1 => Err(Error::DumpV1Unsupported),
Version::V2 => Ok(v2::V2Reader::open(path)?
.to_v3()
.to_v4()

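The V1 arm now returns a real error instead of panicking through `todo!()`. A minimal, self-contained sketch of that pattern, assuming a simplified `Error` enum (only the `DumpV1Unsupported` variant name comes from the diff; everything else is illustrative):

use std::fmt;

// Simplified stand-in for the crate's error type.
#[derive(Debug)]
pub enum Error {
    DumpV1Unsupported,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::DumpV1Unsupported => write!(f, "v1 dumps are not supported"),
        }
    }
}

impl std::error::Error for Error {}

pub type Result<T> = std::result::Result<T, Error>;

// The caller can now handle the unsupported version instead of hitting a panic.
fn open_v1_dump() -> Result<()> {
    Err(Error::DumpV1Unsupported)
}

fn main() {
    assert!(open_v1_dump().is_err());
}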

@ -4,6 +4,7 @@ use std::{
str::FromStr,
};
+use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer};
@ -132,9 +133,8 @@ impl Settings<Unchecked> {
}
}
-lazy_static::lazy_static! {
-static ref ASC_DESC_REGEX: Regex = Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap();
-}
+static ASC_DESC_REGEX: Lazy<Regex> =
+Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());
#[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
pub enum Criterion {

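`lazy_static!` is replaced by `once_cell::sync::Lazy`, which gives the same lazily compiled, cached regex without a macro. A small sketch of how the new static behaves, assuming the `once_cell` and `regex` crates (the sample input is made up):

use once_cell::sync::Lazy;
use regex::Regex;

// Compiled on first access, then reused; equivalent to the old lazy_static! block.
static ASC_DESC_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(r#"(asc|desc)\(([\w_-]+)\)"#).unwrap());

fn main() {
    let caps = ASC_DESC_REGEX.captures("asc(release_date)").unwrap();
    assert_eq!(&caps[1], "asc");
    assert_eq!(&caps[2], "release_date");
}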

@ -488,15 +488,22 @@ impl IndexScheduler {
Batch::Snapshot(_) => todo!(),
Batch::Dump(mut task) => {
let started_at = OffsetDateTime::now_utc();
-let KindWithContent::DumpExport { keys, instance_uid, dump_uid } = &task.kind else {
+let (keys, instance_uid, dump_uid) = if let KindWithContent::DumpExport {
+keys,
+instance_uid,
+dump_uid,
+} = &task.kind
+{
+(keys, instance_uid, dump_uid)
+} else {
unreachable!();
};
let dump = dump::DumpWriter::new(instance_uid.clone())?;
-let mut d_keys = dump.create_keys()?;
+let mut dump_keys = dump.create_keys()?;
// 1. dump the keys
for key in keys {
-d_keys.push_key(key)?;
+dump_keys.push_key(key)?;
}
let rtxn = self.env.read_txn()?;
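The `let ... else` binding is rewritten as an `if let`/`else` expression that evaluates to the bound fields, presumably because `let ... else` was not yet available on the toolchain in use. A reduced sketch of the same shape, with a hypothetical `Kind` enum standing in for `KindWithContent`:

// Hypothetical stand-in for KindWithContent; only the binding pattern matters here.
#[allow(dead_code)]
enum Kind {
    DumpExport { keys: Vec<String>, dump_uid: String },
    Other,
}

fn main() {
    let task_kind = Kind::DumpExport { keys: vec!["key-1".into()], dump_uid: "2022-10-17".into() };

    // Same shape as the diff: bind the fields through `if let`, treating any
    // other variant as unreachable because the caller only passes dump tasks.
    let (keys, dump_uid) = if let Kind::DumpExport { keys, dump_uid } = &task_kind {
        (keys, dump_uid)
    } else {
        unreachable!();
    };

    println!("dumping {} key(s) into {}.dump", keys.len(), dump_uid);
}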
@ -575,8 +582,8 @@ impl IndexScheduler {
}
let path = self.dumps_path.join(format!("{}.dump", dump_uid));
-let file = File::create(path).unwrap();
-dump.persist_to(BufWriter::new(file)).unwrap();
+let file = File::create(path)?;
+dump.persist_to(BufWriter::new(file))?;
task.status = Status::Succeeded;

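Persisting the dump no longer calls `.unwrap()`; failures are propagated with `?` so the task can be marked failed upstream. A standalone sketch of that propagation, using a hypothetical `persist_dump` helper and path:

use std::fs::File;
use std::io::{BufWriter, Write};

// Any I/O error bubbles up to the caller instead of panicking.
fn persist_dump(path: &str, payload: &[u8]) -> std::io::Result<()> {
    let file = File::create(path)?;
    let mut writer = BufWriter::new(file);
    writer.write_all(payload)?;
    writer.flush()?;
    Ok(())
}

fn main() {
    if let Err(e) = persist_dump("/tmp/example.dump", b"dump contents") {
        eprintln!("could not persist dump: {e}");
    }
}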

@ -17,7 +17,6 @@ use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use meilisearch_types::InstanceUid;
use std::path::PathBuf;
-use std::str::FromStr;
use std::sync::{Arc, RwLock};
use file_store::FileStore;
@ -397,7 +396,7 @@ impl IndexScheduler {
/// Register a new task comming from a dump in the scheduler.
/// By takinig a mutable ref we're pretty sure no one will ever import a dump while actix is running.
-pub fn register_dumpped_task(
+pub fn register_dumped_task(
&mut self,
task: TaskDump,
content_file: Option<Box<UpdateFile>>,
@ -421,9 +420,7 @@ impl IndexScheduler {
}
// If the task isn't `Enqueued` then just generate a recognisable `Uuid`
// in case we try to open it later.
-_ if task.status != Status::Enqueued => {
-Some(Uuid::from_str("00112233-4455-6677-8899-aabbccddeeff").unwrap())
-}
+_ if task.status != Status::Enqueued => Some(Uuid::nil()),
_ => None,
};
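The hard-coded "recognisable" UUID is replaced by `Uuid::nil()`, which needs no string parsing or `unwrap()`. Assuming the `uuid` crate, the placeholder value looks like this:

use uuid::Uuid;

fn main() {
    // All-zero UUID: still easy to spot in storage, but infallible to construct.
    let placeholder = Uuid::nil();
    assert_eq!(placeholder.to_string(), "00000000-0000-0000-0000-000000000000");
}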
@ -492,7 +489,7 @@ impl IndexScheduler {
};
self.all_tasks
-.append(&mut wtxn, &BEU32::new(task.uid), &task)?;
+.put(&mut wtxn, &BEU32::new(task.uid), &task)?;
if let Some(indexes) = task.indexes() {
for index in indexes {


@ -79,7 +79,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
todo!("We'll see later"),
)
};
-let meilisearch = || -> anyhow::Result<_> {
+let meilisearch_builder = || -> anyhow::Result<_> {
// if anything wrong happens we delete the `data.ms` entirely.
match (
index_scheduler_builder().map_err(anyhow::Error::from),
@ -102,7 +102,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
let src_path_exists = path.exists();
if empty_db && src_path_exists {
-let (mut index_scheduler, mut auth_controller) = meilisearch()?;
+let (mut index_scheduler, mut auth_controller) = meilisearch_builder()?;
import_dump(
&opt.db_path,
path,
@ -120,7 +120,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
} else if !src_path_exists && !opt.ignore_missing_dump {
bail!("dump doesn't exist at {:?}", path)
} else {
-let (mut index_scheduler, mut auth_controller) = meilisearch()?;
+let (mut index_scheduler, mut auth_controller) = meilisearch_builder()?;
import_dump(
&opt.db_path,
path,
@ -130,7 +130,7 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<(IndexScheduler, AuthContr
(index_scheduler, auth_controller)
}
} else {
-meilisearch()?
+meilisearch_builder()?
};
/*
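Renaming the closure to `meilisearch_builder` makes it clearer that each call builds a fresh `(IndexScheduler, AuthController)` pair and can fail. A reduced sketch of the pattern, with empty placeholder types and the `anyhow` crate assumed:

use anyhow::Result;

// Placeholder types; the real ones come from index-scheduler and meilisearch-auth.
struct IndexScheduler;
struct AuthController;

fn main() -> Result<()> {
    // The fallible construction is written once and invoked from every branch
    // that needs it (fresh database, dump import, normal startup).
    let meilisearch_builder = || -> Result<(IndexScheduler, AuthController)> {
        Ok((IndexScheduler, AuthController))
    };

    let (_index_scheduler, _auth_controller) = meilisearch_builder()?;
    Ok(())
}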
@ -260,7 +260,7 @@ fn import_dump(
// 4. Import the tasks.
for ret in dump_reader.tasks() {
let (task, file) = ret?;
-index_scheduler.register_dumpped_task(task, file, &keys, instance_uid)?;
+index_scheduler.register_dumped_task(task, file, &keys, instance_uid)?;
}
Ok(())
}