Format let-else ❤️ 🎉

parent 661d1f90dc
commit 324d448236
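This commit reformats let-else bindings across the codebase to one consistent style: when the whole statement fits within the line width, the else { ... } stays inline; otherwise the scrutinee wraps, else moves to its own line, and the diverging body gets indented lines of its own. Below is a minimal sketch of both shapes, assuming Rust 1.65+ for let-else; the lookup function and every name in it are illustrative, not taken from this diff:

// A minimal sketch of the two let-else shapes this commit normalizes to.
// `lookup`, `values`, and `target` are illustrative names, not from the diff.
fn lookup(values: &[i32], target: i32) -> Option<usize> {
    // Fits within the line width: the else-block stays inline.
    let Some(first) = values.first() else { return None };

    // Too long for one line: the call chain wraps, `else` starts its own
    // line, and the diverging body gets indented lines of its own.
    let Some(position) = values
        .iter()
        .position(|&candidate| candidate == target && candidate >= *first)
    else {
        return None;
    };

    Some(position)
}

fn main() {
    assert_eq!(lookup(&[3, 5, 7], 7), Some(2));
    assert_eq!(lookup(&[3, 5, 7], 4), None);
}

Note that a let-else body must diverge (return, continue, panic, and so on), which is why every else block in the hunks below ends in a diverging expression.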
@@ -223,7 +223,9 @@ impl IndexMap {
         enable_mdb_writemap: bool,
         map_size_growth: usize,
     ) {
-        let Some(index) = self.available.remove(uuid) else { return; };
+        let Some(index) = self.available.remove(uuid) else {
+            return;
+        };
         self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }

@@ -147,9 +147,7 @@ impl Key {
 fn parse_expiration_date(
     string: Option<String>,
 ) -> std::result::Result<Option<OffsetDateTime>, ParseOffsetDateTimeError> {
-    let Some(string) = string else {
-        return Ok(None)
-    };
+    let Some(string) = string else { return Ok(None) };
     let datetime = if let Ok(datetime) = OffsetDateTime::parse(&string, &Rfc3339) {
         datetime
     } else if let Ok(primitive_datetime) = PrimitiveDateTime::parse(
@@ -97,8 +97,7 @@ impl ScoreDetails {
                 let attribute_details = attribute_details
                     .as_object_mut()
                     .expect("attribute details was not an object");
-                let Some(fid_details) = fid_details
-                else {
+                let Some(fid_details) = fid_details else {
                     unimplemented!("position not preceded by attribute");
                 };

@@ -153,7 +153,12 @@ pub fn bucket_sort<'ctx, Q: RankingRuleQueryTrait>(
             continue;
         }

-        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(ctx, logger, &ranking_rule_universes[cur_ranking_rule_index])? else {
+        let Some(next_bucket) = ranking_rules[cur_ranking_rule_index].next_bucket(
+            ctx,
+            logger,
+            &ranking_rule_universes[cur_ranking_rule_index],
+        )?
+        else {
             back!();
             continue;
         };
@@ -206,11 +206,9 @@ impl<'ctx, G: RankingRuleGraphTrait> RankingRule<'ctx, QueryGraph> for GraphBase

         let all_costs = state.all_costs.get(state.graph.query_graph.root_node);
         // Retrieve the cost of the paths to compute
-        let Some(&cost) = all_costs
-            .iter()
-            .find(|c| **c >= state.cur_cost) else {
-            self.state = None;
-            return Ok(None);
-        };
+        let Some(&cost) = all_costs.iter().find(|c| **c >= state.cur_cost) else {
+            self.state = None;
+            return Ok(None);
+        };
         state.cur_cost = cost + 1;

@@ -80,7 +80,9 @@ impl MatchingWords {
             let word = self.word_interner.get(*word);
             // if the word is a prefix we match using starts_with.
             if located_words.is_prefix && token.lemma().starts_with(word) {
-                let Some((char_index, c)) = word.char_indices().take(located_words.original_char_count).last() else {
+                let Some((char_index, c)) =
+                    word.char_indices().take(located_words.original_char_count).last()
+                else {
                     continue;
                 };
                 let prefix_length = char_index + c.len_utf8();
@@ -349,10 +349,7 @@ impl QueryGraph {
         for (_, node) in self.nodes.iter() {
             match &node.data {
                 QueryNodeData::Term(term) => {
-                    let Some(phrase) = term.term_subset.original_phrase(ctx)
-                    else {
-                        continue
-                    };
+                    let Some(phrase) = term.term_subset.original_phrase(ctx) else { continue };
                     let phrase = ctx.phrase_interner.get(phrase);
                     word_count += phrase.words.iter().copied().filter(|a| a.is_some()).count()
                 }
@@ -175,9 +175,7 @@ impl QueryTermSubset {

     pub fn use_prefix_db(&self, ctx: &SearchContext) -> Option<Word> {
         let original = ctx.term_interner.get(self.original);
-        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else {
-            return None
-        };
+        let Some(use_prefix_db) = original.zero_typo.use_prefix_db else { return None };
         let word = match &self.zero_typo_subset {
             NTypoTermSubset::All => Some(use_prefix_db),
             NTypoTermSubset::Subset { words, phrases: _ } => {
@@ -261,13 +259,15 @@ impl QueryTermSubset {

         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.iter().copied().map(Word::Derived))
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words: _, one_typo }) = &original.one_typo
+                else {
                     panic!()
                 };
                 result.extend(one_typo.intersection(words).copied().map(Word::Derived));
@@ -277,15 +277,11 @@ impl QueryTermSubset {

         match &self.two_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.iter().copied().map(Word::Derived));
             }
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else {
-                    panic!()
-                };
+                let Lazy::Init(TwoTypoTerm { two_typos }) = &original.two_typo else { panic!() };
                 result.extend(two_typos.intersection(words).copied().map(Word::Derived));
             }
             NTypoTermSubset::Nothing => {}
@@ -308,13 +304,15 @@ impl QueryTermSubset {

         match &self.one_typo_subset {
             NTypoTermSubset::All => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 result.extend(split_words.iter().copied());
             }
             NTypoTermSubset::Subset { phrases, .. } => {
-                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo else {
+                let Lazy::Init(OneTypoTerm { split_words, one_typo: _ }) = &original.one_typo
+                else {
                     panic!();
                 };
                 if let Some(split_words) = split_words {
@@ -18,7 +18,7 @@ pub fn build_edges(
         return Ok(vec![(
             right_ngram_max as u32,
             conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
-        )])
+        )]);
     };

     if left_term.positions.end() + 1 != *right_term.positions.start() {
@@ -75,8 +75,9 @@ impl<'ctx, Query> Sort<'ctx, Query> {
     }

     fn must_redact(index: &Index, rtxn: &'ctx heed::RoTxn, field_name: &str) -> Result<bool> {
-        let Some(displayed_fields) = index.displayed_fields(rtxn)?
-        else { return Ok(false); };
+        let Some(displayed_fields) = index.displayed_fields(rtxn)? else {
+            return Ok(false);
+        };

         Ok(!displayed_fields.iter().any(|&field| field == field_name))
     }
@@ -2048,10 +2048,11 @@ mod tests {
             "branch_id_number": 0
         }]};

-        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound {
-            candidates
-        })) =
-            index.add_documents(doc_multiple_ids) else { panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)") };
+        let Err(Error::UserError(UserError::MultiplePrimaryKeyCandidatesFound { candidates })) =
+            index.add_documents(doc_multiple_ids)
+        else {
+            panic!("Expected Error::UserError(MultiplePrimaryKeyCandidatesFound)")
+        };

         assert_eq!(candidates, vec![S("id"), S("project_id"), S("public_uid"),]);
