From 6b2fe94192fb8731c6ba542eda5f3a90cdc86f9f Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Mon, 24 Oct 2022 21:34:13 +0200 Subject: [PATCH 1/6] Fixes for clippy bringing us down to 18 remaining issues. This brings us a step closer to enforcing clippy on each build. --- milli/src/asc_desc.rs | 2 +- milli/src/documents/builder.rs | 2 +- milli/src/documents/mod.rs | 6 +- milli/src/fields_ids_map.rs | 2 +- .../facet_string_level_zero_value_codec.rs | 2 +- .../facet_string_zero_bounds_value_codec.rs | 4 +- milli/src/index.rs | 6 +- milli/src/search/criteria/asc_desc.rs | 2 +- milli/src/search/criteria/attribute.rs | 17 +++-- milli/src/search/criteria/geo.rs | 2 +- milli/src/search/criteria/initial.rs | 2 +- milli/src/search/criteria/mod.rs | 68 +++++++++---------- milli/src/search/criteria/proximity.rs | 8 +-- milli/src/search/criteria/typo.rs | 6 +- milli/src/search/criteria/words.rs | 5 +- milli/src/search/facet/facet_distribution.rs | 4 +- milli/src/search/facet/filter.rs | 34 +++++----- milli/src/search/matches/matching_words.rs | 2 +- milli/src/search/matches/mod.rs | 10 +-- milli/src/search/query_tree.rs | 15 ++-- milli/src/update/delete_documents.rs | 21 +++--- milli/src/update/facets.rs | 10 +-- milli/src/update/index_documents/enrich.rs | 8 +-- .../extract/extract_docid_word_positions.rs | 2 +- .../extract/extract_fid_docid_facet_values.rs | 6 +- .../src/update/index_documents/extract/mod.rs | 2 +- 26 files changed, 117 insertions(+), 131 deletions(-) diff --git a/milli/src/asc_desc.rs b/milli/src/asc_desc.rs index 88023b3cf..21065da36 100644 --- a/milli/src/asc_desc.rs +++ b/milli/src/asc_desc.rs @@ -70,7 +70,7 @@ impl FromStr for Member { type Err = AscDescError; fn from_str(text: &str) -> Result { - match text.strip_prefix("_geoPoint(").and_then(|text| text.strip_suffix(")")) { + match text.strip_prefix("_geoPoint(").and_then(|text| text.strip_suffix(')')) { Some(point) => { let (lat, lng) = point .split_once(',') diff --git a/milli/src/documents/builder.rs b/milli/src/documents/builder.rs index 9fda31cf0..1fa59168e 100644 --- a/milli/src/documents/builder.rs +++ b/milli/src/documents/builder.rs @@ -60,7 +60,7 @@ impl DocumentsBatchBuilder { /// Appends a new JSON object into the batch and updates the `DocumentsBatchIndex` accordingly. pub fn append_json_object(&mut self, object: &Object) -> io::Result<()> { // Make sure that we insert the fields ids in order as the obkv writer has this requirement. - let mut fields_ids: Vec<_> = object.keys().map(|k| self.fields_index.insert(&k)).collect(); + let mut fields_ids: Vec<_> = object.keys().map(|k| self.fields_index.insert(k)).collect(); fields_ids.sort_unstable(); self.obkv_buffer.clear(); diff --git a/milli/src/documents/mod.rs b/milli/src/documents/mod.rs index 0bdf6600a..da3a07942 100644 --- a/milli/src/documents/mod.rs +++ b/milli/src/documents/mod.rs @@ -25,9 +25,9 @@ const DOCUMENTS_BATCH_INDEX_KEY: [u8; 8] = u64::MAX.to_be_bytes(); pub fn obkv_to_object(obkv: &KvReader, index: &DocumentsBatchIndex) -> Result { obkv.iter() .map(|(field_id, value)| { - let field_name = index.name(field_id).ok_or_else(|| { - FieldIdMapMissingEntry::FieldId { field_id, process: "obkv_to_object" } - })?; + let field_name = index + .name(field_id) + .ok_or(FieldIdMapMissingEntry::FieldId { field_id, process: "obkv_to_object" })?; let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson)?; Ok((field_name.to_string(), value)) }) diff --git a/milli/src/fields_ids_map.rs b/milli/src/fields_ids_map.rs index b0a084c3c..810ff755b 100644 --- a/milli/src/fields_ids_map.rs +++ b/milli/src/fields_ids_map.rs @@ -65,7 +65,7 @@ impl FieldsIdsMap { } /// Iterate over the ids in the order of the ids. - pub fn ids<'a>(&'a self) -> impl Iterator + 'a { + pub fn ids(&'_ self) -> impl Iterator + '_ { self.ids_names.keys().copied() } diff --git a/milli/src/heed_codec/facet/facet_string_level_zero_value_codec.rs b/milli/src/heed_codec/facet/facet_string_level_zero_value_codec.rs index 22031c474..d1605e6ef 100644 --- a/milli/src/heed_codec/facet/facet_string_level_zero_value_codec.rs +++ b/milli/src/heed_codec/facet/facet_string_level_zero_value_codec.rs @@ -34,7 +34,7 @@ where type EItem = (&'a str, C::EItem); fn bytes_encode((string, value): &'a Self::EItem) -> Option> { - let value_bytes = C::bytes_encode(&value)?; + let value_bytes = C::bytes_encode(value)?; let mut bytes = Vec::with_capacity(2 + string.len() + value_bytes.len()); encode_prefix_string(string, &mut bytes).ok()?; diff --git a/milli/src/heed_codec/facet/facet_string_zero_bounds_value_codec.rs b/milli/src/heed_codec/facet/facet_string_zero_bounds_value_codec.rs index 337433c2b..90ba09ae2 100644 --- a/milli/src/heed_codec/facet/facet_string_zero_bounds_value_codec.rs +++ b/milli/src/heed_codec/facet/facet_string_zero_bounds_value_codec.rs @@ -66,14 +66,14 @@ where bytes.extend_from_slice(left.as_bytes()); bytes.extend_from_slice(right.as_bytes()); - let value_bytes = C::bytes_encode(&value)?; + let value_bytes = C::bytes_encode(value)?; bytes.extend_from_slice(&value_bytes[..]); Some(Cow::Owned(bytes)) } None => { bytes.push(0); - let value_bytes = C::bytes_encode(&value)?; + let value_bytes = C::bytes_encode(value)?; bytes.extend_from_slice(&value_bytes[..]); Some(Cow::Owned(bytes)) } diff --git a/milli/src/index.rs b/milli/src/index.rs index 94e2f538d..0601ae7b7 100644 --- a/milli/src/index.rs +++ b/milli/src/index.rs @@ -320,7 +320,7 @@ impl Index { /// Writes the documents primary key, this is the field name that is used to store the id. pub(crate) fn put_primary_key(&self, wtxn: &mut RwTxn, primary_key: &str) -> heed::Result<()> { self.set_updated_at(wtxn, &OffsetDateTime::now_utc())?; - self.main.put::<_, Str, Str>(wtxn, main_key::PRIMARY_KEY_KEY, &primary_key) + self.main.put::<_, Str, Str>(wtxn, main_key::PRIMARY_KEY_KEY, primary_key) } /// Deletes the primary key of the documents, this can be done to reset indexes settings. @@ -1013,7 +1013,7 @@ impl Index { let kv = self .documents .get(rtxn, &BEU32::new(id))? - .ok_or_else(|| UserError::UnknownInternalDocumentId { document_id: id })?; + .ok_or(UserError::UnknownInternalDocumentId { document_id: id })?; documents.push((id, kv)); } @@ -1072,7 +1072,7 @@ impl Index { wtxn: &mut RwTxn, time: &OffsetDateTime, ) -> heed::Result<()> { - self.main.put::<_, Str, SerdeJson>(wtxn, main_key::UPDATED_AT_KEY, &time) + self.main.put::<_, Str, SerdeJson>(wtxn, main_key::UPDATED_AT_KEY, time) } pub fn authorize_typos(&self, txn: &RoTxn) -> heed::Result { diff --git a/milli/src/search/criteria/asc_desc.rs b/milli/src/search/criteria/asc_desc.rs index 6d50c1bb5..bf015c5fc 100644 --- a/milli/src/search/criteria/asc_desc.rs +++ b/milli/src/search/criteria/asc_desc.rs @@ -115,7 +115,7 @@ impl<'t> Criterion for AscDesc<'t> { let mut candidates = match (&self.query_tree, candidates) { (_, Some(candidates)) => candidates, (Some(qt), None) => { - let context = CriteriaBuilder::new(&self.rtxn, &self.index)?; + let context = CriteriaBuilder::new(self.rtxn, self.index)?; resolve_query_tree(&context, qt, params.wdcache)? } (None, None) => self.index.documents_ids(self.rtxn)?, diff --git a/milli/src/search/criteria/attribute.rs b/milli/src/search/criteria/attribute.rs index d8feeeee9..7e55a1038 100644 --- a/milli/src/search/criteria/attribute.rs +++ b/milli/src/search/criteria/attribute.rs @@ -89,7 +89,7 @@ impl<'t> Criterion for Attribute<'t> { } } } else { - let mut set_buckets = match self.set_buckets.as_mut() { + let set_buckets = match self.set_buckets.as_mut() { Some(set_buckets) => set_buckets, None => { let new_buckets = initialize_set_buckets( @@ -102,7 +102,7 @@ impl<'t> Criterion for Attribute<'t> { } }; - match set_compute_candidates(&mut set_buckets, &allowed_candidates)? { + match set_compute_candidates(set_buckets, &allowed_candidates)? { Some((_score, candidates)) => candidates, None => { return Ok(Some(CriterionResult { @@ -199,18 +199,18 @@ impl<'t> QueryPositionIterator<'t> { let iter = ctx.word_position_iterator(word, in_prefix_cache)?; inner.push(iter.peekable()); } else { - for (word, _) in word_derivations(&word, true, 0, ctx.words_fst(), wdcache)? + for (word, _) in word_derivations(word, true, 0, ctx.words_fst(), wdcache)? { - let iter = ctx.word_position_iterator(&word, in_prefix_cache)?; + let iter = ctx.word_position_iterator(word, in_prefix_cache)?; inner.push(iter.peekable()); } } } QueryKind::Tolerant { typo, word } => { for (word, _) in - word_derivations(&word, query.prefix, *typo, ctx.words_fst(), wdcache)? + word_derivations(word, query.prefix, *typo, ctx.words_fst(), wdcache)? { - let iter = ctx.word_position_iterator(&word, in_prefix_cache)?; + let iter = ctx.word_position_iterator(word, in_prefix_cache)?; inner.push(iter.peekable()); } } @@ -476,8 +476,7 @@ fn initialize_linear_buckets( } else { words_positions .get(word) - .map(|positions| positions.iter().next()) - .flatten() + .and_then(|positions| positions.iter().next()) } } QueryKind::Tolerant { typo, word } => { @@ -574,7 +573,7 @@ fn flatten_query_tree(query_tree: &Operation) -> FlattenedQueryTree { if ops.iter().all(|op| op.query().is_some()) { vec![vec![ops.iter().flat_map(|op| op.query()).cloned().collect()]] } else { - ops.iter().map(recurse).flatten().collect() + ops.iter().flat_map(recurse).collect() } } Phrase(words) => { diff --git a/milli/src/search/criteria/geo.rs b/milli/src/search/criteria/geo.rs index e3bda51de..1b08cfac8 100644 --- a/milli/src/search/criteria/geo.rs +++ b/milli/src/search/criteria/geo.rs @@ -90,7 +90,7 @@ impl Criterion for Geo<'_> { let mut candidates = match (&query_tree, candidates) { (_, Some(candidates)) => candidates, (Some(qt), None) => { - let context = CriteriaBuilder::new(&self.rtxn, &self.index)?; + let context = CriteriaBuilder::new(self.rtxn, self.index)?; resolve_query_tree(&context, qt, params.wdcache)? } (None, None) => self.index.documents_ids(self.rtxn)?, diff --git a/milli/src/search/criteria/initial.rs b/milli/src/search/criteria/initial.rs index ac61adfe2..85daa813b 100644 --- a/milli/src/search/criteria/initial.rs +++ b/milli/src/search/criteria/initial.rs @@ -44,7 +44,7 @@ impl Criterion for Initial<'_, D> { let mut candidates = resolve_query_tree( self.ctx, answer.query_tree.as_ref().unwrap(), - &mut params.wdcache, + params.wdcache, )?; // Apply the filters on the documents retrieved with the query tree. diff --git a/milli/src/search/criteria/mod.rs b/milli/src/search/criteria/mod.rs index 3159afb9e..7d59bb3c0 100644 --- a/milli/src/search/criteria/mod.rs +++ b/milli/src/search/criteria/mod.rs @@ -186,19 +186,19 @@ impl<'c> Context<'c> for CriteriaBuilder<'c> { } fn word_docids(&self, word: &str) -> heed::Result> { - self.index.word_docids.get(self.rtxn, &word) + self.index.word_docids.get(self.rtxn, word) } fn exact_word_docids(&self, word: &str) -> heed::Result> { - self.index.exact_word_docids.get(self.rtxn, &word) + self.index.exact_word_docids.get(self.rtxn, word) } fn word_prefix_docids(&self, word: &str) -> heed::Result> { - self.index.word_prefix_docids.get(self.rtxn, &word) + self.index.word_prefix_docids.get(self.rtxn, word) } fn exact_word_prefix_docids(&self, word: &str) -> heed::Result> { - self.index.exact_word_prefix_docids.get(self.rtxn, &word) + self.index.exact_word_prefix_docids.get(self.rtxn, word) } fn word_pair_proximity_docids( @@ -321,7 +321,7 @@ impl<'t> CriteriaBuilder<'t> { exhaustive_number_hits, distinct, )) as Box; - for name in self.index.criteria(&self.rtxn)? { + for name in self.index.criteria(self.rtxn)? { criterion = match name { Name::Words => Box::new(Words::new(self, criterion)), Name::Typo => Box::new(Typo::new(self, criterion)), @@ -330,29 +330,23 @@ impl<'t> CriteriaBuilder<'t> { for asc_desc in sort_criteria { criterion = match asc_desc { AscDescName::Asc(Member::Field(field)) => Box::new(AscDesc::asc( - &self.index, - &self.rtxn, + self.index, + self.rtxn, criterion, field.to_string(), )?), AscDescName::Desc(Member::Field(field)) => Box::new(AscDesc::desc( - &self.index, - &self.rtxn, + self.index, + self.rtxn, criterion, field.to_string(), )?), - AscDescName::Asc(Member::Geo(point)) => Box::new(Geo::asc( - &self.index, - &self.rtxn, - criterion, - point.clone(), - )?), - AscDescName::Desc(Member::Geo(point)) => Box::new(Geo::desc( - &self.index, - &self.rtxn, - criterion, - point.clone(), - )?), + AscDescName::Asc(Member::Geo(point)) => { + Box::new(Geo::asc(self.index, self.rtxn, criterion, *point)?) + } + AscDescName::Desc(Member::Geo(point)) => { + Box::new(Geo::desc(self.index, self.rtxn, criterion, *point)?) + } }; } criterion @@ -363,10 +357,10 @@ impl<'t> CriteriaBuilder<'t> { Name::Attribute => Box::new(Attribute::new(self, criterion)), Name::Exactness => Box::new(Exactness::new(self, criterion, &primitive_query)?), Name::Asc(field) => { - Box::new(AscDesc::asc(&self.index, &self.rtxn, criterion, field)?) + Box::new(AscDesc::asc(self.index, self.rtxn, criterion, field)?) } Name::Desc(field) => { - Box::new(AscDesc::desc(&self.index, &self.rtxn, criterion, field)?) + Box::new(AscDesc::desc(self.index, self.rtxn, criterion, field)?) } }; } @@ -408,7 +402,7 @@ pub fn resolve_query_tree( } Ok(candidates) } - Phrase(words) => resolve_phrase(ctx, &words), + Phrase(words) => resolve_phrase(ctx, words), Or(_, ops) => { let mut candidates = RoaringBitmap::new(); for op in ops { @@ -457,7 +451,7 @@ pub fn resolve_phrase(ctx: &dyn Context, phrase: &[String]) -> Result Result { match &query.kind { QueryKind::Exact { word, original_typo } => { - if query.prefix && ctx.in_prefix_cache(&word) { - let mut docids = ctx.word_prefix_docids(&word)?.unwrap_or_default(); + if query.prefix && ctx.in_prefix_cache(word) { + let mut docids = ctx.word_prefix_docids(word)?.unwrap_or_default(); // only add the exact docids if the word hasn't been derived if *original_typo == 0 { - docids |= ctx.exact_word_prefix_docids(&word)?.unwrap_or_default(); + docids |= ctx.exact_word_prefix_docids(word)?.unwrap_or_default(); } Ok(docids) } else if query.prefix { - let words = word_derivations(&word, true, 0, ctx.words_fst(), wdcache)?; + let words = word_derivations(word, true, 0, ctx.words_fst(), wdcache)?; let mut docids = RoaringBitmap::new(); for (word, _typo) in words { - docids |= ctx.word_docids(&word)?.unwrap_or_default(); + docids |= ctx.word_docids(word)?.unwrap_or_default(); // only add the exact docids if the word hasn't been derived if *original_typo == 0 { - docids |= ctx.exact_word_docids(&word)?.unwrap_or_default(); + docids |= ctx.exact_word_docids(word)?.unwrap_or_default(); } } Ok(docids) } else { - let mut docids = ctx.word_docids(&word)?.unwrap_or_default(); + let mut docids = ctx.word_docids(word)?.unwrap_or_default(); // only add the exact docids if the word hasn't been derived if *original_typo == 0 { - docids |= ctx.exact_word_docids(&word)?.unwrap_or_default(); + docids |= ctx.exact_word_docids(word)?.unwrap_or_default(); } Ok(docids) } } QueryKind::Tolerant { typo, word } => { - let words = word_derivations(&word, query.prefix, *typo, ctx.words_fst(), wdcache)?; + let words = word_derivations(word, query.prefix, *typo, ctx.words_fst(), wdcache)?; let mut docids = RoaringBitmap::new(); for (word, typo) in words { - let mut current_docids = ctx.word_docids(&word)?.unwrap_or_default(); + let mut current_docids = ctx.word_docids(word)?.unwrap_or_default(); if *typo == 0 { - current_docids |= ctx.exact_word_docids(&word)?.unwrap_or_default() + current_docids |= ctx.exact_word_docids(word)?.unwrap_or_default() } docids |= current_docids; } @@ -585,7 +579,7 @@ fn query_pair_proximity_docids( } (QueryKind::Tolerant { typo, word: left }, QueryKind::Exact { word: right, .. }) => { let l_words = - word_derivations(&left, false, *typo, ctx.words_fst(), wdcache)?.to_owned(); + word_derivations(left, false, *typo, ctx.words_fst(), wdcache)?.to_owned(); if prefix { let mut docids = RoaringBitmap::new(); for (left, _) in l_words { diff --git a/milli/src/search/criteria/proximity.rs b/milli/src/search/criteria/proximity.rs index e942a7bef..b7c10a2e0 100644 --- a/milli/src/search/criteria/proximity.rs +++ b/milli/src/search/criteria/proximity.rs @@ -99,7 +99,7 @@ impl<'t> Criterion for Proximity<'t> { // use set theory based algorithm resolve_candidates( self.ctx, - &query_tree, + query_tree, self.proximity, &mut self.candidates_cache, params.wdcache, @@ -194,7 +194,7 @@ fn resolve_candidates<'t>( .map(|w| Query { prefix: false, kind: QueryKind::exact(w.clone()) }); match (most_left, most_right) { - (Some(l), Some(r)) => vec![(l, r, resolve_phrase(ctx, &words)?)], + (Some(l), Some(r)) => vec![(l, r, resolve_phrase(ctx, words)?)], _otherwise => Default::default(), } } else { @@ -496,7 +496,7 @@ fn resolve_plane_sweep_candidates( match kind { QueryKind::Exact { word, .. } => { if *prefix { - let iter = word_derivations(word, true, 0, &words_positions) + let iter = word_derivations(word, true, 0, words_positions) .flat_map(|positions| positions.iter().map(|p| (p, 0, p))); result.extend(iter); } else if let Some(positions) = words_positions.get(word) { @@ -504,7 +504,7 @@ fn resolve_plane_sweep_candidates( } } QueryKind::Tolerant { typo, word } => { - let iter = word_derivations(word, *prefix, *typo, &words_positions) + let iter = word_derivations(word, *prefix, *typo, words_positions) .flat_map(|positions| positions.iter().map(|p| (p, 0, p))); result.extend(iter); } diff --git a/milli/src/search/criteria/typo.rs b/milli/src/search/criteria/typo.rs index 605089fae..76bd04d20 100644 --- a/milli/src/search/criteria/typo.rs +++ b/milli/src/search/criteria/typo.rs @@ -69,7 +69,7 @@ impl<'t> Criterion for Typo<'t> { let fst = self.ctx.words_fst(); let new_query_tree = match self.typos { typos if typos < MAX_TYPOS_PER_WORD => alterate_query_tree( - &fst, + fst, query_tree.clone(), self.typos, params.wdcache, @@ -78,7 +78,7 @@ impl<'t> Criterion for Typo<'t> { // When typos >= MAX_TYPOS_PER_WORD, no more alteration of the query tree is possible, // we keep the altered query tree *query_tree = alterate_query_tree( - &fst, + fst, query_tree.clone(), self.typos, params.wdcache, @@ -199,7 +199,7 @@ fn alterate_query_tree( ops.iter_mut().try_for_each(|op| recurse(words_fst, op, number_typos, wdcache)) } // Because Phrases don't allow typos, no alteration can be done. - Phrase(_words) => return Ok(()), + Phrase(_words) => Ok(()), Operation::Query(q) => { if let QueryKind::Tolerant { typo, word } = &q.kind { // if no typo is allowed we don't call word_derivations function, diff --git a/milli/src/search/criteria/words.rs b/milli/src/search/criteria/words.rs index ccc6c0617..b67b7f6b4 100644 --- a/milli/src/search/criteria/words.rs +++ b/milli/src/search/criteria/words.rs @@ -53,10 +53,7 @@ impl<'t> Criterion for Words<'t> { None => None, }; - let bucket_candidates = match self.bucket_candidates.as_mut() { - Some(bucket_candidates) => Some(take(bucket_candidates)), - None => None, - }; + let bucket_candidates = self.bucket_candidates.as_mut().map(take); return Ok(Some(CriterionResult { query_tree: Some(query_tree), diff --git a/milli/src/search/facet/facet_distribution.rs b/milli/src/search/facet/facet_distribution.rs index b2718a490..47e4088fe 100644 --- a/milli/src/search/facet/facet_distribution.rs +++ b/milli/src/search/facet/facet_distribution.rs @@ -66,7 +66,7 @@ impl<'a> FacetDistribution<'a> { ) -> heed::Result<()> { match facet_type { FacetType::Number => { - let mut key_buffer: Vec<_> = field_id.to_be_bytes().iter().copied().collect(); + let mut key_buffer: Vec<_> = field_id.to_be_bytes().to_vec(); let distribution_prelength = distribution.len(); let db = self.index.field_id_docid_facet_f64s; @@ -91,7 +91,7 @@ impl<'a> FacetDistribution<'a> { } FacetType::String => { let mut normalized_distribution = BTreeMap::new(); - let mut key_buffer: Vec<_> = field_id.to_be_bytes().iter().copied().collect(); + let mut key_buffer: Vec<_> = field_id.to_be_bytes().to_vec(); let db = self.index.field_id_docid_facet_strings; for docid in candidates.into_iter() { diff --git a/milli/src/search/facet/filter.rs b/milli/src/search/facet/filter.rs index 7241dab2b..1d8fcd389 100644 --- a/milli/src/search/facet/filter.rs +++ b/milli/src/search/facet/filter.rs @@ -96,7 +96,7 @@ impl<'a> Filter<'a> { Either::Left(array) => { let mut ors = vec![]; for rule in array { - if let Some(filter) = Self::from_str(rule.as_ref())? { + if let Some(filter) = Self::from_str(rule)? { ors.push(filter.condition); } } @@ -108,7 +108,7 @@ impl<'a> Filter<'a> { } } Either::Right(rule) => { - if let Some(filter) = Self::from_str(rule.as_ref())? { + if let Some(filter) = Self::from_str(rule)? { ands.push(filter.condition); } } @@ -358,7 +358,7 @@ impl<'a> Filter<'a> { index, filterable_fields, )?; - return Ok(all_ids - selected); + Ok(all_ids - selected) } FilterCondition::In { fid, els } => { if crate::is_faceted(fid.value(), filterable_fields) { @@ -377,38 +377,36 @@ impl<'a> Filter<'a> { Ok(RoaringBitmap::new()) } } else { - return Err(fid.as_external_error(FilterError::AttributeNotFilterable { + Err(fid.as_external_error(FilterError::AttributeNotFilterable { attribute: fid.value(), filterable_fields: filterable_fields.clone(), - }))?; + }))? } } FilterCondition::Condition { fid, op } => { if crate::is_faceted(fid.value(), filterable_fields) { let field_ids_map = index.fields_ids_map(rtxn)?; if let Some(fid) = field_ids_map.id(fid.value()) { - Self::evaluate_operator(rtxn, index, fid, &op) + Self::evaluate_operator(rtxn, index, fid, op) } else { - return Ok(RoaringBitmap::new()); + Ok(RoaringBitmap::new()) } } else { match fid.lexeme() { attribute @ "_geo" => { - return Err(fid.as_external_error(FilterError::BadGeo(attribute)))?; + Err(fid.as_external_error(FilterError::BadGeo(attribute)))? } attribute if attribute.starts_with("_geoPoint(") => { - return Err(fid.as_external_error(FilterError::BadGeo("_geoPoint")))?; + Err(fid.as_external_error(FilterError::BadGeo("_geoPoint")))? } attribute @ "_geoDistance" => { - return Err(fid.as_external_error(FilterError::Reserved(attribute)))?; + Err(fid.as_external_error(FilterError::Reserved(attribute)))? } attribute => { - return Err(fid.as_external_error( - FilterError::AttributeNotFilterable { - attribute, - filterable_fields: filterable_fields.clone(), - }, - ))?; + Err(fid.as_external_error(FilterError::AttributeNotFilterable { + attribute, + filterable_fields: filterable_fields.clone(), + }))? } } } @@ -477,10 +475,10 @@ impl<'a> Filter<'a> { Ok(result) } else { - return Err(point[0].as_external_error(FilterError::AttributeNotFilterable { + Err(point[0].as_external_error(FilterError::AttributeNotFilterable { attribute: "_geo", filterable_fields: filterable_fields.clone(), - }))?; + }))? } } } diff --git a/milli/src/search/matches/matching_words.rs b/milli/src/search/matches/matching_words.rs index 71fbfd794..1f6ead8a9 100644 --- a/milli/src/search/matches/matching_words.rs +++ b/milli/src/search/matches/matching_words.rs @@ -44,7 +44,7 @@ impl<'a> Iterator for MatchesIter<'a, '_> { fn next(&mut self) -> Option { match self.inner.next() { - Some((matching_words, ids)) => match matching_words[0].match_token(&self.token) { + Some((matching_words, ids)) => match matching_words[0].match_token(self.token) { Some(char_len) => { if matching_words.len() > 1 { Some(MatchType::Partial(PartialMatch { diff --git a/milli/src/search/matches/mod.rs b/milli/src/search/matches/mod.rs index 53101a065..b76ddef99 100644 --- a/milli/src/search/matches/mod.rs +++ b/milli/src/search/matches/mod.rs @@ -49,16 +49,16 @@ impl<'a, A> MatcherBuilder<'a, A> { pub fn build<'t, 'm>(&'m self, text: &'t str) -> Matcher<'t, 'm, A> { let crop_marker = match &self.crop_marker { Some(marker) => marker.as_str(), - None => &DEFAULT_CROP_MARKER, + None => DEFAULT_CROP_MARKER, }; let highlight_prefix = match &self.highlight_prefix { Some(marker) => marker.as_str(), - None => &DEFAULT_HIGHLIGHT_PREFIX, + None => DEFAULT_HIGHLIGHT_PREFIX, }; let highlight_suffix = match &self.highlight_suffix { Some(marker) => marker.as_str(), - None => &DEFAULT_HIGHLIGHT_SUFFIX, + None => DEFAULT_HIGHLIGHT_SUFFIX, }; Matcher { text, @@ -95,7 +95,7 @@ pub struct Match { token_position: usize, } -#[derive(Serialize, Debug, Clone, PartialEq)] +#[derive(Serialize, Debug, Clone, PartialEq, Eq)] pub struct MatchBounds { pub start: usize, pub length: usize, @@ -131,7 +131,7 @@ impl<'t, A: AsRef<[u8]>> Matcher<'t, '_, A> { potential_matches.push((token_position, word_position, partial.char_len())); for (token_position, word_position, word) in words_positions { - partial = match partial.match_token(&word) { + partial = match partial.match_token(word) { // token matches the partial match, but the match is not full, // we temporarly save the current token then we try to match the next one. Some(MatchType::Partial(partial)) => { diff --git a/milli/src/search/query_tree.rs b/milli/src/search/query_tree.rs index 034b9123b..6e908f25d 100755 --- a/milli/src/search/query_tree.rs +++ b/milli/src/search/query_tree.rs @@ -188,8 +188,8 @@ impl<'a> Context for QueryTreeBuilder<'a> { } fn min_word_len_for_typo(&self) -> heed::Result<(u8, u8)> { - let one = self.index.min_word_len_one_typo(&self.rtxn)?; - let two = self.index.min_word_len_two_typos(&self.rtxn)?; + let one = self.index.min_word_len_one_typo(self.rtxn)?; + let two = self.index.min_word_len_two_typos(self.rtxn)?; Ok((one, two)) } @@ -207,7 +207,7 @@ impl<'a> Context for QueryTreeBuilder<'a> { self.index .word_pair_proximity_docids .remap_data_type::() - .get(&self.rtxn, &key) + .get(self.rtxn, &key) } } @@ -313,7 +313,7 @@ pub struct TypoConfig<'a> { /// Return the `QueryKind` of a word depending on `authorize_typos` /// and the provided word length. -fn typos<'a>(word: String, authorize_typos: bool, config: TypoConfig<'a>) -> QueryKind { +fn typos(word: String, authorize_typos: bool, config: TypoConfig<'_>) -> QueryKind { if authorize_typos && !config.exact_words.map_or(false, |s| s.contains(&word)) { let count = word.chars().count().min(u8::MAX as usize) as u8; if count < config.word_len_one_typo { @@ -556,7 +556,7 @@ fn create_matching_words( for synonym in synonyms { let synonym = synonym .into_iter() - .map(|syn| MatchingWord::new(syn.to_string(), 0, false)) + .map(|syn| MatchingWord::new(syn, 0, false)) .collect(); matching_words.push((synonym, vec![id])); } @@ -583,8 +583,7 @@ fn create_matching_words( PrimitiveQueryPart::Phrase(words) => { let ids: Vec<_> = (0..words.len()).into_iter().map(|i| id + i as PrimitiveWordId).collect(); - let words = - words.into_iter().map(|w| MatchingWord::new(w.to_string(), 0, false)).collect(); + let words = words.into_iter().map(|w| MatchingWord::new(w, 0, false)).collect(); matching_words.push((words, ids)); } } @@ -639,7 +638,7 @@ fn create_matching_words( for synonym in synonyms { let synonym = synonym .into_iter() - .map(|syn| MatchingWord::new(syn.to_string(), 0, false)) + .map(|syn| MatchingWord::new(syn, 0, false)) .collect(); matching_words.push((synonym, ids.clone())); } diff --git a/milli/src/update/delete_documents.rs b/milli/src/update/delete_documents.rs index 54328b50d..26340b9dd 100644 --- a/milli/src/update/delete_documents.rs +++ b/milli/src/update/delete_documents.rs @@ -127,7 +127,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> { // the `soft_deleted_documents_ids` bitmap and early exit. let size_used = self.index.used_size()?; let map_size = self.index.env.map_size()? as u64; - let nb_documents = self.index.number_of_documents(&self.wtxn)?; + let nb_documents = self.index.number_of_documents(self.wtxn)?; let nb_soft_deleted = soft_deleted_docids.len(); let percentage_available = 100 - (size_used * 100 / map_size); @@ -158,12 +158,11 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> { // and we can reset the soft deleted bitmap self.index.put_soft_deleted_documents_ids(self.wtxn, &RoaringBitmap::new())?; - let primary_key = self.index.primary_key(self.wtxn)?.ok_or_else(|| { - InternalError::DatabaseMissingEntry { + let primary_key = + self.index.primary_key(self.wtxn)?.ok_or(InternalError::DatabaseMissingEntry { db_name: db_name::MAIN, key: Some(main_key::PRIMARY_KEY_KEY), - } - })?; + })?; // Since we already checked if the DB was empty, if we can't find the primary key, then // something is wrong, and we must return an error. @@ -433,7 +432,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> { .map(|point| (point, point.data.0)) .unzip(); points_to_remove.iter().for_each(|point| { - rtree.remove(&point); + rtree.remove(point); }); geo_faceted_doc_ids -= docids_to_remove; @@ -534,7 +533,7 @@ fn remove_from_word_docids( // We create an iterator to be able to get the content and delete the word docids. // It's faster to acquire a cursor to get and delete or put, as we avoid traversing // the LMDB B-Tree two times but only once. - let mut iter = db.prefix_iter_mut(txn, &word)?; + let mut iter = db.prefix_iter_mut(txn, word)?; if let Some((key, mut docids)) = iter.next().transpose()? { if key == word { let previous_len = docids.len(); @@ -597,7 +596,7 @@ fn remove_docids_from_facet_field_id_string_docids<'a, C, D>( // level key. We must then parse the value using the appropriate codec. let (group, mut docids) = FacetStringZeroBoundsValueCodec::::bytes_decode(val) - .ok_or_else(|| SerializationError::Decoding { db_name })?; + .ok_or(SerializationError::Decoding { db_name })?; let previous_len = docids.len(); docids -= to_remove; @@ -609,7 +608,7 @@ fn remove_docids_from_facet_field_id_string_docids<'a, C, D>( let val = &(group, docids); let value_bytes = FacetStringZeroBoundsValueCodec::::bytes_encode(val) - .ok_or_else(|| SerializationError::Encoding { db_name })?; + .ok_or(SerializationError::Encoding { db_name })?; // safety: we don't keep references from inside the LMDB database. unsafe { iter.put_current(&key, &value_bytes)? }; @@ -619,7 +618,7 @@ fn remove_docids_from_facet_field_id_string_docids<'a, C, D>( // The key corresponds to a level zero facet string. let (original_value, mut docids) = FacetStringLevelZeroValueCodec::bytes_decode(val) - .ok_or_else(|| SerializationError::Decoding { db_name })?; + .ok_or(SerializationError::Decoding { db_name })?; let previous_len = docids.len(); docids -= to_remove; @@ -630,7 +629,7 @@ fn remove_docids_from_facet_field_id_string_docids<'a, C, D>( let key = key.to_owned(); let val = &(original_value, docids); let value_bytes = FacetStringLevelZeroValueCodec::bytes_encode(val) - .ok_or_else(|| SerializationError::Encoding { db_name })?; + .ok_or(SerializationError::Encoding { db_name })?; // safety: we don't keep references from inside the LMDB database. unsafe { iter.put_current(&key, &value_bytes)? }; diff --git a/milli/src/update/facets.rs b/milli/src/update/facets.rs index 108acae4f..0d7dcbc50 100644 --- a/milli/src/update/facets.rs +++ b/milli/src/update/facets.rs @@ -262,8 +262,8 @@ impl<'t, 'u, 'i> Facets<'t, 'u, 'i> { /// 1. a vector of grenad::Reader. The reader at index `i` corresponds to the elements of level `i + 1` /// that must be inserted into the database. /// 2. a roaring bitmap of all the document ids present in the database -fn compute_facet_number_levels<'t>( - rtxn: &'t heed::RoTxn, +fn compute_facet_number_levels( + rtxn: &'_ heed::RoTxn, db: heed::Database, compression_type: CompressionType, compression_level: Option, @@ -496,7 +496,7 @@ where bitmaps.clear(); } // level 0 is already stored in the DB - return Ok(vec![]); + Ok(vec![]) } else { // level >= 1 // we compute each element of this level based on the elements of the level below it @@ -562,7 +562,7 @@ where } sub_writers.push(writer_into_reader(cur_writer)?); - return Ok(sub_writers); + Ok(sub_writers) } } @@ -598,7 +598,7 @@ fn write_number_entry( ) -> Result<()> { let key = (field_id, level, left, right); let key = FacetLevelValueF64Codec::bytes_encode(&key).ok_or(Error::Encoding)?; - let data = CboRoaringBitmapCodec::bytes_encode(&ids).ok_or(Error::Encoding)?; + let data = CboRoaringBitmapCodec::bytes_encode(ids).ok_or(Error::Encoding)?; writer.insert(&key, &data)?; Ok(()) } diff --git a/milli/src/update/index_documents/enrich.rs b/milli/src/update/index_documents/enrich.rs index 15fbe9319..7eda5dca4 100644 --- a/milli/src/update/index_documents/enrich.rs +++ b/milli/src/update/index_documents/enrich.rs @@ -140,7 +140,7 @@ fn fetch_or_generate_document_id( } None => Ok(Err(UserError::MissingDocumentId { primary_key: primary_key.to_string(), - document: obkv_to_object(&document, &documents_batch_index)?, + document: obkv_to_object(document, documents_batch_index)?, })), } } @@ -156,7 +156,7 @@ fn fetch_or_generate_document_id( if matching_documents_ids.len() >= 2 { return Ok(Err(UserError::TooManyDocumentIds { primary_key: nested.name().to_string(), - document: obkv_to_object(&document, &documents_batch_index)?, + document: obkv_to_object(document, documents_batch_index)?, })); } } @@ -170,7 +170,7 @@ fn fetch_or_generate_document_id( }, None => Ok(Err(UserError::MissingDocumentId { primary_key: nested.name().to_string(), - document: obkv_to_object(&document, &documents_batch_index)?, + document: obkv_to_object(document, documents_batch_index)?, })), } } @@ -313,7 +313,7 @@ pub fn validate_document_id_value(document_id: Value) -> Result Ok(Err(UserError::InvalidDocumentId { document_id: Value::String(string) })), }, Value::Number(number) if number.is_i64() => Ok(Ok(number.to_string())), - content => Ok(Err(UserError::InvalidDocumentId { document_id: content.clone() })), + content => Ok(Err(UserError::InvalidDocumentId { document_id: content })), } } diff --git a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs index e067623e2..f1d595039 100644 --- a/milli/src/update/index_documents/extract/extract_docid_word_positions.rs +++ b/milli/src/update/index_documents/extract/extract_docid_word_positions.rs @@ -132,7 +132,7 @@ fn json_to_string<'a>(value: &'a Value, buffer: &'a mut String) -> Option<&'a st } if let Value::String(string) = value { - Some(&string) + Some(string) } else if inner(value, buffer) { Some(buffer) } else { diff --git a/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs b/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs index f9d1443d5..44afcde6c 100644 --- a/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs +++ b/milli/src/update/index_documents/extract/extract_fid_docid_facet_values.rs @@ -67,7 +67,7 @@ pub fn extract_fid_docid_facet_values( facet_exists_docids.entry(field_id).or_default().insert(document); // For the other extraction tasks, prefix the key with the field_id and the document_id - key_buffer.extend_from_slice(&docid_bytes); + key_buffer.extend_from_slice(docid_bytes); let value = serde_json::from_slice(field_bytes).map_err(InternalError::SerdeJson)?; @@ -107,8 +107,8 @@ pub fn extract_fid_docid_facet_values( let facet_exists_docids_reader = writer_into_reader(facet_exists_docids_writer)?; Ok(( - sorter_into_reader(fid_docid_facet_numbers_sorter, indexer.clone())?, - sorter_into_reader(fid_docid_facet_strings_sorter, indexer.clone())?, + sorter_into_reader(fid_docid_facet_numbers_sorter, indexer)?, + sorter_into_reader(fid_docid_facet_strings_sorter, indexer)?, facet_exists_docids_reader, )) } diff --git a/milli/src/update/index_documents/extract/mod.rs b/milli/src/update/index_documents/extract/mod.rs index 50cc04610..8e0e61175 100644 --- a/milli/src/update/index_documents/extract/mod.rs +++ b/milli/src/update/index_documents/extract/mod.rs @@ -150,7 +150,7 @@ pub(crate) fn data_from_obkv_documents( spawn_extraction_task::<_, _, Vec>>( docid_fid_facet_numbers_chunks, indexer, - lmdb_writer_sx.clone(), + lmdb_writer_sx, extract_facet_number_docids, merge_cbo_roaring_bitmaps, TypedChunk::FieldIdFacetNumberDocids, From 17f7922bfc9d6b68527c0fc148dc09966c6b55bd Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Tue, 25 Oct 2022 12:42:38 +0200 Subject: [PATCH 2/6] Remove unneeded lifetimes. --- milli/src/search/query_tree.rs | 2 +- milli/src/update/facets.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/milli/src/search/query_tree.rs b/milli/src/search/query_tree.rs index 6e908f25d..9b4b38f76 100755 --- a/milli/src/search/query_tree.rs +++ b/milli/src/search/query_tree.rs @@ -313,7 +313,7 @@ pub struct TypoConfig<'a> { /// Return the `QueryKind` of a word depending on `authorize_typos` /// and the provided word length. -fn typos(word: String, authorize_typos: bool, config: TypoConfig<'_>) -> QueryKind { +fn typos(word: String, authorize_typos: bool, config: TypoConfig) -> QueryKind { if authorize_typos && !config.exact_words.map_or(false, |s| s.contains(&word)) { let count = word.chars().count().min(u8::MAX as usize) as u8; if count < config.word_len_one_typo { diff --git a/milli/src/update/facets.rs b/milli/src/update/facets.rs index 0d7dcbc50..ae2a6d7fd 100644 --- a/milli/src/update/facets.rs +++ b/milli/src/update/facets.rs @@ -263,7 +263,7 @@ impl<'t, 'u, 'i> Facets<'t, 'u, 'i> { /// that must be inserted into the database. /// 2. a roaring bitmap of all the document ids present in the database fn compute_facet_number_levels( - rtxn: &'_ heed::RoTxn, + rtxn: &heed::RoTxn, db: heed::Database, compression_type: CompressionType, compression_level: Option, From 2ce025a9069243a74a0a704aafa803aa875e1ad8 Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Tue, 25 Oct 2022 20:58:31 +0200 Subject: [PATCH 3/6] Fixes after rebase to fix new issues. --- milli/src/index.rs | 2 +- milli/src/search/criteria/mod.rs | 18 +++++++++--------- milli/src/update/facets.rs | 4 ++-- .../update/prefix_word_pairs/prefix_word.rs | 7 +++---- .../update/prefix_word_pairs/word_prefix.rs | 16 ++++++---------- 5 files changed, 21 insertions(+), 26 deletions(-) diff --git a/milli/src/index.rs b/milli/src/index.rs index 0601ae7b7..4144728f1 100644 --- a/milli/src/index.rs +++ b/milli/src/index.rs @@ -200,7 +200,7 @@ impl Index { pub fn new>(options: heed::EnvOpenOptions, path: P) -> Result { let now = OffsetDateTime::now_utc(); - Self::new_with_creation_dates(options, path, now.clone(), now) + Self::new_with_creation_dates(options, path, now, now) } fn set_creation_dates( diff --git a/milli/src/search/criteria/mod.rs b/milli/src/search/criteria/mod.rs index 7d59bb3c0..1b46c8441 100644 --- a/milli/src/search/criteria/mod.rs +++ b/milli/src/search/criteria/mod.rs @@ -562,11 +562,11 @@ fn query_pair_proximity_docids( )? { Some(docids) => Ok(docids), None => { - let r_words = word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?; + let r_words = word_derivations(right, true, 0, ctx.words_fst(), wdcache)?; all_word_pair_overall_proximity_docids( ctx, &[(left, 0)], - &r_words, + r_words, proximity, ) } @@ -592,11 +592,11 @@ fn query_pair_proximity_docids( Some(docids) => Ok(docids), None => { let r_words = - word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?; + word_derivations(right, true, 0, ctx.words_fst(), wdcache)?; all_word_pair_overall_proximity_docids( ctx, &[(left, 0)], - &r_words, + r_words, proximity, ) } @@ -609,17 +609,17 @@ fn query_pair_proximity_docids( } } (QueryKind::Exact { word: left, .. }, QueryKind::Tolerant { typo, word: right }) => { - let r_words = word_derivations(&right, prefix, *typo, ctx.words_fst(), wdcache)?; - all_word_pair_overall_proximity_docids(ctx, &[(left, 0)], &r_words, proximity) + let r_words = word_derivations(right, prefix, *typo, ctx.words_fst(), wdcache)?; + all_word_pair_overall_proximity_docids(ctx, &[(left, 0)], r_words, proximity) } ( QueryKind::Tolerant { typo: l_typo, word: left }, QueryKind::Tolerant { typo: r_typo, word: right }, ) => { let l_words = - word_derivations(&left, false, *l_typo, ctx.words_fst(), wdcache)?.to_owned(); - let r_words = word_derivations(&right, prefix, *r_typo, ctx.words_fst(), wdcache)?; - all_word_pair_overall_proximity_docids(ctx, &l_words, &r_words, proximity) + word_derivations(left, false, *l_typo, ctx.words_fst(), wdcache)?.to_owned(); + let r_words = word_derivations(right, prefix, *r_typo, ctx.words_fst(), wdcache)?; + all_word_pair_overall_proximity_docids(ctx, &l_words, r_words, proximity) } } } diff --git a/milli/src/update/facets.rs b/milli/src/update/facets.rs index ae2a6d7fd..50b34a714 100644 --- a/milli/src/update/facets.rs +++ b/milli/src/update/facets.rs @@ -332,8 +332,8 @@ fn compute_facet_number_levels( /// 1. a vector of grenad::Reader. The reader at index `i` corresponds to the elements of level `i + 1` /// that must be inserted into the database. /// 2. a roaring bitmap of all the document ids present in the database -fn compute_facet_strings_levels<'t>( - rtxn: &'t heed::RoTxn, +fn compute_facet_strings_levels( + rtxn: &heed::RoTxn, db: heed::Database, compression_type: CompressionType, compression_level: Option, diff --git a/milli/src/update/prefix_word_pairs/prefix_word.rs b/milli/src/update/prefix_word_pairs/prefix_word.rs index 26fe0105e..952e02558 100644 --- a/milli/src/update/prefix_word_pairs/prefix_word.rs +++ b/milli/src/update/prefix_word_pairs/prefix_word.rs @@ -30,9 +30,8 @@ pub fn index_prefix_word_database( debug!("Computing and writing the word prefix pair proximity docids into LMDB on disk..."); let common_prefixes: Vec<_> = common_prefix_fst_words - .into_iter() - .map(|s| s.into_iter()) - .flatten() + .iter() + .flat_map(|s| s.iter()) .map(|s| s.as_str()) .filter(|s| s.len() <= max_prefix_length) .collect(); @@ -73,7 +72,7 @@ pub fn index_prefix_word_database( // Now we do the same thing with the new prefixes and all word pairs in the DB let new_prefixes: Vec<_> = new_prefix_fst_words - .into_iter() + .iter() .map(|s| s.as_str()) .filter(|s| s.len() <= max_prefix_length) .collect(); diff --git a/milli/src/update/prefix_word_pairs/word_prefix.rs b/milli/src/update/prefix_word_pairs/word_prefix.rs index 5895cdc46..53e421fac 100644 --- a/milli/src/update/prefix_word_pairs/word_prefix.rs +++ b/milli/src/update/prefix_word_pairs/word_prefix.rs @@ -195,9 +195,8 @@ pub fn index_word_prefix_database( // Make a prefix trie from the common prefixes that are shorter than self.max_prefix_length let prefixes = PrefixTrieNode::from_sorted_prefixes( common_prefix_fst_words - .into_iter() - .map(|s| s.into_iter()) - .flatten() + .iter() + .flat_map(|s| s.iter()) .map(|s| s.as_str()) .filter(|s| s.len() <= max_prefix_length), ); @@ -237,10 +236,7 @@ pub fn index_word_prefix_database( // Now we do the same thing with the new prefixes and all word pairs in the DB let prefixes = PrefixTrieNode::from_sorted_prefixes( - new_prefix_fst_words - .into_iter() - .map(|s| s.as_str()) - .filter(|s| s.len() <= max_prefix_length), + new_prefix_fst_words.iter().map(|s| s.as_str()).filter(|s| s.len() <= max_prefix_length), ); if !prefixes.is_empty() { @@ -366,7 +362,7 @@ fn execute_on_word_pairs_and_prefixes( &mut prefix_buffer, &prefix_search_start, |prefix_buffer| { - batch.insert(&prefix_buffer, data.to_vec()); + batch.insert(prefix_buffer, data.to_vec()); }, ); } @@ -484,7 +480,7 @@ impl PrefixTrieNode { fn set_search_start(&self, word: &[u8], search_start: &mut PrefixTrieNodeSearchStart) -> bool { let byte = word[0]; if self.children[search_start.0].1 == byte { - return true; + true } else { match self.children[search_start.0..].binary_search_by_key(&byte, |x| x.1) { Ok(position) => { @@ -502,7 +498,7 @@ impl PrefixTrieNode { fn from_sorted_prefixes<'a>(prefixes: impl Iterator) -> Self { let mut node = PrefixTrieNode::default(); for prefix in prefixes { - node.insert_sorted_prefix(prefix.as_bytes().into_iter()); + node.insert_sorted_prefix(prefix.as_bytes().iter()); } node } From 42cdc38c7bcf3fff7127775ebd5ce3343ecce41d Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Tue, 25 Oct 2022 21:12:59 +0200 Subject: [PATCH 4/6] Allow weird ranges like 1..=0 to pass clippy. Everything else is just a warning and exit code will be 0. --- milli/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/milli/src/lib.rs b/milli/src/lib.rs index ffbe8f38f..05e80f9ba 100644 --- a/milli/src/lib.rs +++ b/milli/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::reversed_empty_ranges)] #[macro_use] pub mod documents; From 9d27ac8a2e222511465202e7f1764828cc3a4a91 Mon Sep 17 00:00:00 2001 From: Ewan Higgs Date: Tue, 25 Oct 2022 21:22:53 +0200 Subject: [PATCH 5/6] Ignore too many arguments to functions. --- milli/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/milli/src/lib.rs b/milli/src/lib.rs index 05e80f9ba..6fb83922a 100644 --- a/milli/src/lib.rs +++ b/milli/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::reversed_empty_ranges)] +#![allow(clippy::too_many_arguments)] #[macro_use] pub mod documents; From e883bccc7684800387bcec4c9651759d03746a0e Mon Sep 17 00:00:00 2001 From: curquiza Date: Wed, 26 Oct 2022 11:43:54 +0000 Subject: [PATCH 6/6] Update version for the next release (v0.35.0) in Cargo.toml files --- benchmarks/Cargo.toml | 2 +- cli/Cargo.toml | 2 +- filter-parser/Cargo.toml | 2 +- flatten-serde-json/Cargo.toml | 2 +- json-depth-checker/Cargo.toml | 2 +- milli/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml index ee6bae7c0..b5fee6640 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "benchmarks" -version = "0.34.0" +version = "0.35.0" edition = "2018" publish = false diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 97931a371..30fab7851 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cli" -version = "0.34.0" +version = "0.35.0" edition = "2018" description = "A CLI to interact with a milli index" publish = false diff --git a/filter-parser/Cargo.toml b/filter-parser/Cargo.toml index 1245b097b..b22fdaad5 100644 --- a/filter-parser/Cargo.toml +++ b/filter-parser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "filter-parser" -version = "0.34.0" +version = "0.35.0" edition = "2021" description = "The parser for the Meilisearch filter syntax" publish = false diff --git a/flatten-serde-json/Cargo.toml b/flatten-serde-json/Cargo.toml index 3953ad0f6..aa0787eed 100644 --- a/flatten-serde-json/Cargo.toml +++ b/flatten-serde-json/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "flatten-serde-json" -version = "0.34.0" +version = "0.35.0" edition = "2021" description = "Flatten serde-json objects like elastic search" readme = "README.md" diff --git a/json-depth-checker/Cargo.toml b/json-depth-checker/Cargo.toml index 2b71d1d18..db6132fe8 100644 --- a/json-depth-checker/Cargo.toml +++ b/json-depth-checker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "json-depth-checker" -version = "0.34.0" +version = "0.35.0" edition = "2021" description = "A library that indicates if a JSON must be flattened" publish = false diff --git a/milli/Cargo.toml b/milli/Cargo.toml index 835425714..f19d3781e 100644 --- a/milli/Cargo.toml +++ b/milli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "milli" -version = "0.34.0" +version = "0.35.0" authors = ["Kerollmops "] edition = "2018"