673: Add clippy job r=ManyTheFish a=unvalley

# Pull Request

## Related issue
Fixes #231 

## What does this PR do?
- fix the remaining clippy errors
- add a clippy job to CI (I set `nightly` as the toolchain)

## PR checklist
Please check if your PR fulfills the following requirements:
- [x] Does this PR fix an existing issue, or have you listed the changes applied in the PR description (and why they are needed)?
- [x] Have you read the contributing guidelines?
- [x] Have you made sure that the title is accurate and descriptive of the changes?


Co-authored-by: unvalley <kirohi.code@gmail.com>
bors[bot] 2022-11-08 09:43:26 +00:00 committed by GitHub
commit cf76ec7b37
13 changed files with 46 additions and 35 deletions


```diff
@@ -48,6 +48,24 @@ jobs:
           command: test
           args: --release --all
+  clippy:
+    name: Run Clippy
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: nightly
+          override: true
+          components: clippy
+      - name: Cache dependencies
+        uses: Swatinem/rust-cache@v2.0.0
+      - name: Run cargo clippy
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
   fmt:
     name: Run Rustfmt
     runs-on: ubuntu-20.04
```


```diff
@@ -2,6 +2,7 @@ status = [
     'Tests on ubuntu-20.04',
     'Tests on macos-latest',
     'Tests on windows-latest',
+    'Run Clippy',
     'Run Rustfmt',
 ]
 # 3 hours timeout
```


```diff
@@ -431,20 +431,17 @@ pub fn resolve_phrase(ctx: &dyn Context, phrase: &[Option<String>]) -> Result<Ro
     for win in phrase.windows(winsize) {
         // Get all the documents with the matching distance for each word pairs.
         let mut bitmaps = Vec::with_capacity(winsize.pow(2));
-        for (offset, s1) in win.iter().enumerate().filter_map(|(index, word)| {
-            if let Some(word) = word {
-                Some((index, word))
-            } else {
-                None
-            }
-        }) {
-            for (dist, s2) in win.iter().skip(offset + 1).enumerate().filter_map(|(index, word)| {
-                if let Some(word) = word {
-                    Some((index, word))
-                } else {
-                    None
-                }
-            }) {
+        for (offset, s1) in win
+            .iter()
+            .enumerate()
+            .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
+        {
+            for (dist, s2) in win
+                .iter()
+                .skip(offset + 1)
+                .enumerate()
+                .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
+            {
                 if dist == 0 {
                     match ctx.word_pair_proximity_docids(s1, s2, 1)? {
                         Some(m) => bitmaps.push(m),
```

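For readers unfamiliar with the lint: the old closures spelled out `if let Some(..) { Some(..) } else { None }`, which is `Option::map` written by hand (clippy's `manual_map`). A minimal standalone sketch of the rewritten idiom, with illustrative data rather than milli's:

```rust
fn main() {
    let win: Vec<Option<String>> = vec![Some("hello".into()), None, Some("world".into())];

    // Keep only the `Some` words, remembering each one's index in the window.
    // `word.as_ref().map(..)` borrows the String rather than moving it, and
    // `filter_map` drops the `None` slots in a single pass.
    let pairs: Vec<(usize, &String)> = win
        .iter()
        .enumerate()
        .filter_map(|(index, word)| word.as_ref().map(|word| (index, word)))
        .collect();

    assert_eq!(pairs, vec![(0, &"hello".to_string()), (2, &"world".to_string())]);
}
```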

```diff
@@ -488,7 +488,7 @@ fn resolve_plane_sweep_candidates(
         }
         // make a consecutive plane-sweep on the subgroup of words.
         let mut subgroup = Vec::with_capacity(words.len());
-        for word in words.into_iter().map(|w| w.as_deref().unwrap()) {
+        for word in words.iter().map(|w| w.as_deref().unwrap()) {
             match words_positions.get(word) {
                 Some(positions) => {
                     subgroup.push(positions.iter().map(|p| (p, 0, p)).collect())
```

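The loop body only needs `&str`s, so iterating by reference is enough; judging from the replacement, `words` is only borrowed here, making the explicit `iter()` the clearer spelling. A self-contained sketch of the same pattern, with hypothetical data:

```rust
fn main() {
    let words: Vec<Option<String>> = vec![Some("plane".into()), Some("sweep".into())];

    // `iter()` borrows the Vec, yielding `&Option<String>`; `as_deref()` then
    // converts each `&Option<String>` into an `Option<&str>`.
    for word in words.iter().map(|w| w.as_deref().unwrap()) {
        println!("{word}");
    }

    // Nothing was moved out of `words`, so it is still usable here; an
    // `into_iter()` on the owned Vec would have consumed it in the loop.
    assert_eq!(words.len(), 2);
}
```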

```diff
@@ -225,7 +225,7 @@ fn bytes_to_highlight(source: &str, target: &str) -> usize {
         for (col, char_t) in target.chars().enumerate() {
             let col = col + 1;
             let last_match_row = *last_row.get(&char_t).unwrap_or(&0);
-            let cost = if char_s == char_t { 0 } else { 1 };
+            let cost = usize::from(char_s != char_t);
             let dist_add = matrix[(row, col + 1)] + 1;
             let dist_del = matrix[(row + 1, col)] + 1;
```

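This is the pattern clippy's `bool_to_int_with_if` lint targets: an `if`/`else` that returns `0` or `1` is exactly the `From<bool>` conversion, which is guaranteed to map `false` to `0` and `true` to `1`. A quick standalone check:

```rust
fn main() {
    let (char_s, char_t) = ('a', 'b');

    // `From<bool>` for the integer types maps false -> 0 and true -> 1.
    let cost = usize::from(char_s != char_t);

    // Same result as the old branching form.
    let old_cost = if char_s == char_t { 0 } else { 1 };
    assert_eq!(cost, old_cost);
}
```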

```diff
@@ -589,11 +589,8 @@ fn create_matching_words(
             PrimitiveQueryPart::Phrase(words) => {
                 let ids: Vec<_> =
                     (0..words.len()).into_iter().map(|i| id + i as PrimitiveWordId).collect();
-                let words = words
-                    .into_iter()
-                    .filter_map(|w| w)
-                    .map(|w| MatchingWord::new(w, 0, false))
-                    .collect();
+                let words =
+                    words.into_iter().flatten().map(|w| MatchingWord::new(w, 0, false)).collect();
                 matching_words.push((words, ids));
             }
         }
```

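`filter_map(|w| w)` with an identity closure is precisely what `flatten()` does on an iterator of `Option`s; clippy reports it as `filter_map_identity`. A minimal sketch with illustrative data:

```rust
fn main() {
    let words: Vec<Option<String>> = vec![Some("new".into()), None, Some("york".into())];

    // `flatten()` keeps the payload of every `Some` and drops every `None`,
    // which is exactly what `filter_map(|w| w)` spelled out.
    let kept: Vec<String> = words.into_iter().flatten().collect();

    assert_eq!(kept, vec!["new".to_string(), "york".to_string()]);
}
```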

```diff
@@ -329,7 +329,7 @@ impl FacetsUpdateIncrementalInner {
             let key =
                 FacetGroupKey { field_id, level, left_bound: insertion_key.left_bound.clone() };
-            let value = FacetGroupValue { size: size_left as u8, bitmap: values_left };
+            let value = FacetGroupValue { size: size_left, bitmap: values_left };
             (key, value)
         };
@@ -345,7 +345,7 @@ impl FacetsUpdateIncrementalInner {
             }
             let key = FacetGroupKey { field_id, level, left_bound: right_left_bound.to_vec() };
-            let value = FacetGroupValue { size: size_right as u8, bitmap: values_right };
+            let value = FacetGroupValue { size: size_right, bitmap: values_right };
             (key, value)
         };
         drop(iter);
@@ -373,8 +373,7 @@ impl FacetsUpdateIncrementalInner {
         let highest_level = get_highest_level(txn, self.db, field_id)?;
-        let result =
-            self.insert_in_level(txn, field_id, highest_level as u8, facet_value, docids)?;
+        let result = self.insert_in_level(txn, field_id, highest_level, facet_value, docids)?;
         match result {
             InsertionResult::InPlace => return Ok(()),
             InsertionResult::Expand => return Ok(()),
@@ -425,7 +424,7 @@ impl FacetsUpdateIncrementalInner {
                 level: highest_level + 1,
                 left_bound: first_key.unwrap().left_bound,
             };
-            let value = FacetGroupValue { size: group_size as u8, bitmap: values };
+            let value = FacetGroupValue { size: group_size, bitmap: values };
             to_add.push((key.into_owned(), value));
         }
         // now we add the rest of the level, in case its size is > group_size * min_level_size
@@ -584,8 +583,7 @@ impl FacetsUpdateIncrementalInner {
         }
         let highest_level = get_highest_level(txn, self.db, field_id)?;
-        let result =
-            self.delete_in_level(txn, field_id, highest_level as u8, facet_value, docids)?;
+        let result = self.delete_in_level(txn, field_id, highest_level, facet_value, docids)?;
         match result {
             DeletionResult::InPlace => return Ok(()),
             DeletionResult::Reduce { .. } => return Ok(()),
```

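Judging from the diff, `size_left`, `size_right`, `group_size`, and `highest_level` already have the target type, so the `as u8` casts were no-ops; clippy flags these as `unnecessary_cast`. A tiny illustration with a stand-in struct (not milli's actual `FacetGroupValue`):

```rust
// Stand-in for the real FacetGroupValue; only the field type matters here.
struct GroupValue {
    size: u8,
}

fn main() {
    let size_left: u8 = 4;

    // `size_left` is already a u8, so writing `size_left as u8` would be a
    // no-op conversion, which is what clippy's `unnecessary_cast` reports.
    let value = GroupValue { size: size_left };
    assert_eq!(value.size, 4);
}
```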

```diff
@@ -80,7 +80,7 @@ pub fn extract_docid_word_positions<R: io::Read + io::Seek>(
                         .map_err(|_| SerializationError::InvalidNumberSerialization)?;
                     let position = absolute_from_relative_position(field_id, position);
                     docid_word_positions_sorter
-                        .insert(&key_buffer, &position.to_ne_bytes())?;
+                        .insert(&key_buffer, position.to_ne_bytes())?;
                 }
             }
         }
```

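This fix, repeated in the next few files, is clippy's `needless_borrow`: `to_ne_bytes()` already returns an owned array (`[u8; 4]` for a `u32`), and arrays satisfy `AsRef<[u8]>` directly, so the extra `&` adds nothing. A sketch with a hypothetical `insert` of the same general shape as the sorters' (the real signature may differ):

```rust
// Hypothetical sink: both parameters accept anything viewable as a byte
// slice, which is presumably why the borrow was flagged as needless.
fn insert(key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    println!("{} key bytes, {} value bytes", key.as_ref().len(), value.as_ref().len());
}

fn main() {
    let key_buffer: Vec<u8> = vec![0, 1, 2];
    let document_id: u32 = 42;

    // `to_ne_bytes()` yields an owned `[u8; 4]`; passing it by value works
    // because `[u8; 4]: AsRef<[u8]>`, so `&document_id.to_ne_bytes()` was redundant.
    insert(&key_buffer, document_id.to_ne_bytes());
}
```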

```diff
@@ -43,7 +43,7 @@ pub fn extract_facet_string_docids<R: io::Read + io::Seek>(
         let key_bytes = FacetGroupKeyCodec::<StrRefCodec>::bytes_encode(&key).unwrap();
         // document id is encoded in native-endian because of the CBO roaring bitmap codec
-        facet_string_docids_sorter.insert(&key_bytes, &document_id.to_ne_bytes())?;
+        facet_string_docids_sorter.insert(&key_bytes, document_id.to_ne_bytes())?;
     }
     sorter_into_reader(facet_string_docids_sorter, indexer)
```


```diff
@@ -145,7 +145,7 @@ fn document_word_positions_into_sorter(
         key_buffer.push(0);
         key_buffer.extend_from_slice(w2.as_bytes());
-        word_pair_proximity_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+        word_pair_proximity_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
     }
     Ok(())
```


```diff
@@ -41,7 +41,7 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
         key_buffer.extend_from_slice(word_bytes);
         key_buffer.extend_from_slice(&position.to_be_bytes());
-        word_position_docids_sorter.insert(&key_buffer, &document_id.to_ne_bytes())?;
+        word_position_docids_sorter.insert(&key_buffer, document_id.to_ne_bytes())?;
     }
 }
```


```diff
@@ -248,7 +248,7 @@ impl<'a, 'i> Transform<'a, 'i> {
                     skip_insertion = true;
                 } else {
                     // we associate the base document with the new key, everything will get merged later.
-                    self.original_sorter.insert(&docid.to_be_bytes(), base_obkv)?;
+                    self.original_sorter.insert(docid.to_be_bytes(), base_obkv)?;
                     match self.flatten_from_fields_ids_map(KvReader::new(base_obkv))? {
                         Some(buffer) => {
                             self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?
@@ -261,7 +261,7 @@ impl<'a, 'i> Transform<'a, 'i> {
             if !skip_insertion {
                 self.new_documents_ids.insert(docid);
                 // We use the extracted/generated user id as the key for this document.
-                self.original_sorter.insert(&docid.to_be_bytes(), obkv_buffer.clone())?;
+                self.original_sorter.insert(docid.to_be_bytes(), obkv_buffer.clone())?;
                 match self.flatten_from_fields_ids_map(KvReader::new(&obkv_buffer))? {
                     Some(buffer) => self.flattened_sorter.insert(docid.to_be_bytes(), &buffer)?,
```


```diff
@@ -36,7 +36,7 @@ impl<'t, 'u, 'i> WordsPrefixesFst<'t, 'u, 'i> {
     /// Default value is `4` bytes. This value must be between 1 and 25 will be clamped
     /// to these bounds, otherwise.
     pub fn max_prefix_length(&mut self, value: usize) -> &mut Self {
-        self.max_prefix_length = value.min(25).max(1); // clamp [1, 25]
+        self.max_prefix_length = value.clamp(1, 25);
        self
    }
```
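`value.min(25).max(1)` and `value.clamp(1, 25)` compute the same thing, but `clamp` names the intent and both bounds in one call; clippy suggests it via the `manual_clamp` lint, which was nightly-only around this time (possibly one reason the CI job pins `nightly`). A quick check of the equivalence:

```rust
fn main() {
    // `clamp(lo, hi)` pins a value into the inclusive range [lo, hi].
    assert_eq!(0usize.clamp(1, 25), 1);
    assert_eq!(10usize.clamp(1, 25), 10);
    assert_eq!(99usize.clamp(1, 25), 25);

    // Same results as the old `.min(25).max(1)` chain.
    for v in [0usize, 10, 99] {
        assert_eq!(v.clamp(1, 25), v.min(25).max(1));
    }
}
```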