Merge pull request #5480 from meilisearch/bump-rustc-version

Bump Rust version to 1.85.1
Louis Dureuil 2025-04-01 11:51:36 +00:00 committed by GitHub
commit 19f4c1ac98
93 changed files with 328 additions and 338 deletions
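
Most of the Rust-side churn below swaps `Option::map_or(false, …)` for `is_some_and` and `Option::map_or(true, …)` for `is_none_or`, which the lints on the newer toolchain prefer; `is_none_or` was only stabilized in Rust 1.82, so it becomes usable with this bump. A minimal standalone sketch of the equivalence (not code from this repository):

fn main() {
    let some: Option<u32> = Some(3);
    let none: Option<u32> = None;

    // `map_or(false, …)` asks "is it Some and does the predicate hold?"
    assert_eq!(some.map_or(false, |n| n > 2), some.is_some_and(|n| n > 2));

    // `map_or(true, …)` asks "is it None, or does the predicate hold?"
    assert_eq!(none.map_or(true, |n| n > 2), none.is_none_or(|n| n > 2));
}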

@@ -1,28 +1,27 @@
 name: Bench (manual)
 on:
 workflow_dispatch:
 inputs:
 workload:
-description: 'The path to the workloads to execute (workloads/...)'
+description: "The path to the workloads to execute (workloads/...)"
 required: true
-default: 'workloads/movies.json'
+default: "workloads/movies.json"
 env:
 WORKLOAD_NAME: ${{ github.event.inputs.workload }}
 jobs:
 benchmarks:
 name: Run and upload benchmarks
 runs-on: benchmarks
 timeout-minutes: 180 # 3h
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 - name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
 run: |
 cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}

@@ -1,82 +1,82 @@
 name: Bench (PR)
 on:
 issue_comment:
 types: [created]
 permissions:
 issues: write
 env:
 GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
 jobs:
 run-benchmarks-on-comment:
 if: startsWith(github.event.comment.body, '/bench')
 name: Run and upload benchmarks
 runs-on: benchmarks
 timeout-minutes: 180 # 3h
 steps:
 - name: Check permissions
 id: permission
 env:
 PR_AUTHOR: ${{github.event.issue.user.login }}
 COMMENT_AUTHOR: ${{github.event.comment.user.login }}
 REPOSITORY: ${{github.repository}}
 PR_ID: ${{github.event.issue.number}}
 run: |
 PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
 if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
 then
 echo "::notice title=Authentication success::PR author authenticated"
 else
 echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
 exit 1
 fi
 if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
 then
 echo "::notice title=Authentication success::Comment author authenticated"
 else
 echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
 exit 1
 fi
 if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
 then
 echo "::notice title=Authentication success::PR started from main repository"
 else
 echo "::error title=Authentication error::PR started from a fork"
 exit 1
 fi
 - name: Check for Command
 id: command
 uses: xt0rted/slash-command-action@v2
 with:
 command: bench
 reaction-type: "rocket"
 repo-token: ${{ env.GH_TOKEN }}
 - uses: xt0rted/pull-request-comment-branch@v3
 id: comment-branch
 with:
 repo_token: ${{ env.GH_TOKEN }}
 - uses: actions/checkout@v3
 if: success()
 with:
 fetch-depth: 0 # fetch full history to be able to get main commit sha
 ref: ${{ steps.comment-branch.outputs.head_ref }}
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 - name: Run benchmarks on PR ${{ github.event.issue.id }}
 run: |
 cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
 --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
 --reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
 -- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt
 - name: Send comment in PR
 run: |
 gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt

@@ -1,23 +1,22 @@
 name: Indexing bench (push)
 on:
 push:
 branches:
 - main
 jobs:
 benchmarks:
 name: Run and upload benchmarks
 runs-on: benchmarks
 timeout-minutes: 180 # 3h
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 # Run benchmarks
 - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
 run: |
 cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json

@@ -4,9 +4,9 @@ on:
 workflow_dispatch:
 inputs:
 dataset_name:
-description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
+description: "The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)"
 required: false
-default: 'search_songs'
+default: "search_songs"
 env:
 BENCH_NAME: ${{ github.event.inputs.dataset_name }}
@@ -18,7 +18,7 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
@@ -67,7 +67,7 @@ jobs:
 out_dir: critcmp_results
 # Helper
-- name: 'README: compare with another benchmark'
+- name: "README: compare with another benchmark"
 run: |
 echo "${{ steps.file.outputs.basename }}.json has just been pushed."
 echo 'How to compare this benchmark with another one?'

@@ -44,7 +44,7 @@ jobs:
 exit 1
 fi
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal

@@ -16,7 +16,7 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
@@ -69,7 +69,7 @@ jobs:
 run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
 # Helper
-- name: 'README: compare with another benchmark'
+- name: "README: compare with another benchmark"
 run: |
 echo "${{ steps.file.outputs.basename }}.json has just been pushed."
 echo 'How to compare this benchmark with another one?'

@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
@@ -68,7 +68,7 @@ jobs:
 run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
 # Helper
-- name: 'README: compare with another benchmark'
+- name: "README: compare with another benchmark"
 run: |
 echo "${{ steps.file.outputs.basename }}.json has just been pushed."
 echo 'How to compare this benchmark with another one?'

@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
@@ -68,7 +68,7 @@ jobs:
 run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
 # Helper
-- name: 'README: compare with another benchmark'
+- name: "README: compare with another benchmark"
 run: |
 echo "${{ steps.file.outputs.basename }}.json has just been pushed."
 echo 'How to compare this benchmark with another one?'

@@ -15,7 +15,7 @@ jobs:
 runs-on: benchmarks
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
@@ -68,7 +68,7 @@ jobs:
 run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
 # Helper
-- name: 'README: compare with another benchmark'
+- name: "README: compare with another benchmark"
 run: |
 echo "${{ steps.file.outputs.basename }}.json has just been pushed."
 echo 'How to compare this benchmark with another one?'

@@ -17,7 +17,7 @@ jobs:
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Install cargo-flaky
 run: cargo install cargo-flaky
 - name: Run cargo flaky in the dumps

@@ -12,7 +12,7 @@ jobs:
 timeout-minutes: 4320 # 72h
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal

@@ -25,7 +25,7 @@ jobs:
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Install cargo-deb
 run: cargo install cargo-deb
 - uses: actions/checkout@v3

@@ -45,7 +45,7 @@ jobs:
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Build
 run: cargo build --release --locked
 # No need to upload binaries for dry run (cron)
@@ -75,7 +75,7 @@ jobs:
 asset_name: meilisearch-windows-amd64.exe
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Build
 run: cargo build --release --locked
 # No need to upload binaries for dry run (cron)
@@ -101,7 +101,7 @@ jobs:
 - name: Checkout repository
 uses: actions/checkout@v3
 - name: Installing Rust toolchain
-uses: dtolnay/rust-toolchain@1.81
+uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 target: ${{ matrix.target }}
@@ -148,7 +148,7 @@ jobs:
 add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
 apt-get update -y && apt-get install -y docker-ce
 - name: Installing Rust toolchain
-uses: dtolnay/rust-toolchain@1.81
+uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 target: ${{ matrix.target }}

@@ -27,7 +27,7 @@ jobs:
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
 - name: Setup test with Rust stable
-uses: dtolnay/rust-toolchain@1.81
+uses: dtolnay/rust-toolchain@1.85
 - name: Cache dependencies
 uses: Swatinem/rust-cache@v2.7.7
 - name: Run cargo check without any default features
@@ -52,7 +52,7 @@ jobs:
 - uses: actions/checkout@v3
 - name: Cache dependencies
 uses: Swatinem/rust-cache@v2.7.7
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Run cargo check without any default features
 uses: actions-rs/cargo@v1
 with:
@@ -77,7 +77,7 @@ jobs:
 run: |
 apt-get update
 apt-get install --assume-yes build-essential curl
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Run cargo build with almost all features
 run: |
 cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
@@ -129,7 +129,7 @@ jobs:
 run: |
 apt-get update
 apt-get install --assume-yes build-essential curl
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Run cargo tree without default features and check lindera is not present
 run: |
 if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
@@ -153,7 +153,7 @@ jobs:
 run: |
 apt-get update && apt-get install -y curl
 apt-get install build-essential -y
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 - name: Cache dependencies
 uses: Swatinem/rust-cache@v2.7.7
 - name: Run tests in debug
@@ -167,7 +167,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 components: clippy
@@ -184,7 +184,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 toolchain: nightly-2024-07-09

@@ -4,7 +4,7 @@ on:
 workflow_dispatch:
 inputs:
 new_version:
-description: 'The new version (vX.Y.Z)'
+description: "The new version (vX.Y.Z)"
 required: true
 env:
@@ -18,7 +18,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- uses: dtolnay/rust-toolchain@1.81
+- uses: dtolnay/rust-toolchain@1.85
 with:
 profile: minimal
 - name: Install sd

Cargo.lock

@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4
 [[package]]
 name = "actix-codec"
@@ -758,9 +758,9 @@ dependencies = [
 [[package]]
 name = "bytemuck_derive"
-version = "1.6.0"
+version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
+checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1"
 dependencies = [
 "proc-macro2",
 "quote",

@@ -1,5 +1,5 @@
 # Compile
-FROM rust:1.81.0-alpine3.20 AS compiler
+FROM rust:1.85-alpine3.20 AS compiler
 RUN apk add -q --no-cache build-base openssl-dev

@@ -108,7 +108,7 @@ where
 /// not supported on untagged enums.
 struct StarOrVisitor<T>(PhantomData<T>);
-impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
+impl<T, FE> Visitor<'_> for StarOrVisitor<T>
 where
 T: FromStr<Err = FE>,
 FE: Display,

@@ -99,7 +99,7 @@ impl Task {
 /// Return true when a task is finished.
 /// A task is finished when its last state is either `Succeeded` or `Failed`.
 pub fn is_finished(&self) -> bool {
-self.events.last().map_or(false, |event| {
+self.events.last().is_some_and(|event| {
 matches!(event, TaskEvent::Succeded { .. } | TaskEvent::Failed { .. })
 })
 }

@@ -108,7 +108,7 @@ where
 /// not supported on untagged enums.
 struct StarOrVisitor<T>(PhantomData<T>);
-impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
+impl<T, FE> Visitor<'_> for StarOrVisitor<T>
 where
 T: FromStr<Err = FE>,
 FE: Display,

@@ -114,7 +114,7 @@ impl Task {
 /// Return true when a task is finished.
 /// A task is finished when its last state is either `Succeeded` or `Failed`.
 pub fn is_finished(&self) -> bool {
-self.events.last().map_or(false, |event| {
+self.events.last().is_some_and(|event| {
 matches!(event, TaskEvent::Succeeded { .. } | TaskEvent::Failed { .. })
 })
 }

@@ -35,7 +35,7 @@ impl<E> NomErrorExt<E> for nom::Err<E> {
 pub fn cut_with_err<'a, O>(
 mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
 mut with: impl FnMut(Error<'a>) -> Error<'a>,
-) -> impl FnMut(Span<'a>) -> IResult<O> {
+) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
 move |input| match parser.parse(input) {
 Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
 rest => rest,
@@ -121,7 +121,7 @@ impl<'a> ParseError<Span<'a>> for Error<'a> {
 }
 }
-impl<'a> Display for Error<'a> {
+impl Display for Error<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 let input = self.context.fragment();
 // When printing our error message we want to escape all `\n` to be sure we keep our format with the

@@ -80,7 +80,7 @@ pub struct Token<'a> {
 value: Option<String>,
 }
-impl<'a> PartialEq for Token<'a> {
+impl PartialEq for Token<'_> {
 fn eq(&self, other: &Self) -> bool {
 self.span.fragment() == other.span.fragment()
 }
@@ -226,7 +226,7 @@ impl<'a> FilterCondition<'a> {
 }
 }
-pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
+pub fn parse(input: &'a str) -> Result<Option<Self>, Error<'a>> {
 if input.trim().is_empty() {
 return Ok(None);
 }
@@ -527,7 +527,7 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
 terminated(|input| parse_expression(input, 0), eof)(input)
 }
-impl<'a> std::fmt::Display for FilterCondition<'a> {
+impl std::fmt::Display for FilterCondition<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 match self {
 FilterCondition::Not(filter) => {
@@ -576,7 +576,8 @@ impl<'a> std::fmt::Display for FilterCondition<'a> {
 }
 }
 }
-impl<'a> std::fmt::Display for Condition<'a> {
+impl std::fmt::Display for Condition<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 match self {
 Condition::GreaterThan(token) => write!(f, "> {token}"),
@@ -594,7 +595,8 @@ impl<'a> std::fmt::Display for Condition<'a> {
 }
 }
 }
-impl<'a> std::fmt::Display for Token<'a> {
+impl std::fmt::Display for Token<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 write!(f, "{{{}}}", self.value())
 }
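
Many of the Rust hunks in this commit follow the same shape as the ones above: a lifetime parameter that is only mentioned in the type being implemented is replaced by the anonymous `'_`, which the bumped toolchain's `needless_lifetimes`-style lints push for. A minimal sketch of the style, using a simplified `Token` with a made-up `text` field rather than the real struct:

struct Token<'a> {
    text: &'a str,
}

// Previously written as `impl<'a> std::fmt::Display for Token<'a>`;
// eliding the unused parameter keeps the impl exactly equivalent.
impl std::fmt::Display for Token<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{{{}}}", self.text)
    }
}

fn main() {
    println!("{}", Token { text: "hello" });
}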

@@ -52,7 +52,7 @@ fn quoted_by(quote: char, input: Span) -> IResult<Token> {
 }
 // word = (alphanumeric | _ | - | .)+ except for reserved keywords
-pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
+pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<'a, Token<'a>> {
 let (input, word): (_, Token<'a>) =
 take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
 if is_keyword(word.value()) {

@@ -696,7 +696,7 @@ impl IndexScheduler {
 written: usize,
 }
-impl<'a, 'b> Read for TaskReader<'a, 'b> {
+impl Read for TaskReader<'_, '_> {
 fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
 if self.buffer.is_empty() {
 match self.tasks.next() {

@@ -315,7 +315,7 @@ impl Queue {
 if let Some(batch_uids) = batch_uids {
 let mut batch_tasks = RoaringBitmap::new();
 for batch_uid in batch_uids {
-if processing_batch.as_ref().map_or(false, |batch| batch.uid == *batch_uid) {
+if processing_batch.as_ref().is_some_and(|batch| batch.uid == *batch_uid) {
 batch_tasks |= &**processing_tasks;
 } else {
 batch_tasks |= self.tasks_in_batch(rtxn, *batch_uid)?;

@@ -219,7 +219,7 @@ impl BatchKind {
 primary_key.is_some() &&
 // 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
 // 2.1.2 If the task don't have a primary-key -> we can continue
-kind.primary_key().map_or(true, |pk| pk == primary_key)
+kind.primary_key().is_none_or(|pk| pk == primary_key)
 ) ||
 // 2.2 If we don't have a primary-key ->
 (

@@ -960,7 +960,7 @@ impl<'de> Deserialize<'de> for RankingRuleView {
 D: serde::Deserializer<'de>,
 {
 struct Visitor;
-impl<'de> serde::de::Visitor<'de> for Visitor {
+impl serde::de::Visitor<'_> for Visitor {
 type Value = RankingRuleView;
 fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
 write!(formatter, "the name of a valid ranking rule (string)")

@@ -66,7 +66,7 @@ where
 /// not supported on untagged enums.
 struct StarOrVisitor<T>(PhantomData<T>);
-impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
+impl<T, FE> Visitor<'_> for StarOrVisitor<T>
 where
 T: FromStr<Err = FE>,
 FE: fmt::Display,

@@ -346,7 +346,7 @@ fn open_or_create_database_unchecked(
 match (
 index_scheduler_builder(),
 auth_controller.map_err(anyhow::Error::from),
-create_current_version_file(&opt.db_path).map_err(anyhow::Error::from),
+create_current_version_file(&opt.db_path),
 ) {
 (Ok(i), Ok(a), Ok(())) => Ok((i, a)),
 (Err(e), _, _) | (_, Err(e), _) | (_, _, Err(e)) => {

@@ -69,7 +69,7 @@ fn setup(opt: &Opt) -> anyhow::Result<(LogRouteHandle, LogStderrHandle)> {
 Ok((route_layer_handle, stderr_layer_handle))
 }
-fn on_panic(info: &std::panic::PanicInfo) {
+fn on_panic(info: &std::panic::PanicHookInfo) {
 let info = info.to_string().replace('\n', " ");
 tracing::error!(%info);
 }
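
This hunk tracks a standard-library rename rather than a Meilisearch-specific change: on recent toolchains the value handed to a panic hook is `std::panic::PanicHookInfo`, and `PanicInfo` in that position is a deprecated alias. A runnable sketch of the same hook outside this codebase, with `eprintln!` standing in for the `tracing` call:

use std::panic::PanicHookInfo;

fn on_panic(info: &PanicHookInfo) {
    // Same normalization as the hook above: collapse the message to one line.
    let info = info.to_string().replace('\n', " ");
    eprintln!("panic: {info}");
}

fn main() {
    std::panic::set_hook(Box::new(on_panic));
    let _ = std::panic::catch_unwind(|| {
        panic!("boom");
    });
}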

@@ -940,7 +940,6 @@ where
 }
 /// Functions used to get default value for `Opt` fields, needs to be function because of serde's default attribute.
 fn default_db_path() -> PathBuf {
 PathBuf::from(DEFAULT_DB_PATH)
 }
@@ -1048,7 +1047,7 @@ where
 {
 struct BoolOrInt;
-impl<'de> serde::de::Visitor<'de> for BoolOrInt {
+impl serde::de::Visitor<'_> for BoolOrInt {
 type Value = ScheduleSnapshot;
 fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {

@@ -302,7 +302,7 @@ impl From<FacetSearchQuery> for SearchQuery {
 // If exhaustive_facet_count is true, we need to set the page to 0
 // because the facet search is not exhaustive by default.
-let page = if exhaustive_facet_count.map_or(false, |exhaustive| exhaustive) {
+let page = if exhaustive_facet_count.is_some_and(|exhaustive| exhaustive) {
 // setting the page to 0 will force the search to be exhaustive when computing the number of hits,
 // but it will skip the bucket sort saving time.
 Some(0)

@@ -170,7 +170,7 @@ pub fn is_dry_run(req: &HttpRequest, opt: &Opt) -> Result<bool, ResponseError> {
 })
 })
 .transpose()?
-.map_or(false, |s| s.to_lowercase() == "true"))
+.is_some_and(|s| s.to_lowercase() == "true"))
 }
 #[derive(Debug, Serialize, ToSchema)]

@@ -32,7 +32,6 @@ pub const FEDERATION_REMOTE: &str = "remote";
 #[derive(Debug, Default, Clone, PartialEq, Serialize, deserr::Deserr, ToSchema)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]
 pub struct FederationOptions {
 #[deserr(default, error = DeserrJsonError<InvalidMultiSearchWeight>)]
 #[schema(value_type = f64)]

@@ -1544,7 +1544,7 @@ pub fn perform_facet_search(
 let locales = localized_attributes_locales.map(|attr| {
 attr.locales
 .into_iter()
-.filter(|locale| locales.as_ref().map_or(true, |locales| locales.contains(locale)))
+.filter(|locale| locales.as_ref().is_none_or(|locales| locales.contains(locale)))
 .collect()
 });

@@ -259,7 +259,7 @@ impl<'a> Index<'a, Owned> {
 }
 }
-impl<'a> Index<'a, Shared> {
+impl Index<'_, Shared> {
 /// You cannot modify the content of a shared index, thus the delete_document_by_filter call
 /// must fail. If the task successfully enqueue itself, we'll wait for the task to finishes,
 /// and if it succeed the function will panic.

@@ -271,7 +271,7 @@ fn fetch_matching_values_in_object(
 }
 fn starts_with(selector: &str, key: &str) -> bool {
-selector.strip_prefix(key).map_or(false, |tail| {
+selector.strip_prefix(key).is_some_and(|tail| {
 tail.chars().next().map(|c| c == PRIMARY_KEY_SPLIT_SYMBOL).unwrap_or(true)
 })
 }

@@ -27,7 +27,7 @@ impl<'a, W> DocumentVisitor<'a, W> {
 }
 }
-impl<'a, 'de, W: Write> Visitor<'de> for &mut DocumentVisitor<'a, W> {
+impl<'de, W: Write> Visitor<'de> for &mut DocumentVisitor<'_, W> {
 /// This Visitor value is nothing, since it write the value to a file.
 type Value = Result<(), Error>;
@@ -61,7 +61,7 @@ impl<'a, 'de, W: Write> Visitor<'de> for &mut DocumentVisitor<'a, W> {
 }
 }
-impl<'a, 'de, W> DeserializeSeed<'de> for &mut DocumentVisitor<'a, W>
+impl<'de, W> DeserializeSeed<'de> for &mut DocumentVisitor<'_, W>
 where
 W: Write,
 {

@@ -151,7 +151,7 @@ and can not be more than 511 bytes.", .document_id.to_string()
 matching_rule_indices: HashMap<String, usize>,
 },
 #[error(transparent)]
-InvalidGeoField(#[from] GeoError),
+InvalidGeoField(#[from] Box<GeoError>),
 #[error("Invalid vector dimensions: expected: `{}`, found: `{}`.", .expected, .found)]
 InvalidVectorDimensions { expected: usize, found: usize },
 #[error("The `_vectors` field in the document with id: `{document_id}` is not an object. Was expecting an object with a key for each embedder with manually provided vectors, but instead got `{value}`")]
@@ -519,7 +519,7 @@ error_from_sub_error! {
 str::Utf8Error => InternalError,
 ThreadPoolBuildError => InternalError,
 SerializationError => InternalError,
-GeoError => UserError,
+Box<GeoError> => UserError,
 CriterionError => UserError,
 }
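
Boxing the `GeoError` payload keeps the error enum itself small: every `Result` that carries it pays only for a pointer instead of the full inline `GeoError`, and callers now wrap the value at construction time (see the `Box::new(user_error)` hunk further down). A standalone sketch of the size effect, with a made-up `BigGeoError` standing in for a variant with large fields:

#[allow(dead_code)]
struct BigGeoError {
    document_id: String,
    lat: f64,
    lng: f64,
    raw_value: [u8; 256],
}

#[allow(dead_code)]
enum UnboxedError {
    InvalidGeoField(BigGeoError),
    Other,
}

#[allow(dead_code)]
enum BoxedError {
    InvalidGeoField(Box<BigGeoError>),
    Other,
}

fn main() {
    // The unboxed enum is at least as large as its largest variant;
    // the boxed version only needs room for a pointer.
    println!("unboxed: {} bytes", std::mem::size_of::<UnboxedError>());
    println!("boxed:   {} bytes", std::mem::size_of::<BoxedError>());
}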

@@ -25,7 +25,7 @@ impl ExternalDocumentsIds {
 /// Returns `true` if hard and soft external documents lists are empty.
 pub fn is_empty(&self, rtxn: &RoTxn<'_>) -> heed::Result<bool> {
-self.0.is_empty(rtxn).map_err(Into::into)
+self.0.is_empty(rtxn)
 }
 pub fn get<A: AsRef<str>>(

@@ -119,7 +119,7 @@ impl<'indexing> GlobalFieldsIdsMap<'indexing> {
 }
 }
-impl<'indexing> MutFieldIdMapper for GlobalFieldsIdsMap<'indexing> {
+impl MutFieldIdMapper for GlobalFieldsIdsMap<'_> {
 fn insert(&mut self, name: &str) -> Option<FieldId> {
 self.id_or_insert(name)
 }

@@ -2954,10 +2954,15 @@ pub(crate) mod tests {
 documents!({ "id" : 6, RESERVED_GEO_FIELD_NAME: {"lat": "unparseable", "lng": "unparseable"}}),
 )
 .unwrap_err();
-assert!(matches!(
-err1,
-Error::UserError(UserError::InvalidGeoField(GeoError::BadLatitudeAndLongitude { .. }))
-));
+match err1 {
+Error::UserError(UserError::InvalidGeoField(err)) => match *err {
+GeoError::BadLatitudeAndLongitude { .. } => (),
+otherwise => {
+panic!("err1 is not a BadLatitudeAndLongitude error but rather a {otherwise:?}")
+}
+},
+_ => panic!("err1 is not a BadLatitudeAndLongitude error but rather a {err1:?}"),
+}
 db_snap!(index, geo_faceted_documents_ids); // ensure that no more document was inserted
 }

@@ -204,7 +204,7 @@ pub fn relative_from_absolute_position(absolute: Position) -> (FieldId, Relative
 // Compute the absolute word position with the field id of the attribute and relative position in the attribute.
 pub fn absolute_from_relative_position(field_id: FieldId, relative: RelativePosition) -> Position {
-(field_id as u32) << 16 | (relative as u32)
+((field_id as u32) << 16) | (relative as u32)
 }
 // TODO: this is wrong, but will do for now
 /// Compute the "bucketed" absolute position from the field id and relative position in the field.
@@ -372,7 +372,7 @@ pub fn is_faceted(field: &str, faceted_fields: impl IntoIterator<Item = impl AsR
 /// assert!(!is_faceted_by("animaux.chien", "animaux.chie"));
 /// ```
 pub fn is_faceted_by(field: &str, facet: &str) -> bool {
-field.starts_with(facet) && field[facet.len()..].chars().next().map_or(true, |c| c == '.')
+field.starts_with(facet) && field[facet.len()..].chars().next().is_none_or(|c| c == '.')
 }
 pub fn normalize_facet(original: &str) -> String {
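
The parentheses added in `absolute_from_relative_position` (and in the `SmallBitmapInternal::contains` hunk later on) do not change behavior: `<<` already binds tighter than `|` and `&` in Rust, so they only spell the grouping out, presumably to satisfy a precedence lint on the newer toolchain. A tiny check of the equivalence:

fn main() {
    let field_id: u16 = 5;
    let relative: u16 = 42;

    // With and without the explicit grouping, the shift happens first.
    let implicit = (field_id as u32) << 16 | (relative as u32);
    let explicit = ((field_id as u32) << 16) | (relative as u32);

    assert_eq!(implicit, explicit);
    assert_eq!(explicit, 0x0005_002A);
}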

@@ -15,7 +15,7 @@ impl<'a, D: ObjectView, F: ArrayView> Context<'a, D, F> {
 }
 }
-impl<'a, D: ObjectView, F: ArrayView> ObjectView for Context<'a, D, F> {
+impl<D: ObjectView, F: ArrayView> ObjectView for Context<'_, D, F> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -52,7 +52,7 @@ impl<'a, D: ObjectView, F: ArrayView> ObjectView for Context<'a, D, F> {
 }
 }
-impl<'a, D: ObjectView, F: ArrayView> ValueView for Context<'a, D, F> {
+impl<D: ObjectView, F: ArrayView> ValueView for Context<'_, D, F> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }

@@ -67,7 +67,7 @@ impl<'a> Document<'a> {
 }
 }
-impl<'a> ObjectView for Document<'a> {
+impl ObjectView for Document<'_> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -98,7 +98,7 @@ impl<'a> ObjectView for Document<'a> {
 }
 }
-impl<'a> ValueView for Document<'a> {
+impl ValueView for Document<'_> {
 fn as_debug(&self) -> &dyn Debug {
 self
 }
@@ -283,7 +283,7 @@ impl<'doc> ParseableArray<'doc> {
 }
 }
-impl<'doc> ArrayView for ParseableArray<'doc> {
+impl ArrayView for ParseableArray<'_> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -311,7 +311,7 @@ impl<'doc> ArrayView for ParseableArray<'doc> {
 }
 }
-impl<'doc> ValueView for ParseableArray<'doc> {
+impl ValueView for ParseableArray<'_> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }
@@ -353,7 +353,7 @@ impl<'doc> ValueView for ParseableArray<'doc> {
 }
 }
-impl<'doc> ObjectView for ParseableMap<'doc> {
+impl ObjectView for ParseableMap<'_> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -392,7 +392,7 @@ impl<'doc> ObjectView for ParseableMap<'doc> {
 }
 }
-impl<'doc> ValueView for ParseableMap<'doc> {
+impl ValueView for ParseableMap<'_> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }
@@ -441,7 +441,7 @@ impl<'doc> ValueView for ParseableMap<'doc> {
 }
 }
-impl<'doc> ValueView for ParseableValue<'doc> {
+impl ValueView for ParseableValue<'_> {
 fn as_debug(&self) -> &dyn Debug {
 self
 }
@@ -622,7 +622,7 @@ struct ArraySource<'s, 'doc> {
 s: &'s RawVec<'doc>,
 }
-impl<'s, 'doc> fmt::Display for ArraySource<'s, 'doc> {
+impl fmt::Display for ArraySource<'_, '_> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 write!(f, "[")?;
 for item in self.s {
@@ -638,7 +638,7 @@ struct ArrayRender<'s, 'doc> {
 s: &'s RawVec<'doc>,
 }
-impl<'s, 'doc> fmt::Display for ArrayRender<'s, 'doc> {
+impl fmt::Display for ArrayRender<'_, '_> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 for item in self.s {
 let v = ParseableValue::new(item, self.s.bump());

@@ -17,7 +17,7 @@ pub struct FieldValue<'a, D: ObjectView> {
 metadata: Metadata,
 }
-impl<'a, D: ObjectView> ValueView for FieldValue<'a, D> {
+impl<D: ObjectView> ValueView for FieldValue<'_, D> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }
@@ -78,7 +78,7 @@ impl<'a, D: ObjectView> FieldValue<'a, D> {
 }
 }
-impl<'a, D: ObjectView> ObjectView for FieldValue<'a, D> {
+impl<D: ObjectView> ObjectView for FieldValue<'_, D> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -148,7 +148,7 @@ impl<'a, 'map, D: ObjectView> BorrowedFields<'a, 'map, D> {
 }
 }
-impl<'a, D: ObjectView> ArrayView for OwnedFields<'a, D> {
+impl<D: ObjectView> ArrayView for OwnedFields<'_, D> {
 fn as_value(&self) -> &dyn ValueView {
 self.0.as_value()
 }
@@ -170,7 +170,7 @@ impl<'a, D: ObjectView> ArrayView for OwnedFields<'a, D> {
 }
 }
-impl<'a, 'map, D: ObjectView> ArrayView for BorrowedFields<'a, 'map, D> {
+impl<D: ObjectView> ArrayView for BorrowedFields<'_, '_, D> {
 fn as_value(&self) -> &dyn ValueView {
 self
 }
@@ -212,7 +212,7 @@ impl<'a, 'map, D: ObjectView> ArrayView for BorrowedFields<'a, 'map, D> {
 }
 }
-impl<'a, 'map, D: ObjectView> ValueView for BorrowedFields<'a, 'map, D> {
+impl<D: ObjectView> ValueView for BorrowedFields<'_, '_, D> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }
@@ -254,7 +254,7 @@ impl<'a, 'map, D: ObjectView> ValueView for BorrowedFields<'a, 'map, D> {
 }
 }
-impl<'a, D: ObjectView> ValueView for OwnedFields<'a, D> {
+impl<D: ObjectView> ValueView for OwnedFields<'_, D> {
 fn as_debug(&self) -> &dyn std::fmt::Debug {
 self
 }
@@ -292,7 +292,7 @@ struct ArraySource<'a, 'map, D: ObjectView> {
 s: &'a BorrowedFields<'a, 'map, D>,
 }
-impl<'a, 'map, D: ObjectView> fmt::Display for ArraySource<'a, 'map, D> {
+impl<D: ObjectView> fmt::Display for ArraySource<'_, '_, D> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 write!(f, "[")?;
 for item in self.s.values() {
@@ -307,7 +307,7 @@ struct ArrayRender<'a, 'map, D: ObjectView> {
 s: &'a BorrowedFields<'a, 'map, D>,
 }
-impl<'a, 'map, D: ObjectView> fmt::Display for ArrayRender<'a, 'map, D> {
+impl<D: ObjectView> fmt::Display for ArrayRender<'_, '_, D> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 for item in self.s.values() {
 write!(f, "{}", item.render())?;

@@ -358,7 +358,7 @@ impl<'a> FacetDistribution<'a> {
 ) -> bool {
 // If the field is not filterable, we don't want to compute the facet distribution.
 if !matching_features(name, filterable_attributes_rules)
-.map_or(false, |(_, features)| features.is_filterable())
+.is_some_and(|(_, features)| features.is_filterable())
 {
 return false;
 }
@@ -383,8 +383,7 @@ impl<'a> FacetDistribution<'a> {
 if let Some(facets) = &self.facets {
 for field in facets.keys() {
 let matched_rule = matching_features(field, filterable_attributes_rules);
-let is_filterable =
-matched_rule.map_or(false, |(_, features)| features.is_filterable());
+let is_filterable = matched_rule.is_some_and(|(_, f)| f.is_filterable());
 if !is_filterable {
 invalid_facets.insert(field.to_string());

@@ -79,7 +79,7 @@ struct FacetRangeSearch<'t, 'b, 'bitmap> {
 docids: &'bitmap mut RoaringBitmap,
 }
-impl<'t, 'b, 'bitmap> FacetRangeSearch<'t, 'b, 'bitmap> {
+impl<'t> FacetRangeSearch<'t, '_, '_> {
 fn run_level_0(&mut self, starting_left_bound: &'t [u8], group_size: usize) -> Result<()> {
 let left_key =
 FacetGroupKey { field_id: self.field_id, level: 0, left_bound: starting_left_bound };

@@ -62,7 +62,7 @@ struct AscendingFacetSort<'t, 'e> {
 )>,
 }
-impl<'t, 'e> Iterator for AscendingFacetSort<'t, 'e> {
+impl<'t> Iterator for AscendingFacetSort<'t, '_> {
 type Item = Result<(RoaringBitmap, &'t [u8])>;
 fn next(&mut self) -> Option<Self::Item> {

@@ -66,15 +66,15 @@ enum FilterError<'a> {
 ParseGeoError(BadGeoError),
 TooDeep,
 }
-impl<'a> std::error::Error for FilterError<'a> {}
+impl std::error::Error for FilterError<'_> {}
-impl<'a> From<BadGeoError> for FilterError<'a> {
+impl From<BadGeoError> for FilterError<'_> {
 fn from(geo_error: BadGeoError) -> Self {
 FilterError::ParseGeoError(geo_error)
 }
 }
-impl<'a> Display for FilterError<'a> {
+impl Display for FilterError<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 match self {
 Self::AttributeNotFilterable { attribute, filterable_patterns } => {
@@ -237,7 +237,7 @@ impl<'a> Filter<'a> {
 for fid in self.condition.fids(MAX_FILTER_DEPTH) {
 let attribute = fid.value();
 if matching_features(attribute, &filterable_attributes_rules)
-.map_or(false, |(_, features)| features.is_filterable())
+.is_some_and(|(_, features)| features.is_filterable())
 {
 continue;
 }
@@ -461,7 +461,7 @@ impl<'a> Filter<'a> {
 filterable_attribute_rules: &[FilterableAttributesRule],
 universe: Option<&RoaringBitmap>,
 ) -> Result<RoaringBitmap> {
-if universe.map_or(false, |u| u.is_empty()) {
+if universe.is_some_and(|u| u.is_empty()) {
 return Ok(RoaringBitmap::new());
 }

@@ -77,7 +77,7 @@ impl<'a> SearchForFacetValues<'a> {
 let filterable_attributes_rules = index.filterable_attributes_rules(rtxn)?;
 let matched_rule = matching_features(&self.facet, &filterable_attributes_rules);
 let is_facet_searchable =
-matched_rule.map_or(false, |(_, features)| features.is_facet_searchable());
+matched_rule.is_some_and(|(_, features)| features.is_facet_searchable());
 if !is_facet_searchable {
 let matching_field_names =
@@ -135,7 +135,7 @@ impl<'a> SearchForFacetValues<'a> {
 if authorize_typos && field_authorizes_typos {
 let exact_words_fst = self.search_query.index.exact_words(rtxn)?;
-if exact_words_fst.map_or(false, |fst| fst.contains(query)) {
+if exact_words_fst.is_some_and(|fst| fst.contains(query)) {
 if fst.contains(query) {
 self.fetch_original_facets_using_normalized(
 fid,

@@ -151,7 +151,7 @@ impl ScoreWithRatioResult {
 }
 }
-impl<'a> Search<'a> {
+impl Search<'_> {
 #[tracing::instrument(level = "trace", skip_all, target = "search::hybrid")]
 pub fn execute_hybrid(&self, semantic_ratio: f32) -> Result<(SearchResult, Option<u32>)> {
 // TODO: find classier way to achieve that than to reset vector and query params

@@ -191,8 +191,7 @@ impl<'a> Search<'a> {
 let filterable_fields = ctx.index.filterable_attributes_rules(ctx.txn)?;
 // check if the distinct field is in the filterable fields
 let matched_rule = matching_features(distinct, &filterable_fields);
-let is_filterable =
-matched_rule.map_or(false, |(_, features)| features.is_filterable());
+let is_filterable = matched_rule.is_some_and(|(_, features)| features.is_filterable());
 if !is_filterable {
 // if not, remove the hidden fields from the filterable fields to generate the error message

@@ -537,7 +537,7 @@ impl<'ctx> SearchContext<'ctx> {
 fid: u16,
 ) -> Result<Option<RoaringBitmap>> {
 // if the requested fid isn't in the restricted list, return None.
-if self.restricted_fids.as_ref().map_or(false, |fids| !fids.contains(&fid)) {
+if self.restricted_fids.as_ref().is_some_and(|fids| !fids.contains(&fid)) {
 return Ok(None);
 }
@@ -558,7 +558,7 @@ impl<'ctx> SearchContext<'ctx> {
 fid: u16,
 ) -> Result<Option<RoaringBitmap>> {
 // if the requested fid isn't in the restricted list, return None.
-if self.restricted_fids.as_ref().map_or(false, |fids| !fids.contains(&fid)) {
+if self.restricted_fids.as_ref().is_some_and(|fids| !fids.contains(&fid)) {
 return Ok(None);
 }

@@ -72,7 +72,7 @@ pub fn find_best_match_interval(matches: &[Match], crop_size: usize) -> [&Match;
 let interval_score = get_interval_score(&matches[interval_first..=interval_last]);
 let is_interval_score_better = &best_interval
 .as_ref()
-.map_or(true, |MatchIntervalWithScore { score, .. }| interval_score > *score);
+.is_none_or(|MatchIntervalWithScore { score, .. }| interval_score > *score);
 if *is_interval_score_better {
 best_interval = Some(MatchIntervalWithScore {

@@ -123,7 +123,7 @@ pub struct Matcher<'t, 'tokenizer, 'b, 'lang> {
 matches: Option<(Vec<Token<'t>>, Vec<Match>)>,
 }
-impl<'t, 'tokenizer> Matcher<'t, 'tokenizer, '_, '_> {
+impl<'t> Matcher<'t, '_, '_, '_> {
 /// Iterates over tokens and save any of them that matches the query.
 fn compute_matches(&mut self) -> &mut Self {
 /// some words are counted as matches only if they are close together and in the good order,

@@ -327,7 +327,7 @@ impl QueryGraph {
 let mut peekable = term_with_frequency.into_iter().peekable();
 while let Some((idx, frequency)) = peekable.next() {
 term_weight.insert(idx, weight);
-if peekable.peek().map_or(false, |(_, f)| frequency != *f) {
+if peekable.peek().is_some_and(|(_, f)| frequency != *f) {
 weight += 1;
 }
 }

@@ -418,7 +418,7 @@ fn split_best_frequency(
 let right = ctx.word_interner.insert(right.to_owned());
 if let Some(frequency) = ctx.get_db_word_pair_proximity_docids_len(None, left, right, 1)? {
-if best.map_or(true, |(old, _, _)| frequency > old) {
+if best.is_none_or(|(old, _, _)| frequency > old) {
 best = Some((frequency, left, right));
 }
 }

@@ -203,7 +203,7 @@ pub fn number_of_typos_allowed<'ctx>(
 Ok(Box::new(move |word: &str| {
 if !authorize_typos
 || word.len() < min_len_one_typo as usize
-|| exact_words.as_ref().map_or(false, |fst| fst.contains(word))
+|| exact_words.as_ref().is_some_and(|fst| fst.contains(word))
 {
 0
 } else if word.len() < min_len_two_typos as usize {

@@ -17,7 +17,7 @@ use crate::Result;
 pub struct PhraseDocIdsCache {
 pub cache: FxHashMap<Interned<Phrase>, RoaringBitmap>,
 }
-impl<'ctx> SearchContext<'ctx> {
+impl SearchContext<'_> {
 /// Get the document ids associated with the given phrase
 pub fn get_phrase_docids(&mut self, phrase: Interned<Phrase>) -> Result<&RoaringBitmap> {
 if self.phrase_docids.cache.contains_key(&phrase) {


@@ -263,7 +263,7 @@ impl SmallBitmapInternal {
 pub fn contains(&self, x: u16) -> bool {
 let (set, x) = self.get_set_index(x);
-set & 0b1 << x != 0
+set & (0b1 << x) != 0
 }
 pub fn insert(&mut self, x: u16) {
@@ -381,7 +381,7 @@ pub enum SmallBitmapInternalIter<'b> {
 Tiny(u64),
 Small { cur: u64, next: &'b [u64], base: u16 },
 }
-impl<'b> Iterator for SmallBitmapInternalIter<'b> {
+impl Iterator for SmallBitmapInternalIter<'_> {
 type Item = u16;
 fn next(&mut self) -> Option<Self::Item> {

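The parentheses added to `contains` above do not change behaviour: in Rust, `<<` binds tighter than `&`, which binds tighter than `!=`, so the expression already grouped as `(set & (0b1 << x)) != 0`. The change only makes that grouping explicit. A stand-alone check, with `contains_bit` as an invented stand-in for the real method:

// Same grouping, written out explicitly; behaviour is identical with or without the parentheses.
fn contains_bit(set: u64, x: u16) -> bool {
    set & (0b1 << x) != 0
}

fn main() {
    let set = 0b1010u64;
    assert!(contains_bit(set, 1));
    assert!(!contains_bit(set, 2));
    println!("bit checks pass");
}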

@@ -101,8 +101,7 @@ impl FacetsUpdateIncremental {
 let key = FacetGroupKeyCodec::<BytesRefCodec>::bytes_decode(key)
 .map_err(heed::Error::Encoding)?;
-if facet_level_may_be_updated
-&& current_field_id.map_or(false, |fid| fid != key.field_id)
+if facet_level_may_be_updated && current_field_id.is_some_and(|fid| fid != key.field_id)
 {
 // Only add or remove a level after making all the field modifications.
 self.inner.add_or_delete_level(wtxn, current_field_id.unwrap())?;
@@ -530,8 +529,8 @@ impl FacetsUpdateIncrementalInner {
 add_docids: Option<&RoaringBitmap>,
 del_docids: Option<&RoaringBitmap>,
 ) -> Result<bool> {
-if add_docids.map_or(true, RoaringBitmap::is_empty)
-&& del_docids.map_or(true, RoaringBitmap::is_empty)
+if add_docids.is_none_or(RoaringBitmap::is_empty)
+&& del_docids.is_none_or(RoaringBitmap::is_empty)
 {
 return Ok(false);
 }
@@ -670,7 +669,7 @@ impl FacetsUpdateIncrementalInner {
 }
 }
-impl<'a> FacetGroupKey<&'a [u8]> {
+impl FacetGroupKey<&[u8]> {
 pub fn into_owned(self) -> FacetGroupKey<Vec<u8>> {
 FacetGroupKey {
 field_id: self.field_id,


@@ -115,7 +115,7 @@ pub fn enrich_documents_batch<R: Read + Seek>(
 if let Some(geo_value) = geo_field_id.and_then(|fid| document.get(fid)) {
 if let Err(user_error) = validate_geo_from_json(&document_id, geo_value)? {
-return Ok(Err(UserError::from(user_error)));
+return Ok(Err(UserError::from(Box::new(user_error))));
 }
 }


@@ -160,11 +160,11 @@ pub fn extract_fid_docid_facet_values<R: io::Read + io::Seek>(
 let del_geo_support = settings_diff
 .old
 .geo_fields_ids
-.map_or(false, |(lat, lng)| field_id == lat || field_id == lng);
+.is_some_and(|(lat, lng)| field_id == lat || field_id == lng);
 let add_geo_support = settings_diff
 .new
 .geo_fields_ids
-.map_or(false, |(lat, lng)| field_id == lat || field_id == lng);
+.is_some_and(|(lat, lng)| field_id == lat || field_id == lng);
 let del_filterable_values =
 del_value.map(|value| extract_facet_values(&value, del_geo_support));
 let add_filterable_values =


@@ -80,22 +80,28 @@ fn extract_lat_lng(
 let (lat, lng) = match (lat, lng) {
 (Some(lat), Some(lng)) => (lat, lng),
 (Some(_), None) => {
-return Err(GeoError::MissingLatitude { document_id: document_id() }.into())
+return Err(
+Box::new(GeoError::MissingLatitude { document_id: document_id() }).into()
+)
 }
 (None, Some(_)) => {
-return Err(GeoError::MissingLongitude { document_id: document_id() }.into())
+return Err(
+Box::new(GeoError::MissingLongitude { document_id: document_id() }).into()
+)
 }
 (None, None) => return Ok(None),
 };
 let lat = extract_finite_float_from_value(
 serde_json::from_slice(lat).map_err(InternalError::SerdeJson)?,
 )
-.map_err(|lat| GeoError::BadLatitude { document_id: document_id(), value: lat })?;
+.map_err(|lat| GeoError::BadLatitude { document_id: document_id(), value: lat })
+.map_err(Box::new)?;
 let lng = extract_finite_float_from_value(
 serde_json::from_slice(lng).map_err(InternalError::SerdeJson)?,
 )
-.map_err(|lng| GeoError::BadLongitude { document_id: document_id(), value: lng })?;
+.map_err(|lng| GeoError::BadLongitude { document_id: document_id(), value: lng })
+.map_err(Box::new)?;
 Ok(Some([lat, lng]))
 }
 None => Ok(None),

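From here on the geo extraction code boxes `GeoError` before converting it into the surrounding error type, presumably to keep the enclosing `Result`'s error variant small. A hedged sketch of that shape; `GeoError` and `Outer` below are invented stand-ins, not the crate's real error types:

// Sketch of the "build the error, box it, then let .into() lift it" pattern from the hunk above.
#[derive(Debug)]
enum GeoError {
    MissingLatitude { document_id: String },
}

#[derive(Debug)]
enum Outer {
    Geo(Box<GeoError>),
}

impl From<Box<GeoError>> for Outer {
    fn from(e: Box<GeoError>) -> Self {
        Outer::Geo(e)
    }
}

fn check_latitude(lat: Option<f64>, document_id: &str) -> Result<f64, Outer> {
    lat.ok_or_else(|| Box::new(GeoError::MissingLatitude { document_id: document_id.to_owned() }).into())
}

fn main() {
    println!("{:?}", check_latitude(Some(48.85), "doc-1"));
    println!("{:?}", check_latitude(None, "doc-2"));
}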

@@ -69,7 +69,7 @@ pub fn extract_word_pair_proximity_docids<R: io::Read + io::Seek>(
 let document_id = u32::from_be_bytes(document_id_bytes);
 // if we change document, we fill the sorter
-if current_document_id.map_or(false, |id| id != document_id) {
+if current_document_id.is_some_and(|id| id != document_id) {
 // FIXME: span inside of a hot loop might degrade performance and create big reports
 let span = tracing::trace_span!(target: "indexing::details", "document_into_sorter");
 let _entered = span.enter();
@@ -96,7 +96,7 @@ pub fn extract_word_pair_proximity_docids<R: io::Read + io::Seek>(
 if let Some(deletion) = KvReaderDelAdd::from_slice(value).get(DelAdd::Deletion) {
 for (position, word) in KvReaderU16::from_slice(deletion).iter() {
 // drain the proximity window until the head word is considered close to the word we are inserting.
-while del_word_positions.front().map_or(false, |(_w, p)| {
+while del_word_positions.front().is_some_and(|(_w, p)| {
 index_proximity(*p as u32, position as u32) >= MAX_DISTANCE
 }) {
 word_positions_into_word_pair_proximity(
@@ -129,7 +129,7 @@ pub fn extract_word_pair_proximity_docids<R: io::Read + io::Seek>(
 if let Some(addition) = KvReaderDelAdd::from_slice(value).get(DelAdd::Addition) {
 for (position, word) in KvReaderU16::from_slice(addition).iter() {
 // drain the proximity window until the head word is considered close to the word we are inserting.
-while add_word_positions.front().map_or(false, |(_w, p)| {
+while add_word_positions.front().is_some_and(|(_w, p)| {
 index_proximity(*p as u32, position as u32) >= MAX_DISTANCE
 }) {
 word_positions_into_word_pair_proximity(


@@ -46,7 +46,7 @@ pub fn extract_word_position_docids<R: io::Read + io::Seek>(
 .ok_or(SerializationError::Decoding { db_name: Some(DOCID_WORD_POSITIONS) })?;
 let document_id = DocumentId::from_be_bytes(document_id_bytes);
-if current_document_id.map_or(false, |id| document_id != id) {
+if current_document_id.is_some_and(|id| document_id != id) {
 words_position_into_sorter(
 current_document_id.unwrap(),
 &mut key_buffer,


@@ -281,7 +281,7 @@ fn send_original_documents_data(
 };
 if !(remove_vectors.is_empty()
 && manual_vectors.is_empty()
-&& embeddings.as_ref().map_or(true, |e| e.is_empty()))
+&& embeddings.as_ref().is_none_or(|e| e.is_empty()))
 {
 let _ = lmdb_writer_sx.send(Ok(TypedChunk::VectorPoints {
 remove_vectors,


@@ -512,12 +512,9 @@ where
 InternalError::DatabaseMissingEntry { db_name: "embedder_category_id", key: None },
 )?;
 let embedder_config = settings_diff.embedding_config_updates.get(&embedder_name);
-let was_quantized = settings_diff
-.old
-.embedding_configs
-.get(&embedder_name)
-.map_or(false, |conf| conf.2);
-let is_quantizing = embedder_config.map_or(false, |action| action.is_being_quantized);
+let was_quantized =
+settings_diff.old.embedding_configs.get(&embedder_name).is_some_and(|conf| conf.2);
+let is_quantizing = embedder_config.is_some_and(|action| action.is_being_quantized);
 pool.install(|| {
 let mut writer = ArroyWrapper::new(vector_arroy, embedder_index, was_quantized);


@@ -197,7 +197,7 @@ impl<'a, 'i> Transform<'a, 'i> {
 // drop_and_reuse is called instead of .clear() to communicate to the compiler that field_buffer
 // does not keep references from the cursor between loop iterations
 let mut field_buffer_cache = drop_and_reuse(field_buffer);
-if self.indexer_settings.log_every_n.map_or(false, |len| documents_count % len == 0) {
+if self.indexer_settings.log_every_n.is_some_and(|len| documents_count % len == 0) {
 progress_callback(UpdateIndexingStep::RemapDocumentAddition {
 documents_seen: documents_count,
 });


@@ -55,7 +55,7 @@ impl ChunkAccumulator {
 match self
 .inner
 .iter()
-.position(|right| right.first().map_or(false, |right| chunk.mergeable_with(right)))
+.position(|right| right.first().is_some_and(|right| chunk.mergeable_with(right)))
 {
 Some(position) => {
 let v = self.inner.get_mut(position).unwrap();
@@ -664,11 +664,8 @@ pub(crate) fn write_typed_chunk_into_index(
 let embedder_index = index.embedder_category_id.get(wtxn, &embedder_name)?.ok_or(
 InternalError::DatabaseMissingEntry { db_name: "embedder_category_id", key: None },
 )?;
-let binary_quantized = settings_diff
-.old
-.embedding_configs
-.get(&embedder_name)
-.map_or(false, |conf| conf.2);
+let binary_quantized =
+settings_diff.old.embedding_configs.get(&embedder_name).is_some_and(|conf| conf.2);
 // FIXME: allow customizing distance
 let writer = ArroyWrapper::new(index.vector_arroy, embedder_index, binary_quantized);


@@ -56,13 +56,13 @@ where
 content: &'t KvReaderFieldId,
 }
-impl<'t, Mapper: FieldIdMapper> Clone for DocumentFromDb<'t, Mapper> {
+impl<Mapper: FieldIdMapper> Clone for DocumentFromDb<'_, Mapper> {
 #[inline]
 fn clone(&self) -> Self {
 *self
 }
 }
-impl<'t, Mapper: FieldIdMapper> Copy for DocumentFromDb<'t, Mapper> {}
+impl<Mapper: FieldIdMapper> Copy for DocumentFromDb<'_, Mapper> {}
 impl<'t, Mapper: FieldIdMapper> Document<'t> for DocumentFromDb<'t, Mapper> {
 fn iter_top_level_fields(&self) -> impl Iterator<Item = Result<(&'t str, &'t RawValue)>> {
@@ -154,7 +154,7 @@ impl<'a, 'doc> DocumentFromVersions<'a, 'doc> {
 }
 }
-impl<'a, 'doc> Document<'doc> for DocumentFromVersions<'a, 'doc> {
+impl<'doc> Document<'doc> for DocumentFromVersions<'_, 'doc> {
 fn iter_top_level_fields(&self) -> impl Iterator<Item = Result<(&'doc str, &'doc RawValue)>> {
 self.versions.iter_top_level_fields().map(Ok)
 }


@@ -121,7 +121,7 @@ impl<'extractor> BalancedCaches<'extractor> {
 }
 pub fn insert_del_u32(&mut self, key: &[u8], n: u32) -> Result<()> {
-if self.max_memory.map_or(false, |mm| self.alloc.allocated_bytes() >= mm) {
+if self.max_memory.is_some_and(|mm| self.alloc.allocated_bytes() >= mm) {
 self.start_spilling()?;
 }
@@ -138,7 +138,7 @@ impl<'extractor> BalancedCaches<'extractor> {
 }
 pub fn insert_add_u32(&mut self, key: &[u8], n: u32) -> Result<()> {
-if self.max_memory.map_or(false, |mm| self.alloc.allocated_bytes() >= mm) {
+if self.max_memory.is_some_and(|mm| self.alloc.allocated_bytes() >= mm) {
 self.start_spilling()?;
 }
@@ -623,7 +623,7 @@ pub struct FrozenDelAddBbbul<'bump, B> {
 pub add: Option<FrozenBbbul<'bump, B>>,
 }
-impl<'bump, B> FrozenDelAddBbbul<'bump, B> {
+impl<B> FrozenDelAddBbbul<'_, B> {
 fn is_empty(&self) -> bool {
 self.del.is_none() && self.add.is_none()
 }


@@ -31,7 +31,7 @@ pub struct DocumentExtractorData {
 pub field_distribution_delta: HashMap<String, i64>,
 }
-impl<'a, 'b, 'extractor> Extractor<'extractor> for DocumentsExtractor<'a, 'b> {
+impl<'extractor> Extractor<'extractor> for DocumentsExtractor<'_, '_> {
 type Data = FullySend<RefCell<DocumentExtractorData>>;
 fn init_data(&self, _extractor_alloc: &'extractor Bump) -> Result<Self::Data> {


@@ -37,7 +37,7 @@ pub struct FacetedExtractorData<'a, 'b> {
 is_geo_enabled: bool,
 }
-impl<'a, 'b, 'extractor> Extractor<'extractor> for FacetedExtractorData<'a, 'b> {
+impl<'extractor> Extractor<'extractor> for FacetedExtractorData<'_, '_> {
 type Data = RefCell<BalancedCaches<'extractor>>;
 fn init_data(&self, extractor_alloc: &'extractor Bump) -> Result<Self::Data> {


@@ -92,7 +92,7 @@ pub struct FrozenGeoExtractorData<'extractor> {
 pub spilled_inserted: Option<BufReader<File>>,
 }
-impl<'extractor> FrozenGeoExtractorData<'extractor> {
+impl FrozenGeoExtractorData<'_> {
 pub fn iter_and_clear_removed(
 &mut self,
 ) -> io::Result<impl IntoIterator<Item = io::Result<ExtractedGeoPoint>> + '_> {
@@ -160,7 +160,7 @@ impl<'extractor> Extractor<'extractor> for GeoExtractor {
 for change in changes {
 if data_ref.spilled_removed.is_none()
-&& max_memory.map_or(false, |mm| context.extractor_alloc.allocated_bytes() >= mm)
+&& max_memory.is_some_and(|mm| context.extractor_alloc.allocated_bytes() >= mm)
 {
 // We must spill as we allocated too much memory
 data_ref.spilled_removed = tempfile::tempfile().map(BufWriter::new).map(Some)?;
@@ -258,9 +258,11 @@ pub fn extract_geo_coordinates(
 Value::Null => return Ok(None),
 Value::Object(map) => map,
 value => {
-return Err(
-GeoError::NotAnObject { document_id: Value::from(external_id), value }.into()
-)
+return Err(Box::new(GeoError::NotAnObject {
+document_id: Value::from(external_id),
+value,
+})
+.into())
 }
 };
@@ -269,23 +271,29 @@ pub fn extract_geo_coordinates(
 if geo.is_empty() {
 [lat, lng]
 } else {
-return Err(GeoError::UnexpectedExtraFields {
+return Err(Box::new(GeoError::UnexpectedExtraFields {
 document_id: Value::from(external_id),
 value: Value::from(geo),
-}
+})
 .into());
 }
 }
 (Some(_), None) => {
-return Err(GeoError::MissingLongitude { document_id: Value::from(external_id) }.into())
+return Err(Box::new(GeoError::MissingLongitude {
+document_id: Value::from(external_id),
+})
+.into())
 }
 (None, Some(_)) => {
-return Err(GeoError::MissingLatitude { document_id: Value::from(external_id) }.into())
+return Err(Box::new(GeoError::MissingLatitude {
+document_id: Value::from(external_id),
+})
+.into())
 }
 (None, None) => {
-return Err(GeoError::MissingLatitudeAndLongitude {
+return Err(Box::new(GeoError::MissingLatitudeAndLongitude {
 document_id: Value::from(external_id),
-}
+})
 .into())
 }
 };
@@ -293,16 +301,18 @@ pub fn extract_geo_coordinates(
 match (extract_finite_float_from_value(lat), extract_finite_float_from_value(lng)) {
 (Ok(lat), Ok(lng)) => Ok(Some([lat, lng])),
 (Ok(_), Err(value)) => {
-Err(GeoError::BadLongitude { document_id: Value::from(external_id), value }.into())
+Err(Box::new(GeoError::BadLongitude { document_id: Value::from(external_id), value })
+.into())
 }
 (Err(value), Ok(_)) => {
-Err(GeoError::BadLatitude { document_id: Value::from(external_id), value }.into())
+Err(Box::new(GeoError::BadLatitude { document_id: Value::from(external_id), value })
+.into())
 }
-(Err(lat), Err(lng)) => Err(GeoError::BadLatitudeAndLongitude {
+(Err(lat), Err(lng)) => Err(Box::new(GeoError::BadLatitudeAndLongitude {
 document_id: Value::from(external_id),
 lat,
 lng,
-}
+})
 .into()),
 }
 }


@@ -31,7 +31,7 @@ pub struct WordDocidsBalancedCaches<'extractor> {
 current_docid: Option<DocumentId>,
 }
-unsafe impl<'extractor> MostlySend for WordDocidsBalancedCaches<'extractor> {}
+unsafe impl MostlySend for WordDocidsBalancedCaches<'_> {}
 impl<'extractor> WordDocidsBalancedCaches<'extractor> {
 pub fn new_in(buckets: usize, max_memory: Option<usize>, alloc: &'extractor Bump) -> Self {
@@ -78,7 +78,7 @@ impl<'extractor> WordDocidsBalancedCaches<'extractor> {
 buffer.extend_from_slice(&position.to_be_bytes());
 self.word_position_docids.insert_add_u32(&buffer, docid)?;
-if self.current_docid.map_or(false, |id| docid != id) {
+if self.current_docid.is_some_and(|id| docid != id) {
 self.flush_fid_word_count(&mut buffer)?;
 }
@@ -123,7 +123,7 @@ impl<'extractor> WordDocidsBalancedCaches<'extractor> {
 buffer.extend_from_slice(&position.to_be_bytes());
 self.word_position_docids.insert_del_u32(&buffer, docid)?;
-if self.current_docid.map_or(false, |id| docid != id) {
+if self.current_docid.is_some_and(|id| docid != id) {
 self.flush_fid_word_count(&mut buffer)?;
 }
@@ -212,7 +212,7 @@ pub struct WordDocidsExtractorData<'a> {
 searchable_attributes: Option<Vec<&'a str>>,
 }
-impl<'a, 'extractor> Extractor<'extractor> for WordDocidsExtractorData<'a> {
+impl<'extractor> Extractor<'extractor> for WordDocidsExtractorData<'_> {
 type Data = RefCell<Option<WordDocidsBalancedCaches<'extractor>>>;
 fn init_data(&self, extractor_alloc: &'extractor Bump) -> Result<Self::Data> {


@@ -25,7 +25,7 @@ pub struct WordPairProximityDocidsExtractorData<'a> {
 buckets: usize,
 }
-impl<'a, 'extractor> Extractor<'extractor> for WordPairProximityDocidsExtractorData<'a> {
+impl<'extractor> Extractor<'extractor> for WordPairProximityDocidsExtractorData<'_> {
 type Data = RefCell<BalancedCaches<'extractor>>;
 fn init_data(&self, extractor_alloc: &'extractor Bump) -> Result<Self::Data> {
@@ -270,7 +270,7 @@ fn process_document_tokens<'doc>(
 // drain the proximity window until the head word is considered close to the word we are inserting.
 while word_positions
 .front()
-.map_or(false, |(_w, p)| index_proximity(*p as u32, pos as u32) >= MAX_DISTANCE)
+.is_some_and(|(_w, p)| index_proximity(*p as u32, pos as u32) >= MAX_DISTANCE)
 {
 word_positions_into_word_pair_proximity(word_positions, word_pair_proximity);
 }


@@ -22,7 +22,7 @@ pub struct DocumentTokenizer<'a> {
 pub max_positions_per_attributes: u32,
 }
-impl<'a> DocumentTokenizer<'a> {
+impl DocumentTokenizer<'_> {
 pub fn tokenize_document<'doc>(
 &self,
 document: impl Document<'doc>,


@@ -43,7 +43,7 @@ pub struct EmbeddingExtractorData<'extractor>(
 unsafe impl MostlySend for EmbeddingExtractorData<'_> {}
-impl<'a, 'b, 'extractor> Extractor<'extractor> for EmbeddingExtractor<'a, 'b> {
+impl<'extractor> Extractor<'extractor> for EmbeddingExtractor<'_, '_> {
 type Data = RefCell<EmbeddingExtractorData<'extractor>>;
 fn init_data<'doc>(&'doc self, extractor_alloc: &'extractor Bump) -> crate::Result<Self::Data> {


@@ -29,8 +29,8 @@ impl<'p, 'indexer, Mapper: MutFieldIdMapper> FieldAndDocidExtractor<'p, 'indexer
 }
 }
-impl<'de, 'p, 'indexer: 'de, Mapper: MutFieldIdMapper> Visitor<'de>
-for FieldAndDocidExtractor<'p, 'indexer, Mapper>
+impl<'de, 'indexer: 'de, Mapper: MutFieldIdMapper> Visitor<'de>
+for FieldAndDocidExtractor<'_, 'indexer, Mapper>
 {
 type Value =
 Result<Result<DeOrBumpStr<'de, 'indexer>, DocumentIdExtractionError>, crate::UserError>;
@@ -98,7 +98,7 @@ struct NestedPrimaryKeyVisitor<'a, 'bump> {
 bump: &'bump Bump,
 }
-impl<'de, 'a, 'bump: 'de> Visitor<'de> for NestedPrimaryKeyVisitor<'a, 'bump> {
+impl<'de, 'bump: 'de> Visitor<'de> for NestedPrimaryKeyVisitor<'_, 'bump> {
 type Value = std::result::Result<Option<DeOrBumpStr<'de, 'bump>>, DocumentIdExtractionError>;
 fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
@@ -237,7 +237,7 @@ impl<'de, 'a, Mapper: MutFieldIdMapper> Visitor<'de> for MutFieldIdMapVisitor<'a
 pub struct FieldIdMapVisitor<'a, Mapper: FieldIdMapper>(pub &'a Mapper);
-impl<'de, 'a, Mapper: FieldIdMapper> Visitor<'de> for FieldIdMapVisitor<'a, Mapper> {
+impl<'de, Mapper: FieldIdMapper> Visitor<'de> for FieldIdMapVisitor<'_, Mapper> {
 type Value = Option<FieldId>;
 fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {


@@ -149,16 +149,11 @@ pub struct IndexingContext<
 pub grenad_parameters: &'indexer GrenadParameters,
 }
-impl<
-'fid, // invariant lifetime of fields ids map
-'indexer, // covariant lifetime of objects that are borrowed during the entire indexing operation
-'index, // covariant lifetime of the index
-MSP,
-> Copy
+impl<MSP> Copy
 for IndexingContext<
-'fid, // invariant lifetime of fields ids map
-'indexer, // covariant lifetime of objects that are borrowed during the entire indexing operation
-'index, // covariant lifetime of the index
+'_, // invariant lifetime of fields ids map
+'_, // covariant lifetime of objects that are borrowed during the entire indexing operation
+'_, // covariant lifetime of the index
 MSP,
 >
 where
@@ -166,16 +161,11 @@ where
 {
 }
-impl<
-'fid, // invariant lifetime of fields ids map
-'indexer, // covariant lifetime of objects that are borrowed during the entire indexing operation
-'index, // covariant lifetime of the index
-MSP,
-> Clone
+impl<MSP> Clone
 for IndexingContext<
-'fid, // invariant lifetime of fields ids map
-'indexer, // covariant lifetime of objects that are borrowed during the entire indexing operation
-'index, // covariant lifetime of the index
+'_, // invariant lifetime of fields ids map
+'_, // covariant lifetime of objects that are borrowed during the entire indexing operation
+'_, // covariant lifetime of the index
 MSP,
 >
 where


@@ -110,7 +110,7 @@ mod test {
 >,
 }
-unsafe impl<'extractor> MostlySend for DeletionWithData<'extractor> {}
+unsafe impl MostlySend for DeletionWithData<'_> {}
 struct TrackDeletion<'extractor>(PhantomData<&'extractor ()>);


@@ -210,14 +210,8 @@ fn extract_addition_payload_changes<'r, 'pl: 'r>(
 primary_key.as_ref().unwrap()
 };
-let external_id = match retrieved_primary_key.extract_fields_and_docid(
-doc,
-new_fields_ids_map,
-indexer,
-) {
-Ok(edi) => edi,
-Err(e) => return Err(e),
-};
+let external_id =
+retrieved_primary_key.extract_fields_and_docid(doc, new_fields_ids_map, indexer)?;
 let external_id = external_id.to_de();
 let current_offset = iter.byte_offset();
@@ -580,12 +574,12 @@ impl<'pl> PayloadOperations<'pl> {
 }
 }
 Some(InnerDocOp::Deletion) => {
-return if self.is_new {
+if self.is_new {
 Ok(None)
 } else {
 let deletion = Deletion::create(self.docid, external_doc);
 Ok(Some(DocumentChange::Deletion(deletion)))
-};
+}
 }
 None => unreachable!("We must not have an empty set of operations on a document"),
 }

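The two hunks above are pure simplifications: a `match` that only re-raised the error becomes the `?` operator, and a `return` on a tail `if` expression is dropped. An illustrative stand-alone function (the parsing logic is invented for the example):

// Sketch of the two cleanups: `?` instead of a match that re-raises, and no `return` on a tail expression.
fn absolute(input: &str) -> Result<i32, std::num::ParseIntError> {
    // Before: match input.parse() { Ok(n) => n, Err(e) => return Err(e) };
    let n = input.parse::<i32>()?;
    // Before: return if n >= 0 { Ok(n) } else { Ok(-n) };
    if n >= 0 {
        Ok(n)
    } else {
        Ok(-n)
    }
}

fn main() {
    println!("{:?}", absolute("-12"));
    println!("{:?}", absolute("oops"));
}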

@@ -149,7 +149,7 @@ impl<'a, 'rtxn> FrozenPrefixBitmaps<'a, 'rtxn> {
 }
 }
-unsafe impl<'a, 'rtxn> Sync for FrozenPrefixBitmaps<'a, 'rtxn> {}
+unsafe impl Sync for FrozenPrefixBitmaps<'_, '_> {}
 struct WordPrefixIntegerDocids {
 database: Database<Bytes, CboRoaringBitmapCodec>,
@@ -302,7 +302,7 @@ impl<'a, 'rtxn> FrozenPrefixIntegerBitmaps<'a, 'rtxn> {
 }
 }
-unsafe impl<'a, 'rtxn> Sync for FrozenPrefixIntegerBitmaps<'a, 'rtxn> {}
+unsafe impl Sync for FrozenPrefixIntegerBitmaps<'_, '_> {}
 #[tracing::instrument(level = "trace", skip_all, target = "indexing::prefix")]
 fn delete_prefixes(


@@ -560,7 +560,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
 // Does the new FST differ from the previous one?
 if current
-.map_or(true, |current| current.as_fst().as_bytes() != fst.as_fst().as_bytes())
+.is_none_or(|current| current.as_fst().as_bytes() != fst.as_fst().as_bytes())
 {
 // we want to re-create our FST.
 self.index.put_stop_words(self.wtxn, &fst)?;
@@ -580,7 +580,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
 let current = self.index.non_separator_tokens(self.wtxn)?;
 // Does the new list differ from the previous one?
-if current.map_or(true, |current| &current != non_separator_tokens) {
+if current.is_none_or(|current| &current != non_separator_tokens) {
 self.index.put_non_separator_tokens(self.wtxn, non_separator_tokens)?;
 true
 } else {
@@ -605,7 +605,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
 let current = self.index.separator_tokens(self.wtxn)?;
 // Does the new list differ from the previous one?
-if current.map_or(true, |current| &current != separator_tokens) {
+if current.is_none_or(|current| &current != separator_tokens) {
 self.index.put_separator_tokens(self.wtxn, separator_tokens)?;
 true
 } else {
@@ -630,7 +630,7 @@ impl<'a, 't, 'i> Settings<'a, 't, 'i> {
 let current = self.index.dictionary(self.wtxn)?;
 // Does the new list differ from the previous one?
-if current.map_or(true, |current| &current != dictionary) {
+if current.is_none_or(|current| &current != dictionary) {
 self.index.put_dictionary(self.wtxn, dictionary)?;
 true
 } else {
@@ -1340,7 +1340,7 @@ impl InnerIndexSettingsDiff {
 new_settings.embedding_configs.inner_as_ref()
 {
 let was_quantized =
-old_settings.embedding_configs.get(embedder_name).map_or(false, |conf| conf.2);
+old_settings.embedding_configs.get(embedder_name).is_some_and(|conf| conf.2);
 // skip embedders that don't use document templates
 if !config.uses_document_template() {
 continue;


@@ -311,7 +311,7 @@ fn last_named_object<'a>(
 last_named_object
 }
-impl<'a> std::fmt::Display for LastNamedObject<'a> {
+impl std::fmt::Display for LastNamedObject<'_> {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 match self {
 LastNamedObject::Object { name } => write!(f, "`{name}`"),


@@ -59,7 +59,7 @@ impl ArroyWrapper {
 &'a self,
 rtxn: &'a RoTxn<'a>,
 db: arroy::Database<D>,
-) -> impl Iterator<Item = Result<arroy::Reader<D>, arroy::Error>> + 'a {
+) -> impl Iterator<Item = Result<arroy::Reader<'a, D>, arroy::Error>> + 'a {
 arroy_db_range_for_embedder(self.embedder_index).map_while(move |index| {
 match arroy::Reader::open(rtxn, index, db) {
 Ok(reader) => match reader.is_empty(rtxn) {


@@ -242,11 +242,11 @@ fn execute_filter(filter: &str, document: &TestDocument) -> Option<String> {
 id = contains_key_rec(opt1, "opt2").then(|| document.id.clone());
 }
 } else if matches!(filter, "opt1 IS NULL" | "NOT opt1 IS NOT NULL") {
-id = document.opt1.as_ref().map_or(false, |v| v.is_null()).then(|| document.id.clone());
+id = document.opt1.as_ref().is_some_and(|v| v.is_null()).then(|| document.id.clone());
 } else if matches!(filter, "NOT opt1 IS NULL" | "opt1 IS NOT NULL") {
-id = document.opt1.as_ref().map_or(true, |v| !v.is_null()).then(|| document.id.clone());
+id = document.opt1.as_ref().is_none_or(|v| !v.is_null()).then(|| document.id.clone());
 } else if matches!(filter, "opt1.opt2 IS NULL") {
-if document.opt1opt2.as_ref().map_or(false, |v| v.is_null()) {
+if document.opt1opt2.as_ref().is_some_and(|v| v.is_null()) {
 id = Some(document.id.clone());
 } else if let Some(opt1) = &document.opt1 {
 if !opt1.is_null() {
@@ -254,15 +254,11 @@ fn execute_filter(filter: &str, document: &TestDocument) -> Option<String> {
 }
 }
 } else if matches!(filter, "opt1 IS EMPTY" | "NOT opt1 IS NOT EMPTY") {
-id = document.opt1.as_ref().map_or(false, is_empty_value).then(|| document.id.clone());
+id = document.opt1.as_ref().is_some_and(is_empty_value).then(|| document.id.clone());
 } else if matches!(filter, "NOT opt1 IS EMPTY" | "opt1 IS NOT EMPTY") {
-id = document
-.opt1
-.as_ref()
-.map_or(true, |v| !is_empty_value(v))
-.then(|| document.id.clone());
+id = document.opt1.as_ref().is_none_or(|v| !is_empty_value(v)).then(|| document.id.clone());
 } else if matches!(filter, "opt1.opt2 IS EMPTY") {
-if document.opt1opt2.as_ref().map_or(false, is_empty_value) {
+if document.opt1opt2.as_ref().is_some_and(is_empty_value) {
 id = Some(document.id.clone());
 }
 } else if matches!(


@@ -66,7 +66,7 @@ use tracing_error::ExtractSpanTrace as _;
 use tracing_subscriber::layer::SubscriberExt as _;
 use tracing_trace::processor;
-fn on_panic(info: &std::panic::PanicInfo) {
+fn on_panic(info: &std::panic::PanicHookInfo) {
 let info = info.to_string();
 let trace = SpanTrace::capture();
 tracing::error!(%info, %trace);

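`std::panic::PanicInfo` was renamed to `PanicHookInfo` for panic hooks around Rust 1.81, with the old name kept as a deprecated alias, so only the signature of `on_panic` changes here. A self-contained sketch of a hook using the new type; the hook body below is illustrative, not the tracing-based one above:

// Minimal panic hook with the renamed type.
use std::panic::PanicHookInfo;

fn on_panic(info: &PanicHookInfo<'_>) {
    eprintln!("panicked: {info}");
}

fn main() {
    std::panic::set_hook(Box::new(on_panic));
    let _ = std::panic::catch_unwind(|| panic!("boom"));
    println!("recovered from the panic");
}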

@@ -282,7 +282,7 @@ struct SpanMarker<'a> {
 memory_delta: Option<MemoryStats>,
 }
-impl<'a> ProfilerMarker for SpanMarker<'a> {
+impl ProfilerMarker for SpanMarker<'_> {
 const MARKER_TYPE_NAME: &'static str = "span";
 fn schema() -> MarkerSchema {
@@ -369,7 +369,7 @@ struct EventMarker<'a> {
 memory_delta: Option<MemoryStats>,
 }
-impl<'a> ProfilerMarker for EventMarker<'a> {
+impl ProfilerMarker for EventMarker<'_> {
 const MARKER_TYPE_NAME: &'static str = "tracing-event";
 fn schema() -> MarkerSchema {


@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.81.0"
+channel = "1.85.1"
 components = ["clippy"]