Merge pull request #5406 from meilisearch/bump-heed

Bump heed to v0.22 and arroy to v0.6
This commit is contained in:
Clément Renault 2025-03-13 16:52:45 +01:00 committed by GitHub
commit 7df5715d39
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
51 changed files with 370 additions and 274 deletions

View file

@ -34,7 +34,7 @@ use error::PayloadError;
use extractors::payload::PayloadConfig;
use index_scheduler::versioning::Versioning;
use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
use meilisearch_auth::AuthController;
use meilisearch_auth::{open_auth_store_env, AuthController};
use meilisearch_types::milli::constants::VERSION_MAJOR;
use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
@ -335,9 +335,12 @@ fn open_or_create_database_unchecked(
) -> anyhow::Result<(IndexScheduler, AuthController)> {
// we don't want to create anything in the data.ms yet, thus we
// wrap our two builders in a closure that'll be executed later.
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key);
let index_scheduler_builder =
|| -> anyhow::Result<_> { Ok(IndexScheduler::new(index_scheduler_opt, version)?) };
std::fs::create_dir_all(&index_scheduler_opt.auth_path)?;
let auth_env = open_auth_store_env(&index_scheduler_opt.auth_path).unwrap();
let auth_controller = AuthController::new(auth_env.clone(), &opt.master_key);
let index_scheduler_builder = || -> anyhow::Result<_> {
Ok(IndexScheduler::new(index_scheduler_opt, auth_env, version)?)
};
match (
index_scheduler_builder(),
@ -420,6 +423,7 @@ pub fn update_version_file_for_dumpless_upgrade(
if from_major == 1 && from_minor == 12 {
let env = unsafe {
heed::EnvOpenOptions::new()
.read_txn_without_tls()
.max_dbs(Versioning::nb_db())
.map_size(index_scheduler_opt.task_db_size)
.open(&index_scheduler_opt.tasks_path)

View file

@ -340,7 +340,8 @@ impl SearchKind {
vector_len: Option<usize>,
route: Route,
) -> Result<(String, Arc<Embedder>, bool), ResponseError> {
let embedder_configs = index.embedding_configs(&index.read_txn()?)?;
let rtxn = index.read_txn()?;
let embedder_configs = index.embedding_configs(&rtxn)?;
let embedders = index_scheduler.embedders(index_uid, embedder_configs)?;
let (embedder, _, quantized) = embedders

View file

@ -144,14 +144,6 @@ async fn experimental_feature_metrics() {
let (response, code) = server.get_metrics().await;
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(response, @"null");
// startup without flag respects persisted metrics value
let disable_metrics =
Opt { experimental_enable_metrics: false, ..default_settings(dir.path()) };
let server_no_flag = Server::new_with_options(disable_metrics).await.unwrap();
let (response, code) = server_no_flag.get_metrics().await;
meili_snap::snapshot!(code, @"200 OK");
meili_snap::snapshot!(response, @"null");
}
#[actix_rt::test]

View file

@ -64,9 +64,6 @@ async fn version_requires_downgrade() {
#[actix_rt::test]
async fn upgrade_to_the_current_version() {
let temp = tempfile::tempdir().unwrap();
let server = Server::new_with_options(default_settings(temp.path())).await.unwrap();
drop(server);
let server = Server::new_with_options(Opt {
experimental_dumpless_upgrade: true,
..default_settings(temp.path())

View file

@ -108,6 +108,10 @@ async fn check_the_keys(server: &Server) {
/// 5.2. Enqueue a new task
/// 5.3. Create an index
async fn check_the_index_scheduler(server: &Server) {
// Wait until the upgrade has been applied to all indexes to avoid flakiness
let (tasks, _) = server.tasks_filter("types=upgradeDatabase&limit=1").await;
server.wait_task(Value(tasks["results"][0].clone()).uid()).await.succeeded();
// All the indexes are still present
let (indexes, _) = server.list_indexes(None, None).await;
snapshot!(indexes, @r#"
@ -156,10 +160,6 @@ async fn check_the_index_scheduler(server: &Server) {
}
"###);
// Wait until the upgrade has been applied to all indexes to avoid flakiness
let (tasks, _) = server.tasks_filter("types=upgradeDatabase&limit=1").await;
server.wait_task(Value(tasks["results"][0].clone()).uid()).await.succeeded();
// Tasks and batches should still work
// We rewrite the first task for all calls because it may be the upgrade database with unknown dates and duration.
// The other tasks should NOT change