Mirror of https://github.com/meilisearch/MeiliSearch (synced 2025-07-03 03:47:02 +02:00)
expose the number of databases in the index-scheduler and rewrite lib.rs to use the value provided in the options instead of a magic number
parent 705d31e8bd
commit e41ebd3047
8 changed files with 105 additions and 54 deletions
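Every component now reports how many LMDB databases it opens through a `const fn nb_db()`, and the scheduler sums those counts instead of hard-coding a magic number. A minimal standalone sketch of that arithmetic, using throwaway constants that only mirror the values introduced in the diffs below:

// Illustrative only: free-standing constants mirroring the per-component
// counts declared in this commit, not the module-private constants themselves.
const TASK_QUEUE_DBS: u32 = 8; // TaskQueue::nb_db()
const BATCH_QUEUE_DBS: u32 = 7; // BatchQueue::nb_db()
const QUEUE_OWN_DBS: u32 = 1; // the queue's own batch-to-tasks mapping
const INDEX_MAPPER_DBS: u32 = 2; // index-mapping + index-stats
const FEATURE_DBS: u32 = 1; // experimental-features

fn main() {
    // Queue::nb_db() = TaskQueue::nb_db() + BatchQueue::nb_db() + its own database
    let queue_dbs = TASK_QUEUE_DBS + BATCH_QUEUE_DBS + QUEUE_OWN_DBS;
    // IndexScheduler::nb_db() = Queue::nb_db() + IndexMapper::nb_db() + FeatureData::nb_db()
    let total = queue_dbs + INDEX_MAPPER_DBS + FEATURE_DBS;
    assert_eq!(total, 19); // the value the old .max_dbs(19) calls hard-coded
    println!("the index scheduler opens {total} LMDB databases");
}

The sum comes out to 19, exactly the magic number removed from `.max_dbs(19)` below.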
@@ -7,7 +7,12 @@ use meilisearch_types::heed::{Database, Env, RwTxn};
 use crate::error::FeatureNotEnabledError;
 use crate::Result;
 
-const EXPERIMENTAL_FEATURES: &str = "experimental-features";
+/// The number of database used by features
+const NUMBER_OF_DATABASES: u32 = 1;
+/// Database const names for the `FeatureData`.
+mod db_name {
+    pub const EXPERIMENTAL_FEATURES: &str = "experimental-features";
+}
 
 #[derive(Clone)]
 pub(crate) struct FeatureData {
@@ -84,14 +89,19 @@ impl RoFeatures {
 }
 
 impl FeatureData {
+    pub(crate) const fn nb_db() -> u32 {
+        NUMBER_OF_DATABASES
+    }
+
     pub fn new(env: &Env, instance_features: InstanceTogglableFeatures) -> Result<Self> {
         let mut wtxn = env.write_txn()?;
-        let runtime_features_db = env.create_database(&mut wtxn, Some(EXPERIMENTAL_FEATURES))?;
+        let runtime_features_db =
+            env.create_database(&mut wtxn, Some(db_name::EXPERIMENTAL_FEATURES))?;
         wtxn.commit()?;
 
         let txn = env.read_txn()?;
         let persisted_features: RuntimeTogglableFeatures =
-            runtime_features_db.get(&txn, EXPERIMENTAL_FEATURES)?.unwrap_or_default();
+            runtime_features_db.get(&txn, db_name::EXPERIMENTAL_FEATURES)?.unwrap_or_default();
         let InstanceTogglableFeatures { metrics, logs_route, contains_filter } = instance_features;
         let runtime = Arc::new(RwLock::new(RuntimeTogglableFeatures {
             metrics: metrics || persisted_features.metrics,
@@ -108,7 +118,7 @@ impl FeatureData {
         mut wtxn: RwTxn,
         features: RuntimeTogglableFeatures,
     ) -> Result<()> {
-        self.persisted.put(&mut wtxn, EXPERIMENTAL_FEATURES, &features)?;
+        self.persisted.put(&mut wtxn, db_name::EXPERIMENTAL_FEATURES, &features)?;
         wtxn.commit()?;
 
         // safe to unwrap, the lock will only fail if:
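The same renaming pattern repeats in every file below: the raw name string moves into a private `db_name` module, a `NUMBER_OF_DATABASES` constant records how many databases the component opens, and a `const fn nb_db()` exposes that count. A self-contained sketch of the pattern, with a made-up component and database name for illustration:

/// How many LMDB databases this (hypothetical) component opens.
const NUMBER_OF_DATABASES: u32 = 1;

/// Database names, namespaced so call sites read `db_name::EXAMPLE`
/// instead of a bare string constant.
mod db_name {
    pub const EXAMPLE: &str = "example";
}

/// Exposed so the owner of the environment can sum the counts of all
/// components before calling `max_dbs`.
pub const fn nb_db() -> u32 {
    NUMBER_OF_DATABASES
}

fn main() {
    println!("{} database(s), first one named {:?}", nb_db(), db_name::EXAMPLE);
}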
@@ -20,8 +20,13 @@ use crate::{Error, IndexBudget, IndexSchedulerOptions, Result};
 
 mod index_map;
 
-const INDEX_MAPPING: &str = "index-mapping";
-const INDEX_STATS: &str = "index-stats";
+/// The number of database used by index mapper
+const NUMBER_OF_DATABASES: u32 = 2;
+/// Database const names for the `IndexMapper`.
+mod db_name {
+    pub const INDEX_MAPPING: &str = "index-mapping";
+    pub const INDEX_STATS: &str = "index-stats";
+}
 
 /// Structure managing meilisearch's indexes.
 ///
@@ -138,6 +143,10 @@ impl IndexStats {
 }
 
 impl IndexMapper {
+    pub(crate) const fn nb_db() -> u32 {
+        NUMBER_OF_DATABASES
+    }
+
     pub fn new(
         env: &Env,
         wtxn: &mut RwTxn,
@@ -146,8 +155,8 @@
     ) -> Result<Self> {
         Ok(Self {
             index_map: Arc::new(RwLock::new(IndexMap::new(budget.index_count))),
-            index_mapping: env.create_database(wtxn, Some(INDEX_MAPPING))?,
-            index_stats: env.create_database(wtxn, Some(INDEX_STATS))?,
+            index_mapping: env.create_database(wtxn, Some(db_name::INDEX_MAPPING))?,
+            index_stats: env.create_database(wtxn, Some(db_name::INDEX_STATS))?,
             base_path: options.indexes_path.clone(),
             index_base_map_size: budget.map_size,
             index_growth_amount: options.index_growth_amount,
@@ -197,6 +197,10 @@ impl IndexScheduler {
         }
     }
 
+    pub(crate) const fn nb_db() -> u32 {
+        Queue::nb_db() + IndexMapper::nb_db() + features::FeatureData::nb_db()
+    }
+
     /// Create an index scheduler and start its run loop.
     #[allow(private_interfaces)] // because test_utils is private
     pub fn new(
@@ -232,7 +236,7 @@ impl IndexScheduler {
 
         let env = unsafe {
             heed::EnvOpenOptions::new()
-                .max_dbs(19)
+                .max_dbs(Self::nb_db())
                 .map_size(budget.task_db_size)
                 .open(&options.tasks_path)
         }?;
@@ -17,6 +17,8 @@ use crate::utils::{
 };
 use crate::{Error, Result, BEI128};
 
+/// The number of database used by the batch queue
+const NUMBER_OF_DATABASES: u32 = 7;
 /// Database const names for the `IndexScheduler`.
 mod db_name {
     pub const ALL_BATCHES: &str = "all-batches";
@@ -60,6 +62,10 @@ impl BatchQueue {
         }
     }
 
+    pub(crate) const fn nb_db() -> u32 {
+        NUMBER_OF_DATABASES
+    }
+
     pub(super) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
         Ok(Self {
             all_batches: env.create_database(wtxn, Some(db_name::ALL_BATCHES))?,
@@ -28,6 +28,8 @@ use crate::utils::{
 };
 use crate::{Error, IndexSchedulerOptions, Result, TaskId};
 
+/// The number of database used by queue itself
+const NUMBER_OF_DATABASES: u32 = 1;
 /// Database const names for the `IndexScheduler`.
 mod db_name {
     pub const BATCH_TO_TASKS_MAPPING: &str = "batch-to-tasks-mapping";
@@ -148,6 +150,10 @@ impl Queue {
         }
     }
 
+    pub(crate) const fn nb_db() -> u32 {
+        tasks::TaskQueue::nb_db() + batches::BatchQueue::nb_db() + NUMBER_OF_DATABASES
+    }
+
     /// Create an index scheduler and start its run loop.
     pub(crate) fn new(
         env: &Env,
@@ -14,9 +14,12 @@ use crate::utils::{
 };
 use crate::{Error, Result, TaskId, BEI128};
 
+/// The number of database used by the task queue
+const NUMBER_OF_DATABASES: u32 = 8;
 /// Database const names for the `IndexScheduler`.
 mod db_name {
     pub const ALL_TASKS: &str = "all-tasks";
+
     pub const STATUS: &str = "status";
     pub const KIND: &str = "kind";
     pub const INDEX_TASKS: &str = "index-tasks";
@@ -61,6 +64,10 @@ impl TaskQueue {
         }
     }
 
+    pub(crate) const fn nb_db() -> u32 {
+        NUMBER_OF_DATABASES
+    }
+
     pub(crate) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
         Ok(Self {
             all_tasks: env.create_database(wtxn, Some(db_name::ALL_TASKS))?,
@@ -8,8 +8,12 @@ use time::OffsetDateTime;
 use tracing::info;
 
 use crate::queue::TaskQueue;
+use crate::IndexSchedulerOptions;
 
-pub fn upgrade_task_queue(tasks_path: &Path, from: (u32, u32, u32)) -> anyhow::Result<()> {
+pub fn upgrade_task_queue(
+    opt: &IndexSchedulerOptions,
+    from: (u32, u32, u32),
+) -> anyhow::Result<()> {
     let current_major: u32 = VERSION_MAJOR.parse().unwrap();
     let current_minor: u32 = VERSION_MINOR.parse().unwrap();
     let current_patch: u32 = VERSION_PATCH.parse().unwrap();
@@ -40,15 +44,14 @@ pub fn upgrade_task_queue(tasks_path: &Path, from: (u32, u32, u32)) -> anyhow::R
     info!("Upgrading the task queue");
     for (upgrade, upgrade_name) in upgrade_functions[start..].iter() {
         info!("{upgrade_name}");
-        (upgrade)(tasks_path)?;
+        (upgrade)(&opt.tasks_path)?;
     }
 
     let env = unsafe {
         heed::EnvOpenOptions::new()
-            .max_dbs(19)
-            // Since that's the only database memory-mapped currently we don't need to check the budget yet
-            .map_size(100 * 1024 * 1024)
-            .open(tasks_path)
+            .max_dbs(TaskQueue::nb_db())
+            .map_size(opt.task_db_size)
+            .open(&opt.tasks_path)
     }?;
     let mut wtxn = env.write_txn()?;
     let queue = TaskQueue::new(&env, &mut wtxn)?;