fix the consistency rules

Tamo 2023-03-22 14:24:53 +01:00
parent 8089ec668e
commit 9112b26cd1
7 changed files with 57 additions and 39 deletions

Cargo.lock (generated)

@@ -2621,6 +2621,7 @@ name = "meilisearch-auth"
 version = "1.1.0"
 dependencies = [
  "base64 0.13.1",
+ "cluster",
  "enum-iterator",
  "hmac",
  "maplit",

@@ -1,6 +1,7 @@
 use std::net::ToSocketAddrs;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{atomic, Arc, Mutex, RwLock};
+use std::time::Duration;

 use bus::{Bus, BusReader};
 use crossbeam::channel::{unbounded, Receiver, Sender};
@@ -182,31 +183,27 @@ impl Leader {
         let batch_id = self.batch_id.write().unwrap();

-        // if zero nodes needs to be sync we can commit right away and early exit
-        if consistency_level != Consistency::One {
-            // else, we wait till enough nodes are ready to commit
-            for ready_to_commit in self
-                .task_ready_to_commit
-                .iter()
-                // we need to filter out the messages from the old batches
-                .filter(|id| *id == *batch_id)
-                .enumerate()
-                // we do a +2 because enumerate starts at 1 and we must includes ourselves in the count
-                .map(|(id, _)| id + 2)
-            {
-                // TODO: if the last node dies we're stuck on the iterator
-                // we need to reload the cluster size everytime in case a node dies
-                let size = self.active_followers.load(atomic::Ordering::Relaxed);
-                info!("{ready_to_commit} nodes are ready to commit for a cluster size of {size}");
-                match consistency_level {
-                    Consistency::Two if ready_to_commit >= 1 => break,
-                    Consistency::Quorum if ready_to_commit >= (size / 2) => break,
-                    Consistency::All if ready_to_commit == size => break,
-                    _ => (),
-                }
-            }
-        }
+        let mut nodes_ready_to_commit = 1;
+
+        loop {
+            let size = self.active_followers.load(atomic::Ordering::Relaxed);
+            info!("{nodes_ready_to_commit} nodes are ready to commit for a cluster size of {size}");
+            let all = nodes_ready_to_commit == size;
+            match consistency_level {
+                Consistency::One if nodes_ready_to_commit >= 1 || all => break,
+                Consistency::Two if nodes_ready_to_commit >= 2 || all => break,
+                Consistency::Quorum if nodes_ready_to_commit >= (size / 2) || all => break,
+                Consistency::All if all => break,
+                _ => (),
+            }
+
+            // we can't wait forever here because the cluster size might get updated while we wait if a node dies
+            match self.task_ready_to_commit.recv_timeout(Duration::new(1, 0)) {
+                Ok(id) if id == *batch_id => nodes_ready_to_commit += 1,
+                _ => continue,
+            };
+        }

         info!("Tells all the follower to commit");

@@ -42,29 +42,48 @@ pub enum FollowerMsg {
 }

 #[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "lowercase")]
 pub enum Consistency {
-    #[default]
     One,
     Two,
     Quorum,
+    #[default]
     All,
 }

 impl std::fmt::Display for Consistency {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let s = serde_json::to_string(self).unwrap();
-        write!(f, "{s}")
+        match self {
+            Consistency::One => write!(f, "one"),
+            Consistency::Two => write!(f, "two"),
+            Consistency::Quorum => write!(f, "quorum"),
+            Consistency::All => write!(f, "all"),
+        }
     }
 }

 impl FromStr for Consistency {
-    type Err = serde_json::Error;
+    type Err = String;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        serde_json::from_str(s)
+        match s {
+            "one" => Ok(Consistency::One),
+            "two" => Ok(Consistency::Two),
+            "quorum" => Ok(Consistency::Quorum),
+            "all" => Ok(Consistency::All),
+            s => Err(format!(
+                "Unexpected value `{s}`, expected one of `one`, `two`, `quorum`, `all`"
+            )),
+        }
     }
 }

+#[derive(Clone)]
+pub enum Cluster {
+    Leader(Leader),
+    Follower(Follower),
+}
+
 #[derive(Clone)]
 pub struct Follower {
     sender: ChannelSender<FollowerMsg>,
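
With `rename_all = "lowercase"` plus the hand-written `Display`/`FromStr`, the textual form is now lowercase and round-trips cleanly, and the default moves from `One` to `All`. A quick hedged sanity check, assuming only what the hunks show (the `cluster` crate exports `Consistency`):

use std::str::FromStr;

use cluster::Consistency;

fn main() {
    // the lowercase forms round-trip between Display and FromStr
    assert_eq!(Consistency::from_str("quorum"), Ok(Consistency::Quorum));
    assert_eq!(Consistency::Quorum.to_string(), "quorum");
    // the default moved from `One` to `All` in this commit
    assert_eq!(Consistency::default(), Consistency::All);
    // unknown values now fail with a readable message instead of a serde error
    assert!(Consistency::from_str("three").is_err());
}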

@@ -40,7 +40,7 @@ use std::sync::{Arc, RwLock};
 use std::time::Duration;

 use batch::Batch;
-use cluster::{Consistency, Follower, Leader};
+use cluster::{Cluster, Consistency};
 use dump::{KindDump, TaskDump, UpdateFile};
 pub use error::Error;
 use file_store::FileStore;
@@ -349,12 +349,6 @@ impl std::str::FromStr for ClusterMode {
     }
 }

-#[derive(Clone)]
-pub enum Cluster {
-    Leader(Leader),
-    Follower(Follower),
-}
-
 impl IndexScheduler {
     fn private_clone(&self) -> IndexScheduler {
         IndexScheduler {

@@ -12,6 +12,7 @@ license.workspace = true
 [dependencies]
 base64 = "0.13.1"
+cluster = { path = "../cluster" }
 enum-iterator = "1.1.3"
 hmac = "0.12.1"
 maplit = "1.0.2"

@@ -6,6 +6,7 @@ use std::collections::{HashMap, HashSet};
 use std::path::Path;
 use std::sync::Arc;

+use cluster::Cluster;
 use error::{AuthControllerError, Result};
 use maplit::hashset;
 use meilisearch_types::index_uid_pattern::IndexUidPattern;
@@ -21,17 +22,22 @@ use uuid::Uuid;
 pub struct AuthController {
     store: Arc<HeedAuthStore>,
     master_key: Option<String>,
+    cluster: Option<Cluster>,
 }

 impl AuthController {
-    pub fn new(db_path: impl AsRef<Path>, master_key: &Option<String>) -> Result<Self> {
+    pub fn new(
+        db_path: impl AsRef<Path>,
+        master_key: &Option<String>,
+        cluster: Option<Cluster>,
+    ) -> Result<Self> {
         let store = HeedAuthStore::new(db_path)?;

         if store.is_empty()? {
             generate_default_keys(&store)?;
         }

-        Ok(Self { store: Arc::new(store), master_key: master_key.clone() })
+        Ok(Self { store: Arc::new(store), master_key: master_key.clone(), cluster })
     }

     /// Return the size of the `AuthController` database in bytes.
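
For callers that are not part of a cluster, the migration is mechanical: pass `None` as the new third argument. A minimal sketch — the path and master-key literals are made up; the real call site, which passes `cluster.clone()`, follows in the next file:

use meilisearch_auth::AuthController;

fn main() {
    // single-node setup: not part of a cluster, so the new argument is `None`
    let auth = AuthController::new("./data.ms/auth", &Some("MASTER_KEY".to_string()), None)
        .expect("failed to open the auth store");
    let _ = auth;
}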

@@ -26,11 +26,11 @@ use actix_web::web::Data;
 use actix_web::{web, HttpRequest};
 use analytics::Analytics;
 use anyhow::bail;
-use cluster::{Follower, Leader};
+use cluster::{Cluster, Follower, Leader};
 use error::PayloadError;
 use extractors::payload::PayloadConfig;
 use http::header::CONTENT_TYPE;
-use index_scheduler::{Cluster, IndexScheduler, IndexSchedulerOptions};
+use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
 use log::{error, info};
 use meilisearch_auth::AuthController;
 use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
@@ -270,7 +270,7 @@ fn open_or_create_database_unchecked(
 ) -> anyhow::Result<(IndexScheduler, AuthController)> {
     // we don't want to create anything in the data.ms yet, thus we
     // wrap our two builders in a closure that'll be executed later.
-    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key);
+    let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, cluster.clone());
     let index_scheduler_builder = || -> anyhow::Result<_> {
         Ok(IndexScheduler::new(