WIP moving to the sync zookeeper API

Kerollmops 2023-08-29 18:17:21 +02:00 committed by Clément Renault
parent 854745c670
commit 0c7d7c68bc
17 changed files with 653 additions and 750 deletions
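
The route-level impact below is uniform: IndexScheduler::register and the AuthController methods stop being async, so every handler drops an intermediate .await and simply propagates the Result. A condensed before/after sketch, built around the IndexCreation task that appears further down; the import paths and the Data&lt;IndexScheduler&gt; extractor are approximated from the project layout and should be read as a sketch rather than the exact handler code.

    use actix_web::web::Data;
    use actix_web::HttpResponse;
    use index_scheduler::IndexScheduler;
    use meilisearch_types::error::ResponseError;
    use meilisearch_types::tasks::KindWithContent;

    async fn create_index_sketch(
        index_scheduler: Data<IndexScheduler>,
    ) -> Result<HttpResponse, ResponseError> {
        let task = KindWithContent::IndexCreation {
            index_uid: "movies".to_string(),
            primary_key: None,
        };
        // Before this commit the scheduler was driven through the async
        // zookeeper_client and the call had to be awaited:
        //     let task = index_scheduler.register(task).await?;
        // With the sync zookeeper API it is a plain blocking call:
        let _task = index_scheduler.register(task)?;
        // The real handlers turn the returned task into a SummarizedTaskView
        // and serialize it; that part is untouched by this commit.
        Ok(HttpResponse::Accepted().finish())
    }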

@@ -39,7 +39,7 @@ use meilisearch_types::versioning::{check_version_file, create_version_file};
use meilisearch_types::{compression, milli, VERSION_FILE_NAME};
pub use option::Opt;
use option::ScheduleSnapshot;
use zookeeper_client as zk;
use zookeeper::ZooKeeper;
use crate::error::MeilisearchHttpError;
@@ -139,7 +139,7 @@ enum OnFailure {
pub async fn setup_meilisearch(
opt: &Opt,
zk: Option<zk::Client>,
zookeeper: Option<Arc<ZooKeeper>>,
) -> anyhow::Result<(Arc<IndexScheduler>, Arc<AuthController>)> {
let empty_db = is_empty_db(&opt.db_path);
let (index_scheduler, auth_controller) = if let Some(ref snapshot_path) = opt.import_snapshot {
@@ -147,7 +147,7 @@ pub async fn setup_meilisearch(
// the db is empty and the snapshot exists, import it
if empty_db && snapshot_path_exists {
match compression::from_tar_gz(snapshot_path, &opt.db_path) {
Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk).await?,
Ok(()) => open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zookeeper)?,
Err(e) => {
std::fs::remove_dir_all(&opt.db_path)?;
return Err(e);
@@ -164,14 +164,14 @@ pub async fn setup_meilisearch(
bail!("snapshot doesn't exist at {}", snapshot_path.display())
// the snapshot and the db exist, and we can ignore the snapshot because of the ignore_snapshot_if_db_exists flag
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db, zookeeper)?
}
} else if let Some(ref path) = opt.import_dump {
let src_path_exists = path.exists();
// the db is empty and the dump exists, import it
if empty_db && src_path_exists {
let (mut index_scheduler, mut auth_controller) =
open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zk).await?;
open_or_create_database_unchecked(opt, OnFailure::RemoveDb, zookeeper)?;
match import_dump(&opt.db_path, path, &mut index_scheduler, &mut auth_controller) {
Ok(()) => (index_scheduler, auth_controller),
Err(e) => {
@@ -191,10 +191,10 @@ pub async fn setup_meilisearch(
// the dump and the db exist and we can ignore the dump because of the ignore_dump_if_db_exists flag
// or, the dump is missing but we can ignore that because of the ignore_missing_dump flag
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db, zookeeper)?
}
} else {
open_or_create_database(opt, empty_db, zk).await?
open_or_create_database(opt, empty_db, zookeeper)?
};
// We create a loop in a thread that registers snapshotCreation tasks
@@ -203,30 +203,26 @@ pub async fn setup_meilisearch(
if let ScheduleSnapshot::Enabled(snapshot_delay) = opt.schedule_snapshot {
let snapshot_delay = Duration::from_secs(snapshot_delay);
let index_scheduler = index_scheduler.clone();
tokio::task::spawn(async move {
loop {
thread::sleep(snapshot_delay);
if let Err(e) = index_scheduler.register(KindWithContent::SnapshotCreation).await {
error!("Error while registering snapshot: {}", e);
}
thread::spawn(move || loop {
thread::sleep(snapshot_delay);
if let Err(e) = index_scheduler.register(KindWithContent::SnapshotCreation) {
error!("Error while registering snapshot: {}", e);
}
})
.await
.unwrap();
});
}
Ok((index_scheduler, auth_controller))
}
/// Try to start the IndexScheduler and AuthController without checking the VERSION file or anything.
async fn open_or_create_database_unchecked(
fn open_or_create_database_unchecked(
opt: &Opt,
on_failure: OnFailure,
zk: Option<zk::Client>,
zookeeper: Option<Arc<ZooKeeper>>,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
// we don't want to create anything in the data.ms yet, thus we
// wrap our two builders in a closure that'll be executed later.
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zk.clone());
let auth_controller = AuthController::new(&opt.db_path, &opt.master_key, zookeeper.clone());
let instance_features = opt.to_instance_features();
let index_scheduler = IndexScheduler::new(IndexSchedulerOptions {
version_file_path: opt.db_path.join(VERSION_FILE_NAME),
@@ -245,14 +241,13 @@ async fn open_or_create_database_unchecked(
index_growth_amount: byte_unit::Byte::from_str("10GiB").unwrap().get_bytes() as usize,
index_count: DEFAULT_INDEX_COUNT,
instance_features,
zk: zk.clone(),
zookeeper: zookeeper.clone(),
})
.await
.map_err(anyhow::Error::from);
match (
index_scheduler,
auth_controller.await.map_err(anyhow::Error::from),
auth_controller.map_err(anyhow::Error::from),
create_version_file(&opt.db_path).map_err(anyhow::Error::from),
) {
(Ok(i), Ok(a), Ok(())) => Ok((i, a)),
@@ -266,16 +261,16 @@ async fn open_or_create_database_unchecked(
}
/// Ensure you're in a valid state and open the IndexScheduler + AuthController for you.
async fn open_or_create_database(
fn open_or_create_database(
opt: &Opt,
empty_db: bool,
zk: Option<zk::Client>,
zookeeper: Option<Arc<ZooKeeper>>,
) -> anyhow::Result<(IndexScheduler, AuthController)> {
if !empty_db {
check_version_file(&opt.db_path)?;
}
open_or_create_database_unchecked(opt, OnFailure::KeepDb, zk).await
open_or_create_database_unchecked(opt, OnFailure::KeepDb, zookeeper)
}
fn import_dump(
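
With register now synchronous, the snapshot scheduling loop above no longer needs a tokio task: a plain OS thread that sleeps and registers is enough. A self-contained sketch of that pattern, where register_snapshot is a hypothetical stand-in for index_scheduler.register(KindWithContent::SnapshotCreation):

    use std::thread;
    use std::time::Duration;

    // Hypothetical stand-in for the now-blocking scheduler call.
    fn register_snapshot() -> Result<(), String> {
        Ok(())
    }

    fn main() {
        let snapshot_delay = Duration::from_secs(3600);
        // No async runtime involved: the loop blocks its own dedicated thread,
        // like the thread::spawn block in setup_meilisearch above.
        thread::spawn(move || loop {
            thread::sleep(snapshot_delay);
            if let Err(e) = register_snapshot() {
                eprintln!("Error while registering snapshot: {}", e);
            }
        });
        // In Meilisearch the HTTP server keeps the process alive; here we just
        // park the main thread briefly so the example runs as a binary.
        thread::sleep(Duration::from_secs(1));
    }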

@@ -2,6 +2,7 @@ use std::env;
use std::io::{stderr, Write};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use actix_web::http::KeepAlive;
use actix_web::web::Data;
@@ -12,7 +13,7 @@ use meilisearch::analytics::Analytics;
use meilisearch::{analytics, create_app, prototype_name, setup_meilisearch, Opt};
use meilisearch_auth::{generate_master_key, AuthController, MASTER_KEY_MIN_SIZE};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use zookeeper_client as zk;
use zookeeper::ZooKeeper;
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
@@ -64,11 +65,10 @@ async fn main() -> anyhow::Result<()> {
_ => (),
}
let zk = match opt.zk_url {
Some(ref url) => Some(zk::Client::connect(url).await.unwrap()),
None => None,
};
let (index_scheduler, auth_controller) = setup_meilisearch(&opt, zk).await?;
let timeout = Duration::from_millis(2500);
let zookeeper =
opt.zk_url.as_ref().map(|url| Arc::new(ZooKeeper::connect(url, timeout, drop).unwrap()));
let (index_scheduler, auth_controller) = setup_meilisearch(&opt, zookeeper).await?;
#[cfg(all(not(debug_assertions), feature = "analytics"))]
let analytics = if !opt.no_analytics {
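
In main.rs the async zookeeper_client connection is replaced by the synchronous zookeeper crate, and the handle is shared as Arc&lt;ZooKeeper&gt;. A minimal sketch of that connection step, assuming the crate's ZooKeeper::connect(connect_string, timeout, watcher) signature and its Watcher implementation for plain closures (the diff passes drop as the watcher; a logging closure is used here instead):

    use std::sync::Arc;
    use std::time::Duration;
    use zookeeper::{WatchedEvent, ZooKeeper};

    fn connect_zookeeper(url: &str) -> Arc<ZooKeeper> {
        // Same session timeout as in the diff above.
        let timeout = Duration::from_millis(2500);
        // The watcher receives session and node events; drop (as in main.rs)
        // throws them away, this closure logs them instead.
        let zk = ZooKeeper::connect(url, timeout, |event: WatchedEvent| {
            eprintln!("zookeeper event: {:?}", event);
        })
        .expect("failed to establish the ZooKeeper session");
        Arc::new(zk)
    }

Used as opt.zk_url.as_ref().map(|url| connect_zookeeper(url)), this yields the Option&lt;Arc&lt;ZooKeeper&gt;&gt; that setup_meilisearch now expects.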

@@ -41,7 +41,7 @@ pub async fn create_api_key(
_req: HttpRequest,
) -> Result<HttpResponse, ResponseError> {
let v = body.into_inner();
let key = auth_controller.create_key(v).await?;
let key = auth_controller.create_key(v)?;
let key = KeyView::from_key(key, &auth_controller);
Ok(HttpResponse::Created().json(key))
@@ -107,7 +107,7 @@ pub async fn patch_api_key(
let key = path.into_inner().key;
let patch_api_key = body.into_inner();
let uid = Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
let key = auth_controller.update_key(uid, patch_api_key).await?;
let key = auth_controller.update_key(uid, patch_api_key)?;
let key = KeyView::from_key(key, &auth_controller);
Ok(HttpResponse::Ok().json(key))
@@ -119,7 +119,7 @@ pub async fn delete_api_key(
) -> Result<HttpResponse, ResponseError> {
let key = path.into_inner().key;
let uid = Uuid::parse_str(&key).or_else(|_| auth_controller.get_uid_from_encoded_key(&key))?;
auth_controller.delete_key(uid).await?;
auth_controller.delete_key(uid)?;
Ok(HttpResponse::NoContent().finish())
}
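
One consequence of the change in these handlers: create_key, update_key and delete_key are now blocking calls executed directly on the async actix-web workers (the commit calls them inline, as shown above). If such a call ever became slow, actix-web's web::block can push it onto the dedicated blocking thread pool instead; the sketch below shows that alternative pattern, with expensive_sync_call as a hypothetical stand-in for a controller method.

    use actix_web::web;

    // Hypothetical stand-in for a now-synchronous controller method.
    fn expensive_sync_call() -> Result<String, String> {
        Ok("created".to_string())
    }

    async fn offload_example() -> Result<String, String> {
        // web::block runs the closure on actix-web's blocking thread pool and
        // returns a future; the outer error covers cancellation or a panic.
        web::block(expensive_sync_call)
            .await
            .map_err(|e| format!("blocking task failed: {e}"))?
    }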

@@ -29,7 +29,7 @@ pub async fn create_dump(
keys: auth_controller.list_keys()?,
instance_uid: analytics.instance_uid().cloned(),
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))

@@ -129,7 +129,7 @@ pub async fn delete_document(
index_uid: index_uid.to_string(),
documents_ids: vec![document_id],
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
}
@@ -444,7 +444,7 @@ async fn document_addition(
};
let scheduler = index_scheduler.clone();
let task = match scheduler.register(task).await {
let task = match scheduler.register(task) {
Ok(task) => task,
Err(e) => {
index_scheduler.delete_update_file(uuid)?;
@@ -475,7 +475,7 @@ pub async fn delete_documents_batch(
let task =
KindWithContent::DocumentDeletion { index_uid: index_uid.to_string(), documents_ids: ids };
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -510,7 +510,7 @@ pub async fn delete_documents_by_filter(
.map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentFilter))?;
let task = KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr: filter };
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -526,7 +526,7 @@ pub async fn clear_all_documents(
analytics.delete_documents(DocumentDeletionKind::ClearAll, &req);
let task = KindWithContent::DocumentClear { index_uid: index_uid.to_string() };
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))

@@ -135,7 +135,7 @@ pub async fn create_index(
);
let task = KindWithContent::IndexCreation { index_uid: uid.to_string(), primary_key };
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
Ok(HttpResponse::Accepted().json(task))
} else {
@@ -202,7 +202,7 @@ pub async fn update_index(
primary_key: body.primary_key,
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -214,7 +214,7 @@ pub async fn delete_index(
) -> Result<HttpResponse, ResponseError> {
let index_uid = IndexUid::try_from(index_uid.into_inner())?;
let task = KindWithContent::IndexDeletion { index_uid: index_uid.into_inner() };
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
Ok(HttpResponse::Accepted().json(task))
}

@@ -55,7 +55,7 @@ macro_rules! make_setting_route {
is_deletion: true,
allow_index_creation,
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -94,7 +94,7 @@ macro_rules! make_setting_route {
is_deletion: false,
allow_index_creation,
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -580,7 +580,7 @@ pub async fn update_all(
is_deletion: false,
allow_index_creation,
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))
@@ -615,7 +615,7 @@ pub async fn delete_all(
is_deletion: true,
allow_index_creation,
};
let task: SummarizedTaskView = index_scheduler.register(task).await?.into();
let task: SummarizedTaskView = index_scheduler.register(task)?.into();
debug!("returns: {:?}", task);
Ok(HttpResponse::Accepted().json(task))

@@ -61,7 +61,7 @@ pub async fn swap_indexes(
let task = KindWithContent::IndexSwap { swaps };
let task = index_scheduler.register(task).await?;
let task = index_scheduler.register(task)?;
let task: SummarizedTaskView = task.into();
Ok(HttpResponse::Accepted().json(task))
}

@@ -332,7 +332,7 @@ async fn cancel_tasks(
let task_cancelation =
KindWithContent::TaskCancelation { query: format!("?{}", req.query_string()), tasks };
let task = index_scheduler.register(task_cancelation).await?;
let task = index_scheduler.register(task_cancelation)?;
let task: SummarizedTaskView = task.into();
Ok(HttpResponse::Ok().json(task))
@@ -377,7 +377,7 @@ async fn delete_tasks(
let task_deletion =
KindWithContent::TaskDeletion { query: format!("?{}", req.query_string()), tasks };
let task = index_scheduler.register(task_deletion).await?;
let task = index_scheduler.register(task_deletion)?;
let task: SummarizedTaskView = task.into();
Ok(HttpResponse::Ok().json(task))