Mirror of https://github.com/meilisearch/MeiliSearch, synced 2025-07-04 20:37:15 +02:00
refactor index actor
parent 12542bf922
commit 5353be74c3
18 changed files with 590 additions and 596 deletions
@@ -1,8 +1,9 @@
+use std::fmt;
 use std::error::Error;
 
 use meilisearch_error::{Code, ErrorCode};
 
-use crate::index_controller::index_actor::error::IndexActorError;
+use crate::index_controller::indexes::error::IndexActorError;
 
 pub type Result<T> = std::result::Result<T, UpdateActorError>;

@@ -25,15 +26,17 @@ pub enum UpdateActorError {
     PayloadError(#[from] actix_web::error::PayloadError),
 }
 
-impl<T> From<tokio::sync::mpsc::error::SendError<T>> for UpdateActorError {
-    fn from(_: tokio::sync::mpsc::error::SendError<T>) -> Self {
-        Self::FatalUpdateStoreError
+impl<T> From<tokio::sync::mpsc::error::SendError<T>> for UpdateActorError
+where T: Sync + Send + 'static + fmt::Debug
+{
+    fn from(other: tokio::sync::mpsc::error::SendError<T>) -> Self {
+        Self::Internal(Box::new(other))
     }
 }
 
 impl From<tokio::sync::oneshot::error::RecvError> for UpdateActorError {
-    fn from(_: tokio::sync::oneshot::error::RecvError) -> Self {
-        Self::FatalUpdateStoreError
+    fn from(other: tokio::sync::oneshot::error::RecvError) -> Self {
+        Self::Internal(Box::new(other))
     }
 }
 
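Note on the change above: channel failures are no longer collapsed into FatalUpdateStoreError; the original send/receive error is boxed into an Internal variant instead. A minimal standalone sketch of the pattern, with simplified names and an assumed shape for the Internal variant (not the actual MeiliSearch definitions):

use std::error::Error;
use std::fmt;

#[derive(Debug)]
enum ActorError {
    // assumed shape: a catch-all variant that keeps the original error value
    Internal(Box<dyn Error + Send + Sync + 'static>),
}

impl<T> From<tokio::sync::mpsc::error::SendError<T>> for ActorError
where
    T: Sync + Send + 'static + fmt::Debug,
{
    fn from(other: tokio::sync::mpsc::error::SendError<T>) -> Self {
        // SendError<T> implements std::error::Error for Debug payloads,
        // so it can be boxed and preserved instead of being discarded
        Self::Internal(Box::new(other))
    }
}

async fn forward(tx: tokio::sync::mpsc::Sender<u32>, msg: u32) -> Result<(), ActorError> {
    tx.send(msg).await?; // a closed channel now surfaces as ActorError::Internal
    Ok(())
}
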
@@ -7,6 +7,7 @@ use uuid::Uuid;
 use super::error::Result;
+use super::{Update, UpdateStatus, UpdateStoreInfo};
 
 #[derive(Debug)]
 pub enum UpdateMsg {
     Update {
         uuid: Uuid,

@@ -27,19 +27,19 @@ use self::store::{UpdateStore, UpdateStoreInfo};
 use crate::index_controller::update_file_store::UpdateFileStore;
 use status::UpdateStatus;
 
+use super::indexes::IndexHandlerSender;
 use super::{DocumentAdditionFormat, Payload, Update};
 
 pub type UpdateSender = mpsc::Sender<UpdateMsg>;
-type IndexSender = mpsc::Sender<()>;
 
 pub fn create_update_handler(
-    index_sender: IndexSender,
+    index_sender: IndexHandlerSender,
     db_path: impl AsRef<Path>,
     update_store_size: usize,
 ) -> anyhow::Result<UpdateSender> {
     let path = db_path.as_ref().to_owned();
     let (sender, receiver) = mpsc::channel(100);
-    let actor = UpdateHandler::new(update_store_size, receiver, path, index_sender)?;
+    let actor = UpdateLoop::new(update_store_size, receiver, path, index_sender)?;
 
     tokio::task::spawn_local(actor.run());
 
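Two assumptions worth spelling out about the setup above: IndexHandlerSender, which replaces the placeholder type IndexSender = mpsc::Sender<()>, is presumably an mpsc sender carrying IndexMsg values (that is how it is used further down via IndexMsg::dump and IndexMsg::snapshot), and because the update loop is started with tokio::task::spawn_local, create_update_handler must be called from within a LocalSet. A hypothetical caller, not taken from the repository, would look roughly like this:

use tokio::task::LocalSet;

fn main() -> anyhow::Result<()> {
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;
    let local = LocalSet::new();
    // spawn_local panics outside a LocalSet, so the handler is created inside one
    local.block_on(&rt, async {
        // create_update_handler(index_sender, db_path, update_store_size) would be
        // called here, and the returned UpdateSender cloned into the HTTP handlers
    });
    Ok(())
}
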
@@ -96,20 +96,20 @@ impl<S: Stream<Item = std::result::Result<Bytes, PayloadError>> + Unpin> io::Rea
     }
 }
 
-pub struct UpdateHandler {
+pub struct UpdateLoop {
     store: Arc<UpdateStore>,
     inbox: Option<mpsc::Receiver<UpdateMsg>>,
     update_file_store: UpdateFileStore,
-    index_handle: IndexSender,
+    index_handle: IndexHandlerSender,
     must_exit: Arc<AtomicBool>,
 }
 
-impl UpdateHandler {
+impl UpdateLoop {
     pub fn new(
         update_db_size: usize,
         inbox: mpsc::Receiver<UpdateMsg>,
         path: impl AsRef<Path>,
-        index_handle: IndexSender,
+        index_handle: IndexHandlerSender,
     ) -> anyhow::Result<Self> {
         let path = path.as_ref().to_owned();
         std::fs::create_dir_all(&path)?;

@@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};
 use uuid::Uuid;
 
 use super::{Result, State, UpdateStore};
-use crate::index_controller::{updates::{IndexSender, status::UpdateStatus}};
+use crate::index_controller::{indexes::{IndexHandlerSender, IndexMsg}, updates::{status::UpdateStatus}};
 
 #[derive(Serialize, Deserialize)]
 struct UpdateEntry {

@@ -23,7 +23,7 @@ impl UpdateStore {
         &self,
         uuids: &HashSet<Uuid>,
         path: PathBuf,
-        handle: IndexSender,
+        handle: IndexHandlerSender,
     ) -> Result<()> {
         let state_lock = self.state.write();
         state_lock.swap(State::Dumping);

@@ -172,12 +172,11 @@ impl UpdateStore {
 
     async fn dump_indexes(
         uuids: &HashSet<Uuid>,
-        handle: IndexSender,
+        handle: IndexHandlerSender,
         path: impl AsRef<Path>,
     ) -> Result<()> {
         for uuid in uuids {
-            //handle.dump(*uuid, path.as_ref().to_owned()).await?;
-            todo!()
+            IndexMsg::dump(&handle, *uuid, path.as_ref().to_owned()).await?;
         }
 
         Ok(())

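The IndexMsg::dump call above replaces the commented-out handle.dump(...) and the todo!(). Its implementation is not part of this diff, but the pieces that are visible (an IndexHandlerSender handle plus the oneshot RecvError conversion in the error module) point at the usual tokio request/reply actor shape. A sketch with simplified, hypothetical names:

use std::path::PathBuf;
use tokio::sync::{mpsc, oneshot};
use uuid::Uuid;

// hypothetical message type mirroring what an IndexMsg::Dump variant might carry
enum Msg {
    Dump {
        uuid: Uuid,
        path: PathBuf,
        ret: oneshot::Sender<Result<(), String>>,
    },
}

// hypothetical helper mirroring the assumed IndexMsg::dump signature
async fn dump(handle: &mpsc::Sender<Msg>, uuid: Uuid, path: PathBuf) -> Result<(), String> {
    let (ret, recv) = oneshot::channel();
    handle
        .send(Msg::Dump { uuid, path, ret })
        .await
        .map_err(|_| "index handler stopped".to_string())?;
    recv.await.map_err(|_| "reply channel dropped".to_string())?
}
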
@@ -30,14 +30,16 @@ use super::RegisterUpdate;
 use super::error::Result;
 use super::status::{Enqueued, Processing};
 use crate::EnvSizer;
+use crate::index_controller::indexes::{CONCURRENT_INDEX_MSG, IndexHandlerSender, IndexMsg};
 use crate::index_controller::update_files_path;
-use crate::index_controller::{index_actor::CONCURRENT_INDEX_MSG, updates::*};
+use crate::index_controller::updates::*;
 
 #[allow(clippy::upper_case_acronyms)]
 type BEU64 = U64<heed::byteorder::BE>;
 
 const UPDATE_DIR: &str = "update_files";
 
 #[derive(Debug)]
 pub struct UpdateStoreInfo {
     /// Size of the update store in bytes.
     pub size: u64,

@@ -146,7 +148,7 @@ impl UpdateStore {
     pub fn open(
         options: EnvOpenOptions,
         path: impl AsRef<Path>,
-        index_handle: IndexSender,
+        index_handle: IndexHandlerSender,
         must_exit: Arc<AtomicBool>,
     ) -> anyhow::Result<Arc<Self>> {
         let (update_store, mut notification_receiver) = Self::new(options, path)?;

@@ -284,7 +286,7 @@ impl UpdateStore {
     /// Executes the user provided function on the next pending update (the one with the lowest id).
     /// This is asynchronous as it let the user process the update with a read-only txn and
    /// only writing the result meta to the processed-meta store *after* it has been processed.
-    fn process_pending_update(&self, index_handle: IndexSender) -> Result<Option<()>> {
+    fn process_pending_update(&self, index_handle: IndexHandlerSender) -> Result<Option<()>> {
         // Create a read transaction to be able to retrieve the pending update in order.
         let rtxn = self.env.read_txn()?;
         let first_meta = self.pending_queue.first(&rtxn)?;

@@ -314,7 +316,7 @@ impl UpdateStore {
     fn perform_update(
         &self,
         processing: Processing,
-        index_handle: IndexSender,
+        index_handle: IndexHandlerSender,
         index_uuid: Uuid,
         global_id: u64,
     ) -> Result<Option<()>> {

@@ -322,7 +324,7 @@ impl UpdateStore {
         let handle = Handle::current();
         let update_id = processing.id();
         let result =
-            match handle.block_on(/*index_handle.update(index_uuid, processing.clone())*/ todo!()) {
+            match handle.block_on(IndexMsg::update(&index_handle, index_uuid, processing.clone())) {
                 Ok(result) => result,
                 Err(e) => Err(processing.fail(e)),
             };

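A remark on the block_on call above (standard tokio behavior, not something introduced by this commit): perform_update is a synchronous function, so the async IndexMsg::update request is driven to completion through the runtime handle captured just before it. This only works when the calling thread is not itself a runtime worker, e.g. a dedicated update thread:

use tokio::runtime::Handle;

// calling async code from a sync function via the runtime handle;
// panics if invoked from inside an asynchronous execution context
fn call_from_sync(handle: &Handle) -> u64 {
    handle.block_on(async { 21 * 2 })
}
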
@@ -484,7 +486,7 @@ impl UpdateStore {
         &self,
         uuids: &HashSet<Uuid>,
         path: impl AsRef<Path>,
-        handle: IndexSender,
+        handle: IndexHandlerSender,
     ) -> Result<()> {
         let state_lock = self.state.write();
         state_lock.swap(State::Snapshoting);

@@ -525,7 +527,7 @@ impl UpdateStore {
         // Perform the snapshot of each index concurently. Only a third of the capabilities of
         // the index actor at a time not to put too much pressure on the index actor
         let mut stream = futures::stream::iter(uuids.iter())
-            .map(move |uuid| todo!() /*handle.snapshot(*uuid, path.clone())*/)
+            .map(move |uuid| IndexMsg::snapshot(handle,*uuid, path.clone()))
             .buffer_unordered(CONCURRENT_INDEX_MSG / 3);
 
         Handle::current().block_on(async {

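For reference, buffer_unordered(CONCURRENT_INDEX_MSG / 3) is what enforces the "only a third of the capabilities" comment above: at most that many snapshot requests are kept in flight at once. A standalone illustration of the combinator (plain futures code, unrelated to the MeiliSearch types):

use futures::stream::{self, StreamExt};

async fn demo() -> Vec<u32> {
    stream::iter(0..10u32)
        .map(|i| async move { i * 2 })
        // at most 3 of the mapped futures run concurrently;
        // results are yielded in completion order, not input order
        .buffer_unordered(3)
        .collect()
        .await
}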