MeiliSearch/meilisearch-http/src/index_controller/update_actor/actor.rs

280 lines
9.7 KiB
Rust
Raw Normal View History

2021-04-22 10:14:29 +02:00
use std::collections::HashSet;
use std::io::SeekFrom;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use futures::StreamExt;
use log::info;
use oxidized_json_checker::JsonChecker;
use tokio::fs;
use tokio::io::AsyncWriteExt;
use tokio::runtime::Handle;
use tokio::sync::mpsc;
use uuid::Uuid;

use super::{PayloadData, Result, UpdateError, UpdateMsg, UpdateStore, UpdateStoreInfo};
use crate::index_controller::index_actor::{IndexActorHandle, CONCURRENT_INDEX_MSG};
use crate::index_controller::{UpdateMeta, UpdateStatus};
2021-04-13 17:14:02 +02:00
/// Actor owning the update store and serving `UpdateMsg` requests.
///
/// `D` is the raw payload chunk type received over the payload channel;
/// `I` is the handle used to forward snapshot/dump work to the index actor.
pub struct UpdateActor<D, I> {
    // Root directory of the update store (`<db path>/updates`); update
    // payload files are written under `update_files/` inside it.
    path: PathBuf,
    // Shared handle to the on-disk update store (heed environment).
    store: Arc<UpdateStore>,
    // Channel on which this actor receives its messages.
    inbox: mpsc::Receiver<UpdateMsg<D>>,
    // Handle used to talk to the index actor (snapshot/dump of indexes).
    index_handle: I,
}
2021-04-13 17:14:02 +02:00
impl<D, I> UpdateActor<D, I>
2021-03-23 11:00:50 +01:00
where
D: AsRef<[u8]> + Sized + 'static,
2021-03-23 16:19:01 +01:00
I: IndexActorHandle + Clone + Send + Sync + 'static,
2021-03-23 11:00:50 +01:00
{
pub fn new(
2021-04-13 17:14:02 +02:00
update_db_size: usize,
2021-03-23 11:00:50 +01:00
inbox: mpsc::Receiver<UpdateMsg<D>>,
path: impl AsRef<Path>,
index_handle: I,
) -> anyhow::Result<Self> {
2021-04-22 10:14:29 +02:00
let path = path.as_ref().join("updates");
2021-04-13 17:14:02 +02:00
std::fs::create_dir_all(&path)?;
let mut options = heed::EnvOpenOptions::new();
options.map_size(update_db_size);
2021-04-22 10:14:29 +02:00
let store = UpdateStore::open(options, &path, index_handle.clone())?;
2021-03-23 11:00:50 +01:00
std::fs::create_dir_all(path.join("update_files"))?;
assert!(path.exists());
2021-04-27 17:51:12 +02:00
Ok(Self { path, store, inbox, index_handle })
2021-03-23 11:00:50 +01:00
}
pub async fn run(mut self) {
use UpdateMsg::*;
info!("Started update actor.");
loop {
match self.inbox.recv().await {
Some(Update {
uuid,
meta,
data,
ret,
}) => {
let _ = ret.send(self.handle_update(uuid, meta, data).await);
}
Some(ListUpdates { uuid, ret }) => {
let _ = ret.send(self.handle_list_updates(uuid).await);
}
Some(GetUpdate { uuid, ret, id }) => {
let _ = ret.send(self.handle_get_update(uuid, id).await);
}
Some(Delete { uuid, ret }) => {
let _ = ret.send(self.handle_delete(uuid).await);
}
2021-05-10 20:24:14 +02:00
Some(Snapshot { uuids, path, ret }) => {
let _ = ret.send(self.handle_snapshot(uuids, path).await);
2021-04-28 16:43:49 +02:00
}
2021-05-10 20:24:14 +02:00
Some(Dump { uuids, path, ret }) => {
let _ = ret.send(self.handle_dump(uuids, path).await);
2021-03-23 11:00:50 +01:00
}
2021-04-14 18:55:04 +02:00
Some(GetInfo { ret }) => {
let _ = ret.send(self.handle_get_info().await);
}
2021-03-23 11:00:50 +01:00
None => break,
}
}
}
async fn handle_update(
&self,
uuid: Uuid,
meta: UpdateMeta,
mut payload: mpsc::Receiver<PayloadData<D>>,
) -> Result<UpdateStatus> {
2021-04-22 10:14:29 +02:00
let file_path = match meta {
UpdateMeta::DocumentsAddition { .. }
| UpdateMeta::DeleteDocuments => {
let update_file_id = uuid::Uuid::new_v4();
let path = self
.path
.join(format!("update_files/update_{}", update_file_id));
let mut file = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?;
let mut file_len = 0;
while let Some(bytes) = payload.recv().await {
match bytes {
Ok(bytes) => {
file_len += bytes.as_ref().len();
file.write_all(bytes.as_ref())
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?;
}
Err(e) => {
return Err(UpdateError::Error(e));
}
}
}
if file_len != 0 {
file.flush()
2021-03-23 11:00:50 +01:00
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?;
2021-04-22 10:14:29 +02:00
let file = file.into_std().await;
Some((file, path))
} else {
// empty update, delete the empty file.
fs::remove_file(&path)
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?;
None
2021-03-23 11:00:50 +01:00
}
}
2021-04-22 10:14:29 +02:00
_ => None
};
2021-03-23 11:00:50 +01:00
2021-04-13 17:14:02 +02:00
let update_store = self.store.clone();
2021-03-23 11:00:50 +01:00
tokio::task::spawn_blocking(move || {
use std::io::{copy, sink, BufReader, Seek};
// If the payload is empty, ignore the check.
2021-04-22 10:14:29 +02:00
let path = if let Some((mut file, path)) = file_path {
// set the file back to the beginning
file.seek(SeekFrom::Start(0)).map_err(|e| UpdateError::Error(Box::new(e)))?;
2021-03-23 11:00:50 +01:00
// Check that the json payload is valid:
let reader = BufReader::new(&mut file);
let mut checker = JsonChecker::new(reader);
if copy(&mut checker, &mut sink()).is_err() || checker.finish().is_err() {
// The json file is invalid, we use Serde to get a nice error message:
file.seek(SeekFrom::Start(0))
.map_err(|e| UpdateError::Error(Box::new(e)))?;
let _: serde_json::Value = serde_json::from_reader(file)
.map_err(|e| UpdateError::Error(Box::new(e)))?;
}
2021-04-22 10:14:29 +02:00
Some(path)
} else {
None
};
2021-03-23 11:00:50 +01:00
// The payload is valid, we can register it to the update store.
update_store
.register_update(meta, path, uuid)
2021-04-07 19:46:36 +02:00
.map(UpdateStatus::Enqueued)
2021-03-23 11:00:50 +01:00
.map_err(|e| UpdateError::Error(Box::new(e)))
})
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?
}
async fn handle_list_updates(&self, uuid: Uuid) -> Result<Vec<UpdateStatus>> {
2021-04-13 17:14:02 +02:00
let update_store = self.store.clone();
2021-03-23 11:00:50 +01:00
tokio::task::spawn_blocking(move || {
let result = update_store
2021-04-13 17:14:02 +02:00
.list(uuid)
2021-03-23 11:00:50 +01:00
.map_err(|e| UpdateError::Error(e.into()))?;
Ok(result)
})
.await
.map_err(|e| UpdateError::Error(Box::new(e)))?
}
async fn handle_get_update(&self, uuid: Uuid, id: u64) -> Result<UpdateStatus> {
2021-04-13 17:14:02 +02:00
let store = self.store.clone();
2021-03-23 11:00:50 +01:00
let result = store
2021-04-13 17:14:02 +02:00
.meta(uuid, id)
2021-03-23 11:00:50 +01:00
.map_err(|e| UpdateError::Error(Box::new(e)))?
.ok_or(UpdateError::UnexistingUpdate(id))?;
Ok(result)
}
async fn handle_delete(&self, uuid: Uuid) -> Result<()> {
2021-05-10 20:24:14 +02:00
let store = self.store.clone();
2021-04-28 16:43:49 +02:00
2021-05-10 20:24:14 +02:00
tokio::task::spawn_blocking(move || store.delete_all(uuid))
.await
.map_err(|e| UpdateError::Error(e.into()))?
.map_err(|e| UpdateError::Error(e.into()))?;
2021-04-28 16:43:49 +02:00
Ok(())
}
2021-03-23 11:00:50 +01:00
2021-05-10 20:24:14 +02:00
async fn handle_snapshot(&self, uuids: HashSet<Uuid>, path: PathBuf) -> Result<()> {
let index_handle = self.index_handle.clone();
let update_store = self.store.clone();
tokio::task::spawn_blocking(move || -> anyhow::Result<()> {
update_store.snapshot(&uuids, &path)?;
2021-04-28 16:43:49 +02:00
2021-05-10 20:24:14 +02:00
// Perform the snapshot of each index concurently. Only a third of the capabilities of
// the index actor at a time not to put too much pressure on the index actor
let path = &path;
let handle = &index_handle;
2021-04-28 16:43:49 +02:00
2021-05-10 20:24:14 +02:00
let mut stream = futures::stream::iter(uuids.iter())
.map(|&uuid| handle.snapshot(uuid, path.clone()))
.buffer_unordered(CONCURRENT_INDEX_MSG / 3);
Handle::current().block_on(async {
while let Some(res) = stream.next().await {
res?;
}
2021-04-28 16:43:49 +02:00
Ok(())
})
2021-05-10 20:24:14 +02:00
})
.await
.map_err(|e| UpdateError::Error(e.into()))?
.map_err(|e| UpdateError::Error(e.into()))?;
2021-03-23 11:00:50 +01:00
Ok(())
}
2021-05-05 14:11:56 +02:00
async fn handle_dump(&self, uuids: HashSet<(String, Uuid)>, path: PathBuf) -> Result<()> {
2021-03-23 11:00:50 +01:00
let index_handle = self.index_handle.clone();
2021-04-13 17:14:02 +02:00
let update_store = self.store.clone();
tokio::task::spawn_blocking(move || -> anyhow::Result<()> {
2021-05-05 14:11:56 +02:00
update_store.dump(&uuids, path.to_path_buf())?;
2021-04-14 17:53:12 +02:00
2021-05-05 18:03:21 +02:00
// Perform the dump of each index concurently. Only a third of the capabilities of
2021-04-14 17:53:12 +02:00
// the index actor at a time not to put too much pressure on the index actor
let path = &path;
let handle = &index_handle;
2021-04-13 17:14:02 +02:00
2021-04-14 17:53:12 +02:00
let mut stream = futures::stream::iter(uuids.iter())
2021-05-05 14:11:56 +02:00
.map(|(uid, uuid)| handle.dump(uid.clone(), *uuid, path.clone()))
2021-04-14 17:53:12 +02:00
.buffer_unordered(CONCURRENT_INDEX_MSG / 3);
2021-04-22 10:14:29 +02:00
Handle::current().block_on(async {
2021-04-14 17:53:12 +02:00
while let Some(res) = stream.next().await {
res?;
}
Ok(())
})
2021-04-13 17:14:02 +02:00
})
.await
2021-04-14 17:53:12 +02:00
.map_err(|e| UpdateError::Error(e.into()))?
.map_err(|e| UpdateError::Error(e.into()))?;
2021-03-23 11:00:50 +01:00
Ok(())
}
2021-04-14 18:55:04 +02:00
async fn handle_get_info(&self) -> Result<UpdateStoreInfo> {
2021-04-14 17:53:12 +02:00
let update_store = self.store.clone();
2021-04-14 18:55:04 +02:00
let info = tokio::task::spawn_blocking(move || -> anyhow::Result<UpdateStoreInfo> {
2021-04-22 10:14:29 +02:00
let info = update_store.get_info()?;
2021-04-14 18:55:04 +02:00
Ok(info)
2021-04-14 17:53:12 +02:00
})
.await
.map_err(|e| UpdateError::Error(e.into()))?
.map_err(|e| UpdateError::Error(e.into()))?;
2021-04-14 18:55:04 +02:00
Ok(info)
}
2021-03-23 11:00:50 +01:00
}