2021-03-23 11:00:50 +01:00
|
|
|
use std::io::SeekFrom;
|
|
|
|
use std::path::{Path, PathBuf};
|
2021-04-13 17:14:02 +02:00
|
|
|
use std::sync::Arc;
|
2021-03-23 11:00:50 +01:00
|
|
|
|
|
|
|
use log::info;
|
|
|
|
use oxidized_json_checker::JsonChecker;
|
|
|
|
use tokio::fs;
|
|
|
|
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
|
2021-03-24 11:29:11 +01:00
|
|
|
use tokio::sync::mpsc;
|
|
|
|
use uuid::Uuid;
|
2021-04-14 17:53:12 +02:00
|
|
|
use futures::StreamExt;
|
2021-03-23 11:00:50 +01:00
|
|
|
|
2021-04-13 17:14:02 +02:00
|
|
|
use super::{PayloadData, Result, UpdateError, UpdateMsg, UpdateStore};
|
2021-04-14 17:53:12 +02:00
|
|
|
use crate::index_controller::index_actor::{IndexActorHandle, CONCURRENT_INDEX_MSG};
|
2021-04-13 17:14:02 +02:00
|
|
|
use crate::index_controller::{UpdateMeta, UpdateStatus};
|
2021-03-23 11:00:50 +01:00
|
|
|
|
2021-04-13 17:14:02 +02:00
|
|
|
/// Actor owning the on-disk update store and serving update-related requests
/// received over its inbox channel.
///
/// `D` is the raw payload chunk type; `I` is the handle used to talk to the
/// index actor.
pub struct UpdateActor<D, I> {
    // Root of this actor's data, i.e. the `updates` directory (payload files
    // live under `<path>/update_files`).
    path: PathBuf,
    // Shared handle to the update store (heed/LMDB environment).
    store: Arc<UpdateStore>,
    // Channel on which `UpdateMsg` requests arrive.
    inbox: mpsc::Receiver<UpdateMsg<D>>,
    // Handle used to forward updates and snapshot requests to the index actor.
    index_handle: I,
}
|
|
|
|
|
2021-04-13 17:14:02 +02:00
|
|
|
impl<D, I> UpdateActor<D, I>
|
2021-03-23 11:00:50 +01:00
|
|
|
where
|
|
|
|
D: AsRef<[u8]> + Sized + 'static,
|
2021-03-23 16:19:01 +01:00
|
|
|
I: IndexActorHandle + Clone + Send + Sync + 'static,
|
2021-03-23 11:00:50 +01:00
|
|
|
{
|
|
|
|
pub fn new(
|
2021-04-13 17:14:02 +02:00
|
|
|
update_db_size: usize,
|
2021-03-23 11:00:50 +01:00
|
|
|
inbox: mpsc::Receiver<UpdateMsg<D>>,
|
|
|
|
path: impl AsRef<Path>,
|
|
|
|
index_handle: I,
|
|
|
|
) -> anyhow::Result<Self> {
|
2021-04-14 17:53:12 +02:00
|
|
|
let path = path.as_ref().to_owned().join("updates");
|
2021-04-13 17:14:02 +02:00
|
|
|
|
|
|
|
std::fs::create_dir_all(&path)?;
|
|
|
|
|
|
|
|
let mut options = heed::EnvOpenOptions::new();
|
|
|
|
options.map_size(update_db_size);
|
|
|
|
|
|
|
|
let handle = index_handle.clone();
|
|
|
|
let store = UpdateStore::open(options, &path, move |uuid, meta, file| {
|
|
|
|
futures::executor::block_on(handle.update(uuid, meta, file))
|
|
|
|
})
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?;
|
2021-03-23 11:00:50 +01:00
|
|
|
std::fs::create_dir_all(path.join("update_files"))?;
|
|
|
|
assert!(path.exists());
|
|
|
|
Ok(Self {
|
|
|
|
store,
|
|
|
|
inbox,
|
|
|
|
path,
|
|
|
|
index_handle,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn run(mut self) {
|
|
|
|
use UpdateMsg::*;
|
|
|
|
|
|
|
|
info!("Started update actor.");
|
|
|
|
|
|
|
|
loop {
|
|
|
|
match self.inbox.recv().await {
|
|
|
|
Some(Update {
|
|
|
|
uuid,
|
|
|
|
meta,
|
|
|
|
data,
|
|
|
|
ret,
|
|
|
|
}) => {
|
|
|
|
let _ = ret.send(self.handle_update(uuid, meta, data).await);
|
|
|
|
}
|
|
|
|
Some(ListUpdates { uuid, ret }) => {
|
|
|
|
let _ = ret.send(self.handle_list_updates(uuid).await);
|
|
|
|
}
|
|
|
|
Some(GetUpdate { uuid, ret, id }) => {
|
|
|
|
let _ = ret.send(self.handle_get_update(uuid, id).await);
|
|
|
|
}
|
|
|
|
Some(Delete { uuid, ret }) => {
|
|
|
|
let _ = ret.send(self.handle_delete(uuid).await);
|
|
|
|
}
|
2021-04-14 17:53:12 +02:00
|
|
|
Some(Snapshot { uuids, path, ret }) => {
|
|
|
|
let _ = ret.send(self.handle_snapshot(uuids, path).await);
|
2021-03-23 11:00:50 +01:00
|
|
|
}
|
2021-04-14 17:53:12 +02:00
|
|
|
Some(GetSize { ret }) => {
|
|
|
|
let _ = ret.send(self.handle_get_size().await);
|
2021-04-09 14:41:24 +02:00
|
|
|
}
|
2021-03-23 11:00:50 +01:00
|
|
|
None => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn handle_update(
|
|
|
|
&self,
|
|
|
|
uuid: Uuid,
|
|
|
|
meta: UpdateMeta,
|
|
|
|
mut payload: mpsc::Receiver<PayloadData<D>>,
|
|
|
|
) -> Result<UpdateStatus> {
|
|
|
|
let update_file_id = uuid::Uuid::new_v4();
|
|
|
|
let path = self
|
|
|
|
.path
|
|
|
|
.join(format!("update_files/update_{}", update_file_id));
|
|
|
|
let mut file = fs::OpenOptions::new()
|
|
|
|
.read(true)
|
|
|
|
.write(true)
|
|
|
|
.create(true)
|
|
|
|
.open(&path)
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
|
|
|
|
while let Some(bytes) = payload.recv().await {
|
|
|
|
match bytes {
|
|
|
|
Ok(bytes) => {
|
|
|
|
file.write_all(bytes.as_ref())
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
}
|
|
|
|
Err(e) => {
|
|
|
|
return Err(UpdateError::Error(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
file.flush()
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
|
|
|
|
file.seek(SeekFrom::Start(0))
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
|
|
|
|
let mut file = file.into_std().await;
|
|
|
|
|
2021-04-13 17:14:02 +02:00
|
|
|
let update_store = self.store.clone();
|
|
|
|
|
2021-03-23 11:00:50 +01:00
|
|
|
tokio::task::spawn_blocking(move || {
|
|
|
|
use std::io::{copy, sink, BufReader, Seek};
|
|
|
|
|
|
|
|
// If the payload is empty, ignore the check.
|
|
|
|
if file
|
|
|
|
.metadata()
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?
|
|
|
|
.len()
|
|
|
|
> 0
|
|
|
|
{
|
|
|
|
// Check that the json payload is valid:
|
|
|
|
let reader = BufReader::new(&mut file);
|
|
|
|
let mut checker = JsonChecker::new(reader);
|
|
|
|
|
|
|
|
if copy(&mut checker, &mut sink()).is_err() || checker.finish().is_err() {
|
|
|
|
// The json file is invalid, we use Serde to get a nice error message:
|
|
|
|
file.seek(SeekFrom::Start(0))
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
let _: serde_json::Value = serde_json::from_reader(file)
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The payload is valid, we can register it to the update store.
|
|
|
|
update_store
|
|
|
|
.register_update(meta, path, uuid)
|
2021-04-07 19:46:36 +02:00
|
|
|
.map(UpdateStatus::Enqueued)
|
2021-03-23 11:00:50 +01:00
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))
|
|
|
|
})
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn handle_list_updates(&self, uuid: Uuid) -> Result<Vec<UpdateStatus>> {
|
2021-04-13 17:14:02 +02:00
|
|
|
let update_store = self.store.clone();
|
2021-03-23 11:00:50 +01:00
|
|
|
tokio::task::spawn_blocking(move || {
|
|
|
|
let result = update_store
|
2021-04-13 17:14:02 +02:00
|
|
|
.list(uuid)
|
2021-03-23 11:00:50 +01:00
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?;
|
|
|
|
Ok(result)
|
|
|
|
})
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn handle_get_update(&self, uuid: Uuid, id: u64) -> Result<UpdateStatus> {
|
2021-04-13 17:14:02 +02:00
|
|
|
let store = self.store.clone();
|
2021-03-23 11:00:50 +01:00
|
|
|
let result = store
|
2021-04-13 17:14:02 +02:00
|
|
|
.meta(uuid, id)
|
2021-03-23 11:00:50 +01:00
|
|
|
.map_err(|e| UpdateError::Error(Box::new(e)))?
|
|
|
|
.ok_or(UpdateError::UnexistingUpdate(id))?;
|
|
|
|
Ok(result)
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn handle_delete(&self, uuid: Uuid) -> Result<()> {
|
2021-04-13 17:14:02 +02:00
|
|
|
let store = self.store.clone();
|
2021-03-23 11:00:50 +01:00
|
|
|
|
2021-04-13 17:14:02 +02:00
|
|
|
tokio::task::spawn_blocking(move || store.delete_all(uuid))
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?;
|
2021-03-23 11:00:50 +01:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-04-14 17:53:12 +02:00
|
|
|
async fn handle_snapshot(&self, uuids: Vec<Uuid>, path: PathBuf) -> Result<()> {
|
2021-03-23 11:00:50 +01:00
|
|
|
let index_handle = self.index_handle.clone();
|
2021-04-13 17:14:02 +02:00
|
|
|
let update_store = self.store.clone();
|
|
|
|
tokio::task::spawn_blocking(move || -> anyhow::Result<()> {
|
|
|
|
// acquire write lock to prevent further writes during snapshot
|
|
|
|
// the update lock must be acquired BEFORE the write lock to prevent dead lock
|
|
|
|
let _lock = update_store.update_lock.lock();
|
|
|
|
let mut txn = update_store.env.write_txn()?;
|
|
|
|
|
|
|
|
// create db snapshot
|
2021-04-14 17:53:12 +02:00
|
|
|
update_store.snapshot(&mut txn, &path)?;
|
|
|
|
|
|
|
|
// Perform the snapshot of each index concurently. Only a third of the capabilities of
|
|
|
|
// the index actor at a time not to put too much pressure on the index actor
|
|
|
|
let path = &path;
|
|
|
|
let handle = &index_handle;
|
2021-04-13 17:14:02 +02:00
|
|
|
|
2021-04-14 17:53:12 +02:00
|
|
|
let mut stream = futures::stream::iter(uuids.iter())
|
|
|
|
.map(|&uuid| handle.snapshot(uuid, path.clone()))
|
|
|
|
.buffer_unordered(CONCURRENT_INDEX_MSG / 3);
|
|
|
|
|
|
|
|
futures::executor::block_on(async {
|
|
|
|
while let Some(res) = stream.next().await {
|
|
|
|
res?;
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
})
|
2021-04-13 17:14:02 +02:00
|
|
|
})
|
|
|
|
.await
|
2021-04-14 17:53:12 +02:00
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?;
|
2021-03-23 11:00:50 +01:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
2021-04-09 14:41:24 +02:00
|
|
|
|
2021-04-14 17:53:12 +02:00
|
|
|
async fn handle_get_size(&self) -> Result<u64> {
|
|
|
|
let update_store = self.store.clone();
|
|
|
|
let size = tokio::task::spawn_blocking(move || -> anyhow::Result<u64> {
|
|
|
|
let txn = update_store.env.read_txn()?;
|
2021-04-09 14:41:24 +02:00
|
|
|
|
2021-04-14 17:53:12 +02:00
|
|
|
update_store.get_size(&txn)
|
|
|
|
})
|
|
|
|
.await
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?
|
|
|
|
.map_err(|e| UpdateError::Error(e.into()))?;
|
2021-04-09 14:41:24 +02:00
|
|
|
|
|
|
|
Ok(size)
|
|
|
|
}
|
2021-03-23 11:00:50 +01:00
|
|
|
}
|