Mirror of https://github.com/meilisearch/MeiliSearch
Remove useless routes and checks
This commit is contained in:
parent 23a89732a5
commit e854d67a55
@@ -1,6 +1,5 @@
 use std::collections::HashMap;
 use std::ops::Deref;
-use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 
 use chrono::{DateTime, Utc};
@@ -35,7 +34,6 @@ pub struct DataInner {
     pub db_path: String,
     pub admin_token: Option<String>,
     pub server_pid: Pid,
-    pub accept_updates: Arc<AtomicBool>,
 }
 
 impl DataInner {
@@ -70,25 +68,6 @@ impl DataInner {
             .map_err(Into::into)
     }
-
-    pub fn last_backup(&self, reader: &heed::RoTxn) -> MResult<Option<DateTime<Utc>>> {
-        match self
-            .db
-            .common_store()
-            .get::<Str, SerdeDatetime>(&reader, "last-backup")?
-        {
-            Some(datetime) => Ok(Some(datetime)),
-            None => Ok(None),
-        }
-    }
-
-    pub fn set_last_backup(&self, writer: &mut heed::RwTxn) -> MResult<()> {
-        self.db
-            .common_store()
-            .put::<Str, SerdeDatetime>(writer, "last-backup", &Utc::now())?;
-
-        Ok(())
-    }
 
     pub fn fields_frequency(
         &self,
         reader: &heed::RoTxn,
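For context, the two methods deleted in the hunk above only read and wrote a "last-backup" timestamp under the "last-backup" key of the database's common store. Below is a minimal standalone sketch of that behaviour, assuming the chrono crate and using a plain HashMap in place of the heed store and its transactions; the CommonStore name is illustrative, not from the codebase.

use std::collections::HashMap;

use chrono::{DateTime, Utc};

// A plain HashMap stands in for the heed common store; the real methods
// persisted the timestamp inside a read/write transaction.
struct CommonStore {
    entries: HashMap<String, DateTime<Utc>>,
}

impl CommonStore {
    // Counterpart of the removed last_backup: read the stored timestamp, if any.
    fn last_backup(&self) -> Option<DateTime<Utc>> {
        self.entries.get("last-backup").cloned()
    }

    // Counterpart of the removed set_last_backup: record the current time.
    fn set_last_backup(&mut self) {
        self.entries.insert("last-backup".to_string(), Utc::now());
    }
}

fn main() {
    let mut store = CommonStore { entries: HashMap::new() };
    assert!(store.last_backup().is_none());
    store.set_last_backup();
    println!("last backup at: {:?}", store.last_backup());
}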
@@ -143,14 +122,6 @@ impl DataInner {
 
         Ok(())
     }
-
-    pub fn stop_accept_updates(&self) {
-        self.accept_updates.store(false, Ordering::Relaxed);
-    }
-
-    pub fn accept_updates(&self) -> bool {
-        self.accept_updates.load(Ordering::Relaxed)
-    }
 }
 
 impl Data {
@@ -160,14 +131,12 @@ impl Data {
         let server_pid = sysinfo::get_current_pid().unwrap();
 
         let db = Arc::new(Database::open_or_create(opt.database_path.clone()).unwrap());
-        let accept_updates = Arc::new(AtomicBool::new(true));
 
         let inner_data = DataInner {
             db: db.clone(),
             db_path,
             admin_token,
             server_pid,
-            accept_updates,
         };
 
         let data = Data {
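The accept_updates field removed in the hunks above was a shared Arc<AtomicBool> that the server could flip off to pause updates, together with the stop_accept_updates/accept_updates helpers. A self-contained sketch of that flag pattern follows; the UpdateGate type is an illustrative stand-in for DataInner, not something from the codebase.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// A cloneable handle around a shared boolean flag, mirroring the removed
// field and its two helper methods.
#[derive(Clone)]
struct UpdateGate {
    accept_updates: Arc<AtomicBool>,
}

impl UpdateGate {
    fn new() -> UpdateGate {
        UpdateGate { accept_updates: Arc::new(AtomicBool::new(true)) }
    }

    // Flip the flag off, e.g. before entering a maintenance phase.
    fn stop_accept_updates(&self) {
        self.accept_updates.store(false, Ordering::Relaxed);
    }

    // Check whether updates are currently accepted.
    fn accept_updates(&self) -> bool {
        self.accept_updates.load(Ordering::Relaxed)
    }
}

fn main() {
    let gate = UpdateGate::new();
    assert!(gate.accept_updates());
    gate.stop_accept_updates();
    assert!(!gate.accept_updates());
}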
@@ -45,10 +45,6 @@ pub struct IndexUpdateResponse {
 pub async fn delete_document(ctx: Context<Data>) -> SResult<Response> {
     ctx.is_allowed(DocumentsWrite)?;
 
-    if !ctx.state().accept_updates() {
-        return Err(ResponseError::Maintenance);
-    }
-
     let index = ctx.index()?;
     let identifier = ctx.identifier()?;
     let document_id = meilidb_core::serde::compute_document_id(identifier.clone());
@@ -154,9 +150,6 @@ fn infered_schema(document: &IndexMap<String, Value>) -> Option<meilidb_schema::
 async fn update_multiple_documents(mut ctx: Context<Data>, is_partial: bool) -> SResult<Response> {
     ctx.is_allowed(DocumentsWrite)?;
 
-    if !ctx.state().accept_updates() {
-        return Err(ResponseError::Maintenance);
-    }
     let data: Vec<IndexMap<String, Value>> =
         ctx.body_json().await.map_err(ResponseError::bad_request)?;
     let index = ctx.index()?;
@@ -211,9 +204,7 @@ pub async fn add_or_update_multiple_documents(ctx: Context<Data>) -> SResult<Res
 
 pub async fn delete_multiple_documents(mut ctx: Context<Data>) -> SResult<Response> {
     ctx.is_allowed(DocumentsWrite)?;
-    if !ctx.state().accept_updates() {
-        return Err(ResponseError::Maintenance);
-    }
+
     let data: Vec<Value> = ctx.body_json().await.map_err(ResponseError::bad_request)?;
     let index = ctx.index()?;
 
@@ -243,9 +234,7 @@ pub async fn delete_multiple_documents(mut ctx: Context<Data>) -> SResult<Respon
 
 pub async fn clear_all_documents(ctx: Context<Data>) -> SResult<Response> {
     ctx.is_allowed(DocumentsWrite)?;
-    if !ctx.state().accept_updates() {
-        return Err(ResponseError::Maintenance);
-    }
+
     let index = ctx.index()?;
 
     let env = &ctx.state().db.env;
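Each handler hunk above drops the same guard: when the server had stopped accepting updates, the route returned ResponseError::Maintenance before touching the index. A plain-Rust sketch of that guard follows, without the crate's Context<Data>/SResult types; the State, HandlerError, and delete_documents names are illustrative assumptions, not the real route code.

use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative error and state types standing in for ResponseError and Data.
#[derive(Debug)]
enum HandlerError {
    Maintenance,
}

struct State {
    accept_updates: AtomicBool,
}

fn delete_documents(state: &State) -> Result<(), HandlerError> {
    // The removed guard: refuse writes while updates are paused.
    if !state.accept_updates.load(Ordering::Relaxed) {
        return Err(HandlerError::Maintenance);
    }
    // ... the rest of the handler would perform the deletion here ...
    Ok(())
}

fn main() {
    let state = State { accept_updates: AtomicBool::new(false) };
    assert!(matches!(delete_documents(&state), Err(HandlerError::Maintenance)));
}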