bring back the IndexMeta and IndexStats in meilisearch-http

Tamo 2022-09-27 19:52:06 +02:00 committed by Clément Renault
parent f4ecf75cda
commit 0d0b2a9ac1
6 changed files with 208 additions and 130 deletions


@@ -1,6 +1,11 @@
+use std::convert::TryFrom;
+use std::sync::Arc;
+
 use actix_web::web::Data;
 use actix_web::{web, HttpRequest, HttpResponse};
-use index_scheduler::{IndexScheduler, KindWithContent};
+use index::Index;
+use index_scheduler::milli::FieldDistribution;
+use index_scheduler::{IndexScheduler, KindWithContent, Query, Status};
 use log::debug;
 use meilisearch_types::error::ResponseError;
 use serde::{Deserialize, Serialize};
@@ -39,6 +44,30 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
     );
 }
 
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct IndexView {
+    pub uid: String,
+    #[serde(with = "time::serde::rfc3339")]
+    pub created_at: OffsetDateTime,
+    #[serde(with = "time::serde::rfc3339")]
+    pub updated_at: OffsetDateTime,
+    pub primary_key: Option<String>,
+}
+
+impl TryFrom<&Index> for IndexView {
+    type Error = index::error::IndexError;
+
+    fn try_from(index: &Index) -> Result<IndexView, Self::Error> {
+        Ok(IndexView {
+            uid: index.name.clone(),
+            created_at: index.created_at()?,
+            updated_at: index.updated_at()?,
+            primary_key: index.primary_key()?,
+        })
+    }
+}
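
Aside, not part of the commit: with `rename_all = "camelCase"` and the `time::serde::rfc3339` attributes above, `IndexView` serializes to camelCase keys and RFC 3339 timestamps. A minimal self-contained sketch of that output, assuming the `time` crate with its `serde-well-known` feature plus `serde_json`; the sample values are made up:

```rust
use serde::Serialize;
use time::OffsetDateTime;

// Same shape and attributes as the IndexView added in this commit.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct IndexView {
    uid: String,
    #[serde(with = "time::serde::rfc3339")]
    created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    updated_at: OffsetDateTime,
    primary_key: Option<String>,
}

fn main() {
    // Fabricated values for illustration only.
    let view = IndexView {
        uid: "movies".into(),
        created_at: OffsetDateTime::UNIX_EPOCH,
        updated_at: OffsetDateTime::UNIX_EPOCH,
        primary_key: Some("id".into()),
    };
    // Prints: {"uid":"movies","createdAt":"1970-01-01T00:00:00Z",
    //          "updatedAt":"1970-01-01T00:00:00Z","primaryKey":"id"}
    println!("{}", serde_json::to_string(&view).unwrap());
}
```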
 pub async fn list_indexes(
     index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, Data<IndexScheduler>>,
     paginate: web::Query<Pagination>,
@@ -46,16 +75,13 @@ pub async fn list_indexes(
     let search_rules = &index_scheduler.filters().search_rules;
     let indexes: Vec<_> = index_scheduler.indexes()?;
-    let nb_indexes = indexes.len();
-    let iter = indexes
-        .into_iter()
-        .filter(|index| search_rules.is_index_authorized(&index.name));
-    /*
-    TODO: TAMO: implements me. It's missing a kind of IndexView or something
-    let ret = paginate
-        .into_inner()
-        .auto_paginate_unsized(nb_indexes, iter);
-    */
-    let ret = todo!();
+    let indexes = indexes
+        .iter()
+        .filter(|index| search_rules.is_index_authorized(&index.name))
+        .map(IndexView::try_from)
+        .collect::<Result<Vec<_>, _>>()?;
+
+    let ret = paginate.auto_paginate_sized(indexes.into_iter());
 
     debug!("returns: {:?}", ret);
     Ok(HttpResponse::Ok().json(ret))
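
Aside, not part of the commit: `auto_paginate_sized` consumes an `ExactSizeIterator`, so the total is read off the iterator itself and the old standalone `nb_indexes` count becomes unnecessary. A rough sketch of that idea; the `Pagination` and `PaginationView` shapes here (offset, limit, results, total) are assumptions for illustration, not meilisearch's exact types:

```rust
use serde::Serialize;

// Assumed query-string shape: ?offset=..&limit=..
#[derive(Debug, Clone, Copy)]
pub struct Pagination {
    pub offset: usize,
    pub limit: usize,
}

// Assumed response wrapper: the page plus the total number of items.
#[derive(Debug, Serialize)]
pub struct PaginationView<T> {
    pub results: Vec<T>,
    pub offset: usize,
    pub limit: usize,
    pub total: usize,
}

impl Pagination {
    /// Paginate an iterator whose length is known up front.
    pub fn auto_paginate_sized<T>(
        self,
        content: impl ExactSizeIterator<Item = T>,
    ) -> PaginationView<T> {
        // The exact size gives us the total without a separate count.
        let total = content.len();
        let results = content.skip(self.offset).take(self.limit).collect();
        PaginationView { results, offset: self.offset, limit: self.limit, total }
    }
}
```

Collecting the `IndexView`s into a `Vec` first is what makes the iterator exact-sized, which is presumably why `list_indexes` collects before paginating.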
@@ -104,29 +130,16 @@ pub struct UpdateIndexRequest {
     primary_key: Option<String>,
 }
 
-#[derive(Debug, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateIndexResponse {
-    name: String,
-    uid: String,
-    #[serde(serialize_with = "time::serde::rfc3339::serialize")]
-    created_at: OffsetDateTime,
-    #[serde(serialize_with = "time::serde::rfc3339::serialize")]
-    updated_at: OffsetDateTime,
-    #[serde(serialize_with = "time::serde::rfc3339::serialize")]
-    primary_key: OffsetDateTime,
-}
-
 pub async fn get_index(
     index_scheduler: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, Data<IndexScheduler>>,
     index_uid: web::Path<String>,
 ) -> Result<HttpResponse, ResponseError> {
-    let meta = index_scheduler.index(&index_uid)?;
-    debug!("returns: {:?}", meta);
-    // TODO: TAMO: do this as well
-    todo!()
-    // Ok(HttpResponse::Ok().json(meta))
+    let index = index_scheduler.index(&index_uid)?;
+    let index_view: IndexView = (&index).try_into()?;
+
+    debug!("returns: {:?}", index_view);
+    Ok(HttpResponse::Ok().json(index_view))
 }
 
 pub async fn update_index(
@@ -178,11 +191,40 @@ pub async fn get_index_stats(
         json!({ "per_index_uid": true }),
         Some(&req),
     );
-    let index = index_scheduler.index(&index_uid)?;
-    // TODO: TAMO: Bring the index_stats in meilisearch-http
-    // let response = index.get_index_stats()?;
-    let response = todo!();
-    debug!("returns: {:?}", response);
-    Ok(HttpResponse::Ok().json(response))
+    let stats = IndexStats::new((*index_scheduler).clone(), index_uid.into_inner())?;
+
+    debug!("returns: {:?}", stats);
+    Ok(HttpResponse::Ok().json(stats))
 }
+
+#[derive(Serialize, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct IndexStats {
+    pub number_of_documents: u64,
+    pub is_indexing: bool,
+    pub field_distribution: FieldDistribution,
+}
+
+impl IndexStats {
+    pub fn new(
+        index_scheduler: Data<IndexScheduler>,
+        index_uid: String,
+    ) -> Result<Self, ResponseError> {
+        // We check if there is currently a processing task associated with this index.
+        let processing_task = index_scheduler.get_tasks(
+            Query::default()
+                .with_status(Status::Processing)
+                .with_index(index_uid.clone())
+                .with_limit(1),
+        )?;
+        let is_processing = !processing_task.is_empty();
+
+        let index = index_scheduler.index(&index_uid)?;
+        Ok(IndexStats {
+            number_of_documents: index.number_of_documents()?,
+            is_indexing: is_processing,
+            field_distribution: index.field_distribution()?,
+        })
+    }
+}
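
Aside, not part of the commit: a self-contained sketch of what this endpoint's payload looks like once serialized. It assumes milli's `FieldDistribution` is a map from field name to the number of documents containing that field (modelled here as a `BTreeMap<String, u64>`), and it uses `serde_json` with made-up values:

```rust
use std::collections::BTreeMap;

use serde::Serialize;

// Assumption for this sketch: milli's FieldDistribution alias.
type FieldDistribution = BTreeMap<String, u64>;

// Same shape and attributes as the IndexStats added in this commit.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
struct IndexStats {
    number_of_documents: u64,
    is_indexing: bool,
    field_distribution: FieldDistribution,
}

fn main() {
    // Fabricated values for illustration only.
    let stats = IndexStats {
        number_of_documents: 2,
        is_indexing: false,
        field_distribution: BTreeMap::from([("genre".into(), 1), ("title".into(), 2)]),
    };
    // Prints: {"numberOfDocuments":2,"isIndexing":false,
    //          "fieldDistribution":{"genre":1,"title":2}}
    println!("{}", serde_json::to_string(&stats).unwrap());
}
```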


@@ -1,6 +1,8 @@
+use std::collections::BTreeMap;
+
 use actix_web::web::Data;
 use actix_web::{web, HttpRequest, HttpResponse};
-use index_scheduler::IndexScheduler;
+use index_scheduler::{IndexScheduler, Query, Status};
 use log::debug;
 use serde::{Deserialize, Serialize};
@@ -14,6 +16,8 @@ use meilisearch_types::star_or::StarOr;
 use crate::analytics::Analytics;
 use crate::extractors::authentication::{policies::*, GuardedData};
 
+use self::indexes::{IndexStats, IndexView};
+
 mod api_key;
 mod dump;
 pub mod indexes;
@@ -232,6 +236,15 @@ pub async fn running() -> HttpResponse {
     HttpResponse::Ok().json(serde_json::json!({ "status": "Meilisearch is running" }))
 }
 
+#[derive(Serialize, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct Stats {
+    pub database_size: u64,
+    #[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
+    pub last_update: Option<OffsetDateTime>,
+    pub indexes: BTreeMap<String, IndexStats>,
+}
+
 async fn get_stats(
     index_scheduler: GuardedData<ActionPolicy<{ actions::STATS_GET }>, Data<IndexScheduler>>,
     req: HttpRequest,
@@ -243,11 +256,48 @@ async fn get_stats(
         Some(&req),
     );
     let search_rules = &index_scheduler.filters().search_rules;
-    // let response = index_scheduler.get_all_stats(search_rules).await?;
-    let response = todo!();
-    debug!("returns: {:?}", response);
-    Ok(HttpResponse::Ok().json(response))
+
+    let mut last_task: Option<OffsetDateTime> = None;
+    let mut indexes = BTreeMap::new();
+    let mut database_size = 0;
+    let processing_task = index_scheduler.get_tasks(
+        Query::default()
+            .with_status(Status::Processing)
+            .with_limit(1),
+    )?;
+    let processing_index = processing_task
+        .first()
+        .and_then(|task| task.index_uid.clone());
+
+    for index in index_scheduler.indexes()? {
+        if !search_rules.is_index_authorized(&index.name) {
+            continue;
+        }
+
+        database_size += index.size()?;
+
+        let stats = IndexStats {
+            number_of_documents: index.number_of_documents()?,
+            is_indexing: processing_index
+                .as_deref()
+                .map_or(false, |index_name| index.name == index_name),
+            field_distribution: index.field_distribution()?,
+        };
+
+        let updated_at = index.updated_at()?;
+        last_task = last_task.map_or(Some(updated_at), |last| Some(last.max(updated_at)));
+
+        indexes.insert(index.name.clone(), stats);
+    }
+
+    let stats = Stats {
+        database_size,
+        last_update: last_task,
+        indexes,
+    };
+
+    debug!("returns: {:?}", stats);
+    Ok(HttpResponse::Ok().json(stats))
 }
#[derive(Serialize)]
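
Aside, not part of the commit: the `get_stats` loop folds `updated_at` across the authorized indexes to produce `lastUpdate` (serialized through `time::serde::rfc3339::option` as either a timestamp or `null`). A tiny standalone check of that Option-max fold, with fabricated timestamps:

```rust
use time::OffsetDateTime;

fn main() {
    // Three fake "updated_at" values, one per index.
    let updates = [
        OffsetDateTime::from_unix_timestamp(100).unwrap(),
        OffsetDateTime::from_unix_timestamp(300).unwrap(),
        OffsetDateTime::from_unix_timestamp(200).unwrap(),
    ];

    // Same Option-max fold as in get_stats above.
    let mut last_task: Option<OffsetDateTime> = None;
    for updated_at in updates {
        last_task = last_task.map_or(Some(updated_at), |last| Some(last.max(updated_at)));
    }

    // The most recent update wins; None would mean no index was visited.
    assert_eq!(last_task, Some(OffsetDateTime::from_unix_timestamp(300).unwrap()));
}
```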