Make sure that meilisearch-http works without index wrapper

Kerollmops 2022-10-04 11:06:48 +02:00 committed by Clément Renault
parent b7898cd4ab
commit 4871509507
9 changed files with 230 additions and 43 deletions
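Every hunk below follows the same pattern: the HTTP routes stop going through the index crate's Index wrapper type and its methods, hold a plain milli Index instead, call the former methods as free functions (retrieve_document, retrieve_documents, perform_search, index::settings), and open an explicit read transaction for the low-level accessors. A minimal sketch of that last point, assuming only the calls visible in the hunks below; the helper function itself is hypothetical:

use index::error::IndexError;
use index_scheduler::milli::Index;

// Hypothetical helper showing the recurring shape of this commit: the wrapper
// used to open a read transaction internally, now every accessor takes one
// explicitly.
fn document_count(index: &Index) -> Result<u64, IndexError> {
    // Formerly `index.number_of_documents()?` on the wrapper type.
    let rtxn = index.read_txn()?;
    let count = index.number_of_documents(&rtxn)?;
    Ok(count)
}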

View file

@@ -8,6 +8,7 @@ use actix_web::{web, HttpRequest, HttpResponse};
use bstr::ByteSlice;
use document_formats::{read_csv, read_json, read_ndjson, PayloadType};
use futures::{Stream, StreamExt};
+use index::{retrieve_document, retrieve_documents};
use index_scheduler::milli::update::IndexDocumentsMethod;
use index_scheduler::IndexScheduler;
use index_scheduler::{KindWithContent, TaskView};
@@ -103,7 +104,7 @@ pub async fn get_document(
let attributes_to_retrieve = fields.and_then(fold_star_or);
let index = index_scheduler.index(&path.index_uid)?;
-let document = index.retrieve_document(&path.document_id, attributes_to_retrieve)?;
+let document = retrieve_document(&index, &path.document_id, attributes_to_retrieve)?;
debug!("returns: {:?}", document);
Ok(HttpResponse::Ok().json(document))
}
@@ -149,7 +150,7 @@ pub async fn get_all_documents(
let attributes_to_retrieve = fields.and_then(fold_star_or);
let index = index_scheduler.index(&index_uid)?;
-let (total, documents) = index.retrieve_documents(offset, limit, attributes_to_retrieve)?;
+let (total, documents) = retrieve_documents(&index, offset, limit, attributes_to_retrieve)?;
let ret = PaginationView::new(offset, limit, total as usize, documents);
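The two document routes above can also be exercised without the actix extractors. A hedged sketch, assuming the attributes argument is a plain Option<Vec<String>> (its real type is whatever fold_star_or produces) and with illustrative printing:

use index::{retrieve_document, retrieve_documents};
use index_scheduler::milli::Index;

// Hypothetical helper around the two free functions used by the routes above.
fn first_page(
    index: &Index,
    document_id: &str,
    limit: usize,
    attributes: Option<Vec<String>>,
) -> Result<(), Box<dyn std::error::Error>> {
    // One document by id, without the HTTP layer around it.
    let document = retrieve_document(index, document_id, attributes.clone())?;
    println!("single document: {document:?}");

    // A page of documents plus the total count, as fed to PaginationView::new.
    let (total, documents) = retrieve_documents(index, 0, limit, attributes)?;
    println!("{} of {total} documents returned", documents.len());
    Ok(())
}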

View file

@@ -1,6 +1,6 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
-use index_scheduler::milli::FieldDistribution;
+use index_scheduler::milli::{FieldDistribution, Index};
use index_scheduler::{IndexScheduler, KindWithContent, Query, Status};
use log::debug;
use meilisearch_types::error::ResponseError;
@@ -11,7 +11,6 @@ use time::OffsetDateTime;
use crate::analytics::Analytics;
use crate::extractors::authentication::{policies::*, AuthenticationError, GuardedData};
use crate::extractors::sequential_extractor::SeqHandler;
-use index_scheduler::task::TaskView;
use super::Pagination;
@@ -51,15 +50,14 @@ pub struct IndexView {
pub primary_key: Option<String>,
}
-impl TryFrom<&Index> for IndexView {
-type Error = index::error::IndexError;
-fn try_from(index: &Index) -> Result<IndexView, Self::Error> {
+impl IndexView {
+fn new(uid: String, index: &Index) -> Result<IndexView, index::error::IndexError> {
+let rtxn = index.read_txn()?;
Ok(IndexView {
-uid: index.name.clone(),
-created_at: index.created_at()?,
-updated_at: index.updated_at()?,
-primary_key: index.primary_key()?,
+uid,
+created_at: index.created_at(&rtxn)?,
+updated_at: index.updated_at(&rtxn)?,
+primary_key: index.primary_key(&rtxn)?.map(String::from),
})
}
}
@@ -71,9 +69,9 @@ pub async fn list_indexes(
let search_rules = &index_scheduler.filters().search_rules;
let indexes: Vec<_> = index_scheduler.indexes()?;
let indexes = indexes
-.iter()
-.filter(|index| search_rules.is_index_authorized(&index.name))
-.map(IndexView::try_from)
+.into_iter()
+.filter(|(name, _)| search_rules.is_index_authorized(name))
+.map(|(name, index)| IndexView::new(name, &index))
.collect::<Result<Vec<_>, _>>()?;
let ret = paginate.auto_paginate_sized(indexes.into_iter());
@@ -130,7 +128,7 @@ pub async fn get_index(
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
-let index_view: IndexView = (&index).try_into()?;
+let index_view = IndexView::new(index_uid.into_inner(), &index)?;
debug!("returns: {:?}", index_view);
@@ -216,10 +214,11 @@ impl IndexStats {
let is_processing = !processing_task.is_empty();
let index = index_scheduler.index(&index_uid)?;
+let rtxn = index.read_txn()?;
Ok(IndexStats {
-number_of_documents: index.number_of_documents()?,
+number_of_documents: index.number_of_documents(&rtxn)?,
is_indexing: is_processing,
-field_distribution: index.field_distribution()?,
+field_distribution: index.field_distribution(&rtxn)?,
})
}
}
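The (name, index) destructuring above, and again in get_stats further down, exists because the wrapper's index.name field is gone: index_scheduler.indexes() now hands back each uid next to its milli Index. A hedged sketch of consuming those pairs outside the routes, with illustrative printing:

use index_scheduler::IndexScheduler;

// Hypothetical report over every index the scheduler knows about, mirroring
// the loop shape used by get_stats below.
fn print_index_report(scheduler: &IndexScheduler) -> Result<(), Box<dyn std::error::Error>> {
    for (name, index) in scheduler.indexes()? {
        // The uid travels next to the index instead of living on a wrapper.
        let rtxn = index.read_txn()?;
        let documents = index.number_of_documents(&rtxn)?;
        let bytes = index.on_disk_size()?;
        println!("{name}: {documents} documents, {bytes} bytes on disk");
    }
    Ok(())
}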

View file

@@ -1,7 +1,7 @@
use actix_web::web::Data;
use actix_web::{web, HttpRequest, HttpResponse};
use index::{
-MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
+perform_search, MatchingStrategy, SearchQuery, DEFAULT_CROP_LENGTH, DEFAULT_CROP_MARKER,
DEFAULT_HIGHLIGHT_POST_TAG, DEFAULT_HIGHLIGHT_PRE_TAG, DEFAULT_SEARCH_LIMIT,
};
use index_scheduler::IndexScheduler;
@@ -151,7 +151,7 @@ pub async fn search_with_url_query(
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
-let search_result = index.perform_search(query);
+let search_result = perform_search(&index, query);
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
@@ -185,7 +185,7 @@ pub async fn search_with_post(
let mut aggregate = SearchAggregator::from_query(&query, &req);
let index = index_scheduler.index(&index_uid)?;
-let search_result = index.perform_search(query);
+let search_result = perform_search(&index, query);
if let Ok(ref search_result) = search_result {
aggregate.succeed(search_result);
}
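The same free-function shape applies to search. A hedged sketch of calling it outside actix-web; only perform_search(&index, query) comes from the hunks above, while the wrapper function, the JSON conversion and the error handling are assumptions made for the example:

use index::{perform_search, SearchQuery};
use index_scheduler::milli::Index;

// Hypothetical wrapper: run one query against a milli Index through the free
// function the two handlers above now call.
fn run_query(index: &Index, query: SearchQuery) -> Result<(), Box<dyn std::error::Error>> {
    let result = perform_search(index, query)?;
    // The handlers feed this result to the analytics aggregator and then
    // serialize it as the JSON body of the HTTP response.
    println!("{}", serde_json::to_value(&result)?);
    Ok(())
}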

View file

@@ -97,7 +97,8 @@ macro_rules! make_setting_route {
index_uid: actix_web::web::Path<String>,
) -> std::result::Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
-let settings = index.settings()?;
+let rtxn = index.read_txn()?;
+let settings = index::settings(&index, &rtxn)?;
debug!("returns: {:?}", settings);
let mut json = serde_json::json!(&settings);
@@ -454,7 +455,8 @@ pub async fn get_all(
index_uid: web::Path<String>,
) -> Result<HttpResponse, ResponseError> {
let index = index_scheduler.index(&index_uid)?;
-let new_settings = index.settings()?;
+let rtxn = index.read_txn()?;
+let new_settings = index::settings(&index, &rtxn)?;
debug!("returns: {:?}", new_settings);
Ok(HttpResponse::Ok().json(new_settings))
}
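Both settings handlers now repeat the same two lines: open a read transaction, then call the module-level settings function. A hedged sketch of that shared read path on its own; the wrapper function and the pretty-printing are illustrative:

use index_scheduler::milli::Index;

// Hypothetical helper: read one index's settings through index::settings with
// the explicit read transaction the routes now open themselves.
fn dump_settings(index: &Index) -> Result<(), Box<dyn std::error::Error>> {
    let rtxn = index.read_txn()?;
    let settings = index::settings(index, &rtxn)?;
    // The GET handlers serialize this same value with serde_json::json!.
    println!("{}", serde_json::to_string_pretty(&settings)?);
    Ok(())
}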

View file

@@ -5,14 +5,11 @@ use actix_web::{web, HttpRequest, HttpResponse};
+use index::{Settings, Unchecked};
use index_scheduler::{IndexScheduler, Query, Status};
use log::debug;
-use serde::{Deserialize, Serialize};
-use serde_json::json;
-use time::OffsetDateTime;
-use index::{Settings, Unchecked};
use meilisearch_types::error::ResponseError;
use meilisearch_types::star_or::StarOr;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use time::OffsetDateTime;
use crate::analytics::Analytics;
use crate::extractors::authentication::{policies::*, GuardedData};
@@ -270,25 +267,26 @@ async fn get_stats(
.first()
.and_then(|task| task.index_uid.clone());
-for index in index_scheduler.indexes()? {
-if !search_rules.is_index_authorized(&index.name) {
+for (name, index) in index_scheduler.indexes()? {
+if !search_rules.is_index_authorized(&name) {
continue;
}
-database_size += index.size()?;
+database_size += index.on_disk_size()?;
+let rtxn = index.read_txn()?;
let stats = IndexStats {
-number_of_documents: index.number_of_documents()?,
+number_of_documents: index.number_of_documents(&rtxn)?,
is_indexing: processing_index
.as_deref()
-.map_or(false, |index_name| index.name == index_name),
-field_distribution: index.field_distribution()?,
+.map_or(false, |index_name| name == index_name),
+field_distribution: index.field_distribution(&rtxn)?,
};
-let updated_at = index.updated_at()?;
+let updated_at = index.updated_at(&rtxn)?;
last_task = last_task.map_or(Some(updated_at), |last| Some(last.max(updated_at)));
-indexes.insert(index.name.clone(), stats);
+indexes.insert(name, stats);
}
let stats = Stats {