feat(http): paginate the index resource

Fix #2373
Irevoire 2022-05-24 11:29:03 +02:00 committed by Tamo
parent ab39df9693
commit 627f13df85
No known key found for this signature in database
GPG Key ID: 20CD8020AFA88D69
8 changed files with 208 additions and 75 deletions


@@ -37,19 +37,38 @@ pub fn configure(cfg: &mut web::ServiceConfig) {
);
}
const PAGINATION_DEFAULT_LIMIT: fn() -> usize = || 20;
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Paginate {
#[serde(default)]
offset: usize,
#[serde(default = "PAGINATION_DEFAULT_LIMIT")]
limit: usize,
}
pub async fn list_indexes(
data: GuardedData<ActionPolicy<{ actions::INDEXES_GET }>, MeiliSearch>,
paginate: web::Query<Paginate>,
) -> Result<HttpResponse, ResponseError> {
let search_rules = &data.filters().search_rules;
let indexes: Vec<_> = data
.list_indexes()
.await?
let indexes: Vec<_> = data.list_indexes().await?;
let nb_indexes = indexes.len();
let indexes: Vec<_> = indexes
.into_iter()
.filter(|i| search_rules.is_index_authorized(&i.uid))
.skip(paginate.offset)
.take(paginate.limit)
.collect();
debug!("returns: {:?}", indexes);
Ok(HttpResponse::Ok().json(indexes))
Ok(HttpResponse::Ok().json(json!({
"results": indexes,
"offset": paginate.offset,
"limit": paginate.limit,
"total": nb_indexes,
})))
}
#[derive(Debug, Deserialize)]
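
For context, here is a minimal standalone sketch of how the Paginate query struct above behaves. It assumes only the serde and serde_urlencoded crates rather than the actual actix-web web::Query extractor, so it is an illustration, not the handler's real deserialization path: omitted parameters fall back to offset = 0 and limit = 20 through the function-pointer default, and unexpected parameters are rejected by deny_unknown_fields.

use serde::Deserialize;

const PAGINATION_DEFAULT_LIMIT: fn() -> usize = || 20;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
struct Paginate {
    #[serde(default)]
    offset: usize,
    #[serde(default = "PAGINATION_DEFAULT_LIMIT")]
    limit: usize,
}

fn main() {
    // "?offset=45" with no limit: the default limit of 20 kicks in.
    let paginate: Paginate = serde_urlencoded::from_str("offset=45").unwrap();
    assert_eq!((paginate.offset, paginate.limit), (45, 20));

    // No parameters at all: both defaults apply.
    let paginate: Paginate = serde_urlencoded::from_str("").unwrap();
    assert_eq!((paginate.offset, paginate.limit), (0, 20));

    // An unknown parameter is refused because of deny_unknown_fields.
    assert!(serde_urlencoded::from_str::<Paginate>("page=2").is_err());
}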


@@ -353,10 +353,10 @@ async fn list_authorized_indexes_restricted_index() {
let key = response["key"].as_str().unwrap();
server.use_api_key(&key);
let (response, code) = server.list_indexes().await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
let response = response.as_array().unwrap();
let response = response["results"].as_array().unwrap();
// key should have access on `products` index.
assert!(response.iter().any(|index| index["uid"] == "products"));
@@ -394,10 +394,10 @@ async fn list_authorized_indexes_no_index_restriction() {
let key = response["key"].as_str().unwrap();
server.use_api_key(&key);
let (response, code) = server.list_indexes().await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
let response = response.as_array().unwrap();
let response = response["results"].as_array().unwrap();
// key should have access on `products` index.
assert!(response.iter().any(|index| index["uid"] == "products"));


@@ -103,8 +103,27 @@ impl Server {
}
}
pub async fn list_indexes(&self) -> (Value, StatusCode) {
self.service.get("/indexes").await
pub async fn list_indexes(
&self,
offset: Option<usize>,
limit: Option<usize>,
) -> (Value, StatusCode) {
let (offset, limit) = (
offset.map(|offset| format!("offset={offset}")),
limit.map(|limit| format!("limit={limit}")),
);
let query_parameter = offset
.as_ref()
.zip(limit.as_ref())
.map(|(offset, limit)| format!("{offset}&{limit}"))
.or_else(|| offset.xor(limit));
if let Some(query_parameter) = query_parameter {
self.service
.get(format!("/indexes?{query_parameter}"))
.await
} else {
self.service.get("/indexes").await
}
}
pub async fn version(&self) -> (Value, StatusCode) {
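
The query-string composition above reads naturally as a small free function; the sketch below is a standalone restatement of it under that hypothetical shape, not the Server method itself. zip joins both parameters when both are supplied, while xor keeps whichever single one is present, so the path only gains a query string when at least one parameter was given.

fn build_list_indexes_path(offset: Option<usize>, limit: Option<usize>) -> String {
    let (offset, limit) = (
        offset.map(|offset| format!("offset={offset}")),
        limit.map(|limit| format!("limit={limit}")),
    );
    // Both present: "offset=...&limit=..."; exactly one present: that one; none: no query string.
    let query_parameter = offset
        .as_ref()
        .zip(limit.as_ref())
        .map(|(offset, limit)| format!("{offset}&{limit}"))
        .or_else(|| offset.xor(limit));
    match query_parameter {
        Some(query_parameter) => format!("/indexes?{query_parameter}"),
        None => "/indexes".to_string(),
    }
}

fn main() {
    assert_eq!(build_list_indexes_path(Some(20), Some(10)), "/indexes?offset=20&limit=10");
    assert_eq!(build_list_indexes_path(Some(15), None), "/indexes?offset=15");
    assert_eq!(build_list_indexes_path(None, Some(5)), "/indexes?limit=5");
    assert_eq!(build_list_indexes_path(None, None), "/indexes");
}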


@@ -41,13 +41,12 @@ async fn import_dump_v2_movie_raw() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -106,13 +105,12 @@ async fn import_dump_v2_movie_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -171,13 +169,12 @@ async fn import_dump_v2_rubygems_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("rubygems"));
assert_eq!(indexes[0]["name"], json!("rubygems"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
@@ -240,13 +237,12 @@ async fn import_dump_v3_movie_raw() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -305,13 +301,12 @@ async fn import_dump_v3_movie_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -336,7 +331,7 @@ async fn import_dump_v3_movie_with_settings() {
json!({ "results": [{ "uid": 1, "indexUid": "indexUID", "status": "succeeded", "type": "settingsUpdate", "details": { "displayedAttributes": ["title", "genres", "overview", "poster", "release_date"], "searchableAttributes": ["title", "overview"], "filterableAttributes": ["genres"], "stopWords": ["of", "the"] }, "duration": "PT37.488777S", "enqueuedAt": "2021-09-08T08:24:02.323444Z", "startedAt": "2021-09-08T08:24:02.324145Z", "finishedAt": "2021-09-08T08:24:39.812922Z" }, { "uid": 0, "indexUid": "indexUID", "status": "succeeded", "type": "documentAdditionOrUpdate", "details": { "receivedDocuments": 0, "indexedDocuments": 31944 }, "duration": "PT39.941318S", "enqueuedAt": "2021-09-08T08:21:14.742672Z", "startedAt": "2021-09-08T08:21:14.750166Z", "finishedAt": "2021-09-08T08:21:54.691484Z" }]})
);
// finally we're just going to check that we can still get a few documents by id
// finally we're just going to check that we can["results"] still get a few documents by id
let (document, code) = index.get_document(100, None).await;
assert_eq!(code, 200);
assert_eq!(
@@ -370,13 +365,12 @@ async fn import_dump_v3_rubygems_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("rubygems"));
assert_eq!(indexes[0]["name"], json!("rubygems"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");
@@ -439,13 +433,12 @@ async fn import_dump_v4_movie_raw() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -504,13 +497,12 @@ async fn import_dump_v4_movie_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("indexUID"));
assert_eq!(indexes[0]["name"], json!("indexUID"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("indexUID"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("indexUID");
@@ -569,13 +561,12 @@ async fn import_dump_v4_rubygems_with_settings() {
};
let server = Server::new_with_options(options).await.unwrap();
let (indexes, code) = server.list_indexes().await;
let (indexes, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(indexes.as_array().unwrap().len(), 1);
assert_eq!(indexes[0]["uid"], json!("rubygems"));
assert_eq!(indexes[0]["name"], json!("rubygems"));
assert_eq!(indexes[0]["primaryKey"], json!("id"));
assert_eq!(indexes["results"].as_array().unwrap().len(), 1);
assert_eq!(indexes["results"][0]["uid"], json!("rubygems"));
assert_eq!(indexes["results"][0]["primaryKey"], json!("id"));
let index = server.index("rubygems");


@@ -16,12 +16,11 @@ async fn create_and_get_index() {
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert_eq!(response["name"], "test");
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
assert_eq!(response["createdAt"], response["updatedAt"]);
assert_eq!(response["primaryKey"], Value::Null);
assert_eq!(response.as_object().unwrap().len(), 5);
assert_eq!(response.as_object().unwrap().len(), 4);
}
#[actix_rt::test]
@@ -45,10 +44,10 @@ async fn error_get_unexisting_index() {
#[actix_rt::test]
async fn no_index_return_empty_list() {
let server = Server::new().await;
let (response, code) = server.list_indexes().await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert!(response.is_array());
assert!(response.as_array().unwrap().is_empty());
assert!(response["results"].is_array());
assert!(response["results"].as_array().unwrap().is_empty());
}
#[actix_rt::test]
@@ -59,10 +58,10 @@ async fn list_multiple_indexes() {
server.index("test").wait_task(1).await;
let (response, code) = server.list_indexes().await;
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert!(response.is_array());
let arr = response.as_array().unwrap();
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 2);
assert!(arr
.iter()
@@ -72,6 +71,119 @@ async fn list_multiple_indexes() {
.any(|entry| entry["uid"] == "test1" && entry["primaryKey"] == "key"));
}
#[actix_rt::test]
async fn get_and_paginate_indexes() {
let server = Server::new().await;
const NB_INDEXES: usize = 50;
for i in 0..NB_INDEXES {
server.index(&format!("test_{i:02}")).create(None).await;
}
server
.index(&format!("test_{NB_INDEXES}"))
.wait_task(NB_INDEXES as u64 - 1)
.await;
// basic
let (response, code) = server.list_indexes(None, None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
// ensuring we get all the indexes in the alphabetical order
assert!((0..20)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with an offset
let (response, code) = server.list_indexes(Some(15), None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(15));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 20);
assert!((15..35)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with an offset and not enough elements
let (response, code) = server.list_indexes(Some(45), None).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(20));
assert_eq!(response["offset"], json!(45));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 5);
assert!((45..50)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit lower than the default
let (response, code) = server.list_indexes(None, Some(5)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(5));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 5);
assert!((0..5)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit higher than the default
let (response, code) = server.list_indexes(None, Some(40)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(40));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 40);
assert!((0..40)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit higher than the default
let (response, code) = server.list_indexes(None, Some(80)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(80));
assert_eq!(response["offset"], json!(0));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 50);
assert!((0..50)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
// with a limit and an offset
let (response, code) = server.list_indexes(Some(20), Some(10)).await;
assert_eq!(code, 200);
assert_eq!(response["limit"], json!(10));
assert_eq!(response["offset"], json!(20));
assert_eq!(response["total"], json!(NB_INDEXES));
assert!(response["results"].is_array());
let arr = response["results"].as_array().unwrap();
assert_eq!(arr.len(), 10);
assert!((20..30)
.map(|idx| format!("test_{idx:02}"))
.zip(arr)
.all(|(expected, entry)| entry["uid"] == expected));
}
#[actix_rt::test]
async fn get_invalid_index_uid() {
let server = Server::new().await;


@@ -21,7 +21,6 @@ async fn update_primary_key() {
assert_eq!(code, 200);
assert_eq!(response["uid"], "test");
assert_eq!(response["name"], "test");
assert!(response.get("createdAt").is_some());
assert!(response.get("updatedAt").is_some());
@@ -32,7 +31,7 @@ async fn update_primary_key() {
assert!(created_at < updated_at);
assert_eq!(response["primaryKey"], "primary");
assert_eq!(response.as_object().unwrap().len(), 5);
assert_eq!(response.as_object().unwrap().len(), 4);
}
#[actix_rt::test]


@@ -70,7 +70,7 @@ async fn perform_snapshot() {
let snapshot_server = Server::new_with_options(options).await.unwrap();
verify_snapshot!(server, snapshot_server, |server| =>
server.list_indexes(),
server.list_indexes(None, None),
// for some reason the db sizes differ. this may be due to the compaction options we have
// set when performing the snapshot
//server.stats(),


@@ -62,7 +62,6 @@ pub struct IndexMetadata {
#[serde(skip)]
pub uuid: Uuid,
pub uid: String,
name: String,
#[serde(flatten)]
pub meta: IndexMeta,
}
@@ -508,7 +507,6 @@ where
let meta = index.meta()?;
let meta = IndexMetadata {
uuid: index.uuid(),
name: uid.clone(),
uid,
meta,
};
@@ -561,12 +559,7 @@ where
let index = self.index_resolver.get_index(uid.clone()).await?;
let uuid = index.uuid();
let meta = spawn_blocking(move || index.meta()).await??;
let meta = IndexMetadata {
uuid,
name: uid.clone(),
uid,
meta,
};
let meta = IndexMetadata { uuid, uid, meta };
Ok(meta)
}
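
To tie the IndexMetadata change back to the assertions in the index tests (the expected key count dropping from 5 to 4): with name gone, a serialized index exposes uid plus the flattened meta fields, while uuid is skipped. The sketch below is a simplified stand-in rather than the real Meilisearch types; the field types are hypothetical placeholders and only serde and serde_json are assumed.

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct IndexMeta {
    created_at: String,          // placeholder for the real timestamp type
    updated_at: String,
    primary_key: Option<String>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct IndexMetadata {
    #[serde(skip)]
    uuid: u32,                   // placeholder for the real Uuid
    uid: String,
    #[serde(flatten)]
    meta: IndexMeta,
}

fn main() {
    let metadata = IndexMetadata {
        uuid: 0,
        uid: "test".into(),
        meta: IndexMeta {
            created_at: "2022-05-24T11:29:03Z".into(),
            updated_at: "2022-05-24T11:29:03Z".into(),
            primary_key: None,
        },
    };
    let value = serde_json::to_value(&metadata).unwrap();
    // uid + createdAt + updatedAt + primaryKey = 4 top-level keys, matching the tests.
    assert_eq!(value.as_object().unwrap().len(), 4);
}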