mirror of https://github.com/meilisearch/MeiliSearch
synced 2025-01-11 14:04:31 +01:00

Merge pull request #194 from meilisearch/set-code-public

Set code public

This commit is contained in:
commit 87e5998489
@@ -1,3 +1,4 @@
+use std::ops::Deref;
 use serde::de::DeserializeOwned;
 use serde::Serialize;
 use super::Error;
@@ -6,6 +7,14 @@ use std::marker::PhantomData;
 #[derive(Clone)]
 pub struct CommonIndex(pub crate::CfTree);

+impl Deref for CommonIndex {
+    type Target = crate::CfTree;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
 impl CommonIndex {
     pub fn get<T, K>(&self, key: K) -> Result<Option<T>, Error>
     where T: DeserializeOwned,
@@ -61,17 +61,24 @@ pub enum UpdateType {

 #[derive(Clone, Serialize, Deserialize)]
 pub struct DetailedDuration {
-    main: Duration,
+    pub main: Duration,
 }

 #[derive(Clone, Serialize, Deserialize)]
-pub struct UpdateStatus {
+pub struct UpdateResult {
     pub update_id: u64,
     pub update_type: UpdateType,
     pub result: Result<(), String>,
     pub detailed_duration: DetailedDuration,
 }

+#[derive(Clone, Serialize, Deserialize)]
+pub enum UpdateStatus {
+    Enqueued,
+    Processed(UpdateResult),
+    Unknown,
+}
+
 fn spawn_update_system(index: Index, subscription: Receiver<()>) -> thread::JoinHandle<()> {
     thread::spawn(move || {
         let mut subscription = subscription.into_iter();
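
A hedged sketch of how a caller might inspect the new UpdateStatus enum once it is re-exported further down; the field names follow the structs above, while the function itself is purely illustrative:

    // Illustrative only: summarize an UpdateStatus value from meilidb-data.
    use meilidb_data::UpdateStatus;

    fn describe(status: UpdateStatus) -> String {
        match status {
            UpdateStatus::Enqueued => String::from("still waiting to be processed"),
            UpdateStatus::Processed(result) => format!(
                "update {} finished in {:?} with result {:?}",
                result.update_id, result.detailed_duration.main, result.result,
            ),
            UpdateStatus::Unknown => String::from("no update known under this id"),
        }
    }
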
@@ -115,7 +122,7 @@ fn spawn_update_system(index: Index, subscription: Receiver<()>) -> thread::Join
        };

        let detailed_duration = DetailedDuration { main: duration };
-       let status = UpdateStatus {
+       let status = UpdateResult {
            update_id,
            update_type,
            result: result.map_err(|e| e.to_string()),
@@ -180,7 +187,7 @@ pub struct Index {
     updates_id: Arc<AtomicU64>,
     updates_index: crate::CfTree,
     updates_results_index: crate::CfTree,
-    update_callback: Arc<ArcSwapOption<Box<dyn Fn(UpdateStatus) + Send + Sync + 'static>>>,
+    update_callback: Arc<ArcSwapOption<Box<dyn Fn(UpdateResult) + Send + Sync + 'static>>>,
 }

 pub(crate) struct Cache {
@@ -266,7 +273,7 @@ impl Index {
     }

     pub fn set_update_callback<F>(&self, callback: F)
-    where F: Fn(UpdateStatus) + Send + Sync + 'static
+    where F: Fn(UpdateResult) + Send + Sync + 'static
     {
         self.update_callback.store(Some(Arc::new(Box::new(callback))));
     }
@@ -316,6 +323,18 @@ impl Index {
         self.cache.load().schema.clone()
     }

+    pub fn ranked_map(&self) -> RankedMap {
+        self.cache.load().ranked_map.clone()
+    }
+
+    pub fn synonyms_index(&self) -> SynonymsIndex {
+        self.synonyms_index.clone()
+    }
+
+    pub fn synonyms_set(&self) -> Arc<fst::Set> {
+        self.cache.load().synonyms.clone()
+    }
+
     pub fn custom_settings(&self) -> CustomSettingsIndex {
         self.custom_settings_index.clone()
     }
@@ -343,36 +362,35 @@ impl Index {
     pub fn update_status(
         &self,
         update_id: u64,
-    ) -> Result<Option<UpdateStatus>, Error>
+    ) -> Result<UpdateStatus, Error>
     {
         let update_id = update_id.to_be_bytes();
         match self.updates_results_index.get(update_id)? {
             Some(value) => {
                 let value = bincode::deserialize(&value)?;
-                Ok(Some(value))
+                Ok(UpdateStatus::Processed(value))
             },
-            None => Ok(None),
+            None => {
+                match self.updates_index.get(update_id)? {
+                    Some(_) => Ok(UpdateStatus::Enqueued),
+                    None => Ok(UpdateStatus::Unknown),
+                }
+            }
         }
     }

     pub fn update_status_blocking(
         &self,
         update_id: u64,
-    ) -> Result<UpdateStatus, Error>
+    ) -> Result<UpdateResult, Error>
     {
-        // if we find the update result return it now
-        if let Some(result) = self.update_status(update_id)? {
-            return Ok(result)
-        }
-
         loop {
-            if self.updates_results_index.get(&update_id.to_be_bytes())?.is_some() { break }
+            if let Some(value) = self.updates_results_index.get(&update_id.to_be_bytes())? {
+                let value = bincode::deserialize(&value)?;
+                return Ok(value)
+            }
             std::thread::sleep(Duration::from_millis(300));
         }
-
-        // the thread has been unblocked, it means that the update result
-        // has been inserted in the tree, retrieve it
-        Ok(self.update_status(update_id)?.unwrap())
     }

     pub fn documents_ids(&self) -> Result<DocumentsIdsIter, Error> {
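
To illustrate the reworked signatures above: update_status now always yields an UpdateStatus, and update_status_blocking returns the final UpdateResult directly. A minimal sketch, assuming an Index obtained from Database::create_index as in the tests added below:

    // Illustrative only: poll an update, then block until it is processed.
    use meilidb_data::{Error, Index, UpdateResult, UpdateStatus};

    fn wait_and_report(index: &Index, update_id: u64) -> Result<(), Error> {
        match index.update_status(update_id)? {
            UpdateStatus::Processed(result) => println!("already done: {:?}", result.result),
            UpdateStatus::Enqueued => {
                let result: UpdateResult = index.update_status_blocking(update_id)?;
                println!("processed after waiting: {:?}", result.result);
            }
            UpdateStatus::Unknown => println!("update {} was never enqueued", update_id),
        }
        Ok(())
    }
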
@@ -12,7 +12,11 @@ mod update;
 use crate::CfTree;

 pub use self::error::Error;
-pub use self::index::{Index, CustomSettingsIndex, CommonIndex, RankingOrdering, StopWords, RankingOrder, DistinctField, RankingRules};
+pub use self::index::{
+    Index, CustomSettingsIndex, CommonIndex, RankingOrdering,
+    StopWords, RankingOrder, DistinctField, RankingRules,
+    UpdateType, DetailedDuration, UpdateResult, UpdateStatus
+};

 pub use self::update::DocumentsAddition;
 pub use self::update::DocumentsDeletion;
|
@ -7,7 +7,12 @@ mod ranked_map;
|
||||
mod serde;
|
||||
|
||||
pub use self::cf_tree::{CfTree, CfIter};
|
||||
pub use self::database::{Database, Index, CustomSettingsIndex, RankingOrdering, StopWords, RankingOrder, DistinctField, RankingRules};
|
||||
pub use self::database::{
|
||||
Database, Index, CustomSettingsIndex, RankingOrdering,
|
||||
StopWords, RankingOrder, DistinctField, RankingRules,
|
||||
UpdateType, DetailedDuration, UpdateResult, UpdateStatus,
|
||||
Error,
|
||||
};
|
||||
pub use self::number::Number;
|
||||
pub use self::ranked_map::RankedMap;
|
||||
pub use self::serde::{compute_document_id, extract_document_id, value_to_string};
|
||||
|
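
With the crate-root re-exports in place, a dependent crate can register an update callback using only public items; a rough sketch (the logging body is made up):

    // Illustrative only: the callback now receives the public UpdateResult type.
    use meilidb_data::{Index, UpdateResult};

    fn log_updates(index: &Index) {
        index.set_update_callback(|result: UpdateResult| {
            println!("update {} took {:?}", result.update_id, result.detailed_duration.main);
        });
    }
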
meilidb-data/tests/common.rs  (Normal file, +15)
@@ -0,0 +1,15 @@
+use meilidb_data::{Database};
+use meilidb_data::Index;
+use meilidb_schema::{SchemaBuilder, DISPLAYED, INDEXED};
+
+pub fn simple_index() -> Index {
+    let tmp_dir = tempfile::tempdir().unwrap();
+    let database = Database::open(&tmp_dir).unwrap();
+
+    let mut builder = SchemaBuilder::with_identifier("objectId");
+    builder.new_attribute("objectId", DISPLAYED | INDEXED);
+    builder.new_attribute("title", DISPLAYED | INDEXED);
+    let schema = builder.build();
+
+    database.create_index("hello", schema).unwrap()
+}
meilidb-data/tests/custom_settings_index.rs  (Normal file, +43)
@@ -0,0 +1,43 @@
+#[macro_use] extern crate maplit;
+
+mod common;
+
+use big_s::S;
+use meilidb_data::RankingOrdering;
+
+#[test]
+fn stop_words() {
+    let index = common::simple_index();
+    let stop_words = hashset!{ S("le"), S("la"), S("les"), };
+    index.custom_settings().set_stop_words(&stop_words).unwrap();
+    let ret_stop_words = index.custom_settings().get_stop_words().unwrap().unwrap();
+    assert_eq!(ret_stop_words, stop_words);
+}
+
+#[test]
+fn ranking_order() {
+    let index = common::simple_index();
+    let ranking_order = vec![S("SumOfTypos"), S("NumberOfWords"), S("WordsProximity"), S("SumOfWordsAttribute"), S("SumOfWordsPosition"), S("Exact"), S("DocumentId")];
+    index.custom_settings().set_ranking_order(&ranking_order).unwrap();
+    let ret_ranking_orderer = index.custom_settings().get_ranking_order().unwrap().unwrap();
+    assert_eq!(ret_ranking_orderer, ranking_order);
+}
+
+#[test]
+fn distinct_field() {
+    let index = common::simple_index();
+    let distinct_field = S("title");
+    index.custom_settings().set_distinct_field(&distinct_field).unwrap();
+    let ret_distinct_field = index.custom_settings().get_distinct_field().unwrap().unwrap();
+    assert_eq!(ret_distinct_field, distinct_field);
+}
+
+#[test]
+fn ranking_rules() {
+    let index = common::simple_index();
+    let ranking_rules = hashmap!{ S("objectId") => RankingOrdering::Asc };
+    index.custom_settings().set_ranking_rules(&ranking_rules).unwrap();
+    let ret_ranking_rules = index.custom_settings().get_ranking_rules().unwrap().unwrap();
+    assert_eq!(ret_ranking_rules, ranking_rules);
+}
+
meilidb-data/tests/database.rs  (Normal file, +67)
@@ -0,0 +1,67 @@
+#[macro_use] extern crate maplit;
+
+mod common;
+
+use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
+use std::sync::Arc;
+
+use big_s::S;
+use serde_json::json;
+
+#[test]
+fn database_stats() {
+    let index = common::simple_index();
+    let as_been_updated = Arc::new(AtomicBool::new(false));
+
+    let as_been_updated_clone = as_been_updated.clone();
+    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
+
+    let doc1 = json!({ "objectId": 123, "title": "hello" });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc1);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    let stats = index.stats().unwrap();
+    let repartition = hashmap!{
+        S("objectId") => 1u64,
+        S("title") => 1u64,
+    };
+    assert_eq!(stats.number_of_documents, 1);
+    assert_eq!(stats.documents_fields_repartition, repartition);
+
+    let doc2 = json!({ "objectId": 456, "title": "world" });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc2);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    let stats = index.stats().unwrap();
+    let repartition = hashmap!{
+        S("objectId") => 2u64,
+        S("title") => 2u64,
+    };
+    assert_eq!(stats.number_of_documents, 2);
+    assert_eq!(stats.documents_fields_repartition, repartition);
+
+
+    let doc3 = json!({ "objectId": 789 });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc3);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    let stats = index.stats().unwrap();
+    let repartition = hashmap!{
+        S("objectId") => 3u64,
+        S("title") => 2u64,
+    };
+    assert_eq!(stats.number_of_documents, 3);
+    assert_eq!(stats.documents_fields_repartition, repartition);
+}
meilidb-data/tests/index.rs  (Normal file, +99)
@@ -0,0 +1,99 @@
+mod common;
+
+use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
+use std::sync::Arc;
+
+use serde_json::json;
+
+#[test]
+fn insert_delete_document() {
+    let index = common::simple_index();
+    let as_been_updated = Arc::new(AtomicBool::new(false));
+
+    let as_been_updated_clone = as_been_updated.clone();
+    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
+
+    let doc1 = json!({ "objectId": 123, "title": "hello" });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc1);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    assert_eq!(index.number_of_documents(), 1);
+
+    let docs = index.query_builder().query("hello", 0..10).unwrap();
+    assert_eq!(docs.len(), 1);
+    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc1));
+
+    let mut deletion = index.documents_deletion();
+    deletion.delete_document(&doc1).unwrap();
+    let update_id = deletion.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    assert_eq!(index.number_of_documents(), 0);
+
+    let docs = index.query_builder().query("hello", 0..10).unwrap();
+    assert_eq!(docs.len(), 0);
+}
+
+#[test]
+fn replace_document() {
+    let index = common::simple_index();
+    let as_been_updated = Arc::new(AtomicBool::new(false));
+
+    let as_been_updated_clone = as_been_updated.clone();
+    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
+
+    let doc1 = json!({ "objectId": 123, "title": "hello" });
+    let doc2 = json!({ "objectId": 123, "title": "coucou" });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc1);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    assert_eq!(index.number_of_documents(), 1);
+
+    let docs = index.query_builder().query("hello", 0..10).unwrap();
+    assert_eq!(docs.len(), 1);
+    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc1));
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc2);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(as_been_updated.swap(false, Relaxed));
+    assert!(status.result.is_ok());
+    assert_eq!(index.number_of_documents(), 1);
+
+    let docs = index.query_builder().query("hello", 0..10).unwrap();
+    assert_eq!(docs.len(), 0);
+
+    let docs = index.query_builder().query("coucou", 0..10).unwrap();
+    assert_eq!(docs.len(), 1);
+    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc2));
+}
+
+#[test]
+fn documents_ids() {
+    let index = common::simple_index();
+
+    let doc1 = json!({ "objectId": 123, "title": "hello" });
+    let doc2 = json!({ "objectId": 456, "title": "world" });
+    let doc3 = json!({ "objectId": 789 });
+
+    let mut addition = index.documents_addition();
+    addition.update_document(&doc1);
+    addition.update_document(&doc2);
+    addition.update_document(&doc3);
+    let update_id = addition.finalize().unwrap();
+    let status = index.update_status_blocking(update_id).unwrap();
+    assert!(status.result.is_ok());
+
+    let documents_ids_count = index.documents_ids().unwrap().count();
+    assert_eq!(documents_ids_count, 3);
+}
@@ -1,215 +0,0 @@
-#[macro_use] extern crate maplit;
-
-use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
-use std::sync::Arc;
-
-use big_s::S;
-use serde_json::json;
-use meilidb_data::{Database, RankingOrdering};
-use meilidb_schema::{Schema, SchemaBuilder, DISPLAYED, INDEXED};
-
-fn simple_schema() -> Schema {
-    let mut builder = SchemaBuilder::with_identifier("objectId");
-    builder.new_attribute("objectId", DISPLAYED | INDEXED);
-    builder.new_attribute("title", DISPLAYED | INDEXED);
-    builder.build()
-}
-
-#[test]
-fn insert_delete_document() {
-    let tmp_dir = tempfile::tempdir().unwrap();
-    let database = Database::open(&tmp_dir).unwrap();
-
-    let as_been_updated = Arc::new(AtomicBool::new(false));
-
-    let schema = simple_schema();
-    let index = database.create_index("hello", schema).unwrap();
-
-    let as_been_updated_clone = as_been_updated.clone();
-    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
-
-    let doc1 = json!({ "objectId": 123, "title": "hello" });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc1);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    assert_eq!(index.number_of_documents(), 1);
-
-    let docs = index.query_builder().query("hello", 0..10).unwrap();
-    assert_eq!(docs.len(), 1);
-    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc1));
-
-    let mut deletion = index.documents_deletion();
-    deletion.delete_document(&doc1).unwrap();
-    let update_id = deletion.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    assert_eq!(index.number_of_documents(), 0);
-
-    let docs = index.query_builder().query("hello", 0..10).unwrap();
-    assert_eq!(docs.len(), 0);
-}
-
-#[test]
-fn replace_document() {
-    let tmp_dir = tempfile::tempdir().unwrap();
-    let database = Database::open(&tmp_dir).unwrap();
-
-    let as_been_updated = Arc::new(AtomicBool::new(false));
-
-    let schema = simple_schema();
-    let index = database.create_index("hello", schema).unwrap();
-
-    let as_been_updated_clone = as_been_updated.clone();
-    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
-
-    let doc1 = json!({ "objectId": 123, "title": "hello" });
-    let doc2 = json!({ "objectId": 123, "title": "coucou" });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc1);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    assert_eq!(index.number_of_documents(), 1);
-
-    let docs = index.query_builder().query("hello", 0..10).unwrap();
-    assert_eq!(docs.len(), 1);
-    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc1));
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc2);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    assert_eq!(index.number_of_documents(), 1);
-
-    let docs = index.query_builder().query("hello", 0..10).unwrap();
-    assert_eq!(docs.len(), 0);
-
-    let docs = index.query_builder().query("coucou", 0..10).unwrap();
-    assert_eq!(docs.len(), 1);
-    assert_eq!(index.document(None, docs[0].id).unwrap().as_ref(), Some(&doc2));
-}
-
-#[test]
-fn database_stats() {
-    let tmp_dir = tempfile::tempdir().unwrap();
-    let database = Database::open(&tmp_dir).unwrap();
-
-    let as_been_updated = Arc::new(AtomicBool::new(false));
-
-    let schema = simple_schema();
-    let index = database.create_index("hello", schema).unwrap();
-
-    let as_been_updated_clone = as_been_updated.clone();
-    index.set_update_callback(move |_| as_been_updated_clone.store(true, Relaxed));
-
-    let doc1 = json!({ "objectId": 123, "title": "hello" });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc1);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    let stats = index.stats().unwrap();
-    let repartition = hashmap!{
-        S("objectId") => 1u64,
-        S("title") => 1u64,
-    };
-    assert_eq!(stats.number_of_documents, 1);
-    assert_eq!(stats.documents_fields_repartition, repartition);
-
-    let doc2 = json!({ "objectId": 456, "title": "world" });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc2);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    let stats = index.stats().unwrap();
-    let repartition = hashmap!{
-        S("objectId") => 2u64,
-        S("title") => 2u64,
-    };
-    assert_eq!(stats.number_of_documents, 2);
-    assert_eq!(stats.documents_fields_repartition, repartition);
-
-
-    let doc3 = json!({ "objectId": 789 });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc3);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(as_been_updated.swap(false, Relaxed));
-    assert!(status.result.is_ok());
-    let stats = index.stats().unwrap();
-    let repartition = hashmap!{
-        S("objectId") => 3u64,
-        S("title") => 2u64,
-    };
-    assert_eq!(stats.number_of_documents, 3);
-    assert_eq!(stats.documents_fields_repartition, repartition);
-}
-
-#[test]
-fn custom_settings() {
-    let tmp_dir = tempfile::tempdir().unwrap();
-    let database = Database::open(&tmp_dir).unwrap();
-
-    let schema = simple_schema();
-    let index = database.create_index("hello", schema).unwrap();
-
-    let stop_words = hashset!{ S("le"), S("la"), S("les"), };
-    let ranking_order = vec![S("SumOfTypos"), S("NumberOfWords"), S("WordsProximity"), S("SumOfWordsAttribute"), S("SumOfWordsPosition"), S("Exact"), S("DocumentId")];
-    let distinct_field = S("title");
-    let ranking_rules = hashmap!{ S("objectId") => RankingOrdering::Asc };
-
-    index.custom_settings().set_stop_words(&stop_words).unwrap();
-    index.custom_settings().set_ranking_order(&ranking_order).unwrap();
-    index.custom_settings().set_distinct_field(&distinct_field).unwrap();
-    index.custom_settings().set_ranking_rules(&ranking_rules).unwrap();
-
-    let ret_stop_words = index.custom_settings().get_stop_words().unwrap().unwrap();
-    let ret_ranking_orderer = index.custom_settings().get_ranking_order().unwrap().unwrap();
-    let ret_distinct_field = index.custom_settings().get_distinct_field().unwrap().unwrap();
-    let ret_ranking_rules = index.custom_settings().get_ranking_rules().unwrap().unwrap();
-
-    assert_eq!(ret_stop_words, stop_words);
-    assert_eq!(ret_ranking_orderer, ranking_order);
-    assert_eq!(ret_distinct_field, distinct_field);
-    assert_eq!(ret_ranking_rules, ranking_rules);
-}
-
-#[test]
-fn documents_ids() {
-    let tmp_dir = tempfile::tempdir().unwrap();
-    let database = Database::open(&tmp_dir).unwrap();
-
-    let schema = simple_schema();
-    let index = database.create_index("hello", schema).unwrap();
-
-    let doc1 = json!({ "objectId": 123, "title": "hello" });
-    let doc2 = json!({ "objectId": 456, "title": "world" });
-    let doc3 = json!({ "objectId": 789 });
-
-    let mut addition = index.documents_addition();
-    addition.update_document(&doc1);
-    addition.update_document(&doc2);
-    addition.update_document(&doc3);
-    let update_id = addition.finalize().unwrap();
-    let status = index.update_status_blocking(update_id).unwrap();
-    assert!(status.result.is_ok());
-
-    let documents_ids_count = index.documents_ids().unwrap().count();
-    assert_eq!(documents_ids_count, 3);
-}
@@ -1,6 +1,4 @@
 use std::collections::{HashMap, BTreeMap};
-use std::io::{Read, Write};
-use std::error::Error;
 use std::{fmt, u16};
 use std::ops::BitOr;
 use std::sync::Arc;
@@ -15,13 +13,13 @@ pub const RANKED: SchemaProps = SchemaProps { displayed: false, indexed: fals
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct SchemaProps {
     #[serde(default)]
-    displayed: bool,
+    pub displayed: bool,

     #[serde(default)]
-    indexed: bool,
+    pub indexed: bool,

     #[serde(default)]
-    ranked: bool,
+    pub ranked: bool,
 }

 impl SchemaProps {
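
Because the SchemaProps fields are public after this change, combined flags can be inspected directly; a small sketch relying only on the DISPLAYED and INDEXED constants and the BitOr impl already used by the tests above:

    // Illustrative only: read SchemaProps fields without going through getters.
    use meilidb_schema::{DISPLAYED, INDEXED};

    fn main() {
        let props = DISPLAYED | INDEXED;
        assert!(props.displayed);
        assert!(props.indexed);
        assert!(!props.ranked);
    }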