Mirror of https://github.com/meilisearch/MeiliSearch (synced 2024-12-23 21:20:24 +01:00)

Commit e0976d10ba: Merge branch 'release-v0.19.0' into stable
CHANGELOG.md

@@ -1,3 +1,9 @@
+## v0.19.0
+
+  - The snapshots are now created and then renamed atomically (#1172)
+  - Fix a race condition when an update and a document addition are processed immediately one after the other (#1176)
+  - Latin synonyms are normalized during indexation (#1174)
+
 ## v0.18.1
 
   - Fix unexpected CORS error (#1185)

@@ -64,7 +70,7 @@
 
 ## v0.11.1
 
-  - Fix facet cache on document update (#789)
+  - Fix facet cache on document update (#789)
   - Improvements on settings consistency (#778)
 
 ## v0.11.0
Cargo.lock (generated, 13 lines changed)

@@ -1639,7 +1639,7 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
 
 [[package]]
 name = "meilisearch-core"
-version = "0.18.1"
+version = "0.19.0"
 dependencies = [
  "arc-swap",
  "assert_matches",

@@ -1686,14 +1686,14 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-error"
-version = "0.18.1"
+version = "0.19.0"
 dependencies = [
  "actix-http",
 ]
 
 [[package]]
 name = "meilisearch-http"
-version = "0.18.1"
+version = "0.19.0"
 dependencies = [
  "actix-cors",
  "actix-http",

@@ -1734,6 +1734,7 @@ dependencies = [
  "tempfile",
  "tokio",
  "ureq",
+ "uuid",
  "vergen",
  "walkdir",
  "whoami",

@@ -1741,7 +1742,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-schema"
-version = "0.18.1"
+version = "0.19.0"
 dependencies = [
  "indexmap",
  "meilisearch-error",

@@ -1753,7 +1754,7 @@ dependencies = [
 [[package]]
 name = "meilisearch-tokenizer"
 version = "0.1.1"
-source = "git+https://github.com/meilisearch/Tokenizer.git?tag=v0.1.2#8d91cd52f30aa4b651a085c15056938f7b599646"
+source = "git+https://github.com/meilisearch/Tokenizer.git?tag=v0.1.3#d3fe5311a66c1f31682a297df8a8b6b8916f4252"
 dependencies = [
  "character_converter",
  "cow-utils",

@@ -1768,7 +1769,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "0.18.1"
+version = "0.19.0"
 dependencies = [
  "serde",
  "zerocopy",
LICENSE (2 lines changed)

@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2019-2020 Meili SAS
+Copyright (c) 2019-2021 Meili SAS
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md

@@ -48,7 +48,7 @@ For more information about features go to [our documentation](https://docs.meili
 
 ### Deploy the Server
 
-#### Brew (Mac OS)
+#### Homebrew (Mac OS)
 
 ```bash
 brew update && brew install meilisearch
meilisearch-core/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "meilisearch-core"
-version = "0.18.1"
+version = "0.19.0"
 license = "MIT"
 authors = ["Kerollmops <clement@meilisearch.com>"]
 edition = "2018"

@@ -24,10 +24,10 @@ intervaltree = "0.2.6"
 itertools = "0.10.0"
 levenshtein_automata = { version = "0.2.0", features = ["fst_automaton"] }
 log = "0.4.11"
-meilisearch-error = { path = "../meilisearch-error", version = "0.18.1" }
-meilisearch-schema = { path = "../meilisearch-schema", version = "0.18.1" }
-meilisearch-tokenizer = { git = "https://github.com/meilisearch/Tokenizer.git", tag = "v0.1.2" }
-meilisearch-types = { path = "../meilisearch-types", version = "0.18.1" }
+meilisearch-error = { path = "../meilisearch-error", version = "0.19.0" }
+meilisearch-schema = { path = "../meilisearch-schema", version = "0.19.0" }
+meilisearch-tokenizer = { git = "https://github.com/meilisearch/Tokenizer.git", tag = "v0.1.3" }
+meilisearch-types = { path = "../meilisearch-types", version = "0.19.0" }
 once_cell = "1.5.2"
 ordered-float = { version = "2.0.1", features = ["serde"] }
 pest = { git = "https://github.com/pest-parser/pest.git", rev = "51fd1d49f1041f7839975664ef71fe15c7dcaf67" }
meilisearch-core/src/lib.rs

@@ -39,6 +39,7 @@ pub use self::update::{EnqueuedUpdateResult, ProcessedUpdateResult, UpdateStatus
 pub use meilisearch_types::{DocIndex, DocumentId, Highlight};
 pub use meilisearch_schema::Schema;
 pub use query_words_mapper::QueryWordsMapper;
+pub use query_tree::MAX_QUERY_LEN;
 
 use compact_arena::SmallArena;
 use log::{error, trace};
meilisearch-core/src/query_tree.rs

@@ -16,6 +16,8 @@ use crate::{store, DocumentId, DocIndex, MResult, FstSetCow};
 use crate::automaton::{build_dfa, build_prefix_dfa, build_exact_dfa};
 use crate::QueryWordsMapper;
 
+pub const MAX_QUERY_LEN: usize = 10;
+
 #[derive(Clone, PartialEq, Eq, Hash)]
 pub enum Operation {
     And(Vec<Operation>),

@@ -181,6 +183,7 @@ fn split_query_string<'a, A: AsRef<[u8]>>(s: &str, stop_words: &'a fst::Set<A>)
         .tokens()
         .filter(|t| t.is_word())
         .map(|t| t.word.to_string())
         .take(MAX_QUERY_LEN)
         .enumerate()
         .collect()
 }
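The `.take(MAX_QUERY_LEN)` call added above drops every query word past the tenth before the query tree is built, which is exactly what the new `test_max_word_query` test further down relies on. A minimal standalone sketch of the same truncation, with plain whitespace splitting standing in for the real tokenizer (`truncate_query` is an illustrative name, not part of the codebase):

const MAX_QUERY_LEN: usize = 10;

// Keep at most MAX_QUERY_LEN words, paired with their positions,
// mirroring the `.take(MAX_QUERY_LEN)` inserted into split_query_string.
fn truncate_query(query: &str) -> Vec<(usize, String)> {
    query
        .split_whitespace()   // stand-in for the analyzer's word tokens
        .map(str::to_string)
        .take(MAX_QUERY_LEN)  // words beyond the limit are silently ignored
        .enumerate()
        .collect()
}

fn main() {
    let words = truncate_query("1 2 3 4 5 6 7 8 9 10 11");
    assert_eq!(words.len(), 10); // the trailing "11" was dropped
}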
meilisearch-core/src/update/documents_addition.rs

@@ -23,6 +23,8 @@ pub struct DocumentsAddition<D> {
     updates_store: store::Updates,
     updates_results_store: store::UpdatesResults,
     updates_notifier: UpdateEventsEmitter,
+    // Whether the user explicitly set the primary key in the update
+    primary_key: Option<String>,
     documents: Vec<D>,
     is_partial: bool,
 }

@@ -39,6 +41,7 @@ impl<D> DocumentsAddition<D> {
             updates_notifier,
             documents: Vec::new(),
             is_partial: false,
+            primary_key: None,
         }
     }

@@ -53,9 +56,14 @@
             updates_notifier,
             documents: Vec::new(),
             is_partial: true,
+            primary_key: None,
         }
     }
 
+    pub fn set_primary_key(&mut self, primary_key: String) {
+        self.primary_key = Some(primary_key);
+    }
+
     pub fn update_document(&mut self, document: D) {
         self.documents.push(document);
     }

@@ -71,6 +79,7 @@
             self.updates_results_store,
             self.documents,
             self.is_partial,
+            self.primary_key,
         )?;
         Ok(update_id)
     }

@@ -88,6 +97,7 @@ pub fn push_documents_addition<D: serde::Serialize>(
     updates_results_store: store::UpdatesResults,
     addition: Vec<D>,
     is_partial: bool,
+    primary_key: Option<String>,
 ) -> MResult<u64> {
     let mut values = Vec::with_capacity(addition.len());
     for add in addition {

@@ -99,9 +109,9 @@ pub fn push_documents_addition<D: serde::Serialize>(
     let last_update_id = next_update_id(writer, updates_store, updates_results_store)?;
 
     let update = if is_partial {
-        Update::documents_partial(values)
+        Update::documents_partial(primary_key, values)
     } else {
-        Update::documents_addition(values)
+        Update::documents_addition(primary_key, values)
     };
 
     updates_store.put_update(writer, last_update_id, &update)?;

@@ -149,7 +159,8 @@ pub fn apply_addition(
     writer: &mut heed::RwTxn<MainT>,
     index: &store::Index,
     new_documents: Vec<IndexMap<String, Value>>,
-    partial: bool
+    partial: bool,
+    primary_key: Option<String>,
 ) -> MResult<()>
 {
     let mut schema = match index.main.schema(writer)? {

@@ -162,7 +173,14 @@ pub fn apply_addition(
     let internal_docids = index.main.internal_docids(writer)?;
     let mut available_ids = DiscoverIds::new(&internal_docids);
 
-    let primary_key = schema.primary_key().ok_or(Error::MissingPrimaryKey)?;
+    let primary_key = match schema.primary_key() {
+        Some(primary_key) => primary_key.to_string(),
+        None => {
+            let name = primary_key.ok_or(Error::MissingPrimaryKey)?;
+            schema.set_primary_key(&name)?;
+            name
+        }
+    };
 
     // 1. store documents ids for future deletion
     let mut documents_additions = HashMap::new();

@@ -275,16 +293,18 @@ pub fn apply_documents_partial_addition(
     writer: &mut heed::RwTxn<MainT>,
     index: &store::Index,
     new_documents: Vec<IndexMap<String, Value>>,
+    primary_key: Option<String>,
 ) -> MResult<()> {
-    apply_addition(writer, index, new_documents, true)
+    apply_addition(writer, index, new_documents, true, primary_key)
 }
 
 pub fn apply_documents_addition(
     writer: &mut heed::RwTxn<MainT>,
     index: &store::Index,
     new_documents: Vec<IndexMap<String, Value>>,
+    primary_key: Option<String>,
 ) -> MResult<()> {
-    apply_addition(writer, index, new_documents, false)
+    apply_addition(writer, index, new_documents, false, primary_key)
 }
 
 pub fn reindex_all_documents(writer: &mut heed::RwTxn<MainT>, index: &store::Index) -> MResult<()> {
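The hunk at line 162 above is the core of the race-condition fix (#1176): instead of failing outright when the schema has no primary key yet, `apply_addition` can now install the key that was recorded with the update itself. A reduced model of that resolution logic (the `Schema` stand-in and the `resolve_primary_key` helper are illustrative, not the real meilisearch-schema API):

// Prefer the primary key already present in the schema; otherwise install
// the key carried by the update; only fail when neither exists.
struct Schema {
    primary_key: Option<String>,
}

impl Schema {
    fn set_primary_key(&mut self, name: &str) {
        self.primary_key = Some(name.to_string());
    }
}

fn resolve_primary_key(
    schema: &mut Schema,
    from_update: Option<String>,
) -> Result<String, &'static str> {
    match schema.primary_key.clone() {
        Some(key) => Ok(key),
        None => {
            let name = from_update.ok_or("MissingPrimaryKey")?;
            schema.set_primary_key(&name);
            Ok(name)
        }
    }
}

fn main() {
    let mut schema = Schema { primary_key: None };
    let key = resolve_primary_key(&mut schema, Some("id".to_string())).unwrap();
    assert_eq!(key, "id");
    assert_eq!(schema.primary_key.as_deref(), Some("id"));
}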
meilisearch-core/src/update/mod.rs

@@ -52,16 +52,16 @@ impl Update {
         }
     }
 
-    fn documents_addition(documents: Vec<IndexMap<String, Value>>) -> Update {
+    fn documents_addition(primary_key: Option<String>, documents: Vec<IndexMap<String, Value>>) -> Update {
         Update {
-            data: UpdateData::DocumentsAddition(documents),
+            data: UpdateData::DocumentsAddition{ documents, primary_key },
             enqueued_at: Utc::now(),
         }
     }
 
-    fn documents_partial(documents: Vec<IndexMap<String, Value>>) -> Update {
+    fn documents_partial(primary_key: Option<String>, documents: Vec<IndexMap<String, Value>>) -> Update {
         Update {
-            data: UpdateData::DocumentsPartial(documents),
+            data: UpdateData::DocumentsPartial{ documents, primary_key },
             enqueued_at: Utc::now(),
         }
     }

@@ -85,8 +85,15 @@
 pub enum UpdateData {
     ClearAll,
     Customs(Vec<u8>),
-    DocumentsAddition(Vec<IndexMap<String, Value>>),
-    DocumentsPartial(Vec<IndexMap<String, Value>>),
+    // (primary key, documents)
+    DocumentsAddition {
+        primary_key: Option<String>,
+        documents: Vec<IndexMap<String, Value>>
+    },
+    DocumentsPartial {
+        primary_key: Option<String>,
+        documents: Vec<IndexMap<String, Value>>,
+    },
     DocumentsDeletion(Vec<String>),
     Settings(Box<SettingsUpdate>)
 }

@@ -96,11 +103,11 @@ impl UpdateData {
         match self {
             UpdateData::ClearAll => UpdateType::ClearAll,
             UpdateData::Customs(_) => UpdateType::Customs,
-            UpdateData::DocumentsAddition(addition) => UpdateType::DocumentsAddition {
-                number: addition.len(),
+            UpdateData::DocumentsAddition{ documents, .. } => UpdateType::DocumentsAddition {
+                number: documents.len(),
             },
-            UpdateData::DocumentsPartial(addition) => UpdateType::DocumentsPartial {
-                number: addition.len(),
+            UpdateData::DocumentsPartial{ documents, .. } => UpdateType::DocumentsPartial {
+                number: documents.len(),
             },
             UpdateData::DocumentsDeletion(deletion) => UpdateType::DocumentsDeletion {
                 number: deletion.len(),

@@ -239,25 +246,25 @@ pub fn update_task(
 
             (update_type, result, start.elapsed())
         }
-        UpdateData::DocumentsAddition(documents) => {
+        UpdateData::DocumentsAddition { documents, primary_key } => {
             let start = Instant::now();
 
             let update_type = UpdateType::DocumentsAddition {
                 number: documents.len(),
             };
 
-            let result = apply_documents_addition(writer, index, documents);
+            let result = apply_documents_addition(writer, index, documents, primary_key);
 
             (update_type, result, start.elapsed())
         }
-        UpdateData::DocumentsPartial(documents) => {
+        UpdateData::DocumentsPartial{ documents, primary_key } => {
             let start = Instant::now();
 
             let update_type = UpdateType::DocumentsPartial {
                 number: documents.len(),
             };
 
-            let result = apply_documents_partial_addition(writer, index, documents);
+            let result = apply_documents_partial_addition(writer, index, documents, primary_key);
 
             (update_type, result, start.elapsed())
         }
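Turning `DocumentsAddition` and `DocumentsPartial` from tuple variants into struct variants lets every queued update carry the optional primary key next to its documents, while match arms that only need a document count can skip the new field with `..`. A reduced model of that shape (plain `String` documents stand in for the real `IndexMap` payload):

// Struct variants can grow fields (here `primary_key`) without breaking
// every match arm: arms that only care about `documents` elide the rest.
enum UpdateData {
    DocumentsAddition {
        primary_key: Option<String>,
        documents: Vec<String>,
    },
    DocumentsPartial {
        primary_key: Option<String>,
        documents: Vec<String>,
    },
}

fn number_of_documents(data: &UpdateData) -> usize {
    match data {
        UpdateData::DocumentsAddition { documents, .. } => documents.len(),
        UpdateData::DocumentsPartial { documents, .. } => documents.len(),
    }
}

fn main() {
    let update = UpdateData::DocumentsAddition {
        primary_key: Some("id".to_string()),
        documents: vec!["doc1".to_string(), "doc2".to_string()],
    };
    assert_eq!(number_of_documents(&update), 2);
}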
meilisearch-core/src/update/settings_update.rs

@@ -1,9 +1,10 @@
-use std::collections::{BTreeMap, BTreeSet};
+use std::{borrow::Cow, collections::{BTreeMap, BTreeSet}};
 
 use heed::Result as ZResult;
-use fst::{set::OpBuilder, SetBuilder};
+use fst::{SetBuilder, set::OpBuilder};
 use sdset::SetBuf;
 use meilisearch_schema::Schema;
+use meilisearch_tokenizer::analyzer::{Analyzer, AnalyzerConfig};
 
 use crate::database::{MainT, UpdateT};
 use crate::settings::{UpdateState, SettingsUpdate, RankingRule};

@@ -289,10 +290,28 @@ pub fn apply_synonyms_update(
 
     let main_store = index.main;
     let synonyms_store = index.synonyms;
+    let stop_words = index.main.stop_words_fst(writer)?.map_data(Cow::into_owned)?;
+    let analyzer = Analyzer::new(AnalyzerConfig::default_with_stopwords(&stop_words));
+
+    fn normalize<T: AsRef<[u8]>>(analyzer: &Analyzer<T>, text: &str) -> String {
+        analyzer.analyze(&text)
+            .tokens()
+            .fold(String::new(), |s, t| s + t.text())
+    }
+
+    // normalize synonyms and reorder them creating a BTreeMap
+    let synonyms: BTreeMap<String, Vec<String>> = synonyms.into_iter().map(|(word, alternatives)| {
+        let word = normalize(&analyzer, &word);
+        let alternatives = alternatives.into_iter().map(|text| normalize(&analyzer, &text)).collect();
+
+        (word, alternatives)
+    }).collect();
 
     // index synonyms,
     // synonyms have to be ordered by key before indexation
     let mut synonyms_builder = SetBuilder::memory();
     synonyms_store.clear(writer)?;
-    for (word, alternatives) in synonyms.clone() {
+    for (word, alternatives) in synonyms {
         synonyms_builder.insert(&word)?;
 
         let alternatives = {
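This is the indexation-side half of the Latin synonyms fix (#1174): every synonym key and alternative is run through the same analyzer used for documents, and the resulting tokens are folded back into a string, so `HP` and `hp` end up indexed identically (the `write_all_and_update` settings test further down shows `HP`/`Harry Potter` coming back as `hp`/`harry potter`). A self-contained sketch of the normalize-and-rebuild step, with plain lowercasing standing in for the real meilisearch-tokenizer `Analyzer`:

use std::collections::BTreeMap;

// Stand-in for the analyzer pass: the real code concatenates the
// analyzer's tokens; simple lowercasing approximates it for Latin text.
fn normalize(text: &str) -> String {
    text.to_lowercase()
}

// Rebuild the synonyms map with every key and alternative normalized.
// Collecting back into a BTreeMap also keeps entries ordered by key,
// which the fst SetBuilder in apply_synonyms_update requires.
fn normalize_synonyms(synonyms: BTreeMap<String, Vec<String>>) -> BTreeMap<String, Vec<String>> {
    synonyms
        .into_iter()
        .map(|(word, alternatives)| {
            let word = normalize(&word);
            let alternatives = alternatives.iter().map(|a| normalize(a)).collect();
            (word, alternatives)
        })
        .collect()
}

fn main() {
    let mut synonyms = BTreeMap::new();
    synonyms.insert("HP".to_string(), vec!["Harry Potter".to_string()]);
    let synonyms = normalize_synonyms(synonyms);
    assert_eq!(synonyms["hp"], vec!["harry potter".to_string()]);
}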
meilisearch-error/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "meilisearch-error"
-version = "0.18.1"
+version = "0.19.0"
 authors = ["marin <postma.marin@protonmail.com>"]
 edition = "2018"
 
meilisearch-http/Cargo.toml

@@ -1,7 +1,7 @@
 [package]
 name = "meilisearch-http"
 description = "MeiliSearch HTTP server"
-version = "0.18.1"
+version = "0.19.0"
 license = "MIT"
 authors = [
     "Quentin de Quelen <quentin@dequelen.me>",

@@ -32,9 +32,9 @@ http = "0.2.2"
 indexmap = { version = "1.6.1", features = ["serde-1"] }
 log = "0.4.11"
 main_error = "0.1.1"
-meilisearch-core = { path = "../meilisearch-core", version = "0.18.0" }
-meilisearch-error = { path = "../meilisearch-error", version = "0.18.1" }
-meilisearch-schema = { path = "../meilisearch-schema", version = "0.18.1" }
+meilisearch-core = { path = "../meilisearch-core", version = "0.19.0" }
+meilisearch-error = { path = "../meilisearch-error", version = "0.19.0" }
+meilisearch-schema = { path = "../meilisearch-schema", version = "0.19.0" }
 mime = "0.3.16"
 once_cell = "1.5.2"
 rand = "0.8.1"

@@ -51,6 +51,7 @@ tar = "0.4.30"
 tempfile = "3.1.0"
 tokio = { version = "0.2", features = ["macros"] }
 ureq = { version = "2.0.0", features = ["tls"], default-features = false }
+uuid = "0.8"
 walkdir = "2.3.1"
 whoami = "1.0.3"
 
meilisearch-http/src/dump.rs

@@ -128,15 +128,15 @@ fn import_index_v1(
         // push document in buffer
         values.push(document?);
         // if buffer is full, create and apply a batch, and clean buffer
-        if values.len() == document_batch_size {
+        if values.len() == document_batch_size {
             let batch = std::mem::replace(&mut values, Vec::with_capacity(document_batch_size));
-            apply_documents_addition(write_txn, &index, batch)?;
+            apply_documents_addition(write_txn, &index, batch, None)?;
         }
     }
 
-    // apply documents remaining in the buffer
-    if !values.is_empty() {
-        apply_documents_addition(write_txn, &index, values)?;
+    // apply documents remaining in the buffer
+    if !values.is_empty() {
+        apply_documents_addition(write_txn, &index, values, None)?;
     }
 
     // sync index information: stats, updated_at, last_update

@@ -289,7 +289,6 @@ fn dump_index_documents(data: &web::Data<Data>, reader: &MainReader, dir_path: &
 /// Write error with a context.
 fn fail_dump_process<E: std::error::Error>(data: &web::Data<Data>, dump_info: DumpInfo, context: &str, error: E) {
     let error_message = format!("{}; {}", context, error);
-
     error!("Something went wrong during dump process: {}", &error_message);
     data.set_current_dump_info(dump_info.with_error(Error::dump_failed(error_message).into()))
 }

@@ -405,7 +404,7 @@ pub fn init_dump_process(data: &web::Data<Data>, dumps_dir: &Path) -> Result<Dum
     let dumps_dir = dumps_dir.to_path_buf();
     let info_cloned = info.clone();
     // run dump process in a new thread
-    thread::spawn(move ||
+    thread::spawn(move ||
         dump_process(data, dumps_dir, info_cloned)
     );
 
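A side note on the buffer handling visible in `import_index_v1`: the batch is drained with `std::mem::replace`, which moves the full buffer out and leaves a freshly pre-allocated one behind, so no document is cloned. The same idiom in isolation (`flush_batches` is an illustrative helper, not project code):

// Collect items into fixed-size batches, swapping the buffer out with
// std::mem::replace each time it fills, exactly like import_index_v1.
fn flush_batches(items: impl Iterator<Item = i32>, batch_size: usize) -> Vec<Vec<i32>> {
    let mut batches = Vec::new();
    let mut buffer = Vec::with_capacity(batch_size);
    for item in items {
        buffer.push(item);
        if buffer.len() == batch_size {
            let batch = std::mem::replace(&mut buffer, Vec::with_capacity(batch_size));
            batches.push(batch);
        }
    }
    // flush whatever remains in the buffer
    if !buffer.is_empty() {
        batches.push(buffer);
    }
    batches
}

fn main() {
    let batches = flush_batches(1..=5, 2);
    assert_eq!(batches, vec![vec![1, 2], vec![3, 4], vec![5]]);
}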
meilisearch-http/src/helpers/compression.rs

@@ -1,19 +1,27 @@
 use flate2::Compression;
 use flate2::read::GzDecoder;
 use flate2::write::GzEncoder;
-use std::fs::{create_dir_all, File};
+use std::fs::{create_dir_all, rename, File};
 use std::path::Path;
 use tar::{Builder, Archive};
+use uuid::Uuid;
 
 use crate::error::Error;
 
 pub fn to_tar_gz(src: &Path, dest: &Path) -> Result<(), Error> {
-    let f = File::create(dest)?;
+    let file_name = format!(".{}", Uuid::new_v4().to_urn());
+    let p = dest.with_file_name(file_name);
+    let tmp_dest = p.as_path();
+
+    let f = File::create(tmp_dest)?;
     let gz_encoder = GzEncoder::new(f, Compression::default());
     let mut tar_encoder = Builder::new(gz_encoder);
     tar_encoder.append_dir_all(".", src)?;
     let gz_encoder = tar_encoder.into_inner()?;
     gz_encoder.finish()?;
+
+    rename(tmp_dest, dest)?;
+
     Ok(())
 }
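This hunk is the snapshot fix announced as #1172 in the changelog: the archive is first written to a hidden, UUID-named file next to `dest` and only then published with `rename`. Because the temporary file lives in the same directory, and therefore on the same filesystem, the rename is atomic on POSIX systems, so no reader ever sees a half-written snapshot. The same pattern in a self-contained form (`write_atomically` and the `.tmp` suffix are illustrative choices):

use std::fs::{rename, File};
use std::io::{self, Write};
use std::path::Path;

// Write `bytes` to `dest` without exposing a partially written file:
// fill a temp file in the same directory, then rename it over `dest`.
fn write_atomically(dest: &Path, bytes: &[u8]) -> io::Result<()> {
    let tmp = dest.with_extension("tmp"); // same directory, same filesystem
    let mut file = File::create(&tmp)?;
    file.write_all(bytes)?;
    file.sync_all()?; // make sure the data hit the disk before publishing
    rename(&tmp, dest)?; // atomic within one filesystem on POSIX
    Ok(())
}

fn main() -> io::Result<()> {
    write_atomically(Path::new("snapshot.tar.gz"), b"archive bytes")
}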
meilisearch-http/src/routes/document.rs

@@ -132,7 +132,7 @@ async fn get_all_documents(
     let limit = params.limit.unwrap_or(20);
     let index_uid = &path.index_uid;
     let reader = data.db.main_read_txn()?;
-
+
     let documents = get_all_documents_sync(
         &data,
         &reader,

@@ -145,15 +145,6 @@
     Ok(HttpResponse::Ok().json(documents))
 }
 
-fn find_primary_key(document: &IndexMap<String, Value>) -> Option<String> {
-    for key in document.keys() {
-        if key.to_lowercase().contains("id") {
-            return Some(key.to_string());
-        }
-    }
-    None
-}
-
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase", deny_unknown_fields)]
 struct UpdateDocumentsQuery {

@@ -168,26 +159,6 @@ async fn update_multiple_documents(
     is_partial: bool,
 ) -> Result<HttpResponse, ResponseError> {
     let update_id = data.get_or_create_index(&path.index_uid, |index| {
-        let reader = data.db.main_read_txn()?;
-
-        let mut schema = index
-            .main
-            .schema(&reader)?
-            .ok_or(meilisearch_core::Error::SchemaMissing)?;
-
-        if schema.primary_key().is_none() {
-            let id = match &params.primary_key {
-                Some(id) => id.to_string(),
-                None => body
-                    .first()
-                    .and_then(find_primary_key)
-                    .ok_or(meilisearch_core::Error::MissingPrimaryKey)?,
-            };
-
-            schema.set_primary_key(&id).map_err(Error::bad_request)?;
-
-            data.db.main_write(|w| index.main.put_schema(w, &schema))?;
-        }
-
         let mut document_addition = if is_partial {
             index.documents_partial_addition()

@@ -195,6 +166,26 @@
             index.documents_addition()
         };
 
+        // Return an early error if primary key is already set, otherwise, try to set it up in the
+        // update later.
+        let reader = data.db.main_read_txn()?;
+        let schema = index
+            .main
+            .schema(&reader)?
+            .ok_or(meilisearch_core::Error::SchemaMissing)?;
+
+        match (params.into_inner().primary_key, schema.primary_key()) {
+            (Some(key), None) => document_addition.set_primary_key(key),
+            (None, None) => {
+                let key = body
+                    .first()
+                    .and_then(find_primary_key)
+                    .ok_or(meilisearch_core::Error::MissingPrimaryKey)?;
+                document_addition.set_primary_key(key);
+            }
+            _ => ()
+        }
+
         for document in body.into_inner() {
             document_addition.update_document(document);
         }

@@ -204,6 +195,15 @@
     return Ok(HttpResponse::Accepted().json(IndexUpdateResponse::with_id(update_id)));
 }
 
+fn find_primary_key(document: &IndexMap<String, Value>) -> Option<String> {
+    for key in document.keys() {
+        if key.to_lowercase().contains("id") {
+            return Some(key.to_string());
+        }
+    }
+    None
+}
+
 #[post("/indexes/{index_uid}/documents", wrap = "Authentication::Private")]
 async fn add_documents(
     data: web::Data<Data>,
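After this restructuring, primary-key selection for both document routes happens in one place and in a fixed order: an explicit `?primaryKey=` query parameter wins; otherwise the first attribute of the first posted document whose lowercased name contains "id" is inferred; otherwise the update fails with `MissingPrimaryKey`. All of this only runs when the schema does not already define a key. A plain-function sketch of that order (stand-in types rather than the actix-web handler):

// Same heuristic as find_primary_key above: the first attribute whose
// lowercased name contains "id", scanning keys in document order.
fn find_primary_key<'a>(keys: impl IntoIterator<Item = &'a str>) -> Option<String> {
    keys.into_iter()
        .find(|key| key.to_lowercase().contains("id"))
        .map(str::to_string)
}

// Resolution order used by update_multiple_documents after this change.
fn resolve_primary_key(
    explicit: Option<String>,     // ?primaryKey= query parameter
    first_document_keys: &[&str], // keys of the first posted document
) -> Result<String, &'static str> {
    explicit
        .or_else(|| find_primary_key(first_document_keys.iter().copied()))
        .ok_or("missing primary key")
}

fn main() {
    let keys = ["title", "product_id", "comment"];
    assert_eq!(resolve_primary_key(None, &keys).unwrap(), "product_id");
    assert_eq!(resolve_primary_key(Some("uid".into()), &keys).unwrap(), "uid");
}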
@@ -94,13 +94,21 @@ async fn return_update_status_of_pushed_documents() {
     ];
 
     let mut update_ids = Vec::new();
 
+    let mut bodies = bodies.into_iter();
+
+    let url = "/indexes/test/documents?primaryKey=title";
+    let (response, status_code) = server.post_request(&url, bodies.next().unwrap()).await;
+    assert_eq!(status_code, 202);
+    let update_id = response["updateId"].as_u64().unwrap();
+    update_ids.push(update_id);
+    server.wait_update_id(update_id).await;
 
     let url = "/indexes/test/documents";
     for body in bodies {
-        let (response, status_code) = server.post_request(&url, body).await;
-        assert_eq!(status_code, 202);
-        let update_id = response["updateId"].as_u64().unwrap();
-        update_ids.push(update_id);
+        let (response, status_code) = server.post_request(&url, body).await;
+        assert_eq!(status_code, 202);
+        let update_id = response["updateId"].as_u64().unwrap();
+        update_ids.push(update_id);
     }
 
     // 2. Fetch the status of index.

@@ -173,7 +181,7 @@ async fn should_return_existing_update() {
     let (response, status_code) = server.create_index(body).await;
     assert_eq!(status_code, 201);
     assert_eq!(response["primaryKey"], json!(null));
-
+
     let body = json!([{
         "title": "Test",
         "comment": "comment test"
@@ -1945,3 +1945,32 @@ async fn test_filter_nb_hits_search_normal() {
     println!("result: {}", response);
     assert_eq!(response["nbHits"], 1);
 }
+
+#[actix_rt::test]
+async fn test_max_word_query() {
+    use meilisearch_core::MAX_QUERY_LEN;
+
+    let mut server = common::Server::with_uid("test");
+    let body = json!({
+        "uid": "test",
+        "primaryKey": "id",
+    });
+    server.create_index(body).await;
+    let documents = json!([
+        {"id": 1, "value": "1 2 3 4 5 6 7 8 9 10 11"},
+        {"id": 2, "value": "1 2 3 4 5 6 7 8 9 10"}]
+    );
+    server.add_or_update_multiple_documents(documents).await;
+
+    // We want to create a request where the 11 will be ignored. We have 2 documents, where a query
+    // with only one should return both, but a query with 1 and 11 should return only the first.
+    // This is how we know that outstanding query words have been ignored
+    let query = (0..MAX_QUERY_LEN)
+        .map(|_| "1")
+        .chain(std::iter::once("11"))
+        .fold(String::new(), |s, w| s + " " + w);
+    let (response, _) = server.search_post(json!({"q": query})).await;
+    assert_eq!(response["nbHits"], 2);
+    let (response, _) = server.search_post(json!({"q": "1 11"})).await;
+    assert_eq!(response["nbHits"], 1);
+}
@@ -167,6 +167,89 @@ async fn search_with_settings_stop_words() {
 async fn search_with_settings_synonyms() {
     let mut server = common::Server::test_server().await;
 
     let config = json!({
         "rankingRules": [
             "typo",
+            "words",
+            "proximity",
+            "attribute",
+            "wordsPosition",
+            "desc(age)",
+            "exactness",
+            "desc(balance)"
+        ],
+        "distinctAttribute": null,
+        "searchableAttributes": [
+            "name",
+            "age",
+            "color",
+            "gender",
+            "email",
+            "address",
+            "about"
+        ],
+        "displayedAttributes": [
+            "name",
+            "age",
+            "gender",
+            "color",
+            "email",
+            "phone",
+            "address",
+            "balance"
+        ],
+        "stopWords": null,
+        "synonyms": {
+            "Application": [
+                "Exercitation"
+            ]
+        },
+    });
+
+    server.update_all_settings(config).await;
+
+    let query = "q=application&limit=3";
+    let expect = json!([
+        {
+            "balance": "$1,921.58",
+            "age": 31,
+            "color": "Green",
+            "name": "Harper Carson",
+            "gender": "male",
+            "email": "harpercarson@chorizon.com",
+            "phone": "+1 (912) 430-3243",
+            "address": "883 Dennett Place, Knowlton, New Mexico, 9219"
+        },
+        {
+            "balance": "$1,706.13",
+            "age": 27,
+            "color": "Green",
+            "name": "Cherry Orr",
+            "gender": "female",
+            "email": "cherryorr@chorizon.com",
+            "phone": "+1 (995) 479-3174",
+            "address": "442 Beverly Road, Ventress, New Mexico, 3361"
+        },
+        {
+            "balance": "$1,476.39",
+            "age": 28,
+            "color": "brown",
+            "name": "Maureen Dale",
+            "gender": "female",
+            "email": "maureendale@chorizon.com",
+            "phone": "+1 (984) 538-3684",
+            "address": "817 Newton Street, Bannock, Wyoming, 1468"
+        }
+    ]);
+
+    let (response, _status_code) = server.search_get(query).await;
+    assert_json_eq!(expect, response["hits"].clone(), ordered: false);
+}
+
+#[actix_rt::test]
+async fn search_with_settings_normalized_synonyms() {
+    let mut server = common::Server::test_server().await;
+
+    let config = json!({
+        "rankingRules": [
+            "typo",
@@ -171,6 +171,8 @@ async fn write_all_and_update() {
     "synonyms": {
         "road": ["street", "avenue"],
         "street": ["avenue"],
+        "HP": ["Harry Potter"],
+        "Harry Potter": ["HP"]
     },
     "attributesForFaceting": ["title"],
 });

@@ -208,6 +210,8 @@ async fn write_all_and_update() {
     "synonyms": {
         "road": ["street", "avenue"],
         "street": ["avenue"],
+        "hp": ["harry potter"],
+        "harry potter": ["hp"]
     },
     "attributesForFaceting": ["title"],
 });
meilisearch-schema/Cargo.toml

@@ -1,13 +1,13 @@
 [package]
 name = "meilisearch-schema"
-version = "0.18.1"
+version = "0.19.0"
 license = "MIT"
 authors = ["Kerollmops <renault.cle@gmail.com>"]
 edition = "2018"
 
 [dependencies]
 indexmap = { version = "1.6.1", features = ["serde-1"] }
-meilisearch-error = { path = "../meilisearch-error", version = "0.18.1" }
+meilisearch-error = { path = "../meilisearch-error", version = "0.19.0" }
 serde = { version = "1.0.118", features = ["derive"] }
 serde_json = { version = "1.0.61", features = ["preserve_order"] }
 zerocopy = "0.3.0"
meilisearch-tokenizer/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "meilisearch-tokenizer"
-version = "0.18.1"
+version = "0.19.0"
 license = "MIT"
 authors = ["Kerollmops <renault.cle@gmail.com>"]
 edition = "2018"
meilisearch-types/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "meilisearch-types"
-version = "0.18.1"
+version = "0.19.0"
 license = "MIT"
 authors = ["Clément Renault <renault.cle@gmail.com>"]
 edition = "2018"