Allow disabling specialized tokenizations (again)
In PR #2773, I added the `chinese`, `hebrew`, `japanese`, and `thai` feature flags so that Meilisearch could be built without the huge specialized tokenizations that took up 90% of the Meilisearch binary size. Unfortunately, due to some recent changes, this no longer works. The problem lies in excessive use of the `default` feature flag, which infects the dependency graph. Instead of adding `default-features = false` here and there, it's easier and more future-proof not to declare `default` in `milli` and `meilisearch-types` at all. I've renamed it to `all-tokenizations`, which also makes it a bit clearer what it's about.
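As a rough sketch of how this feature layout could look in `milli`'s Cargo.toml (the exact contents below are my assumption, not copied from this commit; the tokenizer dependency is assumed to be charabia, which Meilisearch uses):

[features]
# No `default` feature is declared, so nothing is pulled in implicitly
# and downstream crates no longer need `default-features = false`.

# Opt-in umbrella feature replacing the old `default`.
all-tokenizations = ["chinese", "hebrew", "japanese", "thai"]

# Each specialized tokenization forwards to the matching feature of the
# tokenizer dependency.
chinese = ["charabia/chinese"]
hebrew = ["charabia/hebrew"]
japanese = ["charabia/japanese"]
thai = ["charabia/thai"]

With a layout like this, a slim binary can be built by enabling only what is needed, e.g. `cargo build --features japanese`, while `--features all-tokenizations` restores the previous all-inclusive behavior.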
This commit is contained in:
commit 13f1277637 (parent a95128df6b)
7 changed files with 9 additions and 9 deletions
@@ -204,7 +204,7 @@ mod test {
     use super::*;
     use crate::index::tests::TempIndex;

-    #[cfg(feature = "default")]
+    #[cfg(feature = "japanese")]
     #[test]
     fn test_kanji_language_detection() {
         let index = TempIndex::new();
@@ -4,7 +4,7 @@ pub mod distinct;
 pub mod exactness;
 pub mod geo_sort;
 pub mod integration;
-#[cfg(feature = "default")]
+#[cfg(feature = "all-tokenizations")]
 pub mod language;
 pub mod ngram_split_words;
 pub mod proximity;
@@ -1581,7 +1581,7 @@ mod tests {
         assert_eq!(count, 4);
     }

-    #[cfg(feature = "default")]
+    #[cfg(feature = "chinese")]
     #[test]
     fn test_meilisearch_1714() {
         let index = TempIndex::new();