[package]
name = "milli"
edition = "2018"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
# edition.workspace = true
license.workspace = true

[dependencies]
bimap = { version = "0.6.3", features = ["serde"] }
bincode = "1.3.3"
bstr = "1.4.0"
bytemuck = { version = "1.13.1", features = ["extern_crate_alloc"] }
byteorder = "1.4.3"
charabia = { version = "0.8.5", default-features = false }
concat-arrays = "0.1.2"
crossbeam-channel = "0.5.8"
deserr = "0.6.0"
either = { version = "1.8.1", features = ["serde"] }
flatten-serde-json = { path = "../flatten-serde-json" }
fst = "0.4.7"
fxhash = "0.2.1"
geoutils = "0.5.1"
grenad = { version = "0.4.5", default-features = false, features = [
    "rayon",
    "tempfile",
] }
heed = { version = "0.20.0-alpha.9", default-features = false, features = [
    "serde-json",
    "serde-bincode",
    "read-txn-no-tls",
] }
indexmap = { version = "2.0.0", features = ["serde"] }
json-depth-checker = { path = "../json-depth-checker" }
levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
memmap2 = "0.7.1"
obkv = "0.2.0"
once_cell = "1.17.1"
ordered-float = "3.6.0"
rand_pcg = { version = "0.3.1", features = ["serde1"] }
rayon = "1.7.0"
roaring = "0.10.1"
rstar = { version = "0.11.0", features = ["serde"] }
serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] }
slice-group-by = "0.3.0"
smallstr = { version = "0.3.0", features = ["serde"] }
smallvec = "1.10.0"
smartstring = "1.0.1"
tempfile = "3.5.0"
thiserror = "1.0.40"
time = { version = "0.3.20", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
uuid = { version = "1.3.1", features = ["v4"] }

filter-parser = { path = "../filter-parser" }

# documents words self-join
itertools = "0.11.0"

# profiling
puffin = "0.16.0"

# logging
log = "0.4.17"
logging_timer = "1.1.0"
csv = "1.2.1"
candle-core = { git = "https://github.com/huggingface/candle.git", version = "0.3.1" }
candle-transformers = { git = "https://github.com/huggingface/candle.git", version = "0.3.1" }
candle-nn = { git = "https://github.com/huggingface/candle.git", version = "0.3.1" }
tokenizers = { git = "https://github.com/huggingface/tokenizers.git", tag = "v0.14.1", version = "0.14.1", default_features = false, features = ["onig"] }
hf-hub = { git = "https://github.com/dureuill/hf-hub.git", branch = "rust_tls", default_features = false, features = [
    "online",
] }
tokio = { version = "1.34.0", features = ["rt"] }
futures = "0.3.29"
reqwest = { version = "0.11.16", features = [
    "rustls-tls",
    "json",
], default-features = false }
tiktoken-rs = "0.5.7"
liquid = "0.26.4"
arroy = { git = "https://github.com/meilisearch/arroy.git", version = "0.1.0" }
rand = "0.8.5"

[dev-dependencies]
mimalloc = { version = "0.1.37", default-features = false }
big_s = "1.0.2"
insta = "1.29.0"
maplit = "1.0.2"
md5 = "0.7.0"
meili-snap = { path = "../meili-snap" }
rand = { version = "0.8.5", features = ["small_rng"] }

[features]
all-tokenizations = [
    "charabia/chinese",
    "charabia/hebrew",
    "charabia/japanese",
    "charabia/thai",
    "charabia/korean",
    "charabia/greek",
    "charabia/khmer",
]
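
# A usage sketch (illustrative, not part of this manifest): when building the
# milli crate directly from its own directory, every specialized tokenization
# can be enabled at once with something like
#   cargo build --features all-tokenizations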

# Use POSIX semaphores instead of SysV semaphores in LMDB
# For more information on this feature, see heed's Cargo.toml
lmdb-posix-sem = ["heed/posix-sem"]
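# A hedged sketch of how a dependent crate might opt in (the path below is
# hypothetical); the feature simply forwards `posix-sem` to heed:
#   milli = { path = "../milli", features = ["lmdb-posix-sem"] }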

# allow chinese specialized tokenization
chinese = ["charabia/chinese"]

# allow hebrew specialized tokenization
hebrew = ["charabia/hebrew"]

# allow japanese specialized tokenization
japanese = ["charabia/japanese"]
japanese-transliteration = ["charabia/japanese-transliteration"]

# allow korean specialized tokenization
korean = ["charabia/korean"]

# allow thai specialized tokenization
thai = ["charabia/thai"]

# allow greek specialized tokenization
greek = ["charabia/greek"]

# allow khmer specialized tokenization
khmer = ["charabia/khmer"]