# Mirror of https://github.com/meilisearch/MeiliSearch
# Synced 2024-11-14 08:58:59 +01:00
[package]
name = "meilisearch-types"
publish = false

# All remaining package metadata is inherited from the workspace root
# (see [workspace.package] in the top-level Cargo.toml).
version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
actix-web = { version = "4.8.0", default-features = false }
anyhow = "1.0.86"
convert_case = "0.6.0"
csv = "1.3.0"
deserr = { version = "0.6.2", features = ["actix-web"] }
either = { version = "1.13.0", features = ["serde"] }
enum-iterator = "2.1.0"
# Workspace-local crate (path dependency).
file-store = { path = "../file-store" }
flate2 = "1.0.30"
fst = "0.4.7"
memmap2 = "0.9.4"
# Workspace-local crate (path dependency); also the target of the
# tokenization feature forwards in [features] below.
milli = { path = "../milli" }
roaring = { version = "0.10.6", features = ["serde"] }
serde = { version = "1.0.204", features = ["derive"] }
serde-cs = "0.2.4"
serde_json = "1.0.120"
tar = "0.4.41"
tempfile = "3.10.1"
thiserror = "1.0.61"
time = { version = "0.3.36", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
tokio = "1.38"
uuid = { version = "1.10.0", features = ["serde", "v4"] }

[dev-dependencies]
insta = "1.39.0"
# Workspace-local snapshot-testing helpers (path dependency).
meili-snap = { path = "../meili-snap" }

# Each feature here simply forwards to the matching feature of the
# workspace-local `milli` crate, enabling a language-specific tokenizer.
[features]
# all specialized tokenizations
all-tokenizations = ["milli/all-tokenizations"]

# chinese specialized tokenization
chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"]
# hebrew specialized tokenization
hebrew = ["milli/hebrew"]
# japanese specialized tokenization
japanese = ["milli/japanese"]
# korean specialized tokenization
korean = ["milli/korean"]
# thai specialized tokenization
thai = ["milli/thai"]
# allow greek specialized tokenization
greek = ["milli/greek"]
# allow khmer specialized tokenization
khmer = ["milli/khmer"]
# allow vietnamese specialized tokenization
vietnamese = ["milli/vietnamese"]
# force swedish character recomposition
swedish-recomposition = ["milli/swedish-recomposition"]