[package]
name = "meilisearch-types"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
actix-web = { version = "4.11.0", default-features = false }
anyhow = "1.0.98"
bumpalo = "3.18.1"
bumparaw-collections = "0.1.4"
convert_case = "0.6.0"
csv = "1.3.1"
deserr = { version = "0.6.3", features = ["actix-web"] }
either = { version = "1.15.0", features = ["serde"] }
enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
flate2 = "1.1.2"
fst = "0.4.7"
memmap2 = "0.9.5"
milli = { path = "../milli" }
roaring = { version = "0.10.12", features = ["serde"] }
rustc-hash = "2.1.1"
serde = { version = "1.0.219", features = ["derive"] }
serde-cs = "0.2.4"
serde_json = { version = "1.0.140", features = ["preserve_order"] }
tar = "0.4.44"
tempfile = "3.20.0"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
    "serde-well-known",
    "formatting",
    "parsing",
    "macros",
] }
tokio = "1.45"
utoipa = { version = "5.4.0", features = ["macros"] }
uuid = { version = "1.17.0", features = ["serde", "v4"] }

[dev-dependencies]
# fixed version due to format breakages in v1.40
insta = "=1.39.0"
meili-snap = { path = "../meili-snap" }

[features]
# all specialized tokenizations
all-tokenizations = ["milli/all-tokenizations"]

# chinese specialized tokenization
chinese = ["milli/chinese"]
chinese-pinyin = ["milli/chinese-pinyin"]
# hebrew specialized tokenization
hebrew = ["milli/hebrew"]
# japanese specialized tokenization
japanese = ["milli/japanese"]
# korean specialized tokenization
korean = ["milli/korean"]
# thai specialized tokenization
thai = ["milli/thai"]
# allow greek specialized tokenization
greek = ["milli/greek"]
# allow khmer specialized tokenization
khmer = ["milli/khmer"]
# allow vietnamese specialized tokenization
vietnamese = ["milli/vietnamese"]
# force swedish character recomposition
swedish-recomposition = ["milli/swedish-recomposition"]
# allow german tokenization
german = ["milli/german"]
# allow turkish normalization
turkish = ["milli/turkish"]