remove the segment feature and always import segment

Tamo 2024-10-17 11:14:33 +02:00
parent fa1db6b721
commit 3a7a20c716
5 changed files with 20 additions and 17 deletions


@@ -75,7 +75,7 @@ reqwest = { version = "0.12.5", features = [
 rustls = { version = "0.23.11", features = ["ring"], default-features = false }
 rustls-pki-types = { version = "1.7.0", features = ["alloc"] }
 rustls-pemfile = "2.1.2"
-segment = { version = "0.2.4", optional = true }
+segment = { version = "0.2.4" }
 serde = { version = "1.0.204", features = ["derive"] }
 serde_json = { version = "1.0.120", features = ["preserve_order"] }
 sha2 = "0.10.8"
@@ -132,8 +132,7 @@ tempfile = { version = "3.10.1", optional = true }
 zip = { version = "2.1.3", optional = true }

 [features]
-default = ["analytics", "meilisearch-types/all-tokenizations", "mini-dashboard"]
-analytics = ["segment"]
+default = ["meilisearch-types/all-tokenizations", "mini-dashboard"]
 mini-dashboard = [
     "static-files",
     "anyhow",


@@ -1,5 +1,3 @@
-#![allow(clippy::transmute_ptr_to_ref)] // mopify isn't updated with the latest version of clippy yet
-
 pub mod segment_analytics;

 use std::fs;
@@ -85,13 +83,19 @@ pub enum DocumentFetchKind {
     Normal { with_filter: bool, limit: usize, offset: usize, retrieve_vectors: bool },
 }

+/// To send an event to segment, your event must be able to aggregate itself with another event of the same type.
 pub trait Aggregate: 'static + mopa::Any + Send {
+    /// The name of the event that will be sent to segment.
     fn event_name(&self) -> &'static str;

+    /// Will be called every time an event has been used twice before segment flushed its buffer.
     fn aggregate(self: Box<Self>, other: Box<Self>) -> Box<Self>
     where
         Self: Sized;

+    /// An internal helper function, you shouldn't implement it yourself.
+    /// This function should always be called on the same type. If `this` and `other`
+    /// aren't the same type, the function will do nothing and return `None`.
     fn downcast_aggregate(
         this: Box<dyn Aggregate>,
         other: Box<dyn Aggregate>,
@@ -100,6 +104,7 @@ pub trait Aggregate: 'static + mopa::Any + Send {
         Self: Sized,
     {
         if this.is::<Self>() && other.is::<Self>() {
+            // The two following lines cannot fail, but just to be sure we don't crash, we're still avoiding unwrapping
             let this = this.downcast::<Self>().ok()?;
             let other = other.downcast::<Self>().ok()?;
             Some(Self::aggregate(this, other))
@@ -108,18 +113,26 @@ pub trait Aggregate: 'static + mopa::Any + Send {
         }
     }

+    /// Converts your structure to the final event that'll be sent to segment.
     fn into_event(self: Box<Self>) -> serde_json::Value;
 }

 mopafy!(Aggregate);

-/// Helper trait to define multiple aggregate with the same content but a different name.
-/// Commonly used when you must aggregate a search with POST or with GET for example.
+/// Helper trait to define multiple aggregates with the same content but a different name.
+/// Commonly used when you must aggregate a search with POST or with GET, for example.
 pub trait AggregateMethod: 'static + Default + Send {
     fn event_name() -> &'static str;
 }

 /// A macro used to quickly define multiple aggregate method with their name
+/// Usage:
+/// ```rust
+/// aggregate_methods!(
+///     SearchGET => "Documents Searched GET",
+///     SearchPOST => "Documents Searched POST",
+/// );
+/// ```
 #[macro_export]
 macro_rules! aggregate_methods {
     ($method:ident => $event_name:literal) => {
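For orientation only (not part of this commit): a minimal sketch of how an event type might implement the `Aggregate` trait documented above, now that it is always compiled in. The `ExampleAggregator` name and its field are illustrative assumptions; `Aggregate` and `serde_json` are assumed to be in scope from the module shown here.

// Illustrative sketch, not from the commit: a hypothetical event aggregator.
#[derive(Default)]
pub struct ExampleAggregator {
    // number of times the event was seen since the last flush
    total_received: usize,
}

impl Aggregate for ExampleAggregator {
    fn event_name(&self) -> &'static str {
        "Example Event"
    }

    // Merges two aggregates of the same type; invoked when the event is reported
    // again before segment has flushed its buffer.
    fn aggregate(mut self: Box<Self>, other: Box<Self>) -> Box<Self> {
        self.total_received += other.total_received;
        self
    }

    // Builds the JSON payload that is ultimately sent to segment.
    fn into_event(self: Box<Self>) -> serde_json::Value {
        serde_json::json!({ "total_received": self.total_received })
    }
}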


@@ -695,7 +695,6 @@ impl<Method: AggregateMethod> SearchAggregator<Method> {
 aggregate_methods!(
     SearchGET => "Documents Searched GET",
     SearchPOST => "Documents Searched POST",
 );

 impl<Method: AggregateMethod> Aggregate for SearchAggregator<Method> {


@@ -29,7 +29,6 @@ const MEILI_MASTER_KEY: &str = "MEILI_MASTER_KEY";
 const MEILI_ENV: &str = "MEILI_ENV";
 const MEILI_TASK_WEBHOOK_URL: &str = "MEILI_TASK_WEBHOOK_URL";
 const MEILI_TASK_WEBHOOK_AUTHORIZATION_HEADER: &str = "MEILI_TASK_WEBHOOK_AUTHORIZATION_HEADER";
-#[cfg(feature = "analytics")]
 const MEILI_NO_ANALYTICS: &str = "MEILI_NO_ANALYTICS";
 const MEILI_HTTP_PAYLOAD_SIZE_LIMIT: &str = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT";
 const MEILI_SSL_CERT_PATH: &str = "MEILI_SSL_CERT_PATH";
@@ -210,7 +209,6 @@ pub struct Opt {
     /// Meilisearch automatically collects data from all instances that do not opt out using this flag.
     /// All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted
     /// at any time.
-    #[cfg(feature = "analytics")]
     #[serde(default)] // we can't send true
     #[clap(long, env = MEILI_NO_ANALYTICS)]
     pub no_analytics: bool,
@@ -425,7 +423,6 @@ pub struct Opt {

 impl Opt {
     /// Whether analytics should be enabled or not.
-    #[cfg(all(not(debug_assertions), feature = "analytics"))]
     pub fn analytics(&self) -> bool {
         !self.no_analytics
     }
@@ -505,7 +502,6 @@ impl Opt {
             ignore_missing_dump: _,
             ignore_dump_if_db_exists: _,
             config_file_path: _,
-            #[cfg(feature = "analytics")]
             no_analytics,
             experimental_contains_filter,
             experimental_enable_metrics,
@@ -533,10 +529,7 @@ impl Opt {
             );
         }

-        #[cfg(feature = "analytics")]
-        {
-            export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
-        }
+        export_to_env_if_not_present(MEILI_NO_ANALYTICS, no_analytics.to_string());
         export_to_env_if_not_present(
             MEILI_HTTP_PAYLOAD_SIZE_LIMIT,
             http_payload_size_limit.to_string(),
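A side effect of dropping the cfg gates above: `no_analytics` and `Opt::analytics()` now exist in every build (previously `analytics()` was only compiled for release builds with the `analytics` feature), so call sites no longer need matching guards. A hedged sketch of such a call site follows; the `setup_analytics` helper is hypothetical, not from this commit.

// Hypothetical call site: no #[cfg(feature = "analytics")] mirror is needed anymore.
fn setup_analytics(opt: &Opt) {
    if opt.analytics() {
        // spawn the segment-based analytics worker
    } else {
        // fall back to a no-op analytics implementation
    }
}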


@@ -381,7 +381,6 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
         db_path: dir.as_ref().join("db"),
         dump_dir: dir.as_ref().join("dumps"),
         env: "development".to_owned(),
-        #[cfg(feature = "analytics")]
         no_analytics: true,
         max_index_size: Byte::from_u64_with_unit(100, Unit::MiB).unwrap(),
         max_task_db_size: Byte::from_u64_with_unit(1, Unit::GiB).unwrap(),