1669: Fix windows integration tests r=MarinPostma a=ManyTheFish

Set the max_memory value to unlimited during tests:
because the tests run several Meilisearch instances in parallel,
we overestimate the max_memory value, which makes the tests crash on Windows.

Co-authored-by: many <maxime@meilisearch.com>
bors[bot] 2021-09-08 12:44:50 +00:00 committed by GitHub
commit d595623162
5 changed files with 27 additions and 28 deletions
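To see why an overestimated per-process memory budget breaks the Windows runners, a rough back-of-the-envelope sketch helps. The numbers and the two-thirds heuristic below are illustrative assumptions, not Meilisearch's actual default computation: the point is only that when every test server derives its indexing budget from the machine's total memory, several of them running in parallel overcommit the host.

// Illustrative sketch only: hypothetical budget heuristic and machine size,
// not the real Meilisearch defaults.
fn per_process_budget(total_memory_bytes: u64) -> u64 {
    // assume each process claims two thirds of the machine's RAM for indexing
    total_memory_bytes * 2 / 3
}

fn main() {
    let total = 8u64 * 1024 * 1024 * 1024; // pretend the CI machine has 8 GiB
    let parallel_servers = 4;              // several Meilisearch instances run during the test suite
    let requested = per_process_budget(total) * parallel_servers;
    // ~21 GiB requested on an 8 GiB machine: the parallel runs exhaust memory,
    // which is why the tests below switch max_memory to unlimited instead.
    println!("requested {} GiB of {} GiB", requested >> 30, total >> 30);
}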

View File

@@ -31,9 +31,12 @@ pub struct IndexActor<S> {
 }
 
 impl<S: IndexStore + Sync + Send> IndexActor<S> {
-    pub fn new(receiver: mpsc::Receiver<IndexMsg>, store: S) -> anyhow::Result<Self> {
-        let options = IndexerOpts::default();
-        let update_handler = UpdateHandler::new(&options)?;
+    pub fn new(
+        receiver: mpsc::Receiver<IndexMsg>,
+        store: S,
+        options: &IndexerOpts,
+    ) -> anyhow::Result<Self> {
+        let update_handler = UpdateHandler::new(options)?;
         let update_handler = Arc::new(update_handler);
         let receiver = Some(receiver);
         Ok(Self {

View File

@@ -1,3 +1,4 @@
+use crate::option::IndexerOpts;
 use std::path::{Path, PathBuf};
 
 use tokio::sync::{mpsc, oneshot};
@@ -148,11 +149,15 @@ impl IndexActorHandle for IndexActorHandleImpl {
 }
 
 impl IndexActorHandleImpl {
-    pub fn new(path: impl AsRef<Path>, index_size: usize) -> anyhow::Result<Self> {
+    pub fn new(
+        path: impl AsRef<Path>,
+        index_size: usize,
+        options: &IndexerOpts,
+    ) -> anyhow::Result<Self> {
         let (sender, receiver) = mpsc::channel(100);
 
         let store = MapIndexStore::new(path, index_size);
-        let actor = IndexActor::new(receiver, store)?;
+        let actor = IndexActor::new(receiver, store, options)?;
         tokio::task::spawn(actor.run());
         Ok(Self { sender })
     }

View File

@@ -110,7 +110,8 @@ impl IndexController {
         std::fs::create_dir_all(&path)?;
 
         let uuid_resolver = uuid_resolver::UuidResolverHandleImpl::new(&path)?;
-        let index_handle = index_actor::IndexActorHandleImpl::new(&path, index_size)?;
+        let index_handle =
+            index_actor::IndexActorHandleImpl::new(&path, index_size, &options.indexer_options)?;
         let update_handle = update_actor::UpdateActorHandleImpl::new(
             index_handle.clone(),
             &path,

View File

@@ -38,11 +38,6 @@ pub struct IndexerOpts {
     #[structopt(long, default_value)]
     pub max_memory: MaxMemory,
 
-    /// Size of the linked hash map cache when indexing.
-    /// The bigger it is, the faster the indexing is but the more memory it takes.
-    #[structopt(long, default_value = "500")]
-    pub linked_hash_map_size: usize,
-
     /// The name of the compression algorithm to use when compressing intermediate
     /// Grenad chunks while indexing documents.
     ///
@@ -54,18 +49,6 @@ pub struct IndexerOpts {
     #[structopt(long, requires = "chunk-compression-type")]
     pub chunk_compression_level: Option<u32>,
 
-    /// The number of bytes to remove from the begining of the chunks while reading/sorting
-    /// or merging them.
-    ///
-    /// File fusing must only be enable on file systems that support the `FALLOC_FL_COLLAPSE_RANGE`,
-    /// (i.e. ext4 and XFS). File fusing will only work if the `enable-chunk-fusing` is set.
-    #[structopt(long, default_value = "4 GiB")]
-    pub chunk_fusing_shrink_size: Byte,
-
-    /// Enable the chunk fusing or not, this reduces the amount of disk space used.
-    #[structopt(long)]
-    pub enable_chunk_fusing: bool,
-
     /// Number of parallel jobs for indexing, defaults to # of CPUs.
     #[structopt(long)]
     pub indexing_jobs: Option<usize>,
@@ -77,11 +60,8 @@ impl Default for IndexerOpts {
             log_every_n: 100_000,
             max_nb_chunks: None,
             max_memory: MaxMemory::default(),
-            linked_hash_map_size: 500,
             chunk_compression_type: CompressionType::None,
             chunk_compression_level: None,
-            chunk_fusing_shrink_size: Byte::from_str("4GiB").unwrap(),
-            enable_chunk_fusing: false,
             indexing_jobs: None,
         }
     }
@@ -286,6 +266,12 @@ impl Deref for MaxMemory {
     }
 }
 
+impl MaxMemory {
+    pub fn unlimited() -> Self {
+        Self(None)
+    }
+}
+
 /// Returns the total amount of bytes available or `None` if this system isn't supported.
 fn total_memory_bytes() -> Option<u64> {
     if System::IS_SUPPORTED {
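For readers unfamiliar with the pattern, the `unlimited()` constructor added above is simply the `None` case of an option-like wrapper that callers read through `Deref`. A minimal, self-contained sketch follows; it assumes `MaxMemory` wraps an `Option` (byte amounts simplified to `u64` here) and is not the crate's exact definition.

use std::ops::Deref;

// Simplified stand-in for the real type, which wraps an optional byte amount.
#[derive(Debug, Clone, Copy, Default)]
struct MaxMemory(Option<u64>);

impl MaxMemory {
    // Mirrors the constructor added in the diff: no value means "no memory cap".
    pub fn unlimited() -> Self {
        Self(None)
    }
}

impl Deref for MaxMemory {
    type Target = Option<u64>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let limit = MaxMemory::unlimited();
    // Callers read the cap through Deref and treat None as unlimited.
    assert!(limit.is_none());
}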

View File

@@ -7,7 +7,7 @@ use tempdir::TempDir;
 use urlencoding::encode;
 
 use meilisearch_http::data::Data;
-use meilisearch_http::option::{IndexerOpts, Opt};
+use meilisearch_http::option::{IndexerOpts, MaxMemory, Opt};
 
 use super::index::Index;
 use super::service::Service;
@@ -90,7 +90,11 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
         schedule_snapshot: false,
         snapshot_interval_sec: 0,
         import_dump: None,
-        indexer_options: IndexerOpts::default(),
+        indexer_options: IndexerOpts {
+            // memory has to be unlimited because several meilisearch are running in test context.
+            max_memory: MaxMemory::unlimited(),
+            ..Default::default()
+        },
         log_level: "off".into(),
     }
 }