Mirror of https://github.com/meilisearch/MeiliSearch (synced 2024-11-22 21:04:27 +01:00)
feat: Replace the elapsed dependency by std::time::Instant
This commit is contained in: parent bddb37e44f, commit 264fffa826
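In short, every elapsed::measure_time(|| ...) call becomes an explicit Instant::now() before the work and a start.elapsed() after it. A minimal sketch of the two styles, with an illustrative do_work function that is not part of the codebase:

    use std::time::Instant;

    fn do_work() -> u64 {
        (0..1_000_000u64).sum()
    }

    fn main() {
        // Before (elapsed 0.1): the crate ran the closure and returned a
        // pre-formatted duration alongside the closure's result:
        //     let (elapsed, result) = elapsed::measure_time(|| do_work());
        // After (std only): take a timestamp, run the code, subtract.
        let start = Instant::now();
        let result = do_work();
        println!("took {:.2?}, result = {}", start.elapsed(), result);
    }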
@@ -8,7 +8,6 @@ authors = ["Kerollmops <renault.cle@gmail.com>"]
 arc-swap = "0.3"
 bincode = "1.0"
 byteorder = "1.2"
-elapsed = "0.1"
 fst = "0.3"
 hashbrown = { version = "0.1", features = ["serde"] }
 lazy_static = "1.1"
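Nothing replaces the removed entry: std::time::Instant ships with the standard library, so the swap deletes a dependency without adding one.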
@@ -4,6 +4,7 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
 use std::collections::{HashMap, HashSet};
 use std::io::{self, BufRead, BufReader};
 use std::path::{Path, PathBuf};
+use std::time::Instant;
 use std::error::Error;
 use std::borrow::Cow;
 use std::fs::File;
@@ -124,14 +125,13 @@ fn main() -> Result<(), Box<Error>> {
         None => HashSet::new(),
     };
 
-    let (elapsed, result) = elapsed::measure_time(|| {
-        index(schema, &opt.database_path, &opt.csv_data_path, opt.update_group_size, &stop_words)
-    });
+    let start = Instant::now();
+    let result = index(schema, &opt.database_path, &opt.csv_data_path, opt.update_group_size, &stop_words);
 
     if let Err(e) = result {
         return Err(e.into())
     }
 
-    println!("database created in {} at: {:?}", elapsed, opt.database_path);
+    println!("database created in {:.2?} at: {:?}", start.elapsed(), opt.database_path);
     Ok(())
 }
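The new format string leans on the fact that std::time::Duration's Debug output honors a precision specifier, which stands in for the pre-formatted string the old crate returned. A small stand-alone illustration (the values are made up):

    use std::time::Duration;

    fn main() {
        let d = Duration::from_micros(1_234_567);
        println!("{:?}", d);   // 1.234567s
        println!("{:.2?}", d); // 1.23s
    }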
@@ -4,6 +4,7 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
 use std::collections::btree_map::{BTreeMap, Entry};
 use std::iter::FromIterator;
 use std::io::{self, Write};
+use std::time::Instant;
 use std::path::PathBuf;
 use std::error::Error;
 
@@ -102,9 +103,9 @@ fn main() -> Result<(), Box<Error>> {
     let _ = env_logger::init();
     let opt = Opt::from_args();
 
-    let (elapsed, result) = elapsed::measure_time(|| Database::open(&opt.database_path));
-    let database = result?;
-    println!("database prepared for you in {}", elapsed);
+    let start = Instant::now();
+    let database = Database::open(&opt.database_path)?;
+    println!("database prepared for you in {:.2?}", start.elapsed());
 
     let mut buffer = String::new();
     let input = io::stdin();
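Dropping measure_time also simplifies error handling here: the ? operator could not escape the closure, so the old code had to bind the Result and apply ? on a separate line. A sketch of the difference, using a stand-in Database type and path rather than the real ones:

    use std::time::Instant;

    struct Database;

    impl Database {
        // Stand-in for the real Database::open.
        fn open(path: &str) -> Result<Database, String> {
            if path.is_empty() { Err("empty path".into()) } else { Ok(Database) }
        }
    }

    fn main() -> Result<(), String> {
        // Before: let (elapsed, result) = elapsed::measure_time(|| Database::open(path));
        //         let database = result?;
        let start = Instant::now();
        let _database = Database::open("data.mdb")?; // `?` now applies directly
        println!("database prepared for you in {:.2?}", start.elapsed());
        Ok(())
    }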
@@ -119,10 +120,10 @@ fn main() -> Result<(), Box<Error>> {
         let view = database.view("default")?;
         let schema = view.schema();
 
-        let (elapsed, documents) = elapsed::measure_time(|| {
-            let builder = view.query_builder().unwrap();
-            builder.query(query, 0..opt.number_results)
-        });
+        let start = Instant::now();
+
+        let builder = view.query_builder().unwrap();
+        let documents = builder.query(query, 0..opt.number_results);
 
         let number_of_documents = documents.len();
         for doc in documents {
@@ -160,7 +161,7 @@ fn main() -> Result<(), Box<Error>> {
             println!();
         }
 
-        eprintln!("===== Found {} results in {} =====", number_of_documents, elapsed);
+        eprintln!("===== Found {} results in {:.2?} =====", number_of_documents, start.elapsed());
         buffer.clear();
     }
 
@@ -4,8 +4,6 @@ mod shared_data;
 
-use std::slice::from_raw_parts;
-use std::mem::size_of;
 use std::ops::Deref;
 use std::sync::Arc;
 
 pub use self::doc_ids::DocIds;
 pub use self::doc_indexes::{DocIndexes, DocIndexesBuilder};
@@ -1,8 +1,7 @@
-use crate::DocumentId;
-use crate::database::schema::SchemaAttr;
-use std::sync::Arc;
+use std::time::Instant;
 use std::error::Error;
 use std::ffi::OsStr;
+use std::sync::Arc;
 use std::fs;
 use std::path::{Path, PathBuf};
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -17,8 +16,10 @@ use lockfree::map::Map;
 use hashbrown::HashMap;
 use log::{info, error, warn};
 
+use crate::database::schema::SchemaAttr;
 use crate::shared_data_cursor::FromSharedDataCursor;
 use crate::write_to_bytes::WriteToBytes;
+use crate::DocumentId;
 
 pub use self::document_key::{DocumentKey, DocumentKeyAttr};
 pub use self::view::{DatabaseView, DocumentIter};
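Taken together, these two hunks mostly reshuffle imports: use std::time::Instant joins the std group at the top, while the crate::DocumentId and crate::database::schema::SchemaAttr imports move down next to the other crate-local imports. Apart from the new Instant import, the set of items in scope is unchanged.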
@@ -56,25 +57,25 @@ where D: Deref<Target=DB>
 {
     use self::update::ReadIndexEvent::{self, *};
 
-    let (elapsed, vector) = elapsed::measure_time(|| snapshot.get(DATA_INDEX));
-    info!("loading index from kv-store took {}", elapsed);
+    let start = Instant::now();
+    let vector = snapshot.get(DATA_INDEX)?;
+    info!("loading index from kv-store took {:.2?}", start.elapsed());
 
-    match vector? {
+    match vector {
         Some(vector) => {
+            let start = Instant::now();
+
             let bytes = vector.as_ref().to_vec();
-            let size = SizeFormatterBinary::new(bytes.len() as u64);
-            info!("index size is {}B", size);
+            info!("index size is {}B", SizeFormatterBinary::new(bytes.len() as u64));
 
-            let (elapsed, result) = elapsed::measure_time(|| {
-                match ReadIndexEvent::from_bytes(bytes.to_vec())? {
-                    RemovedDocuments(_) => unreachable!("BUG: Must not extract a RemovedDocuments"),
-                    UpdatedDocuments(index) => Ok(index),
-                }
-            });
+            let index = match ReadIndexEvent::from_bytes(bytes)? {
+                RemovedDocuments(_) => panic!("BUG: RemovedDocument event retrieved"),
+                UpdatedDocuments(index) => index,
+            };
 
-            info!("loading index from bytes took {}", elapsed);
+            info!("loading index from bytes took {:.2?}", start.elapsed());
 
-            result
+            Ok(index)
         },
         None => Ok(Index::default()),
     }
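Two smaller changes ride along in this hunk: from_bytes now receives bytes by value instead of bytes.to_vec(), saving a second copy of a buffer that was already copied out of the snapshot, and the impossible branch now uses panic! rather than unreachable! (both abort with a message). A sketch of the ownership point, with a hypothetical from_bytes:

    // Hypothetical consumer that takes ownership of its input,
    // mirroring ReadIndexEvent::from_bytes(Vec<u8>).
    fn from_bytes(bytes: Vec<u8>) -> usize {
        bytes.len()
    }

    fn main() {
        let bytes: Vec<u8> = vec![1, 2, 3];
        // Before: from_bytes(bytes.to_vec()) allocated and copied again.
        // After: moving `bytes` hands over the existing allocation.
        let n = from_bytes(bytes);
        println!("consumed {} bytes with no extra copy", n);
    }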
@@ -85,25 +86,25 @@ where D: Deref<Target=DB>,
 {
     use self::update::ReadRankedMapEvent::{self, *};
 
-    let (elapsed, vector) = elapsed::measure_time(|| snapshot.get(DATA_RANKED_MAP));
-    info!("loading ranked map from kv-store took {}", elapsed);
+    let start = Instant::now();
+    let vector = snapshot.get(DATA_RANKED_MAP)?;
+    info!("loading ranked map from kv-store took {:.2?}", start.elapsed());
 
-    match vector? {
+    match vector {
         Some(vector) => {
+            let start = Instant::now();
+
             let bytes = vector.as_ref().to_vec();
-            let size = SizeFormatterBinary::new(bytes.len() as u64);
-            info!("ranked map size is {}B", size);
+            info!("ranked map size is {}B", SizeFormatterBinary::new(bytes.len() as u64));
 
-            let (elapsed, result) = elapsed::measure_time(|| {
-                match ReadRankedMapEvent::from_bytes(bytes.to_vec())? {
-                    RemovedDocuments(_) => unreachable!("BUG: Must not extract a RemovedDocuments"),
-                    UpdatedDocuments(ranked_map) => Ok(ranked_map),
-                }
-            });
+            let ranked_map = match ReadRankedMapEvent::from_bytes(bytes)? {
+                RemovedDocuments(_) => panic!("BUG: RemovedDocument event retrieved"),
+                UpdatedDocuments(ranked_map) => ranked_map,
+            };
 
-            info!("loading ranked map from bytes took {}", elapsed);
+            info!("loading ranked map from bytes took {:.2?}", start.elapsed());
 
-            result
+            Ok(ranked_map)
         },
         None => Ok(RankedMap::new()),
     }
@@ -1,12 +1,12 @@
 use std::{cmp, mem, vec, str, char};
 use std::ops::{Deref, Range};
+use std::time::Instant;
 use std::error::Error;
 use std::hash::Hash;
 use std::rc::Rc;
 
 use rayon::slice::ParallelSliceMut;
 use slice_group_by::GroupByMut;
-use elapsed::measure_time;
 use hashbrown::HashMap;
 use fst::Streamer;
 use rocksdb::DB;
@@ -143,8 +143,9 @@ where D: Deref<Target=DB>,
             return builder.query(query, range);
         }
 
-        let (elapsed, mut documents) = measure_time(|| self.query_all(query));
-        info!("query_all took {}", elapsed);
+        let start = Instant::now();
+        let mut documents = self.query_all(query);
+        info!("query_all took {:.2?}", start.elapsed());
 
         let mut groups = vec![documents.as_mut_slice()];
 
@@ -163,10 +164,9 @@ where D: Deref<Target=DB>,
                 continue;
             }
 
-            let (elapsed, _) = measure_time(|| {
-                group.par_sort_unstable_by(|a, b| criterion.evaluate(a, b));
-            });
-            info!("criterion {} sort took {}", ci, elapsed);
+            let start = Instant::now();
+            group.par_sort_unstable_by(|a, b| criterion.evaluate(a, b));
+            info!("criterion {} sort took {:.2?}", ci, start.elapsed());
 
             for group in group.binary_group_by_mut(|a, b| criterion.eq(a, b)) {
                 documents_seen += group.len();
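For blocks measured purely for their side effect, the old API forced a throwaway binding (let (elapsed, _) = ...); with Instant the timed statement stands on its own. A minimal sketch using a plain sort in place of rayon's par_sort_unstable_by:

    use std::time::Instant;

    fn main() {
        let mut group = vec![3, 1, 2];
        let start = Instant::now();
        group.sort_unstable(); // stands in for group.par_sort_unstable_by(...)
        println!("criterion sort took {:.2?}", start.elapsed());
        assert_eq!(group, vec![1, 2, 3]);
    }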
@@ -214,8 +214,9 @@ where D: Deref<Target=DB>,
       K: Hash + Eq,
 {
     pub fn query(self, query: &str, range: Range<usize>) -> Vec<Document> {
-        let (elapsed, mut documents) = measure_time(|| self.inner.query_all(query));
-        info!("query_all took {}", elapsed);
+        let start = Instant::now();
+        let mut documents = self.inner.query_all(query);
+        info!("query_all took {:.2?}", start.elapsed());
 
         let mut groups = vec![documents.as_mut_slice()];
         let mut key_cache = HashMap::new();
@@ -244,10 +245,9 @@ where D: Deref<Target=DB>,
                 continue;
             }
 
-            let (elapsed, _) = measure_time(|| {
-                group.par_sort_unstable_by(|a, b| criterion.evaluate(a, b));
-            });
-            info!("criterion {} sort took {}", ci, elapsed);
+            let start = Instant::now();
+            group.par_sort_unstable_by(|a, b| criterion.evaluate(a, b));
+            info!("criterion {} sort took {:.2?}", ci, start.elapsed());
 
             for group in group.binary_group_by_mut(|a, b| criterion.eq(a, b)) {
                 // we must compute the real distinguished len of this sub-group