use std::borrow::Cow;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::ops::Range;
use std::time::Instant;
use std::{cmp, fmt, iter::once};

use fst::{IntoStreamer, Streamer};
use itertools::{EitherOrBoth, merge_join_by};
use meilisearch_tokenizer::split_query_string;
use sdset::{Set, SetBuf, SetOperation};
use log::debug;

use crate::database::MainT;
use crate::{store, DocumentId, DocIndex, MResult, FstSetCow};
use crate::automaton::{normalize_str, build_dfa, build_prefix_dfa, build_exact_dfa};
use crate::QueryWordsMapper;
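
/// A boolean query tree: intersections (`And`), unions (`Or`) and leaf `Query` nodes.
///
/// The `Debug` implementation pretty-prints the tree one node per line, indented
/// by depth. For instance, an `And` of two tolerant leaves prints roughly as
/// (illustrative only):
///
/// ```text
/// AND
///   Tolerant { id: 0, word: "hello" }
///   Tolerant { id: 1, word: "world" }
/// ```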
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum Operation {
    And(Vec<Operation>),
    Or(Vec<Operation>),
    Query(Query),
}

impl fmt::Debug for Operation {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fn pprint_tree(f: &mut fmt::Formatter<'_>, op: &Operation, depth: usize) -> fmt::Result {
            match op {
                Operation::And(children) => {
                    writeln!(f, "{:1$}AND", "", depth * 2)?;
                    children.iter().try_for_each(|c| pprint_tree(f, c, depth + 1))
                },
                Operation::Or(children) => {
                    writeln!(f, "{:1$}OR", "", depth * 2)?;
                    children.iter().try_for_each(|c| pprint_tree(f, c, depth + 1))
                },
                Operation::Query(query) => writeln!(f, "{:2$}{:?}", "", query, depth * 2),
            }
        }

        pprint_tree(f, self, 0)
    }
}
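
// Shorthand constructors for the leaf `Query` variants used while building the tree.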
impl Operation {
    fn tolerant(id: QueryId, prefix: bool, s: &str) -> Operation {
        Operation::Query(Query { id, prefix, exact: true, kind: QueryKind::Tolerant(s.to_string()) })
    }

    fn non_tolerant(id: QueryId, prefix: bool, s: &str) -> Operation {
        Operation::Query(Query { id, prefix, exact: true, kind: QueryKind::NonTolerant(s.to_string()) })
    }

    fn phrase2(id: QueryId, prefix: bool, (left, right): (&str, &str)) -> Operation {
        let kind = QueryKind::Phrase(vec![left.to_owned(), right.to_owned()]);
        Operation::Query(Query { id, prefix, exact: true, kind })
    }
}
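
/// Identifier of a leaf query; `create_query_tree` returns a mapping from these
/// ids to ranges of query words.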
pub type QueryId = usize;
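
/// A leaf of the query tree: a single word, exact word or phrase to look up in the index.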
#[derive(Clone, Eq)]
pub struct Query {
    pub id: QueryId,
    pub prefix: bool,
    pub exact: bool,
    pub kind: QueryKind,
}
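
// Equality and hashing deliberately ignore `id` and `exact`: queries for the
// same word with the same kind are treated as the same entry when caching results.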
impl PartialEq for Query {
    fn eq(&self, other: &Self) -> bool {
        self.prefix == other.prefix && self.kind == other.kind
    }
}

impl Hash for Query {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.prefix.hash(state);
        self.kind.hash(state);
    }
}
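
/// How a leaf query matches the words of the index: `Tolerant` allows typos
/// (through a Levenshtein DFA), `NonTolerant` requires the exact word, and
/// `Phrase` requires consecutive words.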
#[derive(Clone, PartialEq, Eq, Hash)]
pub enum QueryKind {
    Tolerant(String),
    NonTolerant(String),
    Phrase(Vec<String>),
}

impl fmt::Debug for Query {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Query { id, prefix, kind, .. } = self;
        let prefix = if *prefix { String::from("Prefix") } else { String::default() };
        match kind {
            QueryKind::NonTolerant(word) => {
                f.debug_struct(&(prefix + "NonTolerant")).field("id", &id).field("word", &word).finish()
            },
            QueryKind::Tolerant(word) => {
                f.debug_struct(&(prefix + "Tolerant")).field("id", &id).field("word", &word).finish()
            },
            QueryKind::Phrase(words) => {
                f.debug_struct(&(prefix + "Phrase")).field("id", &id).field("words", &words).finish()
            },
        }
    }
}

#[derive(Debug, Default)]
pub struct PostingsList {
    docids: SetBuf<DocumentId>,
    matches: SetBuf<DocIndex>,
}
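
/// Read-only view over the index data needed to build and execute a query tree.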
pub struct Context<'a> {
    pub words_set: FstSetCow<'a>,
    pub stop_words: FstSetCow<'a>,
    pub synonyms: store::Synonyms,
    pub postings_lists: store::PostingsLists,
    pub prefix_postings_lists: store::PrefixPostingsListsCache,
}
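
/// Tries every split of `word` into two indexed words and returns the split
/// whose less frequent half appears in the most documents, or `None` when no
/// split has both halves present in the postings lists.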
fn split_best_frequency<'a>(reader: &heed::RoTxn<MainT>, ctx: &Context, word: &'a str) -> MResult<Option<(&'a str, &'a str)>> {
    let chars = word.char_indices().skip(1);
    let mut best = None;

    for (i, _) in chars {
        let (left, right) = word.split_at(i);

        let left_freq = ctx.postings_lists
            .postings_list(reader, left.as_bytes())?
            .map(|p| p.docids.len())
            .unwrap_or(0);
        let right_freq = ctx.postings_lists
            .postings_list(reader, right.as_bytes())?
            .map(|p| p.docids.len())
            .unwrap_or(0);

        let min_freq = cmp::min(left_freq, right_freq);
        if min_freq != 0 && best.map_or(true, |(old, _, _)| min_freq > old) {
            best = Some((min_freq, left, right));
        }
    }

    Ok(best.map(|(_, l, r)| (l, r)))
}
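
/// Fetches the synonyms registered for the given sequence of words (normalized
/// and joined with spaces) and returns every alternative as its own word list.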
fn fetch_synonyms(reader: &heed::RoTxn<MainT>, ctx: &Context, words: &[&str]) -> MResult<Vec<Vec<String>>> {
    let words = normalize_str(&words.join(" "));
    let set = ctx.synonyms.synonyms_fst(reader, words.as_bytes())?;

    let mut strings = Vec::new();
    let mut stream = set.stream();
    while let Some(input) = stream.next() {
        if let Ok(input) = std::str::from_utf8(input) {
            let alts = input.split_ascii_whitespace().map(ToOwned::to_owned).collect();
            strings.push(alts);
        }
    }

    Ok(strings)
}
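
/// Wraps the operations with `f` (e.g. `Operation::And` or `Operation::Or`),
/// except when there is exactly one operation, which is returned unwrapped to
/// keep the tree flat.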
fn create_operation<I, F>(iter: I, f: F) -> Operation
where I: IntoIterator<Item=Operation>,
      F: Fn(Vec<Operation>) -> Operation,
{
    let mut iter = iter.into_iter();
    match (iter.next(), iter.next()) {
        (Some(first), None) => first,
        (first, second) => f(first.into_iter().chain(second).chain(iter).collect()),
    }
}
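
/// Maximum number of consecutive query words combined into a single n-gram alternative.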
const MAX_NGRAM: usize = 3;
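
/// Builds the query tree for `query`: words are lowercased, stop words are
/// removed, and every group of up to `MAX_NGRAM` consecutive words produces an
/// `Or` of alternatives (the original words, their synonyms, a frequency-based
/// split of single words and the concatenation of the group). Also returns the
/// mapping from `QueryId`s to the ranges of query words they cover.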
pub fn create_query_tree(
    reader: &heed::RoTxn<MainT>,
    ctx: &Context,
    query: &str,
) -> MResult<(Operation, HashMap<QueryId, Range<usize>>)>
{
    let words = split_query_string(query).map(str::to_lowercase);
    let words = words.filter(|w| !ctx.stop_words.contains(w));
    let words: Vec<_> = words.enumerate().collect();

    let mut mapper = QueryWordsMapper::new(words.iter().map(|(_, w)| w));
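
    // Recursively builds, for every n-gram prefix of `words`, the alternatives
    // of that group followed by the operations built from the remaining words.
    // Every derived alternative gets a fresh id and is declared to `mapper` so
    // it can be traced back to the original query words.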
    fn create_inner(
        reader: &heed::RoTxn<MainT>,
        ctx: &Context,
        mapper: &mut QueryWordsMapper,
        words: &[(usize, String)],
    ) -> MResult<Vec<Operation>>
    {
        let mut alts = Vec::new();

        for ngram in 1..=MAX_NGRAM {
            if let Some(group) = words.get(..ngram) {
                let mut group_ops = Vec::new();

                let tail = &words[ngram..];
                let is_last = tail.is_empty();

                let mut group_alts = Vec::new();
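
                // A group of one word is expanded with its synonyms and its best
                // two-word split; a longer group is expanded with its synonyms
                // and the concatenation of its words.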
                match group {
                    [(id, word)] => {
                        let mut idgen = ((id + 1) * 100)..;
                        let range = (*id)..id+1;

                        let phrase = split_best_frequency(reader, ctx, word)?
                            .map(|ws| {
                                let id = idgen.next().unwrap();
                                idgen.next().unwrap();
                                mapper.declare(range.clone(), id, &[ws.0, ws.1]);
                                Operation::phrase2(id, is_last, ws)
                            });

                        let synonyms = fetch_synonyms(reader, ctx, &[word])?
                            .into_iter()
                            .map(|alts| {
                                let exact = alts.len() == 1;
                                let id = idgen.next().unwrap();
                                mapper.declare(range.clone(), id, &alts);

                                let mut idgen = once(id).chain(&mut idgen);
                                let iter = alts.into_iter().map(|w| {
                                    let id = idgen.next().unwrap();
                                    let kind = QueryKind::NonTolerant(w);
                                    Operation::Query(Query { id, prefix: false, exact, kind })
                                });

                                create_operation(iter, Operation::And)
                            });

                        let original = Operation::tolerant(*id, is_last, word);

                        group_alts.push(original);
                        group_alts.extend(synonyms.chain(phrase));
                    },
                    words => {
                        let id = words[0].0;
                        let mut idgen = ((id + 1) * 100_usize.pow(ngram as u32))..;
                        let range = id..id+ngram;

                        let words: Vec<_> = words.iter().map(|(_, s)| s.as_str()).collect();

                        for synonym in fetch_synonyms(reader, ctx, &words)? {
                            let exact = synonym.len() == 1;
                            let id = idgen.next().unwrap();
                            mapper.declare(range.clone(), id, &synonym);

                            let mut idgen = once(id).chain(&mut idgen);
                            let synonym = synonym.into_iter().map(|s| {
                                let id = idgen.next().unwrap();
                                let kind = QueryKind::NonTolerant(s);
                                Operation::Query(Query { id, prefix: false, exact, kind })
                            });
                            group_alts.push(create_operation(synonym, Operation::And));
                        }

                        let id = idgen.next().unwrap();
                        let concat = words.concat();
                        mapper.declare(range.clone(), id, &[&concat]);
                        group_alts.push(Operation::non_tolerant(id, is_last, &concat));
                    }
                }

                group_ops.push(create_operation(group_alts, Operation::Or));

                if !tail.is_empty() {
                    let tail_ops = create_inner(reader, ctx, mapper, tail)?;
                    group_ops.push(create_operation(tail_ops, Operation::Or));
                }

                alts.push(create_operation(group_ops, Operation::And));
            }
        }

        Ok(alts)
    }

    let alternatives = create_inner(reader, ctx, &mut mapper, &words)?;
    let operation = Operation::Or(alternatives);
    let mapping = mapper.mapping();

    Ok((operation, mapping))
}
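
/// Key under which the word-level matches of a leaf query are stored: a single
/// `Query` can yield several postings lists, one per derived word (`input`),
/// together with its edit `distance` and whether the match counts as exact.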
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct PostingsKey<'o> {
    pub query: &'o Query,
    pub input: Vec<u8>,
    pub distance: u8,
    pub is_exact: bool,
}

pub type Postings<'o, 'txn> = HashMap<PostingsKey<'o>, Cow<'txn, Set<DocIndex>>>;
pub type Cache<'o, 'txn> = HashMap<&'o Operation, Cow<'txn, Set<DocumentId>>>;
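
/// Result of traversing a query tree: the document ids matching the whole tree
/// plus, for every leaf query, the postings collected along the way.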
pub struct QueryResult<'o, 'txn> {
    pub docids: Cow<'txn, Set<DocumentId>>,
    pub queries: Postings<'o, 'txn>,
}
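
/// Executes a query tree against the index: `And` nodes intersect the document
/// ids of their children, `Or` nodes union them, and leaf queries are resolved
/// through the words FST and the postings lists.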
pub fn traverse_query_tree<'o, 'txn>(
    reader: &'txn heed::RoTxn<MainT>,
    ctx: &Context,
    tree: &'o Operation,
) -> MResult<QueryResult<'o, 'txn>>
{
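    // Intersects the document ids of the children, memoizing every child result
    // in `cache` so identical sub-trees are only executed once.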
    fn execute_and<'o, 'txn>(
        reader: &'txn heed::RoTxn<MainT>,
        ctx: &Context,
        cache: &mut Cache<'o, 'txn>,
        postings: &mut Postings<'o, 'txn>,
        depth: usize,
        operations: &'o [Operation],
    ) -> MResult<Cow<'txn, Set<DocumentId>>>
    {
        debug!("{:1$}AND", "", depth * 2);

        let before = Instant::now();
        let mut results = Vec::new();

        for op in operations {
            if cache.get(op).is_none() {
                let docids = match op {
                    Operation::And(ops) => execute_and(reader, ctx, cache, postings, depth + 1, &ops)?,
                    Operation::Or(ops) => execute_or(reader, ctx, cache, postings, depth + 1, &ops)?,
                    Operation::Query(query) => execute_query(reader, ctx, postings, depth + 1, &query)?,
                };
                cache.insert(op, docids);
            }
        }

        for op in operations {
            if let Some(docids) = cache.get(op) {
                results.push(docids.as_ref());
            }
        }

        let op = sdset::multi::Intersection::new(results);
        let docids = op.into_set_buf();

        debug!("{:3$}--- AND fetched {} documents in {:.02?}", "", docids.len(), before.elapsed(), depth * 2);

        Ok(Cow::Owned(docids))
    }
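
    // Unions the document ids of the children, with the same memoization as
    // `execute_and`.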
    fn execute_or<'o, 'txn>(
        reader: &'txn heed::RoTxn<MainT>,
        ctx: &Context,
        cache: &mut Cache<'o, 'txn>,
        postings: &mut Postings<'o, 'txn>,
        depth: usize,
        operations: &'o [Operation],
    ) -> MResult<Cow<'txn, Set<DocumentId>>>
    {
        debug!("{:1$}OR", "", depth * 2);

        let before = Instant::now();
        let mut results = Vec::new();

        for op in operations {
            if cache.get(op).is_none() {
                let docids = match op {
                    Operation::And(ops) => execute_and(reader, ctx, cache, postings, depth + 1, &ops)?,
                    Operation::Or(ops) => execute_or(reader, ctx, cache, postings, depth + 1, &ops)?,
                    Operation::Query(query) => execute_query(reader, ctx, postings, depth + 1, &query)?,
                };
                cache.insert(op, docids);
            }
        }

        for op in operations {
            if let Some(docids) = cache.get(op) {
                results.push(docids.as_ref());
            }
        }

        let op = sdset::multi::Union::new(results);
        let docids = op.into_set_buf();

        debug!("{:3$}--- OR fetched {} documents in {:.02?}", "", docids.len(), before.elapsed(), depth * 2);

        Ok(Cow::Owned(docids))
    }
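
    // Resolves a single leaf query into document ids and records the matched
    // postings: tolerant words go through a Levenshtein DFA (or the prefix
    // postings cache for very short prefixes), non-tolerant words through an
    // exact DFA, and phrases keep only the positions where the two words are
    // consecutive.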
    fn execute_query<'o, 'txn>(
        reader: &'txn heed::RoTxn<MainT>,
        ctx: &Context,
        postings: &mut Postings<'o, 'txn>,
        depth: usize,
        query: &'o Query,
    ) -> MResult<Cow<'txn, Set<DocumentId>>>
    {
        let before = Instant::now();

        let Query { prefix, kind, exact, .. } = query;
        let docids: Cow<Set<_>> = match kind {
            QueryKind::Tolerant(word) => {
                if *prefix && word.len() <= 2 {
                    let prefix = {
                        let mut array = [0; 4];
                        let bytes = word.as_bytes();
                        array[..bytes.len()].copy_from_slice(bytes);
                        array
                    };

                    // We retrieve the cached postings lists for all
                    // the words that start with this short prefix.
                    let result = ctx.prefix_postings_lists.prefix_postings_list(reader, prefix)?.unwrap_or_default();
                    let key = PostingsKey { query, input: word.clone().into_bytes(), distance: 0, is_exact: false };
                    postings.insert(key, result.matches);
                    let prefix_docids = &result.docids;

                    // We retrieve the exact postings list for the prefix,
                    // because we must consider these matches as exact.
                    let result = ctx.postings_lists.postings_list(reader, word.as_bytes())?.unwrap_or_default();
                    let key = PostingsKey { query, input: word.clone().into_bytes(), distance: 0, is_exact: true };
                    postings.insert(key, result.matches);
                    let exact_docids = &result.docids;

                    let before = Instant::now();
                    let docids = sdset::duo::Union::new(prefix_docids, exact_docids).into_set_buf();
                    debug!("{:4$}prefix docids ({} and {}) construction took {:.02?}",
                        "", prefix_docids.len(), exact_docids.len(), before.elapsed(), depth * 2);

                    Cow::Owned(docids)

                } else {
                    let dfa = if *prefix { build_prefix_dfa(word) } else { build_dfa(word) };

                    let byte = word.as_bytes()[0];
                    let mut stream = if byte == u8::max_value() {
                        ctx.words_set.search(&dfa).ge(&[byte]).into_stream()
                    } else {
                        ctx.words_set.search(&dfa).ge(&[byte]).lt(&[byte + 1]).into_stream()
                    };

                    let before = Instant::now();
                    let mut results = Vec::new();
                    while let Some(input) = stream.next() {
                        if let Some(result) = ctx.postings_lists.postings_list(reader, input)? {
                            let distance = dfa.eval(input).to_u8();
                            let is_exact = *exact && distance == 0 && input.len() == word.len();
                            results.push(result.docids);
                            let key = PostingsKey { query, input: input.to_owned(), distance, is_exact };
                            postings.insert(key, result.matches);
                        }
                    }
                    debug!("{:3$}docids retrieval ({:?}) took {:.02?}", "", results.len(), before.elapsed(), depth * 2);

                    let before = Instant::now();
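                    // Above ten candidate lists, concatenate all document ids and let
                    // `SetBuf::from_dirty` sort and deduplicate them, instead of doing
                    // an n-way union.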
                    let docids = if results.len() > 10 {
                        let cap = results.iter().map(|dis| dis.len()).sum();
                        let mut docids = Vec::with_capacity(cap);
                        for dis in results {
                            docids.extend_from_slice(&dis);
                        }
                        SetBuf::from_dirty(docids)
                    } else {
                        let sets = results.iter().map(AsRef::as_ref).collect();
                        sdset::multi::Union::new(sets).into_set_buf()
                    };
                    debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);

                    Cow::Owned(docids)
                }
            },
            QueryKind::NonTolerant(word) => {
                // TODO support prefix and non-prefix exact DFA
                let dfa = build_exact_dfa(word);

                let byte = word.as_bytes()[0];
                let mut stream = if byte == u8::max_value() {
                    ctx.words_set.search(&dfa).ge(&[byte]).into_stream()
                } else {
                    ctx.words_set.search(&dfa).ge(&[byte]).lt(&[byte + 1]).into_stream()
                };

                let before = Instant::now();
                let mut results = Vec::new();
                while let Some(input) = stream.next() {
                    if let Some(result) = ctx.postings_lists.postings_list(reader, input)? {
                        let distance = dfa.eval(input).to_u8();
                        results.push(result.docids);
                        let key = PostingsKey { query, input: input.to_owned(), distance, is_exact: *exact };
                        postings.insert(key, result.matches);
                    }
                }
                debug!("{:3$}docids retrieval ({:?}) took {:.02?}", "", results.len(), before.elapsed(), depth * 2);

                let before = Instant::now();
                let docids = if results.len() > 10 {
                    let cap = results.iter().map(|dis| dis.len()).sum();
                    let mut docids = Vec::with_capacity(cap);
                    for dis in results {
                        docids.extend_from_slice(&dis);
                    }
                    SetBuf::from_dirty(docids)
                } else {
                    let sets = results.iter().map(AsRef::as_ref).collect();
                    sdset::multi::Union::new(sets).into_set_buf()
                };
                debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);

                Cow::Owned(docids)
            },
            QueryKind::Phrase(words) => {
                // TODO support prefix and non-prefix exact DFA
                if let [first, second] = words.as_slice() {
                    let first = ctx.postings_lists.postings_list(reader, first.as_bytes())?.unwrap_or_default();
                    let second = ctx.postings_lists.postings_list(reader, second.as_bytes())?.unwrap_or_default();
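
                    // Keep only the positions where `second` appears exactly one word
                    // after `first`, in the same attribute of the same document.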
                    let iter = merge_join_by(first.matches.as_slice(), second.matches.as_slice(), |a, b| {
                        let x = (a.document_id, a.attribute, (a.word_index as u32) + 1);
                        let y = (b.document_id, b.attribute, b.word_index as u32);
                        x.cmp(&y)
                    });

                    let matches: Vec<_> = iter
                        .filter_map(EitherOrBoth::both)
                        .flat_map(|(a, b)| once(*a).chain(Some(*b)))
                        .collect();

                    let before = Instant::now();
                    let mut docids: Vec<_> = matches.iter().map(|m| m.document_id).collect();
                    docids.dedup();
                    let docids = SetBuf::new(docids).unwrap();
                    debug!("{:2$}docids construction took {:.02?}", "", before.elapsed(), depth * 2);

                    let matches = Cow::Owned(SetBuf::from_dirty(matches));
                    let key = PostingsKey { query, input: vec![], distance: 0, is_exact: true };
                    postings.insert(key, matches);

                    Cow::Owned(docids)
                } else {
                    debug!("{:2$}{:?} skipped", "", words, depth * 2);
                    Cow::default()
                }
            },
        };

        debug!("{:4$}{:?} fetched {:?} documents in {:.02?}", "", query, docids.len(), before.elapsed(), depth * 2);
        Ok(docids)
    }

    let mut cache = Cache::new();
    let mut postings = Postings::new();

    let docids = match tree {
        Operation::And(ops) => execute_and(reader, ctx, &mut cache, &mut postings, 0, &ops)?,
        Operation::Or(ops) => execute_or(reader, ctx, &mut cache, &mut postings, 0, &ops)?,
        Operation::Query(query) => execute_query(reader, ctx, &mut postings, 0, &query)?,
    };

    Ok(QueryResult { docids, queries: postings })
}