Fix and optimize word_prefix_pair_proximity_docids database
This commit is contained in:
  parent 2d1727697d
  commit 5c962c03dd
@@ -207,6 +207,24 @@ enum Command {
         word2: String,
     },

+    /// Outputs a CSV with the proximities for the two specified words and
+    /// the documents ids where these relations appear.
+    ///
+    /// `word1`, `prefix` define the word pair specified *in this specific order*.
+    /// `proximity` defines the proximity between the two specified words.
+    /// `documents_ids` defines the documents ids where the relation appears.
+    WordPrefixPairProximitiesDocids {
+        /// Display the whole documents ids in detail.
+        #[structopt(long)]
+        full_display: bool,
+
+        /// First word of the word pair.
+        word1: String,
+
+        /// Second word of the word pair.
+        prefix: String,
+    },
+
     /// Outputs the words FST to standard output.
     ///
     /// One can use the FST binary helper to dissect and analyze it,
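For orientation, here is a self-contained sketch of how a structopt enum variant like the one added above surfaces on the command line. The binary name and the exact subcommand spelling are assumptions (structopt 0.3 kebab-cases variant names by default); only the variant shape comes from the diff.

use structopt::StructOpt;

#[derive(StructOpt)]
enum Command {
    /// Outputs a CSV with the proximities for a word/prefix pair.
    WordPrefixPairProximitiesDocids {
        /// Display the whole documents ids in detail.
        #[structopt(long)]
        full_display: bool,
        /// First word of the word pair.
        word1: String,
        /// Second word of the word pair.
        prefix: String,
    },
}

fn main() {
    // Invoked as something like (assumed spelling):
    //   infos word-prefix-pair-proximities-docids --full-display lazy fo
    match Command::from_args() {
        Command::WordPrefixPairProximitiesDocids { full_display, word1, prefix } => {
            println!("full_display={} word1={} prefix={}", full_display, word1, prefix);
        }
    }
}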
@@ -282,6 +300,9 @@ fn main() -> anyhow::Result<()> {
         WordPairProximitiesDocids { full_display, word1, word2 } => {
             word_pair_proximities_docids(&index, &rtxn, !full_display, word1, word2)
         }
+        WordPrefixPairProximitiesDocids { full_display, word1, prefix } => {
+            word_prefix_pair_proximities_docids(&index, &rtxn, !full_display, word1, prefix)
+        }
         ExportWordsFst => export_words_fst(&index, &rtxn),
         ExportWordsPrefixFst => export_words_prefix_fst(&index, &rtxn),
         ExportDocuments { internal_documents_ids } => {
@@ -1131,3 +1152,46 @@ fn word_pair_proximities_docids(

     Ok(wtr.flush()?)
 }
+
+fn word_prefix_pair_proximities_docids(
+    index: &Index,
+    rtxn: &heed::RoTxn,
+    debug: bool,
+    word1: String,
+    word_prefix: String,
+) -> anyhow::Result<()> {
+    use heed::types::ByteSlice;
+    use milli::RoaringBitmapCodec;
+
+    let stdout = io::stdout();
+    let mut wtr = csv::Writer::from_writer(stdout.lock());
+    wtr.write_record(&["word1", "word_prefix", "proximity", "documents_ids"])?;
+
+    // Create the prefix key with only the pair of words.
+    let mut prefix = Vec::with_capacity(word1.len() + word_prefix.len() + 1);
+    prefix.extend_from_slice(word1.as_bytes());
+    prefix.push(0);
+    prefix.extend_from_slice(word_prefix.as_bytes());
+
+    let db = index.word_prefix_pair_proximity_docids.as_polymorph();
+    let iter = db.prefix_iter::<_, ByteSlice, RoaringBitmapCodec>(rtxn, &prefix)?;
+    for result in iter {
+        let (key, docids) = result?;
+
+        // Skip keys that are longer than the requested one,
+        // a longer key means that the requested prefix is only a strict prefix of the second word.
+        if key.len() != prefix.len() + 1 {
+            continue;
+        }
+
+        let proximity = key.last().unwrap();
+        let docids = if debug {
+            format!("{:?}", docids)
+        } else {
+            format!("{:?}", docids.iter().collect::<Vec<_>>())
+        };
+        wtr.write_record(&[&word1, &word_prefix, &proximity.to_string(), &docids])?;
+    }
+
+    Ok(wtr.flush()?)
+}
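The dump above leans on the byte layout of the keys in word_prefix_pair_proximity_docids, as implied by the prefix construction, the key.len() != prefix.len() + 1 filter, and the key.last() read: the first word, a NUL separator, the prefix, then a trailing proximity byte. A minimal std-only sketch of that layout; encode_key/decode_key are illustrative helpers, not milli APIs:

use std::str;

// [ word1 bytes | 0x00 | prefix bytes | proximity byte ]
fn encode_key(word1: &str, prefix: &str, proximity: u8) -> Vec<u8> {
    let mut key = Vec::with_capacity(word1.len() + prefix.len() + 2);
    key.extend_from_slice(word1.as_bytes());
    key.push(0); // NUL byte separating the two words
    key.extend_from_slice(prefix.as_bytes());
    key.push(proximity); // the proximity is always the last byte
    key
}

fn decode_key(key: &[u8]) -> Option<(&str, &str, u8)> {
    let (&proximity, rest) = key.split_last()?;
    let nul = rest.iter().position(|&b| b == 0)?;
    let word1 = str::from_utf8(&rest[..nul]).ok()?;
    let prefix = str::from_utf8(&rest[nul + 1..]).ok()?;
    Some((word1, prefix, proximity))
}

fn main() {
    let key = encode_key("lazy", "fo", 1);
    assert_eq!(key, b"lazy\0fo\x01");
    assert_eq!(decode_key(&key), Some(("lazy", "fo", 1)));
}

encode_key("lazy", "fo", 1) thus produces exactly the shape that prefix_iter scans for above.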
@@ -180,10 +180,6 @@ fn resolve_state(
                 if let Some(attribute_allowed_docids) =
                     ctx.field_id_word_count_docids(id, query_len)?
                 {
-                    println!(
-                        "found candidates that have the good count: {:?}",
-                        attribute_allowed_docids
-                    );
                     let mut attribute_candidates_array =
                         attribute_start_with_docids(ctx, id as u32, query)?;
                     attribute_candidates_array.push(attribute_allowed_docids);
@@ -461,13 +461,18 @@ fn query_pair_proximity_docids(
     let prefix = right.prefix;
     match (&left.kind, &right.kind) {
         (QueryKind::Exact { word: left, .. }, QueryKind::Exact { word: right, .. }) => {
-            if prefix && ctx.in_prefix_cache(&right) {
-                Ok(ctx
-                    .word_prefix_pair_proximity_docids(left.as_str(), right.as_str(), proximity)?
-                    .unwrap_or_default())
-            } else if prefix {
-                let r_words = word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?;
-                all_word_pair_proximity_docids(ctx, &[(left, 0)], &r_words, proximity)
+            if prefix {
+                match ctx.word_prefix_pair_proximity_docids(
+                    left.as_str(),
+                    right.as_str(),
+                    proximity,
+                )? {
+                    Some(docids) => Ok(docids),
+                    None => {
+                        let r_words = word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?;
+                        all_word_pair_proximity_docids(ctx, &[(left, 0)], &r_words, proximity)
+                    }
+                }
             } else {
                 Ok(ctx
                     .word_pair_proximity_docids(left.as_str(), right.as_str(), proximity)?
@@ -477,22 +482,24 @@ fn query_pair_proximity_docids(
         (QueryKind::Tolerant { typo, word: left }, QueryKind::Exact { word: right, .. }) => {
             let l_words =
                 word_derivations(&left, false, *typo, ctx.words_fst(), wdcache)?.to_owned();
-            if prefix && ctx.in_prefix_cache(&right) {
+            if prefix {
                 let mut docids = RoaringBitmap::new();
                 for (left, _) in l_words {
-                    let current_docids = ctx
-                        .word_prefix_pair_proximity_docids(
-                            left.as_ref(),
-                            right.as_ref(),
-                            proximity,
-                        )?
-                        .unwrap_or_default();
+                    let current_docids = match ctx.word_prefix_pair_proximity_docids(
+                        left.as_str(),
+                        right.as_str(),
+                        proximity,
+                    )? {
+                        Some(docids) => Ok(docids),
+                        None => {
+                            let r_words =
+                                word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?;
+                            all_word_pair_proximity_docids(ctx, &[(left, 0)], &r_words, proximity)
+                        }
+                    }?;
                     docids |= current_docids;
                 }
                 Ok(docids)
-            } else if prefix {
-                let r_words = word_derivations(&right, true, 0, ctx.words_fst(), wdcache)?;
-                all_word_pair_proximity_docids(ctx, &l_words, &r_words, proximity)
             } else {
                 all_word_pair_proximity_docids(ctx, &l_words, &[(right, 0)], proximity)
             }
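Both hunks above replace the in_prefix_cache gate with the same pattern: ask the word-prefix database first, and only fall back to enumerating word derivations when the pair is absent. A sketch of that strategy under assumed, simplified types; the Ctx trait and its method signatures here are hypothetical stand-ins, not milli's real API:

// `Ctx` and its methods are hypothetical stand-ins for milli's real context.
trait Ctx {
    fn word_prefix_pair_proximity_docids(&self, w1: &str, prefix: &str, prox: u8) -> Option<Vec<u32>>;
    fn words_behind_prefix(&self, prefix: &str) -> Vec<String>;
    fn word_pair_proximity_docids(&self, w1: &str, w2: &str, prox: u8) -> Vec<u32>;
}

fn pair_docids(ctx: &impl Ctx, w1: &str, prefix: &str, prox: u8) -> Vec<u32> {
    match ctx.word_prefix_pair_proximity_docids(w1, prefix, prox) {
        // Fast path: the (word, prefix) pair was precomputed at indexing time.
        Some(docids) => docids,
        // Slow path: derive every word behind the prefix and union the docids.
        None => {
            let mut acc = Vec::new();
            for w2 in ctx.words_behind_prefix(prefix) {
                acc.extend(ctx.word_pair_proximity_docids(w1, &w2, prox));
            }
            acc.sort_unstable();
            acc.dedup();
            acc
        }
    }
}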
@@ -21,7 +21,7 @@ use typed_chunk::{write_typed_chunk_into_index, TypedChunk};

 pub use self::helpers::{
     create_sorter, create_writer, merge_cbo_roaring_bitmaps, merge_roaring_bitmaps,
-    sorter_into_lmdb_database, write_into_lmdb_database, writer_into_reader,
+    sorter_into_lmdb_database, write_into_lmdb_database, writer_into_reader, MergeFn,
 };
 use self::helpers::{grenad_obkv_into_chunks, GrenadParameters};
 pub use self::transform::{Transform, TransformOutput};
@@ -81,7 +81,7 @@ pub struct IndexDocuments<'t, 'u, 'i, 'a> {
     pub(crate) thread_pool: Option<&'a ThreadPool>,
     facet_level_group_size: Option<NonZeroUsize>,
     facet_min_level_size: Option<NonZeroUsize>,
-    words_prefix_threshold: Option<f64>,
+    words_prefix_threshold: Option<u32>,
     max_prefix_length: Option<usize>,
     words_positions_level_group_size: Option<NonZeroU32>,
     words_positions_min_level_size: Option<NonZeroU32>,
@@ -1,15 +1,13 @@
-use std::str;
+use std::collections::HashMap;

-use fst::automaton::{Automaton, Str};
-use fst::{IntoStreamer, Streamer};
+use fst::IntoStreamer;
 use grenad::CompressionType;
 use heed::types::ByteSlice;
-use heed::BytesEncode;
 use log::debug;
+use slice_group_by::GroupBy;

-use crate::heed_codec::StrStrU8Codec;
 use crate::update::index_documents::{
-    create_sorter, merge_cbo_roaring_bitmaps, sorter_into_lmdb_database, WriteMethod,
+    create_sorter, merge_cbo_roaring_bitmaps, sorter_into_lmdb_database, MergeFn, WriteMethod,
 };
 use crate::{Index, Result};

@@ -20,6 +18,7 @@ pub struct WordPrefixPairProximityDocids<'t, 'u, 'i> {
     pub(crate) chunk_compression_level: Option<u32>,
     pub(crate) max_nb_chunks: Option<usize>,
     pub(crate) max_memory: Option<usize>,
+    threshold: u32,
 }

 impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
@@ -34,16 +33,26 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
             chunk_compression_level: None,
             max_nb_chunks: None,
             max_memory: None,
+            threshold: 100,
         }
     }

+    /// Set the number of words required to make a prefix be part of the words prefixes
+    /// database. If a word prefix is supposed to match more than this number of words in the
+    /// dictionary, then this prefix is added to the words prefixes datastructures.
+    ///
+    /// Default value is 100. This value must be higher than 50 and will be clamped
+    /// to this bound otherwise.
+    pub fn threshold(&mut self, value: u32) -> &mut Self {
+        self.threshold = value.max(50);
+        self
+    }
+
     pub fn execute(self) -> Result<()> {
         debug!("Computing and writing the word prefix pair proximity docids into LMDB on disk...");

         self.index.word_prefix_pair_proximity_docids.clear(self.wtxn)?;

-        let prefix_fst = self.index.words_prefixes_fst(self.wtxn)?;
-
         // Here we create a sorter akin to the previous one.
         let mut word_prefix_pair_proximity_docids_sorter = create_sorter(
             merge_cbo_roaring_bitmaps,
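A tiny, assumption-free sketch of the clamping the setter documents: values below the lower bound of 50 are silently raised, and there is no upper bound.

fn clamp_threshold(value: u32) -> u32 {
    value.max(50) // anything below 50 is raised to the minimum bound
}

fn main() {
    assert_eq!(clamp_threshold(10), 50);   // too low: clamped up
    assert_eq!(clamp_threshold(100), 100); // default passes through
    assert_eq!(clamp_threshold(500), 500); // no upper bound
}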
@@ -53,22 +62,59 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
             self.max_memory,
         );

-        // We insert all the word pairs corresponding to the word-prefix pairs
-        // where the prefixes appears in the prefix FST previously constructed.
-        let db = self.index.word_pair_proximity_docids.remap_data_type::<ByteSlice>();
-        for result in db.iter(self.wtxn)? {
-            let ((word1, word2, prox), data) = result?;
-            let automaton = Str::new(word2).starts_with();
-            let mut matching_prefixes = prefix_fst.search(automaton).into_stream();
-            while let Some(prefix) = matching_prefixes.next() {
-                let prefix = str::from_utf8(prefix)?;
-                let pair = (word1, prefix, prox);
-                let bytes = StrStrU8Codec::bytes_encode(&pair).unwrap();
-                word_prefix_pair_proximity_docids_sorter.insert(bytes, data)?;
+        let prefix_fst = self.index.words_prefixes_fst(self.wtxn)?;
+        let prefix_fst_keys = prefix_fst.into_stream().into_bytes();
+        let prefix_fst_keys: Vec<_> = prefix_fst_keys
+            .as_slice()
+            .linear_group_by_key(|x| std::str::from_utf8(&x).unwrap().chars().nth(0).unwrap())
+            .collect();
+
+        let mut db =
+            self.index.word_pair_proximity_docids.remap_data_type::<ByteSlice>().iter(self.wtxn)?;
+
+        let mut buffer = Vec::new();
+        let mut current_prefixes: Option<&&[Vec<u8>]> = None;
+        let mut prefixes_cache = HashMap::new();
+        while let Some(((w1, w2, prox), data)) = db.next().transpose()? {
+            current_prefixes = match current_prefixes.take() {
+                Some(prefixes) if w2.as_bytes().starts_with(&prefixes[0]) => Some(prefixes),
+                _otherwise => {
+                    write_prefixes_in_sorter(
+                        &mut prefixes_cache,
+                        &mut word_prefix_pair_proximity_docids_sorter,
+                        self.threshold,
+                    )?;
+                    prefix_fst_keys.iter().find(|prefixes| w2.as_bytes().starts_with(&prefixes[0]))
+                }
+            };
+
+            if let Some(prefixes) = current_prefixes {
+                buffer.clear();
+                buffer.extend_from_slice(w1.as_bytes());
+                buffer.push(0);
+                for prefix in prefixes.iter().filter(|prefix| w2.as_bytes().starts_with(prefix)) {
+                    buffer.truncate(w1.len() + 1);
+                    buffer.extend_from_slice(prefix);
+                    buffer.push(prox);
+
+                    match prefixes_cache.get_mut(&buffer) {
+                        Some(value) => value.push(data),
+                        None => {
+                            prefixes_cache.insert(buffer.clone(), vec![data]);
+                        }
+                    }
+                }
             }
         }

+        write_prefixes_in_sorter(
+            &mut prefixes_cache,
+            &mut word_prefix_pair_proximity_docids_sorter,
+            self.threshold,
+        )?;
+
         drop(prefix_fst);
+        drop(db);

         // We finally write the word prefix pair proximity docids into the LMDB database.
         sorter_into_lmdb_database(
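The optimization above avoids running one FST automaton search per word pair: because both the prefixes and the word pairs are iterated in lexicographic order, the prefixes can be pre-grouped by their first character and each group matched against a run of consecutive w2 values. A standalone sketch of that grouping step, grouping by first byte rather than first char (equivalent for ASCII prefixes; requires the slice_group_by crate used in the diff):

use slice_group_by::GroupBy;

fn main() {
    // Prefixes come out of the FST sorted, so equal first bytes are contiguous.
    let prefixes: Vec<Vec<u8>> =
        ["a", "ab", "abc", "b", "ba", "c"].iter().map(|s| s.as_bytes().to_vec()).collect();

    // One group per leading byte: [a, ab, abc], [b, ba], [c].
    for group in prefixes.linear_group_by_key(|p| p[0]) {
        let names: Vec<&str> = group.iter().map(|p| std::str::from_utf8(p).unwrap()).collect();
        println!("{:?}", names);
    }
}

Each group is a &[Vec<u8>] slice, which is why current_prefixes in the hunk above has the type Option<&&[Vec<u8>]> and why testing w2 against prefixes[0] is how the code decides when to move on to the next group.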
@@ -82,3 +128,25 @@ impl<'t, 'u, 'i> WordPrefixPairProximityDocids<'t, 'u, 'i> {
         Ok(())
     }
 }
+
+fn write_prefixes_in_sorter(
+    prefixes: &mut HashMap<Vec<u8>, Vec<&[u8]>>,
+    sorter: &mut grenad::Sorter<MergeFn>,
+    min_word_per_prefix: u32,
+) -> Result<()> {
+    for (i, (key, data_slices)) in prefixes.drain().enumerate() {
+        // If the number of words prefixed by the prefix is higher than the threshold,
+        // we insert it in the sorter.
+        if data_slices.len() > min_word_per_prefix as usize {
+            for data in data_slices {
+                sorter.insert(&key, data)?;
+            }
+        // If the first prefix isn't eligible for insertion,
+        // then the other prefixes can't be eligible.
+        } else if i == 0 {
+            break;
+        }
+    }
+
+    Ok(())
+}
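The flush rule above can be demonstrated with plain std types: a prefix key is kept only if it aggregated strictly more than `min` concrete (word1, word2) pairs. This sketch omits the early-exit shortcut of the hunk above, and all names are illustrative, not milli's:

use std::collections::HashMap;

fn flush_cache(cache: &mut HashMap<Vec<u8>, Vec<Vec<u8>>>, min: usize) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut kept = Vec::new();
    for (key, data_slices) in cache.drain() {
        // Keep the prefix key only if enough word pairs collapsed onto it.
        if data_slices.len() > min {
            kept.extend(data_slices.into_iter().map(|data| (key.clone(), data)));
        }
    }
    kept
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(b"lazy\0fo\x01".to_vec(), vec![b"docids-a".to_vec(), b"docids-b".to_vec()]);
    cache.insert(b"lazy\0zz\x01".to_vec(), vec![b"docids-c".to_vec()]);
    // With min = 1, only the "fo" prefix entry (2 pairs) survives.
    assert_eq!(flush_cache(&mut cache, 1).len(), 2);
}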
@@ -8,7 +8,7 @@ use crate::{Index, Result, SmallString32};
 pub struct WordsPrefixesFst<'t, 'u, 'i> {
     wtxn: &'t mut heed::RwTxn<'i, 'u>,
     index: &'i Index,
-    threshold: f64,
+    threshold: u32,
     max_prefix_length: usize,
     _update_id: u64,
 }
@@ -22,20 +22,20 @@ impl<'t, 'u, 'i> WordsPrefixesFst<'t, 'u, 'i> {
         WordsPrefixesFst {
             wtxn,
             index,
-            threshold: 0.1 / 100.0, // .01%
+            threshold: 100,
             max_prefix_length: 4,
             _update_id: update_id,
         }
     }

-    /// Set the ratio of concerned words required to make a prefix be part of the words prefixes
+    /// Set the number of words required to make a prefix be part of the words prefixes
     /// database. If a word prefix is supposed to match more than this number of words in the
     /// dictionary, then this prefix is added to the words prefixes datastructures.
     ///
-    /// Default value is `0.01` or `1%`. This value must be between 0 and 1 and will be clamped
-    /// to these bounds otherwise.
-    pub fn threshold(&mut self, value: f64) -> &mut Self {
-        self.threshold = value.min(1.0).max(0.0); // clamp [0, 1]
+    /// Default value is 100. This value must be higher than 50 and will be clamped
+    /// to this bound otherwise.
+    pub fn threshold(&mut self, value: u32) -> &mut Self {
+        self.threshold = value.max(50);
         self
     }

@@ -50,8 +50,6 @@ impl<'t, 'u, 'i> WordsPrefixesFst<'t, 'u, 'i> {

     pub fn execute(self) -> Result<()> {
         let words_fst = self.index.words_fst(&self.wtxn)?;
-        let number_of_words = words_fst.len();
-        let min_number_of_words = (number_of_words as f64 * self.threshold) as usize;

         let mut prefix_fsts = Vec::with_capacity(self.max_prefix_length);
         for n in 1..=self.max_prefix_length {
@@ -80,7 +78,7 @@ impl<'t, 'u, 'i> WordsPrefixesFst<'t, 'u, 'i> {
                     current_prefix_count += 1;

                     // There are enough words corresponding to this prefix to add it to the cache.
-                    if current_prefix_count == min_number_of_words {
+                    if current_prefix_count >= self.threshold {
                         builder.insert(prefix)?;
                     }
                 }
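Taken together, the WordsPrefixesFst hunks change the prefix-selection criterion from a fraction of the dictionary to an absolute word count, so the set of prefixes no longer grows looser as the index grows. A side-by-side sketch of the two predicates, with illustrative names:

// Old criterion: a prefix qualified relative to the dictionary size.
fn qualifies_by_ratio(prefix_count: usize, number_of_words: usize, ratio: f64) -> bool {
    prefix_count >= (number_of_words as f64 * ratio) as usize
}

// New criterion: an absolute count, independent of the dictionary size.
fn qualifies_by_count(prefix_count: u32, threshold: u32) -> bool {
    prefix_count >= threshold
}

fn main() {
    // A prefix matching 120 words qualifies under the new default (100)
    // whether the dictionary holds ten thousand or ten million words.
    assert!(qualifies_by_count(120, 100));
    // Under the old ratio rule the same prefix only qualified in the
    // smaller dictionary (0.001 of 10_000_000 words is 10_000).
    assert!(qualifies_by_ratio(120, 10_000, 0.001));
    assert!(!qualifies_by_ratio(120, 10_000_000, 0.001));
}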