Come back to the old tokenizer

Clément Renault 2020-08-30 21:50:30 +02:00 committed by Kerollmops
parent 220ba0785c
commit bad0663138
8 changed files with 45 additions and 101 deletions


@@ -21,7 +21,8 @@ use rayon::prelude::*;
 use roaring::RoaringBitmap;
 use structopt::StructOpt;
-use milli::{lexer, SmallVec32, Index, DocumentId, Position, Attribute, BEU32};
+use milli::{SmallVec32, Index, DocumentId, Position, Attribute, BEU32};
+use milli::tokenizer::{simple_tokenizer, only_words};
 const LMDB_MAX_KEY_LENGTH: usize = 511;
 const ONE_MILLION: usize = 1_000_000;
@@ -367,7 +368,7 @@ fn merge(key: &[u8], values: &[Vec<u8>]) -> Result<Vec<u8>, ()> {
         WORDS_FST_KEY => {
             let fsts: Vec<_> = values.iter().map(|v| fst::Set::new(v).unwrap()).collect();
-            // Union of the two FSTs
+            // Union of the FSTs
             let mut op = fst::set::OpBuilder::new();
             fsts.iter().for_each(|fst| op.push(fst.into_stream()));
             let op = op.r#union();
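
Note, not part of the diff: for readers unfamiliar with the fst crate, the WORDS_FST_KEY arm above streams the union of several word sets into a fresh one. A minimal sketch of that idea, assuming the fst Set, OpBuilder and SetBuilder APIs already used above (union_word_fsts is a hypothetical helper name):

use fst::{IntoStreamer, Set, SetBuilder};

fn union_word_fsts(values: &[Vec<u8>]) -> Result<Set<Vec<u8>>, fst::Error> {
    // Re-open every serialized FST, then push them all into a single union operation.
    let fsts: Vec<_> = values.iter()
        .map(|v| Set::new(v.as_slice()))
        .collect::<Result<_, _>>()?;
    let mut op = fst::set::OpBuilder::new();
    fsts.iter().for_each(|fst| op.push(fst.into_stream()));
    // The union streams keys in lexicographic order, which is what SetBuilder expects.
    let mut builder = SetBuilder::memory();
    builder.extend_stream(op.union())?;
    Ok(builder.into_set())
}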
@@ -387,15 +388,16 @@ fn merge(key: &[u8], values: &[Vec<u8>]) -> Result<Vec<u8>, ()> {
         | WORD_FOUR_POSITIONS_DOCIDS_BYTE
         | WORD_ATTRIBUTE_DOCIDS_BYTE =>
         {
-            let mut first = RoaringBitmap::deserialize_from(values[0].as_slice()).unwrap();
-            for value in &values[1..] {
+            let (head, tail) = values.split_first().unwrap();
+            let mut head = RoaringBitmap::deserialize_from(head.as_slice()).unwrap();
+            for value in tail {
                 let bitmap = RoaringBitmap::deserialize_from(value.as_slice()).unwrap();
-                first.union_with(&bitmap);
+                head.union_with(&bitmap);
             }
-            let mut vec = Vec::new();
-            first.serialize_into(&mut vec).unwrap();
+            let mut vec = Vec::with_capacity(head.serialized_size());
+            head.serialize_into(&mut vec).unwrap();
             Ok(vec)
         },
         otherwise => panic!("wut {:?}", otherwise),
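
A similar aside for the bitmap arm: it can be exercised on its own with nothing but the roaring crate. A small sketch under that assumption (union_serialized is a hypothetical stand-in for this merge arm):

use roaring::RoaringBitmap;

fn union_serialized(values: &[Vec<u8>]) -> Vec<u8> {
    // Union every serialized bitmap into the first one.
    let (head, tail) = values.split_first().unwrap();
    let mut head = RoaringBitmap::deserialize_from(head.as_slice()).unwrap();
    for value in tail {
        head.union_with(&RoaringBitmap::deserialize_from(value.as_slice()).unwrap());
    }
    // Preallocating with serialized_size() avoids growing the buffer while writing.
    let mut out = Vec::with_capacity(head.serialized_size());
    head.serialize_into(&mut out).unwrap();
    out
}

fn main() {
    let serialize = |bitmap: &RoaringBitmap| {
        let mut buffer = Vec::with_capacity(bitmap.serialized_size());
        bitmap.serialize_into(&mut buffer).unwrap();
        buffer
    };
    let a: RoaringBitmap = (0..3).collect();
    let b: RoaringBitmap = (2..5).collect();
    let merged = union_serialized(&[serialize(&a), serialize(&b)]);
    let merged = RoaringBitmap::deserialize_from(merged.as_slice()).unwrap();
    assert_eq!(merged.len(), 5); // {0, 1, 2, 3, 4}
}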
@@ -505,8 +507,8 @@ fn index_csv(
         let document_id = DocumentId::try_from(document_id).context("generated id is too big")?;
         for (attr, content) in document.iter().enumerate().take(MAX_ATTRIBUTES) {
-            for (pos, word) in lexer::break_string(&content).enumerate().take(MAX_POSITION) {
-                let word = word.cow_to_lowercase();
+            for (pos, (_, token)) in simple_tokenizer(&content).filter(only_words).enumerate().take(MAX_POSITION) {
+                let word = token.cow_to_lowercase();
                 let position = (attr * MAX_POSITION + pos) as u32;
                 store.insert_word_position_docid(&word, position, document_id)?;
             }
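
One more note outside the diff: the word/position pairs produced by this loop are easy to reproduce in isolation. A sketch that reuses the new milli::tokenizer module added below, with an illustrative MAX_POSITION value and plain to_lowercase standing in for cow_to_lowercase:

use milli::tokenizer::{simple_tokenizer, only_words};

// Illustrative value only; the real constant is defined by the indexer.
const MAX_POSITION: usize = 1000;

fn word_positions(attr: usize, content: &str) -> Vec<(String, u32)> {
    simple_tokenizer(content)
        .filter(only_words)
        .enumerate()
        .take(MAX_POSITION)
        .map(|(pos, (_, token))| {
            // Attribute index and in-attribute position are packed into a single u32.
            let position = (attr * MAX_POSITION + pos) as u32;
            (token.to_lowercase(), position)
        })
        .collect()
}

// word_positions(1, "Hello, world!") == vec![("hello".to_string(), 1000), ("world".to_string(), 1001)]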


@@ -9,10 +9,10 @@ use std::time::Instant;
 use askama_warp::Template;
 use heed::EnvOpenOptions;
 use serde::Deserialize;
-use slice_group_by::StrGroupBy;
 use structopt::StructOpt;
 use warp::{Filter, http::Response};
+use milli::tokenizer::{simple_tokenizer, TokenType};
 use milli::{Index, SearchResult};
#[cfg(target_os = "linux")]
@@ -47,12 +47,16 @@ struct Opt {
 fn highlight_string(string: &str, words: &HashSet<String>) -> String {
     let mut output = String::new();
-    for token in string.linear_group_by_key(|c| c.is_alphanumeric()) {
-        let lowercase_token = token.to_lowercase();
-        let to_highlight = words.contains(&lowercase_token);
-        if to_highlight { output.push_str("<mark>") }
-        output.push_str(token);
-        if to_highlight { output.push_str("</mark>") }
+    for (token_type, token) in simple_tokenizer(string) {
+        if token_type == TokenType::Word {
+            let lowercase_token = token.to_lowercase();
+            let to_highlight = words.contains(&lowercase_token);
+            if to_highlight { output.push_str("<mark>") }
+            output.push_str(token);
+            if to_highlight { output.push_str("</mark>") }
+        } else {
+            output.push_str(token);
+        }
     }
     output
 }
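
As a side note, the rewritten highlight_string wraps only word tokens in <mark> tags and copies separator tokens through untouched. A hedged usage sketch, assuming the function above and query words stored lowercased:

use std::collections::HashSet;

#[test]
fn highlights_only_matching_words() {
    let words: HashSet<String> = vec!["world".to_string()].into_iter().collect();
    assert_eq!(
        highlight_string("Hello, World!", &words),
        "Hello, <mark>World</mark>!",
    );
}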


@ -1,44 +0,0 @@
use unicode_linebreak::{linebreaks, BreakClass, break_property};
fn can_be_broken(c: char) -> bool {
use BreakClass::*;
match break_property(c as u32) {
Ideographic
| Alphabetic
| Numeric
| CombiningMark
| WordJoiner
| NonBreakingGlue
| OpenPunctuation
| Symbol
| EmojiBase
| EmojiModifier
| HangulLJamo
| HangulVJamo
| HangulTJamo
| RegionalIndicator
| Quotation => false,
_ => true,
}
}
fn extract_token(s: &str) -> &str {
let end = s.char_indices().rev()
.take_while(|(_, c)| can_be_broken(*c))
.last()
.map(|(i, _)| i)
.unwrap_or(s.len());
&s[..end]
}
pub fn break_string(s: &str) -> impl Iterator<Item = &str> {
let mut prev = 0;
linebreaks(&s).map(move |(i, _)| {
let s = &s[prev..i];
prev = i;
extract_token(s)
})
.filter(|s| !s.is_empty())
}
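
For the record, also outside the diff: the removed lexer split text at Unicode line-break opportunities and then trimmed the trailing separator with extract_token. A rough sketch of its behaviour on plain ASCII, assuming unicode_linebreak reports a break opportunity after the space and a mandatory break at the end of the string:

#[test]
fn old_lexer_splits_at_break_opportunities() {
    let tokens: Vec<_> = break_string("hello world").collect();
    assert_eq!(tokens, vec!["hello", "world"]);
}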


@@ -3,7 +3,7 @@ mod node;
 mod query_tokens;
 mod search;
 pub mod heed_codec;
-pub mod lexer;
+pub mod tokenizer;
 use std::collections::HashMap;
 use std::hash::BuildHasherDefault;


@@ -1,5 +1,4 @@
 use std::{mem, str};
-use unicode_linebreak::{break_property, BreakClass};
 use QueryToken::{Quoted, Free};

@@ -69,12 +68,6 @@ impl<'a> Iterator for QueryTokens<'a> {
                     },
                     State::Fused => return None,
                 }
-            } else if break_property(c as u32) == BreakClass::Ideographic {
-                match self.state.replace_by(State::Free(afteri)) {
-                    State::Quoted(s) => return Some(Quoted(&self.string[s..afteri])),
-                    State::Free(s) => return Some(Free(&self.string[s..afteri])),
-                    _ => self.state = State::Free(afteri),
-                }
             } else if !self.state.is_quoted() && !c.is_alphanumeric() {
                 match self.state.replace_by(State::Free(afteri)) {
                     State::Free(s) if i > s => return Some(Free(&self.string[s..i])),

src/tokenizer.rs (new file, +21 lines)

@@ -0,0 +1,21 @@
+use slice_group_by::StrGroupBy;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum TokenType {
+    Word,
+    Space,
+}
+
+pub fn simple_tokenizer(text: &str) -> impl Iterator<Item=(TokenType, &str)> {
+    text
+        .linear_group_by_key(|c| c.is_alphanumeric())
+        .map(|s| {
+            let first = s.chars().next().unwrap();
+            let type_ = if first.is_alphanumeric() { TokenType::Word } else { TokenType::Space };
+            (type_, s)
+        })
+}
+
+pub fn only_words((t, _): &(TokenType, &str)) -> bool {
+    *t == TokenType::Word
+}
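
Finally, a short usage sketch, not part of the commit, assuming the module is consumed through the public milli::tokenizer path declared above:

use milli::tokenizer::{simple_tokenizer, only_words, TokenType};

fn main() {
    // Word and separator runs alternate, and every byte of the input is preserved.
    let tokens: Vec<_> = simple_tokenizer("Hello, world!").collect();
    assert_eq!(tokens, vec![
        (TokenType::Word, "Hello"),
        (TokenType::Space, ", "),
        (TokenType::Word, "world"),
        (TokenType::Space, "!"),
    ]);

    // only_words plugs straight into Iterator::filter.
    let words: Vec<_> = simple_tokenizer("Hello, world!")
        .filter(only_words)
        .map(|(_, word)| word)
        .collect();
    assert_eq!(words, vec!["Hello", "world"]);
}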