feat: Use the new Tokenizer in the csv-indexer

Clément Renault 2018-09-27 16:59:41 +02:00
parent b84be67aa2
commit 8a0c82d51e
2 changed files with 32 additions and 24 deletions

@@ -7,11 +7,10 @@ use std::path::{Path, PathBuf};
 use std::collections::{HashSet, BTreeMap};
 use std::io::{self, BufReader, BufRead};
 use std::fs::File;
-use std::iter;
 use csv::ReaderBuilder;
 use structopt::StructOpt;
-use raptor::{MetadataBuilder, DocIndex};
+use raptor::{MetadataBuilder, DocIndex, Tokenizer};
 use rocksdb::{SstFileWriter, EnvOptions, ColumnFamilyOptions};
 use unidecode::unidecode;
@@ -55,6 +54,30 @@ where P: AsRef<Path>,
     Ok(set)
 }
 
+fn insert_document_words<'a, I, A, B>(builder: &mut MetadataBuilder<A, B>, doc_index: u64, attr: u8, words: I)
+where A: io::Write,
+      B: io::Write,
+      I: IntoIterator<Item=(usize, &'a str)>,
+{
+    for (index, word) in words {
+        let doc_index = DocIndex {
+            document: doc_index,
+            attribute: attr,
+            attribute_index: index as u32,
+        };
+
+        // insert the exact representation
+        let word_lower = word.to_lowercase();
+
+        // and the unidecoded lowercased version
+        let word_unidecoded = unidecode(word).to_lowercase();
+        if word_lower != word_unidecoded {
+            builder.insert(word_unidecoded, doc_index);
+        }
+
+        builder.insert(word_lower, doc_index);
+    }
+}
+
 fn main() {
     let opt = Opt::from_args();
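
The branch in insert_document_words skips the unidecoded insert when it would only duplicate the lowercased form, so plain ASCII words are indexed once. A small check of that invariant, using the unidecode crate already imported above (the sample words are hypothetical):

    use unidecode::unidecode;

    fn main() {
        // An accented word produces two distinct index keys.
        let word = "Café";
        assert_eq!(word.to_lowercase(), "café");
        assert_eq!(unidecode(word).to_lowercase(), "cafe");

        // A plain ASCII word produces only one.
        let word = "Hello";
        assert_eq!(word.to_lowercase(), unidecode(word).to_lowercase());
    }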
@@ -85,29 +108,13 @@ fn main() {
             Err(e) => { eprintln!("{:?}", e); errors += 1; continue },
         };
 
-        {
-            let title = iter::repeat(0).zip(product.title.split_whitespace()).filter(|&(_, w)| !common_words.contains(w)).enumerate();
-            let description = iter::repeat(1).zip(product.description.split_whitespace()).filter(|&(_, w)| !common_words.contains(w)).enumerate();
-            let words = title.chain(description);
-
-            for (i, (attr, word)) in words {
-                let doc_index = DocIndex {
-                    document: product.id,
-                    attribute: attr,
-                    attribute_index: i as u32,
-                };
-
-                // insert the exact representation
-                let word_lower = word.to_lowercase();
-
-                // and the unidecoded lowercased version
-                let word_unidecoded = unidecode(word).to_lowercase();
-                if word_lower != word_unidecoded {
-                    builder.insert(word_unidecoded, doc_index);
-                }
-
-                builder.insert(word_lower, doc_index);
-            }
-        }
+        let title = Tokenizer::new(&product.title);
+        let title = title.iter().filter(|&(_, w)| !common_words.contains(w));
+        insert_document_words(&mut builder, product.id, 0, title);
+
+        let description = Tokenizer::new(&product.description);
+        let description = description.iter().filter(|&(_, w)| !common_words.contains(w));
+        insert_document_words(&mut builder, product.id, 1, description);
 
         // TODO simplify this by using functions and
         // use the MetadataBuilder internal BTreeMap ?
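
The tokenizer module's implementation is not part of this diff; the indexer code above only fixes its shape: Tokenizer::new takes a &str and iter() yields (position, word) pairs, matching the IntoIterator<Item=(usize, &'a str)> bound of insert_document_words. A minimal sketch under those assumptions, with plain whitespace splitting standing in for the real rules:

    // Sketch only: the real tokenizer may split on more than whitespace.
    pub struct Tokenizer<'a> {
        inner: &'a str,
    }

    impl<'a> Tokenizer<'a> {
        pub fn new(inner: &'a str) -> Self {
            Tokenizer { inner }
        }

        // Yields (word_position, word) pairs. Note that positions are
        // assigned here, before the caller filters out common words.
        pub fn iter(&self) -> impl Iterator<Item = (usize, &'a str)> {
            self.inner.split_whitespace().enumerate()
        }
    }

One behavioral note that follows from this shape: the old code enumerated after filtering, while the new code takes positions from the tokenizer before filtering, so skipped common words now leave gaps in attribute_index.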

@@ -8,6 +8,7 @@ pub mod tokenizer;
 pub use self::metadata::{Metadata, MetadataBuilder};
 pub use self::rank::RankedStream;
+pub use self::tokenizer::Tokenizer;
 
 pub type DocumentId = u64;
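
With this re-export in place, downstream code can reach the tokenizer as raptor::Tokenizer instead of raptor::tokenizer::Tokenizer. A short usage sketch, assuming the whitespace behavior sketched earlier (the input string is hypothetical):

    use raptor::Tokenizer;

    fn main() {
        let tokenizer = Tokenizer::new("The Little Prince");

        // With a whitespace tokenizer this prints:
        // 0 The
        // 1 Little
        // 2 Prince
        for (position, word) in tokenizer.iter() {
            println!("{} {}", position, word);
        }
    }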