2018-09-27 16:32:17 +02:00
|
|
|
use std::mem;
|
|
|
|
use self::Separator::*;
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
pub trait TokenizerBuilder {
|
2018-12-23 16:46:49 +01:00
|
|
|
fn build<'a>(&self, text: &'a str) -> Box<Iterator<Item=Token<'a>> + 'a>;
|
2018-09-27 16:32:17 +02:00
|
|
|
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
/// Zero-sized builder for the default tokenizer (see the
/// `TokenizerBuilder` impl below). Stateless, so it derives the cheap
/// standard traits; `Default` mirrors the existing `new()` constructor.
#[derive(Debug, Clone, Copy, Default)]
pub struct DefaultBuilder;
|
|
|
|
|
|
|
|
impl DefaultBuilder {
|
|
|
|
pub fn new() -> DefaultBuilder {
|
|
|
|
DefaultBuilder
|
2018-09-27 16:32:17 +02:00
|
|
|
}
|
2018-11-15 17:55:20 +01:00
|
|
|
}
|
2018-09-27 16:32:17 +02:00
|
|
|
|
2018-12-23 16:46:49 +01:00
|
|
|
/// One word extracted from the source text, with positional metadata.
/// All fields are `Copy` (a slice and two integers), so `Clone, Copy`
/// are derived in addition to the existing comparisons.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Token<'a> {
    /// The word itself, borrowed from the tokenized text.
    pub word: &'a str,
    /// Separator-weighted index of the word: crossing a short separator
    /// adds 1, a long (punctuation) separator adds 8 (see `Separator`).
    pub word_index: usize,
    /// Offset of the word in the original text.
    // NOTE(review): `Tokenizer::next` advances this by *byte* lengths
    // (`str::len`), despite the name — confirm intended unit.
    pub char_index: usize,
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
impl TokenizerBuilder for DefaultBuilder {
|
2018-12-23 16:46:49 +01:00
|
|
|
fn build<'a>(&self, text: &'a str) -> Box<Iterator<Item=Token<'a>> + 'a> {
|
2018-11-15 17:55:20 +01:00
|
|
|
Box::new(Tokenizer::new(text))
|
2018-09-27 16:32:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
/// Iterator state for splitting a text into `Token`s.
/// Derives added: public iterator types should be `Debug`, and all
/// fields are trivially cloneable.
#[derive(Debug, Clone)]
pub struct Tokenizer<'a> {
    // Separator-weighted index of the most recently emitted word.
    word_index: usize,
    // Position reached in the original text (advanced by `next`).
    char_index: usize,
    // Remaining, not-yet-tokenized tail of the input.
    inner: &'a str,
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
impl<'a> Tokenizer<'a> {
|
|
|
|
pub fn new(string: &str) -> Tokenizer {
|
2018-12-23 16:46:49 +01:00
|
|
|
let mut char_advance = 0;
|
|
|
|
let mut index_advance = 0;
|
|
|
|
for (n, (i, c)) in string.char_indices().enumerate() {
|
|
|
|
char_advance = n;
|
|
|
|
index_advance = i;
|
|
|
|
if detect_separator(c).is_none() { break }
|
|
|
|
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
Tokenizer {
|
2018-12-23 16:46:49 +01:00
|
|
|
word_index: 0,
|
|
|
|
char_index: char_advance,
|
|
|
|
inner: &string[index_advance..],
|
2018-09-27 16:32:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Weight class of a separator character between words.
/// `PartialEq, Eq` added to the derives (cheap, and makes the variants
/// directly comparable in tests and callers).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Separator {
    /// Spacing/quote characters — contribute 1 to the word distance.
    Short,
    /// Sentence punctuation — contributes 8 to the word distance.
    Long,
}
|
|
|
|
|
|
|
|
impl Separator {
|
|
|
|
fn add(self, add: Separator) -> Separator {
|
|
|
|
match (self, add) {
|
|
|
|
(_, Long) => Long,
|
|
|
|
(Short, Short) => Short,
|
|
|
|
(Long, Short) => Long,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn to_usize(self) -> usize {
|
|
|
|
match self {
|
|
|
|
Short => 1,
|
|
|
|
Long => 8,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-23 16:46:49 +01:00
|
|
|
fn detect_separator(c: char) -> Option<Separator> {
|
|
|
|
match c {
|
|
|
|
'.' | ';' | ',' | '!' | '?' | '-' => Some(Long),
|
|
|
|
' ' | '\'' | '"' => Some(Short),
|
|
|
|
_ => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-15 17:55:20 +01:00
|
|
|
impl<'a> Iterator for Tokenizer<'a> {
    type Item = Token<'a>;

    /// Yields the next word of `inner`, with its separator-weighted word
    /// index and its offset (in bytes) in the original text.
    fn next(&mut self) -> Option<Self::Item> {
        // Byte offset (into `inner`) where the current word started, if any.
        let mut start_word = None;
        // Combined weight of separators crossed since this call started;
        // `Separator::add` keeps `Long` dominant over `Short`.
        let mut distance = None;

        for (i, c) in self.inner.char_indices() {
            match detect_separator(c) {
                Some(sep) => {
                    if let Some(start_word) = start_word {
                        // A word just ended at `i`: split off everything
                        // before this separator, then split that into the
                        // leading separators (`spaces`) and the word.
                        let (prefix, tail) = self.inner.split_at(i);
                        let (spaces, word) = prefix.split_at(start_word);

                        // Resume from the separator on the next call.
                        self.inner = tail;
                        // `str::len` is a *byte* length, so char_index
                        // advances in bytes (the tests pin this behavior).
                        self.char_index += spaces.len();
                        self.word_index += distance.map(Separator::to_usize).unwrap_or(0);

                        let token = Token {
                            word: word,
                            word_index: self.word_index,
                            char_index: self.char_index,
                        };

                        // Account for the word itself before returning.
                        self.char_index += word.len();
                        return Some(token)
                    }

                    // Still before the word: fold this separator into the
                    // accumulated distance (any Long makes the run Long).
                    distance.replace(distance.map_or(sep, |s| s.add(sep)));
                },
                // First non-separator char records the word start; later
                // chars leave the recorded offset untouched.
                None => { start_word.get_or_insert(i); },
            }
        }

        // Input ended while inside a word: emit it and exhaust `inner`.
        if let Some(start_word) = start_word {
            // Take the remaining text and leave "" so the next call
            // returns None.
            let prefix = mem::replace(&mut self.inner, "");
            let (spaces, word) = prefix.split_at(start_word);

            // Unlike the branch above, self.word_index/self.char_index are
            // not updated here — harmless, since `inner` is now empty and
            // no further token can be produced.
            let token = Token {
                word: word,
                word_index: self.word_index + distance.map(Separator::to_usize).unwrap_or(0),
                char_index: self.char_index + spaces.len(),
            };

            return Some(token)
        }

        None
    }
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    // Simple inputs: single word, and a word with trailing whitespace.
    #[test]
    fn easy() {
        let mut tokenizer = Tokenizer::new("salut");

        assert_eq!(tokenizer.next(), Some(Token { word: "salut", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), None);
    }

    // Mixed separators: word_index jumps by 8 across runs containing long
    // (punctuation) separators and by 1 across purely short (space/quote)
    // runs; char_index counts positions in the text.
    #[test]
    fn hard() {
        let mut tokenizer = Tokenizer::new(" .? yo lolo. aïe");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 4 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 1, char_index: 7 }));
        // ". " after "lolo" is a long run: 1 + 8 = 9.
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 9, char_index: 13 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ! lolo ? wtf - lol . aïe ,");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 8, char_index: 5 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "wtf", word_index: 16, char_index: 12 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lol", word_index: 24, char_index: 18 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 32, char_index: 24 }));
        assert_eq!(tokenizer.next(), None);
    }

    // Multi-byte words: the expected char_index values show that the
    // iterator advances by UTF-8 *byte* lengths ("😂" counts as 4, "😱"
    // and "😣" as 4, "ï" as 2), not by char counts.
    #[test]
    fn hard_long_chars() {
        let mut tokenizer = Tokenizer::new(" .? yo 😂. aïe");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 4 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😂", word_index: 1, char_index: 7 }));
        // 7 + 4 bytes of "😂" + 2 bytes of ". " = 13.
        assert_eq!(tokenizer.next(), Some(Token { word: "aïe", word_index: 9, char_index: 13 }));
        assert_eq!(tokenizer.next(), None);

        let mut tokenizer = Tokenizer::new("yo ! lolo ? 😱 - lol . 😣 ,");

        assert_eq!(tokenizer.next(), Some(Token { word: "yo", word_index: 0, char_index: 0 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lolo", word_index: 8, char_index: 5 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😱", word_index: 16, char_index: 12 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "lol", word_index: 24, char_index: 19 }));
        assert_eq!(tokenizer.next(), Some(Token { word: "😣", word_index: 32, char_index: 25 }));
        assert_eq!(tokenizer.next(), None);
    }
}
|