From d19f394630fb21391eb7a2b2a88d1ee6e9a0ac64 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Renault?=
Date: Fri, 21 Aug 2020 18:10:24 +0200
Subject: [PATCH] Make the indexer support gzipped CSV as input

---
 Cargo.lock         |  5 +++--
 Cargo.toml         |  1 +
 src/bin/indexer.rs | 14 +++++++++++---
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d59b09063..53546e3b1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -445,9 +445,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
 
 [[package]]
 name = "flate2"
-version = "1.0.16"
+version = "1.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e"
+checksum = "766d0e77a2c1502169d4a93ff3b8c15a71fd946cd0126309752104e5f3c46d94"
 dependencies = [
  "cfg-if",
  "crc32fast",
@@ -1006,6 +1006,7 @@ dependencies = [
  "cow-utils",
  "criterion",
  "csv",
+ "flate2",
  "fst",
  "fxhash",
  "heed",
diff --git a/Cargo.toml b/Cargo.toml
index b6eef69ea..e86eac185 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,6 +14,7 @@ bstr = "0.2.13"
 byteorder = "1.3.4"
 cow-utils = "0.1.2"
 csv = "1.1.3"
+flate2 = "1.0.17"
 fst = "0.4.3"
 fxhash = "0.2.1"
 heed = { version = "0.8.1", default-features = false, features = ["lmdb"] }
diff --git a/src/bin/indexer.rs b/src/bin/indexer.rs
index 6d27d7ee2..4bb582fcd 100644
--- a/src/bin/indexer.rs
+++ b/src/bin/indexer.rs
@@ -1,5 +1,4 @@
-use std::convert::TryInto;
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
 use std::fs::{File, OpenOptions};
 use std::io::{self, Read, Write};
 use std::iter::FromIterator;
@@ -11,6 +10,7 @@ use anyhow::Context;
 use arc_cache::ArcCache;
 use bstr::ByteSlice as _;
 use cow_utils::CowUtils;
+use flate2::read::GzDecoder;
 use fst::IntoStreamer;
 use heed::EnvOpenOptions;
 use heed::types::*;
@@ -74,6 +74,9 @@ struct Opt {
 
     /// CSV file to index, if unspecified the CSV is read from standard input.
     ///
+    /// You can also provide a ".gz" or ".gzip" CSV file, the indexer will figure out
+    /// how to decode and read it.
+    ///
     /// Note that it is much faster to index from a file as when the indexer reads from stdin
     /// it will dedicate a thread for that and context switches could slow down the indexing jobs.
     csv_file: Option<PathBuf>,
@@ -501,7 +504,12 @@ fn main() -> anyhow::Result<()> {
             (0..num_threads)
                 .map(|_| {
                     let file = File::open(&file_path)?;
-                    let r = Box::new(file) as Box<dyn Read + Send>;
+                    // if the file extension is "gz" or "gzip" we can decode and read it.
+                    let r = if file_path.extension().map_or(false, |ext| ext == "gz" || ext == "gzip") {
+                        Box::new(GzDecoder::new(file)) as Box<dyn Read + Send>
+                    } else {
+                        Box::new(file) as Box<dyn Read + Send>
+                    };
                     Ok(csv::Reader::from_reader(r)) as io::Result<_>
                 })
                 .collect::<Result<Vec<_>, _>>()?
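
The extension-based decoder selection above can also be exercised on its own. The following is a minimal sketch, not part of the patch: the open_csv helper and the "documents.csv.gz" path are illustrative names, and it assumes csv = "1.1" and flate2 = "1.0" as dependencies, mirroring the branch added in src/bin/indexer.rs.

use std::error::Error;
use std::fs::File;
use std::io::Read;
use std::path::Path;

use flate2::read::GzDecoder;

// Build a CSV reader over `path`, wrapping the file in a GzDecoder when the
// extension is "gz" or "gzip", like the check introduced in the patch.
// The helper name `open_csv` is hypothetical, for illustration only.
fn open_csv(path: &Path) -> std::io::Result<csv::Reader<Box<dyn Read + Send>>> {
    let file = File::open(path)?;
    let gzipped = path.extension().map_or(false, |ext| ext == "gz" || ext == "gzip");
    let reader = if gzipped {
        Box::new(GzDecoder::new(file)) as Box<dyn Read + Send>
    } else {
        Box::new(file) as Box<dyn Read + Send>
    };
    Ok(csv::Reader::from_reader(reader))
}

fn main() -> Result<(), Box<dyn Error>> {
    // "documents.csv.gz" is a placeholder path; a plain ".csv" works the same way.
    let mut rdr = open_csv(Path::new("documents.csv.gz"))?;
    for record in rdr.records() {
        println!("{:?}", record?);
    }
    Ok(())
}

Because both branches erase to the same Box<dyn Read + Send>, the csv::Reader downstream is unaware of whether decompression happens, which is why the patch only touches the reader construction and not the indexing loop.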