diff --git a/CMakeLists.txt b/CMakeLists.txt
index f3c3ed5..2d235b5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -16,7 +16,7 @@ if (NOT CMAKE_BUILD_TYPE)
     set(CMAKE_BUILD_TYPE Release)
 endif ()
 
-find_package(Boost 1.71 COMPONENTS program_options log log_setup REQUIRED)
+find_package(Boost 1.75 COMPONENTS program_options json log log_setup REQUIRED)
 
 # compile executable into bin/
 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
diff --git a/README.md b/README.md
index 93c2efe..f030635 100644
--- a/README.md
+++ b/README.md
@@ -42,11 +42,15 @@ warc2text -o [ -f ] [ --pdfpass ] [ --paragraph-identification ] [ --tag-filters ] ...
 ```
 * `--output`/`-o` output folder
-* `--files`/`-f` list of output files separated by commas (and without `.gz`); `text` and `url` are always written, while `mime` and `html` are optional
+* `--files`/`-f` list of output files separated by commas (and without `.gz`); options are `text`, `html`, `url`, `mime`, `file` and `date`. Defaults to `text,url`. See [output](#output).
+* `--jsonl` produce JSON Lines on stdout instead of writing per-language files
 * `--pdfpass` WARC file where PDF records will be stored
+* `--robotstxtpass` WARC file where robots.txt related records will be stored
+* `--encode-urls` escape non-ASCII characters that appear in the record URL with `%dd` encoding
+* `--multilang` detect multiple languages in the document and split it accordingly; only supported with the CLD2 classifier
 * `--paragraph-identification` print the paragraph identifier for each sentence extracted from the HTML
-* `--classifier` classifier to use: `cld2` or `fasttext`.
-* `--fasttext-model` path to FastText model for fasttext classifier.
+* `--classifier` classifier to use: `cld2` or `fasttext`. When `fasttext` is used, a model must also be specified with `--fasttext-model`.
+* `--fasttext-model` path to the FastText model for the fasttext classifier. This can be any [FastText language identification model](https://fasttext.cc/docs/en/language-identification.html), such as [OpenLID lid201-model.ftz](https://github.com/laurieburchell/open-lid-dataset#quantised-model).
 * `--tag-filters` file containing filters that are used to eliminate matching documents
 * `--invert-tag-filters` output only documents that match the filter
 * `--url-filters` file containing regular expressions that match urls of documents to eliminate
@@ -61,6 +65,39 @@ warc2text -o [ -f ] [ --pdfpass ]
 Lines beginning with `#` and empty lines are ignored.
 Any invalid filter will raise a warning message, but will not prevent other filters from being read.
 
+## Output
+When used with `--output`/`-o` (and optionally `--files`/`-f`), warc2text will
+produce the following directory structure at the path specified by `--output`:
+
+- `./{lang}/text.gz` contains the plain text of each document as base64-encoded lines. E.g. `gzip -cd en/text.gz | head -n5 | tail -n1 | base64 -d` will give you the 5th document's text.
+- `./{lang}/url.gz` contains [the crawled URL](https://iipc.github.io/warc-specifications/specifications/warc-format/warc-1.1/#warc-target-uri) for each record.
+- `./{lang}/mime.gz` contains the MIME type as reported by the crawled server.
+- `./{lang}/html.gz` contains lines of base64-encoded HTML as returned by the server. For ePub, MS Office or ODF files this is the extracted XML.
+- `./{lang}/file.gz` contains the `{filename}:{offset}:{length}` pointer to the WARC archive the record was extracted from. `{offset}` and `{length}` refer to the compressed data, e.g.
+  `tail -c+{offset} < {filename} | head -c{length} | gzip -cd` will give you the original record.
+- `./{lang}/date.gz` contains the original crawl date/time as reported by the crawler. [This should be a UTC timestamp](https://iipc.github.io/warc-specifications/specifications/warc-format/warc-1.1/#warc-date-mandatory).
+
+In every file, each line corresponds to the same record: e.g. the fifth line in `text.gz` and the fifth line in `url.gz` together give you the text and URL of a single record.
+
+The `{lang}` part of the path is determined by the classifier (see `--classifier`) and may be a two-letter or three-letter code depending on the classifier used. See [this list](https://github.com/CLD2Owners/cld2/blob/b56fa78a2fe44ac2851bae5bf4f4693a0644da7b/internal/generated_language.cc#L647-L1262) for CLD2.
+
+When using `--jsonl`, the output is instead a single JSON record per line, with the following keys (always in this order):
+```ts
+{
+  f: string,  // filename of the WARC file (same as `{filename}` in `file.gz`)
+  o: number,  // byte offset of the record in the WARC file (same as `{offset}` in `file.gz`)
+  s: number,  // compressed size of the record in the WARC file (same as `{length}` in `file.gz`)
+  rs: number, // byte size of the record payload (uncompressed)
+  ps: number, // byte size of the text-only payload (comparing this against `rs` gives the amount of HTML removed)
+  l: string,  // language identified by the classifier
+  u: string,  // URL
+  c: string,  // content type as reported by the HTTP response header (or the WARC record header if that is not present)
+  ts: string, // crawl date/time as reported by the crawler
+  p: string,  // plain text
+}
+```
+
+More keys might be added in the future (e.g. the raw HTML is currently not included), and you should not expect the order of the keys to stay the same between different versions of warc2text.
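+
+For example, assuming `jq` is installed, `warc2text --jsonl -i crawl.warc.gz | jq -r 'select(.l == "en") | .p'` prints the plain text of every record identified as English (`crawl.warc.gz` is a placeholder, and `en` assumes the two-letter codes of the CLD2 classifier).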
+ ## Included dependencies HTML Tokenizer by [c-smile](https://www.codeproject.com/Articles/14076/Fast-and-Compact-HTML-XML-Scanner-Tokenizer) diff --git a/src/bilangwriter.cc b/src/bilangwriter.cc index 2fed853..5688000 100644 --- a/src/bilangwriter.cc +++ b/src/bilangwriter.cc @@ -3,30 +3,26 @@ #include "util/exception.hh" #include #include +#include +#include + namespace warc2text{ - GzipWriter::GzipWriter() { - dest = nullptr; - compressed = 0; - s.zalloc = nullptr; - s.zfree = nullptr; - s.opaque = nullptr; - int ret = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31, 8, Z_DEFAULT_STRATEGY); - assert(ret == Z_OK); - buf = new unsigned char[BUFFER_SIZE]; + GzipWriter::GzipWriter() + : dest(nullptr), + buf(new unsigned char[BUFFER_SIZE]) { + // } GzipWriter::~GzipWriter() { - if (dest) { - this->compress("", 0, Z_FINISH); - deflateEnd(&s); - std::fclose(dest); - } + if (is_open()) + close(); delete[] buf; } void GzipWriter::compress(const char *in, std::size_t size, int flush) { + assert(is_open()); if (size == 0 && flush == Z_NO_FLUSH) return; s.avail_in = size; s.next_in = (Bytef *) in; @@ -39,7 +35,7 @@ namespace warc2text{ s.next_out = buf; ret = deflate(&s, flush); assert(ret == Z_OK || ret == Z_STREAM_END); // Z_STREAM_END only happens if flush == Z_FINISH - compressed = BUFFER_SIZE - s.avail_out; + std::size_t compressed = BUFFER_SIZE - s.avail_out; //written = std::fwrite(buf, 1, compressed, dest); std::fwrite(buf, 1, compressed, dest); // TODO error handling @@ -52,47 +48,68 @@ namespace warc2text{ void GzipWriter::open(const std::string& filename) { dest = std::fopen(filename.c_str(), "wb"); UTIL_THROW_IF(!dest, util::ErrnoException, "while creating " << filename); + s.zalloc = nullptr; + s.zfree = nullptr; + s.opaque = nullptr; + int ret = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31, 8, Z_DEFAULT_STRATEGY); + assert(ret == Z_OK); + } + + void GzipWriter::close() { + compress("", 0, Z_FINISH); + deflateEnd(&s); + std::fclose(dest); + dest = nullptr; } void GzipWriter::write(const char* text, std::size_t size) { - this->compress(text, size, Z_NO_FLUSH); + compress(text, size, Z_NO_FLUSH); } void GzipWriter::writeLine(const char* text, std::size_t size) { - this->compress(text, size, Z_NO_FLUSH); - this->compress("\n", 1, Z_NO_FLUSH); + compress(text, size, Z_NO_FLUSH); + compress("\n", 1, Z_NO_FLUSH); } void GzipWriter::writeLine(const std::string& text) { - this->compress(text.c_str(), text.size(), Z_NO_FLUSH); - this->compress("\n", 1, Z_NO_FLUSH); + compress(text.c_str(), text.size(), Z_NO_FLUSH); + compress("\n", 1, Z_NO_FLUSH); } bool GzipWriter::is_open(){ return dest != nullptr; } - void BilangWriter::write(const std::string& lang, const std::string& b64text, const std::string& url, const std::string& mime, const std::string& b64html) { - GzipWriter* gzurl = &url_files[lang]; - GzipWriter* gztext = &text_files[lang]; - GzipWriter* gzmime = nullptr; - GzipWriter* gzhtml = nullptr; - if (output_files.count("mime") == 1) gzmime = &(mime_files[lang]); - if (output_files.count("html") == 1) gzhtml = &(html_files[lang]); - if (!gzurl->is_open()) { - // if one file does not exist, the rest shouldn't either - std::string path = folder + "/" + lang; - util::createDirectories(path); - gzurl->open(path + "/url.gz"); - gztext->open(path + "/text.gz"); - if (gzmime != nullptr) gzmime->open(path + "/mime.gz"); - if (gzhtml != nullptr) gzhtml->open(path + "/html.gz"); - } + LangWriter::LangWriter(const std::string& path, const std::unordered_set& output_files) { + 
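+        // Create the per-language output directory and open only the columns
+        // requested via --files/-f; columns left closed are skipped in write().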
util::createDirectories(path); + + if (output_files.count("url")) + url_file.open(path + "/url.gz"); + if (output_files.count("text")) + text_file.open(path + "/text.gz"); + if (output_files.count("mime")) + mime_file.open(path + "/mime.gz"); + if (output_files.count("html")) + html_file.open(path + "/html.gz"); + if (output_files.count("file")) + file_file.open(path + "/file.gz"); + if (output_files.count("date")) + date_file.open(path + "/date.gz"); + } - gzurl->writeLine(url); - gztext->writeLine(b64text); - if (gzmime != nullptr) gzmime->writeLine(mime); - if (gzhtml != nullptr) gzhtml->writeLine(b64html); + void LangWriter::write(Record const &record, std::string const &chunk) { + if (url_file.is_open()) + url_file.writeLine(record.getURL()); + if (mime_file.is_open()) + mime_file.writeLine(record.getHTTPcontentType()); + if (file_file.is_open()) + file_file.writeLine(record.getFilename() + ":" + std::to_string(record.getOffset()) + ":" + std::to_string(record.getSize())); + if (date_file.is_open()) + date_file.writeLine(record.getWARCdate()); + if (html_file.is_open()) + html_file.writeLine(util::encodeBase64(record.getPayload())); + if (text_file.is_open()) + text_file.writeLine(util::encodeBase64(chunk)); } std::string get_paragraph_id(const std::string& text) { @@ -111,23 +128,33 @@ namespace warc2text{ } void BilangWriter::write(const Record& record, bool paragraph_identification) { - std::string base64text; - std::string base64html; - - if (output_files.count("html") == 1) - util::encodeBase64(record.getPayload(), base64html); - for (const auto& it : record.getTextByLangs()) { - std::string payload = it.second; + std::string chunk = it.second; - if (paragraph_identification) { - payload = get_paragraph_id(payload); - } + if (paragraph_identification) + chunk = get_paragraph_id(chunk); - util::encodeBase64(payload, base64text); - this->write(it.first, base64text, record.getURL(), record.getHTTPcontentType(), base64html); + auto writer_it = writers.try_emplace(it.first, folder + "/" + it.first, output_files); + writer_it.first->second.write(record, chunk); } } + void JSONLinesWriter::write(const Record& record, [[maybe_unused]] bool paragraph_identification) { + // JSON lines format (https://jsonlines.org) + for (auto &&chunk : record.getTextByLangs()) { + out_ << boost::json::value{ + {"f", boost::json::string(record.getFilename())}, + {"o", boost::json::value(record.getOffset())}, + {"s", boost::json::value(record.getSize())}, + {"rs", boost::json::value(record.getPayload().size())}, + {"ps", boost::json::value(chunk.second.size())}, + {"l", boost::json::string(chunk.first)}, + {"u", boost::json::string(record.getURL())}, + {"c", boost::json::string(record.getHTTPcontentType())}, + {"ts", boost::json::string(record.getWARCdate())}, + {"p", boost::json::string(chunk.second)}, + } << "\n"; + } + } } diff --git a/src/bilangwriter.hh b/src/bilangwriter.hh index 1b7f83e..cf80d52 100644 --- a/src/bilangwriter.hh +++ b/src/bilangwriter.hh @@ -3,23 +3,37 @@ #include #include +#include #include "record.hh" #include "zlib.h" namespace warc2text { + /** + * Generic interface for writing records to some form of output. + */ + class RecordWriter { + public: + virtual void write(const Record& record, bool paragraph_identification = false) = 0; + virtual ~RecordWriter() = default; + }; + + /** + * Writer used by BilangWriter to write a single compressed file + * (i.e. 
a column for a specific language) + */ class GzipWriter { private: FILE* dest; z_stream s{}; unsigned char* buf; - std::size_t compressed; void compress(const char* in, std::size_t size, int flush); public: GzipWriter(); ~GzipWriter(); void open(const std::string& filename); + void close(); void write(const char* text, std::size_t size); void writeLine(const char* text, std::size_t size); void writeLine(const std::string& text); @@ -27,41 +41,46 @@ namespace warc2text { static const std::size_t BUFFER_SIZE = 4096; }; - class BilangWriter { + /** + * Writes records to a specific folder for a specific language. + */ + class LangWriter { + private: + GzipWriter url_file; + GzipWriter mime_file; + GzipWriter text_file; + GzipWriter html_file; + GzipWriter file_file; + GzipWriter date_file; + public: + LangWriter(const std::string& folder, const std::unordered_set& output_files); + void write(const Record& record, const std::string &chunk); + }; + + class BilangWriter : public RecordWriter { private: std::string folder; - std::unordered_map url_files; - std::unordered_map mime_files; - std::unordered_map text_files; - std::unordered_map html_files; std::unordered_set output_files; - - void write(const std::string& lang, const std::string& b64text, const std::string& url, const std::string& mime, const std::string& b64html); - + std::unordered_map writers; public: - explicit BilangWriter(const std::string& folder) : - folder(folder), - url_files(), - mime_files(), - text_files(), - html_files(), - output_files({}) // url and text are mandatory regardless - {}; - - explicit BilangWriter(const std::string& folder, const std::unordered_set& output_files) : - folder(folder), - url_files(), - mime_files(), - text_files(), - html_files(), - output_files(output_files) - {}; - - void write(const Record& record, bool paragraph_identification = false); + BilangWriter(const std::string& folder, const std::unordered_set& output_files = {}) + : folder(folder) + , output_files(output_files) + { + // + }; + virtual void write(const Record& record, bool paragraph_identification = false); }; + class JSONLinesWriter : public RecordWriter { + private: + std::ostream &out_; + public: + explicit JSONLinesWriter(std::ostream &out) : out_(out) {}; + virtual void write(const Record& record, bool paragraph_identification = false); + }; } #endif diff --git a/src/record.cc b/src/record.cc index 57d2b75..9df06c3 100644 --- a/src/record.cc +++ b/src/record.cc @@ -34,7 +34,11 @@ namespace warc2text { return header_end + 4; } - Record::Record(const std::string& content) { + Record::Record(const std::string& content, const std::string& filename, std::size_t size, std::size_t offset) : + filename(filename), + size(size), + offset(offset) + { std::string line; std::size_t last_pos = 0, payload_start = 0; std::size_t pos = content.find("WARC/1.0\r\n"); @@ -61,18 +65,23 @@ namespace warc2text { if (header.count("warc-target-uri") == 1) { // respect the original casing url = header["warc-target-uri"]; - } - if (!url.empty() && url[0] == '<' && url[url.size()-1] == '>') - url = url.substr(1, url.size()-2); + // Remove any "<" and ">" wrappings from the URL + if (!url.empty() && url[0] == '<' && url[url.size()-1] == '>') + url = url.substr(1, url.size()-2); + } if (header.count("content-type") == 1) { WARCcontentType = header["content-type"]; util::toLower(WARCcontentType); } + if (header.count("warc-date") == 1) { + WARCdate = header["warc-date"]; + } + payload_start = last_pos; - if (header["warc-type"] == "response") { + if 
(recordType == "response") { // parse HTTP header pos = content.find("HTTP/1.", last_pos); if (pos == last_pos) { // found HTTP header @@ -275,10 +284,6 @@ namespace warc2text { return plaintext; } - const std::string& Record::getLanguage() const { - return language; - } - const std::string& Record::getURL() const { return url; } @@ -287,6 +292,10 @@ namespace warc2text { return recordType; } + const std::string& Record::getWARCdate() const { + return WARCdate; + } + const std::string& Record::getWARCcontentType() const { return WARCcontentType; } @@ -310,5 +319,4 @@ namespace warc2text { void Record::encodeURL() { url = util::encodeURLs(url); } - } // warc2text diff --git a/src/record.hh b/src/record.hh index 13d9c7e..00069e7 100644 --- a/src/record.hh +++ b/src/record.hh @@ -14,9 +14,7 @@ namespace warc2text { class Record { public: - Record() {}; - - explicit Record(const std::string& content); + Record(const std::string& content, const std::string &filename, std::size_t size, std::size_t offset); const std::string& getHeaderProperty(const std::string& property) const; bool headerExists(const std::string& property) const; @@ -25,15 +23,27 @@ namespace warc2text { const std::string& getPayload() const; const std::string& getPlainText() const; - const std::string& getLanguage() const; const std::string& getURL() const; const std::string& getRecordType() const; const std::string& getWARCcontentType() const; + const std::string& getWARCdate() const; const std::string& getHTTPcontentType() const; const std::string& getCharset() const; bool isBroaderDocumentFormat() const; bool isTextFormat() const; + inline const std::string& getFilename() const { + return filename; + } + + inline std::size_t getSize() const { + return size; + } + + inline std::size_t getOffset() const { + return offset; + } + const std::unordered_map& getTextByLangs() const; int cleanPayload(); @@ -46,6 +56,10 @@ namespace warc2text { void encodeURL(); private: + const std::string &filename; + std::size_t size; // compressed record length in WARC + std::size_t offset; // byte offset of start of record in WARC + std::unordered_map header; std::unordered_map HTTPheader; std::string payload; @@ -57,6 +71,7 @@ namespace warc2text { // these are present in the headers, but it's convenient to have them apart also std::string recordType; std::string WARCcontentType; + std::string WARCdate; std::string cleanHTTPcontentType; std::string charset; std::string url; diff --git a/src/util.cc b/src/util.cc index 896303a..07c515d 100644 --- a/src/util.cc +++ b/src/util.cc @@ -97,12 +97,10 @@ namespace util { return boost::locale::conv::to_utf(text, charset); } - void encodeBase64(const std::string& original, std::string& base64){ - preprocess::base64_encode(original, base64); - } - - void decodeBase64(const std::string& base64, std::string& output){ - preprocess::base64_decode(base64, output); + std::string encodeBase64(const std::string &original) { + std::string out; + preprocess::base64_encode(original, out); + return out; } void readTagFiltersRegex(const std::string& filename, umap_tag_filters_regex& filters) { diff --git a/src/util.hh b/src/util.hh index a6f5702..dd4407c 100644 --- a/src/util.hh +++ b/src/util.hh @@ -25,9 +25,7 @@ namespace util { std::string toUTF8 (const std::string& text, const std::string& charset); std::string toUTF8 (const char* text, const std::string& charset); - void encodeBase64(const std::string& original, std::string& base64); - - void decodeBase64(const std::string& base64, std::string& output); + 
std::string encodeBase64(const std::string& original); const std::string reserved_chars_url("!#$&'()*+,/:;=?[]"); std::string encodeURLs(const std::string& url); diff --git a/src/warcpreprocessor.cc b/src/warcpreprocessor.cc index 6934c65..c62a861 100644 --- a/src/warcpreprocessor.cc +++ b/src/warcpreprocessor.cc @@ -1,3 +1,6 @@ + +#include +#include "src/bilangwriter.hh" #include "warcpreprocessor.hh" #include "src/lang.hh" #include "zipreader.hh" @@ -63,10 +66,10 @@ namespace warc2text { const std::unordered_set WARCPreprocessor::removeExtensions = {".jpg", ".jpeg", ".gif", ".png", ".css", ".js", ".mp3", ".mp4", ".flv", ".wmv", ".gz", ".zip", ".rar" }; - WARCPreprocessor::WARCPreprocessor(const LanguageDetector &detector, WARCPreprocessorOptions const &options) : + WARCPreprocessor::WARCPreprocessor(RecordWriter &writer, const LanguageDetector &detector, WARCPreprocessorOptions const &options) : + writer(writer), detector(detector), options(options), - writer(options.output, options.output_files), totalRecords(0), textRecords(0), langRecords(0), @@ -106,17 +109,21 @@ namespace warc2text { WARCReader reader(filename); std::string content; - bool done = false; int n_langs = 0; - while (!done) { - done = !reader.getRecord(content); + while (true) { + std::size_t offset = reader.tell(); + std::size_t size = reader.getRecord(content); + + // No more records (EOF or failure to inflate) + if (size == 0) + break; // Note that content.empty() will also be true when len(record) > max_size (which is 20MB by default) - if (done or content.empty()) + if (content.empty()) continue; - Record record(content); + Record record(content, filename, size, offset); if (record.getPayload().empty()) continue; diff --git a/src/warcpreprocessor.hh b/src/warcpreprocessor.hh index d4f85eb..0956a8c 100644 --- a/src/warcpreprocessor.hh +++ b/src/warcpreprocessor.hh @@ -6,6 +6,7 @@ #include "warcreader.hh" #include "bilangwriter.hh" #include "util.hh" +#include #include #include #include @@ -44,9 +45,9 @@ namespace warc2text { class WARCPreprocessor { private: + RecordWriter &writer; LanguageDetector const &detector; WARCPreprocessorOptions const &options; - BilangWriter writer; WARCWriter pdf_warc_writer; WARCWriter robots_warc_writer; unsigned int totalRecords; @@ -62,7 +63,7 @@ namespace warc2text { bool URLfilter(const std::string& url) const; public: - explicit WARCPreprocessor(LanguageDetector const &detector, WARCPreprocessorOptions const &options); + explicit WARCPreprocessor(RecordWriter &writer, LanguageDetector const &detector, WARCPreprocessorOptions const &options); void process(const std::string &filename); void printStatistics() const; }; diff --git a/src/warcreader.cc b/src/warcreader.cc index d78bcce..7722304 100644 --- a/src/warcreader.cc +++ b/src/warcreader.cc @@ -3,18 +3,13 @@ #include namespace warc2text { - WARCReader::WARCReader(){ - warc_filename = ""; - file = nullptr; - - buf = new uint8_t[BUFFER_SIZE]; - scratch = new uint8_t[BUFFER_SIZE]; - + WARCReader::WARCReader() + { s.zalloc = nullptr; s.zfree = nullptr; s.opaque = nullptr; s.avail_in = 0; - s.next_in = buf; + s.next_in = buf.data(); if (inflateInit2(&s, 32) != Z_OK) { BOOST_LOG_TRIVIAL(error) << "Failed to init zlib"; @@ -27,39 +22,36 @@ namespace warc2text { } WARCReader::~WARCReader(){ - delete[] buf; - delete[] scratch; inflateEnd(&s); - closeFile(); } - bool WARCReader::getRecord(std::string& out, std::size_t max_size){ + std::size_t WARCReader::getRecord(std::string& out, std::size_t max_size){ int inflate_ret = 0; out.clear(); - 
std::size_t len; + std::size_t offset = tell(); bool skip_record = false; while (inflate_ret != Z_STREAM_END) { if (s.avail_in == 0) { - len = readChunk(); + std::size_t len = readChunk(); if (len <= 0) { // nothing more to read out.clear(); - return false; + return 0; } s.avail_in = len; - s.next_in = buf; + s.next_in = buf.data(); } // inflate until either stream end is reached, or there is no more data while (inflate_ret != Z_STREAM_END && s.avail_in != 0) { - s.next_out = scratch; + s.next_out = scratch.data(); s.avail_out = BUFFER_SIZE; inflate_ret = inflate(&s, Z_NO_FLUSH); if (inflate_ret != Z_OK && inflate_ret != Z_STREAM_END) { BOOST_LOG_TRIVIAL(error) << "WARC " << warc_filename << ": error during decompressing"; out.clear(); - return false; + return 0; } - if (not skip_record) out.append(scratch, scratch + (BUFFER_SIZE - s.avail_out)); + if (not skip_record) out.append(scratch.data(), scratch.data() + (scratch.size() - s.avail_out)); if (out.size() > max_size) { BOOST_LOG_TRIVIAL(trace) << "WARC " << warc_filename << ": skipping large record"; out.clear(); @@ -74,30 +66,31 @@ namespace warc2text { // next in and avail_in are updated while inflating, so no need to update them manually } } - return true; + return tell() - offset; } void WARCReader::openFile(const std::string& filename){ warc_filename = filename; if (filename.empty() || filename == "-") - file = std::freopen(nullptr, "rb", stdin); // make sure stdin is open in binary mode - else file = std::fopen(filename.c_str(), "r"); - if (!file) { + file.reset(std::freopen(nullptr, "rb", stdin)); // make sure stdin is open in binary mode + else + file.reset(std::fopen(filename.c_str(), "r")); + if (!file.get()) { BOOST_LOG_TRIVIAL(error) << "WARC " << filename << ": file opening failed, skipping this WARC"; } } - void WARCReader::closeFile() { - if (file) std::fclose(file); - } - std::size_t WARCReader::readChunk(){ - std::size_t len = std::fread(buf, sizeof(uint8_t), BUFFER_SIZE, file); - if (std::ferror(file) && !std::feof(file)) { + std::size_t len = std::fread(buf.data(), sizeof(uint8_t), BUFFER_SIZE, file.get()); + if (std::ferror(file.get()) && !std::feof(file.get())) { BOOST_LOG_TRIVIAL(error) << "WARC " << warc_filename << ": error during reading"; return 0; } return len; } + std::size_t WARCReader::tell() const { + return std::ftell(const_cast(file.get())) - s.avail_in; + } + } // warc2text diff --git a/src/warcreader.hh b/src/warcreader.hh index 2bf1bba..42834eb 100644 --- a/src/warcreader.hh +++ b/src/warcreader.hh @@ -1,7 +1,9 @@ #ifndef WARC2TEXT_WARCREADER_HH #define WARC2TEXT_WARCREADER_HH +#include "util/file.hh" #include "zlib.h" +#include #include #include @@ -10,15 +12,16 @@ namespace warc2text { public: WARCReader(); explicit WARCReader(const std::string& filename); - bool getRecord(std::string& out, std::size_t max_size = 1024*1024*20); //20MB + std::size_t getRecord(std::string& out, std::size_t max_size = 1024*1024*20); //20MB + std::size_t tell() const; ~WARCReader(); private: - std::FILE* file; + util::scoped_FILE file; std::string warc_filename; z_stream s{}; static const std::size_t BUFFER_SIZE = 4096; - uint8_t* buf; - uint8_t* scratch; + std::array buf; + std::array scratch; void openFile(const std::string& filename); void closeFile(); diff --git a/warc2text_main.cc b/warc2text_main.cc index e94e467..2c7ff80 100644 --- a/warc2text_main.cc +++ b/warc2text_main.cc @@ -14,12 +14,13 @@ using namespace warc2text; struct Options : WARCPreprocessorOptions { - std::string file_list; std::vector warcs; - 
std::string classifier; - std::string fasttext_model; + std::string files; bool verbose{}; bool silent{}; + bool jsonl{}; + std::string classifier; + std::string fasttext_model; }; void parseArgs(int argc, char *argv[], Options& out) { @@ -28,7 +29,7 @@ void parseArgs(int argc, char *argv[], Options& out) { desc.add_options() ("help,h", po::bool_switch(), "Show this help message") ("output,o", po::value(&out.output)->default_value("."), "Output folder") - ("files,f", po::value(&out.file_list)->default_value("url,token"), "List of output files separated by commas. Default (mandatory files): 'url,text'. Optional: 'mime,html'") + ("files,f", po::value(&out.files)->default_value("url,text"), "List of output files separated by commas. Default: 'url,text'. Optional: 'mime,html,file'") ("input,i", po::value(&out.warcs)->multitoken(), "Input WARC file name(s)") ("tag-filters", po::value(&out.tag_filters_filename), "Plain text file containing tag filters") ("invert-tag-filters", po::bool_switch(&out.tag_filters_invert)->default_value(false), "Invert tag filter application") @@ -39,6 +40,7 @@ void parseArgs(int argc, char *argv[], Options& out) { ("verbose,v", po::bool_switch(&out.verbose)->default_value(false), "Verbosity level") ("silent,s", po::bool_switch(&out.silent)->default_value(false)) ("multilang", po::bool_switch(&out.multilang)->default_value(false), "Detect multiple languages in a single record") + ("jsonl", po::bool_switch(&out.jsonl)->default_value(false), "Output jsonl to stdout") ("classifier", po::value(&out.classifier)->default_value("cld2"), "Language classifier: cld2 or fasttext (default cld2)") ("fasttext-model", po::value(&out.fasttext_model)->default_value(""), "Path to fasttext model") ("encode-urls", po::bool_switch(&out.encodeURLs)->default_value(false), "Encode URLs obtained from WARC records"); @@ -55,7 +57,7 @@ void parseArgs(int argc, char *argv[], Options& out) { " -o Output folder, required\n" " -f List of output files separated by commas\n" " Default (mandatory): \"url,text\"\n" - " Optional values: \"mime,html\"\n" + " Optional values: \"mime,html,file,date\"\n" " --classifier Classifier to use: cld2 or fasttext\n" " --fasttext-model Path to FastText model for fasttext classifier\n" " --multilang Detect multiple languages in documents (up to 3),\n" @@ -70,6 +72,7 @@ void parseArgs(int argc, char *argv[], Options& out) { " --encode-urls Encode URLs obtained from WARC records\n" " --paragraph-identification Add paragraph index for each sentence extracted from the html\n" " -s Only output errors\n" + " --jsonl Write JSONLines to stdout\n" " -v Verbose output (print trace)\n\n"; exit(1); } @@ -92,11 +95,20 @@ int main(int argc, char *argv[]) { // prepare list of output files std::vector files_list; - boost::algorithm::split(files_list, options.file_list, [](char c) {return c == ',';}); + boost::algorithm::split(files_list, options.files, [](char c) {return c == ',';}); options.output_files.insert(files_list.begin(), files_list.end()); - std::unique_ptr detector; + std::unique_ptr writer; + if (options.jsonl) { + writer = std::make_unique(std::cout); + } else if (!options.output_files.empty()) { + writer = std::make_unique(options.output, options.output_files); + } else { + BOOST_LOG_TRIVIAL(error) << "No output files specified"; + abort(); + } + std::unique_ptr detector; if (options.classifier == "cld2") { if (options.multilang) { detector.reset(new CLD2MultiLangDetector()); @@ -107,6 +119,9 @@ int main(int argc, char *argv[]) { if (options.multilang) { 
BOOST_LOG_TRIVIAL(error) << "FastText classifier doesn't do multilang at the moment"; abort(); + } else if (options.fasttext_model.empty()) { + BOOST_LOG_TRIVIAL(error) << "No FastText language identification model specified. Use --fasttext-model"; + abort(); } else { detector.reset(new FastTextDetector(options.fasttext_model)); } @@ -116,7 +131,7 @@ int main(int argc, char *argv[]) { } std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now(); - WARCPreprocessor warcpproc(*detector, options); + WARCPreprocessor warcpproc(*writer, *detector, options); for (const std::string& file : options.warcs){ warcpproc.process(file); }
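
With this change every output path goes through the `RecordWriter` interface: `WARCPreprocessor` only sees a `RecordWriter&`, and `main` decides whether that is a `BilangWriter` (per-language files) or a `JSONLinesWriter` (stdout). A minimal sketch of how a further output format could be plugged in without touching `WARCPreprocessor`; the `TsvWriter` name and its one-line-per-chunk TSV format are hypothetical, not part of this patch:

```cpp
#include <ostream>
#include <string>
#include "src/bilangwriter.hh"   // RecordWriter interface; pulls in record.hh for Record

namespace warc2text {

// Hypothetical writer: emits one "<lang>\t<url>" line per detected language chunk.
class TsvWriter : public RecordWriter {
public:
    explicit TsvWriter(std::ostream &out) : out_(out) {}

    void write(const Record &record, [[maybe_unused]] bool paragraph_identification = false) override {
        // One line per language chunk, mirroring how BilangWriter iterates getTextByLangs().
        for (const auto &chunk : record.getTextByLangs())
            out_ << chunk.first << '\t' << record.getURL() << '\n';
    }

private:
    std::ostream &out_;
};

} // namespace warc2text
```

Wiring it up would only need another branch next to the `--jsonl` check in `main`, e.g. `writer = std::make_unique<TsvWriter>(std::cout);`.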