diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c9619e1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +disorder.o +md5.o +tmp.foo +wikiq +wikiq.o + diff --git a/Makefile b/Makefile index cc0e56f..b7df471 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,12 @@ CXXFLAGS = -O3 CFLAGS = $(CXXFLAGS) -OBJECTS = wikiq.o md5.o disorder.o +OBJECTS = wikiq.o md5.o all: wikiq wikiq: $(OBJECTS) $(CXX) $(CXXFLAGS) $(OBJECTS) -lpcrecpp -lpcre -lexpat -o wikiq -disorder.o: disorder.h md5.o: md5.h clean: diff --git a/README b/README deleted file mode 100644 index f4bae6a..0000000 --- a/README +++ /dev/null @@ -1,46 +0,0 @@ -wikiq: a WikiMedia XML data dump to .tsv parser - -author: Erik Garrison - - -overview: - -wikiq is written in C using expat. It is designed to enable researchers to -rapidly extract revision histories (minus text and comments) from impossibly -large XML datasets. - - -use: - -To use, first make sure you have libexpat and libpcrecpp installed, then: - - % make - % ./wikiq -h # prints usage - % 7za e -so hugewikidatadump.xml | ./wikiq >hugewikidatadump.tsv - - -features: - -In addition to parsing WikiMedia XML data dumps into a tab-separated tabular -format, wikiq extracts article diffs and can execute arbitrary Perl-compatible -regular expressions against the additions and deletions which differentiate any -revision from the previous. Any number of regular expressions may be supplied -on the command line, and may be tagged using the '-n' option. - -MD5 checksums are used at runtime for precise detection of reversions. - - -output: - -wikiq generates these fields for each revision: - -title, articleid, revid, timestamp, anon, editor, editorid, minor, -text_length, text_entropy, text_md5, reversion, additions_size, deletions_size -.... 
and additional fields for each regex executed against add/delete diffs - -Boolean fields are TRUE/FALSE except in the case of reversion, which is blank -unless the article is a revert to a previous revision, in which case, it -contains the revision ID of the revision which was reverted to. - - -author: Erik Garrison diff --git a/README.OLD b/README.OLD new file mode 100644 index 0000000..4937f59 --- /dev/null +++ b/README.OLD @@ -0,0 +1,45 @@ +wikiq: a simple and fast stream-based MediaWiki XML dump parser + +authors: Erik Garrison + Benjamin Mako Hill + +overview: + +wikiq is written in C++ using expat. It is designed to enable +researchers to rapidly extract revision histories (minus text and +comments) from large XML datasets. + +use: + +To use, first make sure you have libexpat and libpcrecpp installed (e.g. +via packages libexpat1 and libpcre3-dev on Debian or Ubuntu), then: + + % make + % ./wikiq -h # prints usage + % 7za e -so hugewikidatadump.xml | ./wikiq >hugewikidatadump.tsv + + +features: + +In addition to parsing WikiMedia XML data dumps into a tab-separated +tabular format, wikiq can match Perl-compatible regular expressions +against revision content, can extract article diffs, and can match +regexes against the additions and deletions between revisions. Any +number of regular expressions may be supplied on the command line, and +may be tagged using the '-n' and -N options. + +MD5 checksums of revisions are used at runtime. + +output: + +wikiq generates these fields for each revision: + +title, articleid, revid, timestamp, anon, editor, editorid, minor, +text_length, text_md5, reversion, additions_size, deletions_size +.... and additional fields for each regex executed against content or +added/deleted diffs + +Boolean fields are TRUE/FALSE except in the case of reversion, which is blank +unless the article is a revert to a previous revision, in which case, it +contains the revision ID of the revision which was reverted to. 
+ diff --git a/README.md b/README.md new file mode 100644 index 0000000..bf7ff45 --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +This C++ version of `wikiq` in this repository has not been updated since ~2011 +and has a number of critical limitations. The repository is being kept here for +historical and archival purposes. Please don't rely on it! + +**An improved version of a very similar stream-based XML-parser for MediaWiki by +the same authors can be found here:** + +> **[https://code.communitydata.cc/mediawiki\_dump\_tools.git](https://code.communitydata.cc/mediawiki_dump_tools.git)** + +These new tools are maintained by some of the same authors (now based in the +[Community Data Science Collective](https://communitydata.cc)) and the new tool +relies on many of the same libraries including the `expat` non-validating XML +parser. + +This new version has a very similar interface, is written in Python, and +leverages [Python MediaWiki Utilities](https://github.com/mediawiki-utilities) +for XML dump parsing and several other tasks. The two tools have been +benchmarked and the new tool's performance measures are generally within 90% of +the C++ version of the tool in this repository. + +>> —[Benjamin Mako Hill](https://mako.cc/) + diff --git a/disorder.c b/disorder.c deleted file mode 100644 index a5f7c35..0000000 --- a/disorder.c +++ /dev/null @@ -1,192 +0,0 @@ -/*************************************************************************** - * libdisorder: A Library for Measuring Byte Stream Entropy - * Copyright (C) 2010 Michael E. Locasto - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the: - * Free Software Foundation, Inc. - * 59 Temple Place, Suite 330 - * Boston, MA 02111-1307 USA - * - * $Id$ - **************************************************************************/ - -#include //for log2() -#include //for NULL -#include "disorder.h" - -#if defined(__FreeBSD__) -#define log2(x) (log((x)) * (1./M_LN2)) -#endif - -/** Frequecies for each byte */ -static int m_token_freqs[LIBDO_MAX_BYTES]; //frequency of each token in sample -static float m_token_probs[LIBDO_MAX_BYTES]; //P(each token appearing) -static int m_num_tokens = 0; //actual number of `seen' tokens, max 256 -static float m_maxent = 0.0; -static float m_ratio = 0.0; -static int LIBDISORDER_INITIALIZED = 0; - -static void -initialize_lib() -{ - int i = 0; - if(1==LIBDISORDER_INITIALIZED) - return; - - m_num_tokens = 0; - - for(i=0;iLIBDO_MAX_BYTES) - { - //report error somehow? 
- return 0.0; - } - - //iterate through whole m_token_freq array, but only count - //spots that have a registered token (i.e., freq>0) - for(i=0;i #include "expat.h" #include -#include "disorder.h" #include "md5.h" #include "dtl/dtl.hpp" #include @@ -56,9 +55,18 @@ typedef struct { char *comment; char *text; vector last_text_tokens; - vector regexes; - vector wp_namespace_res; - vector regex_names; + + // title regexes + vector title_regexes; + + // regexes for checking with revisions + vector content_regex_names; + vector content_regexes; + + // regexes for looking within diffs + vector diff_regex_names; + vector diff_regexes; + map revision_md5; // used for detecting reversions // track string size of the elements, to prevent O(N^2) processing in charhndl @@ -247,29 +255,39 @@ write_row(revisionData *data) ++pos; } - // skip this if the wp_namespace is not in the proscribed list of - // namespaces - bool wp_namespace_found = false; - if (!data->wp_namespace_res.empty()) { - for (vector::iterator r = data->wp_namespace_res.begin(); r != data->wp_namespace_res.end(); ++r) { - pcrecpp::RE& wp_namespace_re = *r; - if (wp_namespace_re.PartialMatch(data->title)) { - wp_namespace_found = true; + // look to see if (a) we've passed in a list of /any/ title_regexes + // and (b) if all of the title_regex_matches match + // if (a) is true and (b) is not, we return + bool any_title_regex_match = false; + if (!data->title_regexes.empty()) { + for (vector::iterator r = data->title_regexes.begin(); r != data->title_regexes.end(); ++r) { + pcrecpp::RE& title_regex = *r; + if (title_regex.PartialMatch(data->title)) { + any_title_regex_match = true; break; } } - if (!wp_namespace_found) { + if (!any_title_regex_match) { return; } } + // search the content of the revision for a any of the regexes + vector content_regex_matches; + if (!data->content_regexes.empty()) { + for (vector::iterator r = data->content_regexes.begin(); r != data->content_regexes.end(); ++r) { + pcrecpp::RE& 
content_regex = *r; + content_regex_matches.push_back(content_regex.PartialMatch(data->text)); + } + } + //vector additions; //vector deletions; string additions; string deletions; - vector regex_matches_adds; - vector regex_matches_dels; + vector diff_regex_matches_adds; + vector diff_regex_matches_dels; if (data->last_text_tokens.empty()) { additions = data->text; @@ -297,17 +315,17 @@ write_row(revisionData *data) if (!additions.empty()) { //cout << "ADD: " << additions << endl; - for (vector::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) { - pcrecpp::RE& regex = *r; - regex_matches_adds.push_back(regex.PartialMatch(additions)); + for (vector::iterator r = data->diff_regexes.begin(); r != data->diff_regexes.end(); ++r) { + pcrecpp::RE& diff_regex = *r; + diff_regex_matches_adds.push_back(diff_regex.PartialMatch(additions)); } } if (!deletions.empty()) { //cout << "DEL: " << deletions << endl; - for (vector::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) { - pcrecpp::RE& regex = *r; - regex_matches_dels.push_back(regex.PartialMatch(deletions)); + for (vector::iterator r = data->diff_regexes.begin(); r != data->diff_regexes.end(); ++r) { + pcrecpp::RE& diff_regex = *r; + diff_regex_matches_dels.push_back(diff_regex.PartialMatch(deletions)); } } @@ -326,15 +344,19 @@ write_row(revisionData *data) << data->editorid << "\t" << ((data->minor) ? "TRUE" : "FALSE") << "\t" << (unsigned int) data->text_size << "\t" - << shannon_H(data->text, data->text_size) << "\t" << md5_hex_output << "\t" << reverted_to << "\t" << (int) additions.size() << "\t" << (int) deletions.size(); - for (int n = 0; n < data->regex_names.size(); ++n) { - cout << "\t" << ((!regex_matches_adds.empty() && regex_matches_adds.at(n)) ? "TRUE" : "FALSE") - << "\t" << ((!regex_matches_dels.empty() && regex_matches_dels.at(n)) ? 
"TRUE" : "FALSE"); + for (int n = 0; n < data->content_regex_names.size(); ++n) { + cout << "\t" << ((!content_regex_matches.empty() + && content_regex_matches.at(n)) ? "TRUE" : "FALSE"); + } + + for (int n = 0; n < data->diff_regex_names.size(); ++n) { + cout << "\t" << ((!diff_regex_matches_adds.empty() && diff_regex_matches_adds.at(n)) ? "TRUE" : "FALSE") + << "\t" << ((!diff_regex_matches_dels.empty() && diff_regex_matches_dels.at(n)) ? "TRUE" : "FALSE"); } cout << endl; @@ -506,22 +528,25 @@ void print_usage(char* argv[]) { << endl << "options:" << endl << " -v verbose mode prints text and comments after each line of tab separated data" << endl - << " -n name of the following regex (e.g. -n name -r \"...\")" << endl - << " -r regex to check against additions and deletions" << endl - << " -t regex(es) to check title against as a way of limiting output to specific namespaces" << endl + << " -n name of the following regex for content (e.g. -n name -r \"...\")" << endl + << " -r regex to check against content of the revision" << endl + << " -N name of the following regex for diffs (e.g. -N name -R \"...\")" << endl + << " -R regex to check against diffs (i.e., additions and deletions)" << endl + << " -t parse revisions only from pages whose titles match regex(es)" << endl << endl << "Takes a wikimedia data dump XML stream on standard in, and produces" << endl << "a tab-separated stream of revisions on standard out:" << endl << endl << "title, articleid, revid, timestamp, anon, editor, editorid, minor," << endl - << "text_length, text_entropy, text_md5, reversion, additions_size, deletions_size" << endl + << "text_length, text_md5, reversion, additions_size, deletions_size" << endl << ".... 
and additional fields for each regex executed against add/delete diffs" << endl << endl << "Boolean fields are TRUE/FALSE except in the case of reversion, which is blank" << endl << "unless the article is a revert to a previous revision, in which case, it" << endl << "contains the revision ID of the revision which was reverted to." << endl << endl - << "author: Erik Garrison " << endl; + << "authors: Erik Garrison " << endl + << " Benjamin Mako Hill " << endl; } @@ -534,7 +559,8 @@ main(int argc, char *argv[]) // in "simple" output, we don't print text and comments output_type = SIMPLE; char c; - string regex_name; + string diff_regex_name; + string content_regex_name; // the user data struct which is passed to callback functions revisionData data; @@ -549,13 +575,23 @@ main(int argc, char *argv[]) output_type = FULL; break; case 'n': - regex_name = optarg; + content_regex_name = optarg; break; case 'r': - data.regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8())); - data.regex_names.push_back(regex_name); - if (!regex_name.empty()) { - regex_name.clear(); + data.content_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8())); + data.content_regex_names.push_back(content_regex_name); + if (!content_regex_name.empty()) { + content_regex_name.clear(); + } + break; + case 'N': + diff_regex_name = optarg; + break; + case 'R': + data.diff_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8())); + data.diff_regex_names.push_back(diff_regex_name); + if (!diff_regex_name.empty()) { + diff_regex_name.clear(); } break; case 'h': @@ -563,7 +599,7 @@ main(int argc, char *argv[]) exit(0); break; case 't': - data.wp_namespace_res.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8())); + data.title_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8())); break; } @@ -598,31 +634,42 @@ main(int argc, char *argv[]) cout << "title" << "\t" << "articleid" << "\t" << "revid" << "\t" - << "date" << " " + << "date" << "_" << "time" << "\t" << "anon" << "\t" << "editor" << "\t" << "editor_id" 
<< "\t" << "minor" << "\t" << "text_size" << "\t" - << "text_entropy" << "\t" << "text_md5" << "\t" << "reversion" << "\t" << "additions_size" << "\t" << "deletions_size"; int n = 0; - if (!data.regexes.empty()) { - for (vector::iterator r = data.regexes.begin(); r != data.regexes.end(); ++r, ++n) { - if (data.regex_names.at(n).empty()) { + if (!data.content_regexes.empty()) { + for (vector::iterator r = data.content_regexes.begin(); + r != data.content_regexes.end(); ++r, ++n) { + if (data.content_regex_names.at(n).empty()) { + cout << "\t" << "regex" << n; + } else { + cout << "\t" << data.content_regex_names.at(n); + } + } + } + + if (!data.diff_regexes.empty()) { + for (vector::iterator r = data.diff_regexes.begin(); r != data.diff_regexes.end(); ++r, ++n) { + if (data.diff_regex_names.at(n).empty()) { cout << "\t" << "regex_" << n << "_add" << "\t" << "regex_" << n << "_del"; } else { - cout << "\t" << data.regex_names.at(n) << "_add" - << "\t" << data.regex_names.at(n) << "_del"; + cout << "\t" << data.diff_regex_names.at(n) << "_add" + << "\t" << data.diff_regex_names.at(n) << "_del"; } } } + cout << endl; // shovel data into the parser