/** @file index_file.cc
 * @brief Handle indexing a document from a file
 */
/* Copyright 1999,2000,2001 BrightStation PLC
 * Copyright 2001,2005 James Aylett
 * Copyright 2001,2002 Ananova Ltd
 * Copyright 2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Olly Betts
 * Copyright 2009 Frank J Bruzzaniti
 * Copyright 2012 Mihai Bivol
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
 * USA
 */
#include <config.h>

#include "index_file.h"

#include <algorithm>
#include <iostream>
#include <limits>
#include <string>
#include <map>
#include <vector>

#include <sys/types.h>
#include "safeunistd.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "safefcntl.h"
#include "safeerrno.h"
#include <ctime>

#include <xapian.h>

#include "append_filename_arg.h"
#include "atomparse.h"
#include "diritor.h"
#include "failed.h"
#include "md5wrap.h"
#include "metaxmlparse.h"
#include "mimemap.h"
#include "msxmlparse.h"
#include "myhtmlparse.h"
#include "opendocparse.h"
#include "pkglibbindir.h"
#include "runfilter.h"
#include "sample.h"
#include "str.h"
#include "stringutils.h"
#include "svgparse.h"
#include "tmpdir.h"
#include "utf8convert.h"
#include "utils.h"
#include "values.h"
#include "xmlparse.h"
#include "xlsxparse.h"
#include "xpsxmlparse.h"

using namespace std;
static Xapian::WritableDatabase db;
static Xapian::TermGenerator indexer;

static Xapian::doccount old_docs_not_seen;
static Xapian::docid old_lastdocid;
static vector<bool> updated;

static bool verbose;
static bool retry_failed;
static bool use_ctime;
static dup_action_type dup_action;
static bool ignore_exclusions;
static bool description_as_sample;
static bool date_terms;

static time_t last_altered_max;
static size_t sample_size;
static size_t title_size;
static size_t max_ext_len;

static empty_body_type empty_body;

static string root;
static string site_term, host_term;

static Failed failed;

map<string, Filter> commands;
static void
mark_as_seen(Xapian::docid did)
{
    if (usual(did < updated.size() && !updated[did])) {
        updated[did] = true;
        --old_docs_not_seen;
    }
}
void
skip(const string & urlterm, const string & context, const string & msg,
     off_t size, time_t last_mod, unsigned flags)
{
    failed.add(urlterm, last_mod, size);

    if (!verbose || (flags & SKIP_SHOW_FILENAME)) {
        if (!verbose && (flags & SKIP_VERBOSE_ONLY)) return;
        cout << context << ": ";
    }

    cout << "Skipping - " << msg << endl;
}
static void
skip_cmd_failed(const string & urlterm, const string & context, const string & cmd,
                off_t size, time_t last_mod)
{
    skip(urlterm, context, "\"" + cmd + "\" failed", size, last_mod);
}

static void
skip_meta_tag(const string & urlterm, const string & context,
              off_t size, time_t last_mod)
{
    skip(urlterm, context, "indexing disallowed by meta tag", size, last_mod);
}

static void
skip_unknown_mimetype(const string & urlterm, const string & context,
                      const string & mimetype, off_t size, time_t last_mod)
{
    skip(urlterm, context, "unknown MIME type '" + mimetype + "'", size, last_mod);
}
void
index_add_default_filters()
{
    index_command("application/msword", Filter("antiword -mUTF-8.txt", false));
    index_command("application/vnd.ms-excel",
                  Filter("xls2csv -c' ' -q0 -dutf-8", false));
    index_command("application/vnd.ms-powerpoint",
                  Filter("catppt -dutf-8", false));
    // Looking at the source of wpd2html and wpd2text I think both output
    // UTF-8, but it's hard to be sure without sample Unicode .wpd files
    // as they don't seem to be at all well documented.
    index_command("application/vnd.wordperfect", Filter("wpd2text", false));
    // wps2text produces UTF-8 output from the sample files I've tested.
    index_command("application/vnd.ms-works", Filter("wps2text", false));
    // Output is UTF-8 according to "man djvutxt".  Generally this seems to
    // be true, though some examples from djvu.org generate isolated byte
    // 0x95 in a context which suggests it might be intended to be a bullet
    // (as it is in CP1250).
    index_command("image/vnd.djvu", Filter("djvutxt", false));
    index_command("text/markdown", Filter("markdown", "text/html", false));
    // The --text option unhelpfully converts all non-ASCII characters to "?"
    // so we use --html instead, which produces HTML entities.  The --nopict
    // option suppresses exporting picture files as pictNNNN.wmf in the
    // current directory.  Note that this option was ignored in some older
    // versions, but it was fixed in unrtf 0.20.4.
    index_command("text/rtf",
                  Filter("unrtf --nopict --html 2>/dev/null", "text/html",
                         false));
    index_command("text/x-rst", Filter("rst2html", "text/html", false));
    index_command("application/x-mspublisher",
                  Filter("pub2xhtml", "text/html", false));
    index_command("application/vnd.ms-outlook",
                  Filter(get_pkglibbindir() + "/outlookmsg2html", "text/html",
                         false));
    // pod2text's output character set doesn't seem to be documented, but from
    // inspecting the source it looks like it's probably iso-8859-1.  We need
    // to pass "--errors=stderr" or else minor POD formatting errors cause a
    // file not to be indexed.
    index_command("text/x-perl",
                  Filter("pod2text --errors=stderr",
                         "text/plain", "iso-8859-1", false));
    // FIXME: -e0 means "UTF-8", but that results in "fi", "ff", "ffi", etc
    // appearing as single ligatures.  For European languages, it's actually
    // better to use -e2 (ISO-8859-1) and then convert, so let's do that for
    // now until we handle Unicode "compatibility decompositions".
    index_command("application/x-dvi",
                  Filter("catdvi -e2 -s", "text/plain", "iso-8859-1", false));
    // Simplistic - ought to look in index.rdf files for filename and character
    // set.
    index_command("application/x-maff",
                  Filter("unzip -p %f '*/*.*htm*'", "text/html", "iso-8859-1",
                         false));
    index_command("application/x-mimearchive",
                  Filter(get_pkglibbindir() + "/mhtml2html", "text/html",
                         false));
    index_command("message/news",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("message/rfc822",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("text/vcard",
                  Filter(get_pkglibbindir() + "/vcard2text", false));
}
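
// A sketch (not part of the stock defaults) of how a site-specific converter
// could be hooked in with index_command().  As the calls above show, Filter
// is constructed from a command template, optionally the MIME type and
// character set of the command's output, and a final boolean flag (false in
// all the defaults here).  A hypothetical "foo2text" tool writing UTF-8
// plain text to stdout might be registered as:
//
//     index_command("application/x-foo", Filter("foo2text", false));
//
// In command templates, %f is replaced with the escaped input filename and
// %t with a temporary output file (see index_mimetype() below); if neither
// occurs, the input filename is appended to the command.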
void
index_init(const string & dbpath, const Xapian::Stem & stemmer,
           const string & root_, const string & site_term_,
           const string & host_term_,
           empty_body_type empty_body_, dup_action_type dup_action_,
           size_t sample_size_, size_t title_size_, size_t max_ext_len_,
           bool overwrite, bool retry_failed_,
           bool delete_removed_documents, bool verbose_, bool use_ctime_,
           bool spelling, bool ignore_exclusions_, bool description_as_sample_,
           bool date_terms_)
{
    root = root_;
    site_term = site_term_;
    host_term = host_term_;
    empty_body = empty_body_;
    dup_action = dup_action_;
    sample_size = sample_size_;
    title_size = title_size_;
    max_ext_len = max_ext_len_;
    verbose = verbose_;
    use_ctime = use_ctime_;
    ignore_exclusions = ignore_exclusions_;
    description_as_sample = description_as_sample_;
    date_terms = date_terms_;

    if (!overwrite) {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OPEN);
        old_docs_not_seen = db.get_doccount();
        old_lastdocid = db.get_lastdocid();
        if (delete_removed_documents) {
            // + 1 so that old_lastdocid is a valid subscript.
            updated.resize(old_lastdocid + 1);
        }
        try {
            Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
            string ubound = db.get_value_upper_bound(slot);
            if (!ubound.empty())
                last_altered_max = binary_string_to_int(ubound);
        } catch (const Xapian::UnimplementedError &) {
            numeric_limits<time_t> n;
            last_altered_max = n.max();
        }
    } else {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OVERWRITE);
    }

    if (spelling) {
        indexer.set_database(db);
        indexer.set_flags(indexer.FLAG_SPELLING);
    }
    indexer.set_stemmer(stemmer);

    runfilter_init();

    failed.init(db);

    if (overwrite) {
        // There are no failures to retry, so setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = true;
    } else if (retry_failed_) {
        failed.clear();
        retry_failed = true;
    } else {
        // If there are no existing failures, setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = failed.empty();
    }
}
static void
parse_pdfinfo_field(const char * p, const char * end, string & out, const char * field, size_t len)
{
    if (size_t(end - p) > len && memcmp(p, field, len) == 0) {
        p += len;
        while (p != end && *p == ' ')
            ++p;
        if (p != end && (end[-1] != '\r' || --end != p))
            out.assign(p, end - p);
    }
}

#define PARSE_PDFINFO_FIELD(P, END, OUT, FIELD) \
    parse_pdfinfo_field((P), (END), (OUT), FIELD":", CONST_STRLEN(FIELD) + 1)
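
// For example, PARSE_PDFINFO_FIELD(start, eol, author, "Author") expands to
// parse_pdfinfo_field(start, eol, author, "Author:", 7) - the matched prefix
// includes the colon, and CONST_STRLEN() avoids a runtime strlen() on the
// string literal.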
static void
get_pdf_metainfo(const string & file, string &author, string &title,
                 string &keywords, string &topic)
{
    try {
        string cmd = "pdfinfo -enc UTF-8";
        append_filename_argument(cmd, file);
        string pdfinfo = stdout_to_string(cmd, false);

        const char * p = pdfinfo.data();
        const char * end = p + pdfinfo.size();
        while (p != end) {
            const char * start = p;
            p = static_cast<const char *>(memchr(p, '\n', end - p));
            const char * eol;
            if (p) {
                eol = p;
                ++p;
            } else {
                p = eol = end;
            }
            switch (*start) {
                case 'A':
                    PARSE_PDFINFO_FIELD(start, eol, author, "Author");
                    break;
                case 'K':
                    PARSE_PDFINFO_FIELD(start, eol, keywords, "Keywords");
                    break;
                case 'S':
                    PARSE_PDFINFO_FIELD(start, eol, topic, "Subject");
                    break;
                case 'T':
                    PARSE_PDFINFO_FIELD(start, eol, title, "Title");
                    break;
            }
        }
    } catch (ReadError) {
        // It's probably best to index the document even if pdfinfo fails.
    }
}
static void
generate_sample_from_csv(const string & csv_data, string & sample)
{
    // Add 3 to allow for a 4 byte utf-8 sequence being appended when
    // output is sample_size - 1 bytes long.  Use csv_data.size() if smaller
    // since the user might reasonably set sample_size really high.
    sample.reserve(min(sample_size + 3, csv_data.size()));
    size_t last_word_end = 0;
    bool in_space = true;
    bool in_quotes = false;
    for (Xapian::Utf8Iterator i(csv_data); i != Xapian::Utf8Iterator(); ++i) {
        unsigned ch = *i;

        if (!in_quotes) {
            // If not already in double quotes, '"' starts quoting and
            // ',' starts a new field.
            if (ch == '"') {
                in_quotes = true;
                continue;
            }
            if (ch == ',')
                ch = ' ';
        } else if (ch == '"') {
            // In double quotes, '"' either ends double quotes, or
            // if followed by another '"', means a literal '"'.
            if (++i == Xapian::Utf8Iterator())
                break;
            ch = *i;
            if (ch != '"') {
                in_quotes = false;
                if (ch == ',')
                    ch = ' ';
            }
        }

        if (ch <= ' ' || ch == 0xa0) {
            // FIXME: if all the whitespace characters between two
            // words are 0xa0 (non-breaking space) then perhaps we
            // should output 0xa0.
            if (in_space)
                continue;
            last_word_end = sample.size();
            sample += ' ';
            in_space = true;
        } else {
            Xapian::Unicode::append_utf8(sample, ch);
            in_space = false;
        }

        if (sample.size() >= sample_size) {
            // Need to truncate sample.
            if (last_word_end <= sample_size / 2) {
                // Monster word!  We'll have to just split it.
                sample.replace(sample_size - 3, string::npos, "...", 3);
            } else {
                sample.replace(last_word_end, string::npos, " ...", 4);
            }
            break;
        }
    }
}
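
// A worked example of the quoting rules above (not from the original
// sources): the CSV row
//
//     "Hello, world",2nd field
//
// contributes "Hello, world 2nd field" to the sample - the comma inside
// double quotes is kept, the field-separating comma becomes a space, and a
// doubled "" inside quotes would yield a literal '"'.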
static bool
index_check_existing(const string & urlterm, time_t last_altered,
                     Xapian::docid & did)
{
    switch (dup_action) {
        case DUP_SKIP: {
            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                if (verbose)
                    cout << "already indexed, not updating" << endl;
                did = *p;
                mark_as_seen(did);
                return true;
            }
            break;
        }
        case DUP_CHECK_LAZILY: {
            // If last_altered > last_altered_max, we know for sure that the
            // file is new or updated.
            if (last_altered > last_altered_max) {
                return false;
            }

            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                did = *p;
                Xapian::Document doc = db.get_document(did);
                Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
                string value = doc.get_value(slot);
                time_t old_last_altered = binary_string_to_int(value);
                if (last_altered <= old_last_altered) {
                    if (verbose)
                        cout << "already indexed" << endl;
                    // The docid should be in updated - the only valid
                    // exception is if the URL was long and hashed to the
                    // same URL as an existing document indexed in the same
                    // batch.
                    mark_as_seen(did);
                    return true;
                }
            }
            break;
        }
    }
    return false;
}
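
// How the duplicate handling above combines with index_add_document() below:
// with DUP_SKIP a duplicate never reaches index_add_document(), so the
// document is simply added; otherwise we replace by docid when the check
// above already located the old entry, add outright when the lazy check
// searched for urlterm without finding it, and fall back to
// replace_document(urlterm, ...) - which adds or replaces as appropriate -
// when last_altered > last_altered_max meant no lookup was done.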
void
index_add_document(const string & urlterm, time_t last_altered,
                   Xapian::docid did, const Xapian::Document & doc)
{
    if (dup_action != DUP_SKIP) {
        // If this document has already been indexed, update the existing
        // entry.
        if (did) {
            // We already found out the document id above.
            db.replace_document(did, doc);
        } else if (last_altered <= last_altered_max) {
            // We checked for the UID term and didn't find it.
            did = db.add_document(doc);
        } else {
            did = db.replace_document(urlterm, doc);
        }
        mark_as_seen(did);
        if (verbose) {
            if (did <= old_lastdocid) {
                cout << "updated" << endl;
            } else {
                cout << "added" << endl;
            }
        }
    } else {
        // If this were a duplicate, we'd have skipped it above.
        db.add_document(doc);
        if (verbose)
            cout << "added" << endl;
    }
}
void
index_mimetype(const string & file, const string & urlterm, const string & url,
               const string & ext,
               const string &mimetype, DirectoryIterator &d,
               Xapian::Document & newdocument,
               string record)
{
    string context(file, root.size(), string::npos);

    // FIXME: We could be cleverer here and check mtime too when use_ctime is
    // set - if the ctime has changed but the mtime is unchanged, we can just
    // update the existing Document and avoid having to re-extract text, etc.
    time_t last_altered = use_ctime ? d.get_ctime() : d.get_mtime();

    Xapian::docid did = 0;
    if (index_check_existing(urlterm, last_altered, did))
        return;

    if (!retry_failed) {
        // We only store and check the mtime (last modified) - a change to the
        // metadata won't generally cause a previous failure to now work
        // (FIXME: except permissions).
        time_t failed_last_mod;
        off_t failed_size;
        if (failed.contains(urlterm, failed_last_mod, failed_size)) {
            if (d.get_mtime() <= failed_last_mod &&
                d.get_size() == failed_size) {
                if (verbose)
                    cout << "failed to extract text on earlier run" << endl;
                return;
            }
            // The file has changed, so remove the entry for it.  If it fails
            // again on this attempt, we'll add a new one.
            failed.del(urlterm);
        }
    }

    if (verbose) cout << flush;

    string author, title, sample, keywords, topic, dump;
    string md5;
    time_t created = time_t(-1);

    map<string, Filter>::const_iterator cmd_it = commands.find(mimetype);
    if (cmd_it == commands.end()) {
        size_t slash = mimetype.find('/');
        if (slash != string::npos) {
            string wildtype(mimetype, 0, slash + 2);
            wildtype[slash + 1] = '*';
            cmd_it = commands.find(wildtype);
            if (cmd_it == commands.end()) {
                cmd_it = commands.find("*/*");
            }
        }
        if (cmd_it == commands.end()) {
            cmd_it = commands.find("*");
        }
    }
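    // For example, for "image/png" the lookup above tries the exact type
    // first, then the wildcarded subtype "image/*", then "*/*", and finally
    // "*", so the most specific configured command wins.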
    try {
        if (cmd_it != commands.end()) {
            // Easy "run a command and read text or HTML from stdout or a
            // temporary file" cases.
            string cmd = cmd_it->second.cmd;
            if (cmd.empty()) {
                skip(urlterm, context, "required filter not installed",
                     d.get_size(), d.get_mtime(), SKIP_VERBOSE_ONLY);
                return;
            }
            if (cmd == "false") {
                // Allow setting 'false' as a filter to mean that a MIME type
                // should be quietly ignored.
                string m = "ignoring MIME type '";
                m += cmd_it->first;
                m += "'";
                skip(urlterm, context, m, d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY);
                return;
            }
            bool use_shell = cmd_it->second.use_shell();
            bool substituted = false;
            string tmpout;
            size_t pcent = 0;
            while (true) {
                pcent = cmd.find('%', pcent);
                if (pcent >= cmd.size() - 1)
                    break;
                switch (cmd[pcent + 1]) {
                    case '%': // %% -> %.
                        cmd.erase(++pcent, 1);
                        break;
                    case 'f': { // %f -> escaped filename.
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, file);
                        // Remove the space append_filename_argument() adds
                        // before the argument - the command string either
                        // includes one, or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    case 't': { // %t -> temporary output file.
                        if (tmpout.empty()) {
                            // Use a temporary file with a suitable extension
                            // in case the command cares, and for more helpful
                            // error messages from the command.
                            if (cmd_it->second.output_type == "text/html") {
                                tmpout = get_tmpfile("tmp.html");
                            } else {
                                tmpout = get_tmpfile("tmp.txt");
                            }
                        }
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, tmpout);
                        // Remove the space append_filename_argument() adds
                        // before the argument - the command string either
                        // includes one, or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    default:
                        // Leave anything else alone for now.
                        pcent += 2;
                        break;
                }
            }
            if (!substituted && cmd != "true") {
                // If no %f, append the filename to the command.
                append_filename_argument(cmd, file);
            }
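            // To illustrate the substitution loop with a hypothetical
            // command template (not one of the stock filters):
            //
            //     convert2html --in=%f --out=%t
            //
            // %f becomes the escaped input filename, %t a freshly created
            // temporary output file (tmp.html if the configured output type
            // is text/html, else tmp.txt), and %% collapses to a literal %.
            // Since a substitution occurred, the input filename isn't
            // appended again.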
            try {
                if (!tmpout.empty()) {
                    // Output in temporary file.
                    (void)stdout_to_string(cmd, use_shell);
                    if (!load_file(tmpout, dump)) {
                        throw ReadError("Couldn't read output file");
                    }
                    unlink(tmpout.c_str());
                } else if (cmd == "true") {
                    // Ignore the file's contents, just index metadata from the
                    // filing system.
                } else {
                    // Output on stdout.
                    dump = stdout_to_string(cmd, use_shell);
                }
                const string & charset = cmd_it->second.output_charset;
                if (cmd_it->second.output_type == "text/html") {
                    MyHtmlParser p;
                    p.ignore_metarobots();
                    p.description_as_sample = description_as_sample;
                    try {
                        p.parse_html(dump, charset, false);
                    } catch (const string & newcharset) {
                        p.reset();
                        p.ignore_metarobots();
                        p.description_as_sample = description_as_sample;
                        p.parse_html(dump, newcharset, true);
                    } catch (ReadError) {
                        skip_cmd_failed(urlterm, context, cmd,
                                        d.get_size(), d.get_mtime());
                        return;
                    }
                    dump = p.dump;
                    title = p.title;
                    keywords = p.keywords;
                    topic = p.topic;
                    sample = p.sample;
                    author = p.author;
                    created = p.created;
                } else if (!charset.empty()) {
                    convert_to_utf8(dump, charset);
                }
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/html" || mimetype == "text/x-php") {
            const string & text = d.file_to_string();
            MyHtmlParser p;
            if (ignore_exclusions) p.ignore_metarobots();
            p.description_as_sample = description_as_sample;
            try {
                // Default HTML character set is latin 1, though not specifying
                // one is deprecated these days.
                p.parse_html(text, "iso-8859-1", false);
            } catch (const string & newcharset) {
                p.reset();
                if (ignore_exclusions) p.ignore_metarobots();
                p.description_as_sample = description_as_sample;
                p.parse_html(text, newcharset, true);
            }
            if (!p.indexing_allowed) {
                skip_meta_tag(urlterm, context,
                              d.get_size(), d.get_mtime());
                return;
            }
            dump = p.dump;
            title = p.title;
            keywords = p.keywords;
            topic = p.topic;
            sample = p.sample;
            author = p.author;
            created = p.created;
            md5_string(text, md5);
        } else if (mimetype == "text/plain") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }
        } else if (mimetype == "application/pdf") {
            string cmd = "pdftotext -enc UTF-8";
            append_filename_argument(cmd, file);
            cmd += " -";
            try {
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
            get_pdf_metainfo(file, author, title, keywords, topic);
        } else if (mimetype == "application/postscript") {
            // There simply doesn't seem to be a Unicode capable PostScript to
            // text converter (e.g. pstotext always outputs ISO-8859-1).  The
            // only solution seems to be to convert via PDF using ps2pdf and
            // then pdftotext.  This gives plausible looking UTF-8 output for
            // some Chinese PostScript files I found using Google.  It also has
            // the benefit of allowing us to extract meta information from
            // PostScript files.
            string tmpfile = get_tmpfile("tmp.pdf");
            if (tmpfile.empty()) {
                // FIXME: should this be fatal?  Or disable indexing postscript?
                string msg = "Couldn't create temporary directory (";
                msg += strerror(errno);
                msg += ")";
                skip(urlterm, context, msg,
                     d.get_size(), d.get_mtime());
                return;
            }
            string cmd = "ps2pdf";
            append_filename_argument(cmd, file);
            append_filename_argument(cmd, tmpfile);
            try {
                (void)stdout_to_string(cmd, false);
                cmd = "pdftotext -enc UTF-8";
                append_filename_argument(cmd, tmpfile);
                cmd += " -";
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                unlink(tmpfile.c_str());
                return;
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            try {
                get_pdf_metainfo(tmpfile, author, title, keywords, topic);
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            unlink(tmpfile.c_str());
        } else if (startswith(mimetype, "application/vnd.sun.xml.") ||
                   startswith(mimetype, "application/vnd.oasis.opendocument."))
        {
            // Inspired by http://mjr.towers.org.uk/comp/sxw2text
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " content.xml ; unzip -p";
            append_filename_argument(cmd, file);
            cmd += " styles.xml";
            try {
                OpenDocParser parser;
                parser.parse(stdout_to_string(cmd, true));
                dump = parser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }

            cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " meta.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (startswith(mimetype, "application/vnd.openxmlformats-officedocument.")) {
            const char * args = NULL;
            string tail(mimetype, 46);
            if (startswith(tail, "wordprocessingml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no headers or
                // no footers.
                args = " word/document.xml 'word/header*.xml' 'word/footer*.xml' 2>/dev/null";
            } else if (startswith(tail, "spreadsheetml.")) {
                // Extract the shared string table first, so our parser can
                // grab those ready for parsing the sheets which will reference
                // the shared strings.
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/styles.xml xl/workbook.xml xl/sharedStrings.xml ; unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/worksheets/sheet\\*.xml";
                try {
                    XlsxParser parser;
                    parser.parse(stdout_to_string(cmd, true));
                    dump = parser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            } else if (startswith(tail, "presentationml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no notesSlides
                // or comments.
                args = " 'ppt/slides/slide*.xml' 'ppt/notesSlides/notesSlide*.xml' 'ppt/comments/comment*.xml' 2>/dev/null";
            } else {
                // Don't know how to index this type.
                skip_unknown_mimetype(urlterm, context, mimetype,
                                      d.get_size(), d.get_mtime());
                return;
            }

            if (args) {
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += args;
                try {
                    MSXmlParser xmlparser;
                    // Treat exit status 11 from unzip as success - this is
                    // what we get if one of the listed filenames to extract
                    // doesn't match anything in the zip file.
                    xmlparser.parse_xml(stdout_to_string(cmd, false, 11));
                    dump = xmlparser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            }

            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " docProps/core.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (mimetype == "application/x-abiword") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            const string & text = d.file_to_string();
            xmlparser.parse_xml(text);
            dump = xmlparser.dump;
            md5_string(text, md5);
        } else if (mimetype == "application/x-abiword-compressed") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            xmlparser.parse_xml(d.gzfile_to_string());
            dump = xmlparser.dump;
        } else if (mimetype == "application/vnd.ms-xpsdocument") {
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " 'Documents/1/Pages/*.fpage'";
            try {
                XpsXmlParser xpsparser;
                dump = stdout_to_string(cmd, false);
                // Look for Byte-Order Mark (BOM).
                if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                    // UTF-16 in big-endian/little-endian order - we just
                    // convert it as "UTF-16" and let the conversion handle the
                    // BOM as that way we avoid the copying overhead of erasing
                    // 2 bytes from the start of dump.
                    convert_to_utf8(dump, "UTF-16");
                }
                xpsparser.parse(dump);
                dump = xpsparser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/csv") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }

            generate_sample_from_csv(dump, sample);
        } else if (mimetype == "image/svg+xml") {
            SvgParser svgparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            svgparser.parse(text);
            dump = svgparser.dump;
            title = svgparser.title;
            keywords = svgparser.keywords;
            // FIXME: topic = svgparser.topic;
            author = svgparser.author;
        } else if (mimetype == "application/vnd.debian.binary-package" ||
                   mimetype == "application/x-debian-package") {
            string cmd("dpkg-deb -f");
            append_filename_argument(cmd, file);
            cmd += " Description";
            const string & desc = stdout_to_string(cmd, false);
            // First line is short description, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/x-redhat-package-manager" ||
                   mimetype == "application/x-rpm") {
            string cmd("rpm -q --qf '%{SUMMARY}\\n%{DESCRIPTION}' -p");
            append_filename_argument(cmd, file);
            const string & desc = stdout_to_string(cmd, false);
            // First line is summary, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/atom+xml") {
            AtomParser atomparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            atomparser.parse(text);
            dump = atomparser.dump;
            title = atomparser.title;
            keywords = atomparser.keywords;
            // FIXME: topic = atomparser.topic;
            author = atomparser.author;
        } else {
            // Don't know how to index this type.
            skip_unknown_mimetype(urlterm, context, mimetype,
                                  d.get_size(), d.get_mtime());
            return;
        }
        // Compute the MD5 of the file if we haven't already.
        if (md5.empty() && md5_file(file, md5, d.try_noatime()) == 0) {
            if (errno == ENOENT || errno == ENOTDIR) {
                skip(urlterm, context, "File removed during indexing",
                     d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
            } else {
                skip(urlterm, context, "failed to read file to calculate MD5 checksum",
                     d.get_size(), d.get_mtime());
            }
            return;
        }

        // Remove any trailing formfeeds, so we don't count them when deciding
        // whether we extracted any text (e.g. pdftotext outputs a formfeed
        // between each page, even for blank pages).

        // If dump contains only formfeeds, then trim_end will be string::npos
        // and ++trim_end will be 0, which is the correct new size.
        string::size_type trim_end = dump.find_last_not_of('\f');
        if (++trim_end != dump.size())
            dump.resize(trim_end);

        if (dump.empty()) {
            switch (empty_body) {
                case EMPTY_BODY_INDEX:
                    break;
                case EMPTY_BODY_WARN:
                    cout << "no text extracted from document body, "
                            "but indexing metadata anyway" << endl;
                    break;
                case EMPTY_BODY_SKIP:
                    skip(urlterm, context, "no text extracted from document body",
                         d.get_size(), d.get_mtime());
                    return;
            }
        }
        // Produce a sample.
        if (sample.empty()) {
            sample = generate_sample(dump, sample_size, "...", " ...");
        } else {
            sample = generate_sample(sample, sample_size, "...", " ...");
        }

        // Put the data in the document.
        if (record.empty()) {
            record = "url=";
        } else {
            record += "\nurl=";
        }
        record += url;
        record += "\nsample=";
        record += sample;
        if (!title.empty()) {
            record += "\ncaption=";
            record += generate_sample(title, title_size, "...", " ...");
        }
        if (!author.empty()) {
            record += "\nauthor=";
            record += author;
        }
        record += "\ntype=";
        record += mimetype;
        time_t mtime = d.get_mtime();
        if (mtime != static_cast<time_t>(-1)) {
            record += "\nmodtime=";
            record += str(mtime);
        }
        if (created != static_cast<time_t>(-1)) {
            record += "\ncreated=";
            record += str(created);
        }
        off_t size = d.get_size();
        record += "\nsize=";
        record += str(size);
        newdocument.set_data(record);
        // Index the title, document text, keywords and topic.
        indexer.set_document(newdocument);
        if (!title.empty()) {
            indexer.index_text(title, 5, "S");
            indexer.increase_termpos(100);
        }
        if (!dump.empty()) {
            indexer.index_text(dump);
        }
        if (!keywords.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(keywords);
        }
        if (!topic.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(topic, 1, "B");
        }

        // Index the leafname of the file.
        {
            indexer.increase_termpos(100);
            string leaf = d.leafname();
            string::size_type dot = leaf.find_last_of('.');
            if (dot != string::npos && leaf.size() - dot - 1 <= max_ext_len)
                leaf.resize(dot);
            indexer.index_text(leaf, 1, "F");

            // Also index with underscores and ampersands replaced by spaces.
            bool modified = false;
            string::size_type rep = 0;
            while ((rep = leaf.find_first_of("_&", rep)) != string::npos) {
                leaf[rep++] = ' ';
                modified = true;
            }
            if (modified) {
                indexer.increase_termpos(100);
                indexer.index_text(leaf, 1, "F");
            }
        }
        if (!author.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(author, 1, "A");
        }

        // mimeType:
        newdocument.add_boolean_term("T" + mimetype);

        newdocument.add_boolean_term(site_term);

        if (!host_term.empty())
            newdocument.add_boolean_term(host_term);

        if (date_terms) {
            struct tm *tm = localtime(&mtime);
            string date_term = "D";
            date_term += date_to_string(tm->tm_year + 1900,
                                        tm->tm_mon + 1,
                                        tm->tm_mday);
            newdocument.add_boolean_term(date_term); // Date (YYYYMMDD)
            date_term.resize(7);
            date_term[0] = 'M';
            newdocument.add_boolean_term(date_term); // Month (YYYYMM)
            date_term.resize(5);
            date_term[0] = 'Y';
            newdocument.add_boolean_term(date_term); // Year (YYYY)
        }
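        // For example, an mtime falling on 2017-07-04 (local time) yields
        // the boolean terms "D20170704", "M201707" and "Y2017", allowing
        // filtering by day, month or year.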
        newdocument.add_boolean_term(urlterm); // Url

        // Add mtime as a value to allow "sort by date".
        newdocument.add_value(VALUE_LASTMOD,
                              int_to_binary_string(uint32_t(mtime)));
        if (use_ctime) {
            // Add ctime as a value to track modifications.
            time_t ctime = d.get_ctime();
            newdocument.add_value(VALUE_CTIME,
                                  int_to_binary_string(uint32_t(ctime)));
        }

        // Add MD5 as a value to allow duplicate documents to be collapsed
        // together.
        newdocument.add_value(VALUE_MD5, md5);

        // Add the file size as a value to allow "sort by size" and size
        // ranges.
        newdocument.add_value(VALUE_SIZE,
                              Xapian::sortable_serialise(size));

        bool inc_tag_added = false;
        if (d.is_other_readable()) {
            inc_tag_added = true;
            newdocument.add_boolean_term("I*");
        } else if (d.is_group_readable()) {
            const char * group = d.get_group();
            if (group) {
                newdocument.add_boolean_term(string("I#") + group);
            }
        }
        const char * owner = d.get_owner();
        if (owner) {
            newdocument.add_boolean_term(string("O") + owner);
            if (!inc_tag_added && d.is_owner_readable())
                newdocument.add_boolean_term(string("I@") + owner);
        }

        string ext_term("E");
        for (string::const_iterator i = ext.begin(); i != ext.end(); ++i) {
            char ch = *i;
            if (ch >= 'A' && ch <= 'Z')
                ch |= 32;
            ext_term += ch;
        }
        newdocument.add_boolean_term(ext_term);
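        // e.g. extension "PDF" produces the boolean term "Epdf" - ASCII
        // letters are lower-cased (by setting bit 0x20) so E-prefixed terms
        // are case-insensitive for ASCII extensions.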
        index_add_document(urlterm, last_altered, did, newdocument);
    } catch (ReadError) {
        skip(urlterm, context, string("can't read file: ") + strerror(errno),
             d.get_size(), d.get_mtime());
    } catch (NoSuchFilter) {
        string filter_entry;
        if (cmd_it != commands.end()) {
            filter_entry = cmd_it->first;
        } else {
            filter_entry = mimetype;
        }
        string m = "Filter for \"";
        m += filter_entry;
        m += "\" not installed";
        skip(urlterm, context, m, d.get_size(), d.get_mtime());
        commands[filter_entry] = Filter();
    } catch (FileNotFound) {
        skip(urlterm, context, "File removed during indexing",
             d.get_size(), d.get_mtime(),
             SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
    } catch (const std::string & error) {
        skip(urlterm, context, error, d.get_size(), d.get_mtime());
    }
}
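
// The postlist of the empty term below iterates every document in the
// database, so skip_to() lets us check whether a docid we never saw during
// this run still exists before deleting it.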
void
index_handle_deletion()
{
    if (updated.empty() || old_docs_not_seen == 0) return;

    if (verbose) {
        cout << "Deleting " << old_docs_not_seen << " old documents which weren't found" << endl;
    }
    Xapian::PostingIterator alldocs = db.postlist_begin(string());
    Xapian::docid did = *alldocs;
    while (did < updated.size()) {
        if (!updated[did]) {
            alldocs.skip_to(did);
            if (alldocs == db.postlist_end(string()))
                break;
            if (*alldocs != did) {
                // Document #did didn't exist before we started.
                did = *alldocs;
                continue;
            }
            db.delete_document(did);
            if (--old_docs_not_seen == 0)
                break;
        }
        ++did;
    }
}
void
index_commit()
{
    db.commit();
}

void
index_done()
{
    // If we created a temporary directory then delete it.
    remove_tmpdir();
}