/** @file index_file.cc
 * @brief Handle indexing a document from a file
 */
/* Copyright 1999,2000,2001 BrightStation PLC
 * Copyright 2001,2005 James Aylett
 * Copyright 2001,2002 Ananova Ltd
 * Copyright 2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Olly Betts
 * Copyright 2009 Frank J Bruzzaniti
 * Copyright 2012 Mihai Bivol
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
 * USA
 */

#include <config.h>

#include "index_file.h"

#include <algorithm>
#include <iostream>
#include <limits>
#include <string>
#include <map>
#include <vector>

#include <sys/types.h>
#include "safeunistd.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "safefcntl.h"
#include "safeerrno.h"
#include <ctime>

#include <xapian.h>

#include "append_filename_arg.h"
#include "atomparse.h"
#include "diritor.h"
#include "failed.h"
#include "md5wrap.h"
#include "metaxmlparse.h"
#include "mimemap.h"
#include "msxmlparse.h"
#include "myhtmlparse.h"
#include "opendocparse.h"
#include "pkglibbindir.h"
#include "runfilter.h"
#include "sample.h"
#include "str.h"
#include "stringutils.h"
#include "svgparse.h"
#include "tmpdir.h"
#include "utf8convert.h"
#include "utils.h"
#include "values.h"
#include "xmlparse.h"
#include "xlsxparse.h"
#include "xpsxmlparse.h"

using namespace std;

static Xapian::WritableDatabase db;
static Xapian::TermGenerator indexer;

static Xapian::doccount old_docs_not_seen;
static Xapian::docid old_lastdocid;
static vector<bool> updated;

static bool verbose;
static bool retry_failed;
static bool use_ctime;
static dup_action_type dup_action;
static bool ignore_exclusions;
static bool description_as_sample;
static bool date_terms;

static time_t last_altered_max;
static size_t sample_size;
static size_t title_size;
static size_t max_ext_len;

static empty_body_type empty_body;

static string root;
static string site_term, host_term;

static Failed failed;

map<string, Filter> commands;

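// Mark docid `did` from the pre-existing database as seen on this run, so
// that index_handle_deletion() won't delete it.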
static void
mark_as_seen(Xapian::docid did)
{
    if (usual(did < updated.size() && !updated[did])) {
        updated[did] = true;
        --old_docs_not_seen;
    }
}

void
skip(const string & urlterm, const string & context, const string & msg,
     off_t size, time_t last_mod, unsigned flags)
{
    failed.add(urlterm, last_mod, size);

    if (!verbose || (flags & SKIP_SHOW_FILENAME)) {
        if (!verbose && (flags & SKIP_VERBOSE_ONLY)) return;
        cout << context << ": ";
    }

    cout << "Skipping - " << msg << endl;
}

static void
skip_cmd_failed(const string & urlterm, const string & context,
                const string & cmd, off_t size, time_t last_mod)
{
    skip(urlterm, context, "\"" + cmd + "\" failed", size, last_mod);
}

static void
skip_meta_tag(const string & urlterm, const string & context,
              off_t size, time_t last_mod)
{
    skip(urlterm, context, "indexing disallowed by meta tag", size, last_mod);
}

static void
skip_unknown_mimetype(const string & urlterm, const string & context,
                      const string & mimetype, off_t size, time_t last_mod)
{
    skip(urlterm, context, "unknown MIME type '" + mimetype + "'",
         size, last_mod);
}

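// Register the default external filters, mapping MIME types to the
// commands used to extract text from them.  Further filters can be
// registered with index_command() - for illustration, a hypothetical
// custom filter might look like:
//
//     index_command("application/x-foo", Filter("foo2text", false));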
void
index_add_default_filters()
{
    index_command("application/msword", Filter("antiword -mUTF-8.txt", false));
    index_command("application/vnd.ms-excel",
                  Filter("xls2csv -c' ' -q0 -dutf-8", false));
    index_command("application/vnd.ms-powerpoint",
                  Filter("catppt -dutf-8", false));
    // Looking at the source of wpd2html and wpd2text I think both output
    // UTF-8, but it's hard to be sure without sample Unicode .wpd files
    // as they don't seem to be at all well documented.
    index_command("application/vnd.wordperfect", Filter("wpd2text", false));
    // wps2text produces UTF-8 output from the sample files I've tested.
    index_command("application/vnd.ms-works", Filter("wps2text", false));
    // Output is UTF-8 according to "man djvutxt".  Generally this seems to
    // be true, though some examples from djvu.org generate isolated byte
    // 0x95 in a context which suggests it might be intended to be a bullet
    // (as it is in CP1250).
    index_command("image/vnd.djvu", Filter("djvutxt", false));
    index_command("text/markdown", Filter("markdown", "text/html", false));
    // The --text option unhelpfully converts all non-ASCII characters to "?"
    // so we use --html instead, which produces HTML entities.  The --nopict
    // option suppresses exporting picture files as pictNNNN.wmf in the
    // current directory.  Note that this option was ignored in some older
    // versions, but it was fixed in unrtf 0.20.4.
    index_command("text/rtf",
                  Filter("unrtf --nopict --html 2>/dev/null", "text/html",
                         false));
    index_command("text/x-rst", Filter("rst2html", "text/html", false));
    index_command("application/x-mspublisher",
                  Filter("pub2xhtml", "text/html", false));
    index_command("application/vnd.ms-outlook",
                  Filter(get_pkglibbindir() + "/outlookmsg2html", "text/html",
                         false));
    // pod2text's output character set doesn't seem to be documented, but from
    // inspecting the source it looks like it's probably iso-8859-1.
    index_command("text/x-perl",
                  Filter("pod2text", "text/plain", "iso-8859-1", false));
    // FIXME: -e0 means "UTF-8", but that results in "fi", "ff", "ffi", etc
    // appearing as single ligatures.  For European languages, it's actually
    // better to use -e2 (ISO-8859-1) and then convert, so let's do that for
    // now until we handle Unicode "compatibility decompositions".
    index_command("application/x-dvi",
                  Filter("catdvi -e2 -s", "text/plain", "iso-8859-1", false));
    // Simplistic - ought to look in index.rdf files for filename and character
    // set.
    index_command("application/x-maff",
                  Filter("unzip -p %f '*/*.*htm*'", "text/html", "iso-8859-1",
                         false));
    index_command("application/x-mimearchive",
                  Filter(get_pkglibbindir() + "/mhtml2html", "text/html",
                         false));
    index_command("message/news",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("message/rfc822",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("text/vcard",
                  Filter(get_pkglibbindir() + "/vcard2text", false));
}

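// Open (or create) the database at dbpath and initialise the global
// indexing state from the options passed in by the caller.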
void
index_init(const string & dbpath, const Xapian::Stem & stemmer,
           const string & root_, const string & site_term_,
           const string & host_term_,
           empty_body_type empty_body_, dup_action_type dup_action_,
           size_t sample_size_, size_t title_size_, size_t max_ext_len_,
           bool overwrite, bool retry_failed_,
           bool delete_removed_documents, bool verbose_, bool use_ctime_,
           bool spelling, bool ignore_exclusions_, bool description_as_sample_,
           bool date_terms_)
{
    root = root_;
    site_term = site_term_;
    host_term = host_term_;
    empty_body = empty_body_;
    dup_action = dup_action_;
    sample_size = sample_size_;
    title_size = title_size_;
    max_ext_len = max_ext_len_;
    verbose = verbose_;
    use_ctime = use_ctime_;
    ignore_exclusions = ignore_exclusions_;
    description_as_sample = description_as_sample_;
    date_terms = date_terms_;

    if (!overwrite) {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OPEN);
        old_docs_not_seen = db.get_doccount();
        old_lastdocid = db.get_lastdocid();
        if (delete_removed_documents) {
            // + 1 so that old_lastdocid is a valid subscript.
            updated.resize(old_lastdocid + 1);
        }
        try {
            Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
            string ubound = db.get_value_upper_bound(slot);
            if (!ubound.empty())
                last_altered_max = binary_string_to_int(ubound);
        } catch (const Xapian::UnimplementedError &) {
            numeric_limits<time_t> n;
            last_altered_max = n.max();
        }
    } else {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OVERWRITE);
    }

    if (spelling) {
        indexer.set_database(db);
        indexer.set_flags(indexer.FLAG_SPELLING);
    }
    indexer.set_stemmer(stemmer);

    runfilter_init();

    failed.init(db);

    if (overwrite) {
        // There are no failures to retry, so setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = true;
    } else if (retry_failed_) {
        failed.clear();
        retry_failed = true;
    } else {
        // If there are no existing failures, setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = failed.empty();
    }
}

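// Parse a "Field: value" line from pdfinfo output.  `field` must include
// the trailing colon and `len` its length; leading spaces and any trailing
// '\r' are stripped from the value, so e.g. the line "Author:  Jane\r"
// parsed with field "Author:" sets out to "Jane".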
static void
parse_pdfinfo_field(const char * p, const char * end, string & out,
                    const char * field, size_t len)
{
    if (size_t(end - p) > len && memcmp(p, field, len) == 0) {
        p += len;
        while (p != end && *p == ' ')
            ++p;
        if (p != end && (end[-1] != '\r' || --end != p))
            out.assign(p, end - p);
    }
}

#define PARSE_PDFINFO_FIELD(P, END, OUT, FIELD) \
    parse_pdfinfo_field((P), (END), (OUT), FIELD":", CONST_STRLEN(FIELD) + 1)

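// Run pdfinfo on `file` and extract the Author, Keywords, Subject and
// Title fields from its output.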
static void
get_pdf_metainfo(const string & file, string & author, string & title,
                 string & keywords, string & topic)
{
    try {
        string cmd = "pdfinfo -enc UTF-8";
        append_filename_argument(cmd, file);
        string pdfinfo = stdout_to_string(cmd, false);

        const char * p = pdfinfo.data();
        const char * end = p + pdfinfo.size();
        while (p != end) {
            const char * start = p;
            p = static_cast<const char *>(memchr(p, '\n', end - p));
            const char * eol;
            if (p) {
                eol = p;
                ++p;
            } else {
                p = eol = end;
            }
            switch (*start) {
                case 'A':
                    PARSE_PDFINFO_FIELD(start, eol, author, "Author");
                    break;
                case 'K':
                    PARSE_PDFINFO_FIELD(start, eol, keywords, "Keywords");
                    break;
                case 'S':
                    PARSE_PDFINFO_FIELD(start, eol, topic, "Subject");
                    break;
                case 'T':
                    PARSE_PDFINFO_FIELD(start, eol, title, "Title");
                    break;
            }
        }
    } catch (ReadError) {
        // It's probably best to index the document even if pdfinfo fails.
    }
}

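// Flatten CSV data into a plain text sample of at most sample_size bytes,
// treating commas as word separators and handling CSV-style quoting (a
// quoted field may contain commas, and "" within quotes is a literal '"').
// For example, the row  "a ""b""",c  yields the sample  a "b" c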
static void
generate_sample_from_csv(const string & csv_data, string & sample)
{
    // Add 3 to allow for a 4 byte utf-8 sequence being appended when
    // output is sample_size - 1 bytes long.  Use csv_data.size() if smaller
    // since the user might reasonably set sample_size really high.
    sample.reserve(min(sample_size + 3, csv_data.size()));
    size_t last_word_end = 0;
    bool in_space = true;
    bool in_quotes = false;
    for (Xapian::Utf8Iterator i(csv_data); i != Xapian::Utf8Iterator(); ++i) {
        unsigned ch = *i;

        if (!in_quotes) {
            // If not already in double quotes, '"' starts quoting and
            // ',' starts a new field.
            if (ch == '"') {
                in_quotes = true;
                continue;
            }
            if (ch == ',')
                ch = ' ';
        } else if (ch == '"') {
            // In double quotes, '"' either ends double quotes, or
            // if followed by another '"', means a literal '"'.
            if (++i == Xapian::Utf8Iterator())
                break;
            ch = *i;
            if (ch != '"') {
                in_quotes = false;
                if (ch == ',')
                    ch = ' ';
            }
        }

        if (ch <= ' ' || ch == 0xa0) {
            // FIXME: if all the whitespace characters between two
            // words are 0xa0 (non-breaking space) then perhaps we
            // should output 0xa0.
            if (in_space)
                continue;
            last_word_end = sample.size();
            sample += ' ';
            in_space = true;
        } else {
            Xapian::Unicode::append_utf8(sample, ch);
            in_space = false;
        }

        if (sample.size() >= sample_size) {
            // Need to truncate sample.
            if (last_word_end <= sample_size / 2) {
                // Monster word!  We'll have to just split it.
                sample.replace(sample_size - 3, string::npos, "...", 3);
            } else {
                sample.replace(last_word_end, string::npos, " ...", 4);
            }
            break;
        }
    }
}

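// Check whether urlterm is already indexed and up-to-date.  Returns true
// if the existing entry can be kept as-is (setting did to its docid), or
// false if the document needs (re)indexing.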
static bool
index_check_existing(const string & urlterm, time_t last_altered,
                     Xapian::docid & did)
{
    switch (dup_action) {
        case DUP_SKIP: {
            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                if (verbose)
                    cout << "already indexed, not updating" << endl;
                did = *p;
                mark_as_seen(did);
                return true;
            }
            break;
        }
        case DUP_CHECK_LAZILY: {
            // If last_altered > last_altered_max, we know for sure that the
            // file is new or updated.
            if (last_altered > last_altered_max) {
                return false;
            }

            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                did = *p;
                Xapian::Document doc = db.get_document(did);
                Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
                string value = doc.get_value(slot);
                time_t old_last_altered = binary_string_to_int(value);
                if (last_altered <= old_last_altered) {
                    if (verbose)
                        cout << "already indexed" << endl;
                    // The docid should be in updated - the only valid
                    // exception is if the URL was long and hashed to the
                    // same URL as an existing document indexed in the same
                    // batch.
                    mark_as_seen(did);
                    return true;
                }
            }
            break;
        }
    }
    return false;
}

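// Add or update the document for urlterm.  did is non-zero if
// index_check_existing() already looked up an existing entry for it.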
void
index_add_document(const string & urlterm, time_t last_altered,
                   Xapian::docid did, const Xapian::Document & doc)
{
    if (dup_action != DUP_SKIP) {
        // If this document has already been indexed, update the existing
        // entry.
        if (did) {
            // We already found out the document id above.
            db.replace_document(did, doc);
        } else if (last_altered <= last_altered_max) {
            // We checked for the UID term and didn't find it.
            did = db.add_document(doc);
        } else {
            did = db.replace_document(urlterm, doc);
        }
        mark_as_seen(did);
        if (verbose) {
            if (did <= old_lastdocid) {
                cout << "updated" << endl;
            } else {
                cout << "added" << endl;
            }
        }
    } else {
        // If this were a duplicate, we'd have skipped it above.
        db.add_document(doc);
        if (verbose)
            cout << "added" << endl;
    }
}

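// Extract the text from file (which has MIME type mimetype), either with
// built-in support or via an external filter command, then index it into
// newdocument together with metadata from DirectoryIterator d.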
void
index_mimetype(const string & file, const string & urlterm, const string & url,
               const string & ext,
               const string & mimetype, DirectoryIterator & d,
               Xapian::Document & newdocument,
               string record)
{
    string context(file, root.size(), string::npos);

    // FIXME: We could be cleverer here and check mtime too when use_ctime is
    // set - if the ctime has changed but the mtime is unchanged, we can just
    // update the existing Document and avoid having to re-extract text, etc.
    time_t last_altered = use_ctime ? d.get_ctime() : d.get_mtime();

    Xapian::docid did = 0;
    if (index_check_existing(urlterm, last_altered, did))
        return;

    if (!retry_failed) {
        // We only store and check the mtime (last modified) - a change to the
        // metadata won't generally cause a previous failure to now work
        // (FIXME: except permissions).
        time_t failed_last_mod;
        off_t failed_size;
        if (failed.contains(urlterm, failed_last_mod, failed_size)) {
            if (d.get_mtime() <= failed_last_mod &&
                d.get_size() == failed_size) {
                if (verbose)
                    cout << "failed to extract text on earlier run" << endl;
                return;
            }
            // The file has changed, so remove the entry for it.  If it fails
            // again on this attempt, we'll add a new one.
            failed.del(urlterm);
        }
    }

    if (verbose) cout << flush;

    string author, title, sample, keywords, topic, dump;
    string md5;
    time_t created = time_t(-1);

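    // Look up the filter command for this MIME type; if there's no exact
    // match, fall back to "type/*", then "*/*", then "*".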
    map<string, Filter>::const_iterator cmd_it = commands.find(mimetype);
    if (cmd_it == commands.end()) {
        size_t slash = mimetype.find('/');
        if (slash != string::npos) {
            string wildtype(mimetype, 0, slash + 2);
            wildtype[slash + 1] = '*';
            cmd_it = commands.find(wildtype);
            if (cmd_it == commands.end()) {
                cmd_it = commands.find("*/*");
            }
        }
        if (cmd_it == commands.end()) {
            cmd_it = commands.find("*");
        }
    }

    try {
        if (cmd_it != commands.end()) {
            // Easy "run a command and read text or HTML from stdout or a
            // temporary file" cases.
            string cmd = cmd_it->second.cmd;
            if (cmd.empty()) {
                skip(urlterm, context, "required filter not installed",
                     d.get_size(), d.get_mtime(), SKIP_VERBOSE_ONLY);
                return;
            }
            if (cmd == "false") {
                // Allow setting 'false' as a filter to mean that a MIME type
                // should be quietly ignored.
                string m = "ignoring MIME type '";
                m += cmd_it->first;
                m += "'";
                skip(urlterm, context, m, d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY);
                return;
            }
            bool use_shell = cmd_it->second.use_shell();
            bool substituted = false;
            string tmpout;
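
            // Scan the command template for placeholders: %f is replaced
            // by the (escaped) input filename, %t by a temporary output
            // filename, and %% by a literal '%' (e.g. the default
            // application/x-maff filter uses "unzip -p %f '*/*.*htm*'").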
            size_t pcent = 0;
            while (true) {
                pcent = cmd.find('%', pcent);
                if (pcent >= cmd.size() - 1)
                    break;
                switch (cmd[pcent + 1]) {
                    case '%': // %% -> %.
                        cmd.erase(++pcent, 1);
                        break;
                    case 'f': { // %f -> escaped filename.
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, file);
                        // Remove the space append_filename_argument() adds
                        // before the argument - the command string either
                        // includes one, or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    case 't': { // %t -> temporary output file.
                        if (tmpout.empty()) {
                            // Use a temporary file with a suitable extension
                            // in case the command cares, and for more helpful
                            // error messages from the command.
                            if (cmd_it->second.output_type == "text/html") {
                                tmpout = get_tmpfile("tmp.html");
                            } else {
                                tmpout = get_tmpfile("tmp.txt");
                            }
                        }
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, tmpout);
                        // Remove the space append_filename_argument() adds
                        // before the argument - the command string either
                        // includes one, or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    default:
                        // Leave anything else alone for now.
                        pcent += 2;
                        break;
                }
            }
            if (!substituted && cmd != "true") {
                // If no %f, append the filename to the command.
                append_filename_argument(cmd, file);
            }
            try {
                if (!tmpout.empty()) {
                    // Output in temporary file.
                    (void)stdout_to_string(cmd, use_shell);
                    if (!load_file(tmpout, dump)) {
                        throw ReadError("Couldn't read output file");
                    }
                    unlink(tmpout.c_str());
                } else if (cmd == "true") {
                    // Ignore the file's contents, just index metadata from the
                    // filing system.
                } else {
                    // Output on stdout.
                    dump = stdout_to_string(cmd, use_shell);
                }
                const string & charset = cmd_it->second.output_charset;
                if (cmd_it->second.output_type == "text/html") {
                    MyHtmlParser p;
                    p.ignore_metarobots();
                    p.description_as_sample = description_as_sample;
                    try {
                        p.parse_html(dump, charset, false);
                    } catch (const string & newcharset) {
                        p.reset();
                        p.ignore_metarobots();
                        p.description_as_sample = description_as_sample;
                        p.parse_html(dump, newcharset, true);
                    } catch (ReadError) {
                        skip_cmd_failed(urlterm, context, cmd,
                                        d.get_size(), d.get_mtime());
                        return;
                    }
                    dump = p.dump;
                    title = p.title;
                    keywords = p.keywords;
                    topic = p.topic;
                    sample = p.sample;
                    author = p.author;
                    created = p.created;
                } else if (!charset.empty()) {
                    convert_to_utf8(dump, charset);
                }
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/html" || mimetype == "text/x-php") {
            const string & text = d.file_to_string();
            MyHtmlParser p;
            if (ignore_exclusions) p.ignore_metarobots();
            p.description_as_sample = description_as_sample;
            try {
                // Default HTML character set is latin 1, though not specifying
                // one is deprecated these days.
                p.parse_html(text, "iso-8859-1", false);
            } catch (const string & newcharset) {
                p.reset();
                if (ignore_exclusions) p.ignore_metarobots();
                p.description_as_sample = description_as_sample;
                p.parse_html(text, newcharset, true);
            }
            if (!p.indexing_allowed) {
                skip_meta_tag(urlterm, context,
                              d.get_size(), d.get_mtime());
                return;
            }
            dump = p.dump;
            title = p.title;
            keywords = p.keywords;
            topic = p.topic;
            sample = p.sample;
            author = p.author;
            created = p.created;
            md5_string(text, md5);
        } else if (mimetype == "text/plain") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }
        } else if (mimetype == "application/pdf") {
            string cmd = "pdftotext -enc UTF-8";
            append_filename_argument(cmd, file);
            cmd += " -";
            try {
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
            get_pdf_metainfo(file, author, title, keywords, topic);
        } else if (mimetype == "application/postscript") {
            // There simply doesn't seem to be a Unicode capable PostScript to
            // text converter (e.g. pstotext always outputs ISO-8859-1).  The
            // only solution seems to be to convert via PDF using ps2pdf and
            // then pdftotext.  This gives plausible looking UTF-8 output for
            // some Chinese PostScript files I found using Google.  It also has
            // the benefit of allowing us to extract meta information from
            // PostScript files.
            string tmpfile = get_tmpfile("tmp.pdf");
            if (tmpfile.empty()) {
                // FIXME: should this be fatal?  Or disable indexing postscript?
                string msg = "Couldn't create temporary directory (";
                msg += strerror(errno);
                msg += ")";
                skip(urlterm, context, msg,
                     d.get_size(), d.get_mtime());
                return;
            }
            string cmd = "ps2pdf";
            append_filename_argument(cmd, file);
            append_filename_argument(cmd, tmpfile);
            try {
                (void)stdout_to_string(cmd, false);
                cmd = "pdftotext -enc UTF-8";
                append_filename_argument(cmd, tmpfile);
                cmd += " -";
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                unlink(tmpfile.c_str());
                return;
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            try {
                get_pdf_metainfo(tmpfile, author, title, keywords, topic);
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            unlink(tmpfile.c_str());
        } else if (startswith(mimetype, "application/vnd.sun.xml.") ||
                   startswith(mimetype, "application/vnd.oasis.opendocument."))
        {
            // Inspired by http://mjr.towers.org.uk/comp/sxw2text
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " content.xml ; unzip -p";
            append_filename_argument(cmd, file);
            cmd += " styles.xml";
            try {
                OpenDocParser parser;
                parser.parse(stdout_to_string(cmd, true));
                dump = parser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }

            cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " meta.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (startswith(mimetype, "application/vnd.openxmlformats-officedocument.")) {
            const char * args = NULL;
            string tail(mimetype, 46);
            if (startswith(tail, "wordprocessingml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no headers or
                // no footers.
                args = " word/document.xml 'word/header*.xml' 'word/footer*.xml' 2>/dev/null";
            } else if (startswith(tail, "spreadsheetml.")) {
                // Extract the shared string table first, so our parser can
                // grab those ready for parsing the sheets which will reference
                // the shared strings.
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/styles.xml xl/workbook.xml xl/sharedStrings.xml ; unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/worksheets/sheet\\*.xml";
                try {
                    XlsxParser parser;
                    parser.parse(stdout_to_string(cmd, true));
                    dump = parser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            } else if (startswith(tail, "presentationml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no notesSlides
                // or comments.
                args = " 'ppt/slides/slide*.xml' 'ppt/notesSlides/notesSlide*.xml' 'ppt/comments/comment*.xml' 2>/dev/null";
            } else {
                // Don't know how to index this type.
                skip_unknown_mimetype(urlterm, context, mimetype,
                                      d.get_size(), d.get_mtime());
                return;
            }

            if (args) {
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += args;
                try {
                    MSXmlParser xmlparser;
                    // Treat exit status 11 from unzip as success - this is
                    // what we get if one of the listed filenames to extract
                    // doesn't match anything in the zip file.
                    xmlparser.parse_xml(stdout_to_string(cmd, false, 11));
                    dump = xmlparser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            }

            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " docProps/core.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (mimetype == "application/x-abiword") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            const string & text = d.file_to_string();
            xmlparser.parse_xml(text);
            dump = xmlparser.dump;
            md5_string(text, md5);
        } else if (mimetype == "application/x-abiword-compressed") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            xmlparser.parse_xml(d.gzfile_to_string());
            dump = xmlparser.dump;
        } else if (mimetype == "application/vnd.ms-xpsdocument") {
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " 'Documents/1/Pages/*.fpage'";
            try {
                XpsXmlParser xpsparser;
                dump = stdout_to_string(cmd, false);
                // Look for Byte-Order Mark (BOM).
                if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                    // UTF-16 in big-endian/little-endian order - we just
                    // convert it as "UTF-16" and let the conversion handle the
                    // BOM as that way we avoid the copying overhead of erasing
                    // 2 bytes from the start of dump.
                    convert_to_utf8(dump, "UTF-16");
                }
                xpsparser.parse(dump);
                dump = xpsparser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/csv") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }

            generate_sample_from_csv(dump, sample);
        } else if (mimetype == "image/svg+xml") {
            SvgParser svgparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            svgparser.parse(text);
            dump = svgparser.dump;
            title = svgparser.title;
            keywords = svgparser.keywords;
            // FIXME: topic = svgparser.topic;
            author = svgparser.author;
        } else if (mimetype == "application/vnd.debian.binary-package" ||
                   mimetype == "application/x-debian-package") {
            string cmd("dpkg-deb -f");
            append_filename_argument(cmd, file);
            cmd += " Description";
            const string & desc = stdout_to_string(cmd, false);
            // First line is short description, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/x-redhat-package-manager" ||
                   mimetype == "application/x-rpm") {
            string cmd("rpm -q --qf '%{SUMMARY}\\n%{DESCRIPTION}' -p");
            append_filename_argument(cmd, file);
            const string & desc = stdout_to_string(cmd, false);
            // First line is summary, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/atom+xml") {
            AtomParser atomparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            atomparser.parse(text);
            dump = atomparser.dump;
            title = atomparser.title;
            keywords = atomparser.keywords;
            // FIXME: topic = atomparser.topic;
            author = atomparser.author;
        } else {
            // Don't know how to index this type.
            skip_unknown_mimetype(urlterm, context, mimetype,
                                  d.get_size(), d.get_mtime());
            return;
        }

        // Compute the MD5 of the file if we haven't already.
        if (md5.empty() && md5_file(file, md5, d.try_noatime()) == 0) {
            if (errno == ENOENT || errno == ENOTDIR) {
                skip(urlterm, context, "File removed during indexing",
                     d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
            } else {
                skip(urlterm, context,
                     "failed to read file to calculate MD5 checksum",
                     d.get_size(), d.get_mtime());
            }
            return;
        }

        // Remove any trailing formfeeds, so we don't take them into account
        // when checking whether we extracted any text (e.g. pdftotext outputs
        // a formfeed between each page, even for blank pages).
        //
        // If dump contains only formfeeds, then trim_end will be string::npos
        // and ++trim_end will be 0, which is the correct new size.
        string::size_type trim_end = dump.find_last_not_of('\f');
        if (++trim_end != dump.size())
            dump.resize(trim_end);

        if (dump.empty()) {
            switch (empty_body) {
                case EMPTY_BODY_INDEX:
                    break;
                case EMPTY_BODY_WARN:
                    cout << "no text extracted from document body, "
                            "but indexing metadata anyway" << endl;
                    break;
                case EMPTY_BODY_SKIP:
                    skip(urlterm, context,
                         "no text extracted from document body",
                         d.get_size(), d.get_mtime());
                    return;
            }
        }

        // Produce a sample.
        if (sample.empty()) {
            sample = generate_sample(dump, sample_size, "...", " ...");
        } else {
            sample = generate_sample(sample, sample_size, "...", " ...");
        }

        // Put the data in the document.
        if (record.empty()) {
            record = "url=";
        } else {
            record += "\nurl=";
        }
        record += url;
        record += "\nsample=";
        record += sample;
        if (!title.empty()) {
            record += "\ncaption=";
            record += generate_sample(title, title_size, "...", " ...");
        }
        if (!author.empty()) {
            record += "\nauthor=";
            record += author;
        }
        record += "\ntype=";
        record += mimetype;
        time_t mtime = d.get_mtime();
        if (mtime != static_cast<time_t>(-1)) {
            record += "\nmodtime=";
            record += str(mtime);
        }
        if (created != static_cast<time_t>(-1)) {
            record += "\ncreated=";
            record += str(created);
        }
        off_t size = d.get_size();
        record += "\nsize=";
        record += str(size);
        newdocument.set_data(record);

        // Index the title, document text, keywords and topic.
        indexer.set_document(newdocument);
        if (!title.empty()) {
            indexer.index_text(title, 5, "S");
            indexer.increase_termpos(100);
        }
        if (!dump.empty()) {
            indexer.index_text(dump);
        }
        if (!keywords.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(keywords);
        }
        if (!topic.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(topic, 1, "B");
        }

        // Index the leafname of the file.
        {
            indexer.increase_termpos(100);
            string leaf = d.leafname();
            string::size_type dot = leaf.find_last_of('.');
            if (dot != string::npos && leaf.size() - dot - 1 <= max_ext_len)
                leaf.resize(dot);
            indexer.index_text(leaf, 1, "F");

            // Also index with underscores and ampersands replaced by spaces.
            bool modified = false;
            string::size_type rep = 0;
            while ((rep = leaf.find_first_of("_&", rep)) != string::npos) {
                leaf[rep++] = ' ';
                modified = true;
            }
            if (modified) {
                indexer.increase_termpos(100);
                indexer.index_text(leaf, 1, "F");
            }
        }

        if (!author.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(author, 1, "A");
        }

        // mimeType:
        newdocument.add_boolean_term("T" + mimetype);

        newdocument.add_boolean_term(site_term);

        if (!host_term.empty())
            newdocument.add_boolean_term(host_term);

        if (date_terms) {
            struct tm * tm = localtime(&mtime);
            string date_term = "D";
            date_term += date_to_string(tm->tm_year + 1900,
                                        tm->tm_mon + 1,
                                        tm->tm_mday);
            newdocument.add_boolean_term(date_term); // Date (YYYYMMDD)
            date_term.resize(7);
            date_term[0] = 'M';
            newdocument.add_boolean_term(date_term); // Month (YYYYMM)
            date_term.resize(5);
            date_term[0] = 'Y';
            newdocument.add_boolean_term(date_term); // Year (YYYY)
        }

        newdocument.add_boolean_term(urlterm); // Url

        // Add mtime as a value to allow "sort by date".
        newdocument.add_value(VALUE_LASTMOD,
                              int_to_binary_string(uint32_t(mtime)));
        if (use_ctime) {
            // Add ctime as a value to track modifications.
            time_t ctime = d.get_ctime();
            newdocument.add_value(VALUE_CTIME,
                                  int_to_binary_string(uint32_t(ctime)));
        }

        // Add MD5 as a value to allow duplicate documents to be collapsed
        // together.
        newdocument.add_value(VALUE_MD5, md5);

        // Add the file size as a value to allow "sort by size" and size
        // ranges.
        newdocument.add_value(VALUE_SIZE,
                              Xapian::sortable_serialise(size));

        bool inc_tag_added = false;
        if (d.is_other_readable()) {
            inc_tag_added = true;
            newdocument.add_boolean_term("I*");
        } else if (d.is_group_readable()) {
            const char * group = d.get_group();
            if (group) {
                newdocument.add_boolean_term(string("I#") + group);
            }
        }

        const char * owner = d.get_owner();
        if (owner) {
            newdocument.add_boolean_term(string("O") + owner);
            if (!inc_tag_added && d.is_owner_readable())
                newdocument.add_boolean_term(string("I@") + owner);
        }

        string ext_term("E");
        for (string::const_iterator i = ext.begin(); i != ext.end(); ++i) {
            char ch = *i;
            if (ch >= 'A' && ch <= 'Z')
                ch |= 32;
            ext_term += ch;
        }
        newdocument.add_boolean_term(ext_term);

        index_add_document(urlterm, last_altered, did, newdocument);
    } catch (ReadError) {
        skip(urlterm, context, string("can't read file: ") + strerror(errno),
             d.get_size(), d.get_mtime());
    } catch (NoSuchFilter) {
        string filter_entry;
        if (cmd_it != commands.end()) {
            filter_entry = cmd_it->first;
        } else {
            filter_entry = mimetype;
        }
        string m = "Filter for \"";
        m += filter_entry;
        m += "\" not installed";
        skip(urlterm, context, m, d.get_size(), d.get_mtime());
        commands[filter_entry] = Filter();
    } catch (FileNotFound) {
        skip(urlterm, context, "File removed during indexing",
             d.get_size(), d.get_mtime(),
             SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
    } catch (const std::string & error) {
        skip(urlterm, context, error, d.get_size(), d.get_mtime());
    }
}

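// Delete any documents which were in the existing database but weren't
// seen during this run (only enabled when delete_removed_documents was
// passed to index_init()).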
void
index_handle_deletion()
{
    if (updated.empty() || old_docs_not_seen == 0) return;

    if (verbose) {
        cout << "Deleting " << old_docs_not_seen
             << " old documents which weren't found" << endl;
    }
    Xapian::PostingIterator alldocs = db.postlist_begin(string());
    Xapian::docid did = *alldocs;
    while (did < updated.size()) {
        if (!updated[did]) {
            alldocs.skip_to(did);
            if (alldocs == db.postlist_end(string()))
                break;
            if (*alldocs != did) {
                // Document #did didn't exist before we started.
                did = *alldocs;
                continue;
            }
            db.delete_document(did);
            if (--old_docs_not_seen == 0)
                break;
        }
        ++did;
    }
}

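// Commit any pending changes to the database.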
void
index_commit()
{
    db.commit();
}

void
index_done()
{
    // If we created a temporary directory then delete it.
    remove_tmpdir();
}