/** @file index_file.cc
 * @brief Handle indexing a document from a file
 */
/* Copyright 1999,2000,2001 BrightStation PLC
 * Copyright 2001,2005 James Aylett
 * Copyright 2001,2002 Ananova Ltd
 * Copyright 2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Olly Betts
 * Copyright 2009 Frank J Bruzzaniti
 * Copyright 2012 Mihai Bivol
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
 * USA
 */
#include <config.h>

#include "index_file.h"

#include <algorithm>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <vector>

#include <cstring>
#include <ctime>
#include <sys/types.h>
#include "safeunistd.h"
#include "safefcntl.h"
#include "safeerrno.h"

#include <xapian.h>

#include "append_filename_arg.h"
#include "atomparse.h"
#include "metaxmlparse.h"
#include "msxmlparse.h"
#include "myhtmlparse.h"
#include "opendocparse.h"
#include "pkglibbindir.h"
#include "runfilter.h"
#include "stringutils.h"
#include "utf8convert.h"
#include "xlsxparse.h"
#include "xpsxmlparse.h"

using namespace std;
static Xapian::WritableDatabase db;
static Xapian::TermGenerator indexer;

static Xapian::doccount old_docs_not_seen;
static Xapian::docid old_lastdocid;
static vector<bool> updated;

static bool verbose;
static bool retry_failed;
static bool use_ctime;
static dup_action_type dup_action;
static bool ignore_exclusions;
static bool description_as_sample;
static bool date_terms;

static time_t last_altered_max;
static size_t sample_size;
static size_t title_size;
static size_t max_ext_len;

static empty_body_type empty_body;

static string root;
static string site_term, host_term;

static Failed failed;

map<string, Filter> commands;
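// Each entry maps a MIME content-type - or a wildcard pattern such as
// "text/*" or "*" - to the Filter used to extract text from it.  For example,
// the default filter installed below for "text/rtf" amounts to roughly:
//
//     commands["text/rtf"] = Filter("unrtf --nopict --html 2>/dev/null",
//                                   "text/html", false);
//
// i.e. run that command on the file and parse whatever it writes to stdout as
// the stated output type (an entry can also carry an output charset).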
static void
mark_as_seen(Xapian::docid did)
{
    if (usual(did < updated.size() && !updated[did])) {
        updated[did] = true;
        --old_docs_not_seen;
    }
}
static void
skip(const string & urlterm, const string & context, const string & msg,
     off_t size, time_t last_mod, unsigned flags = 0)
{
    failed.add(urlterm, last_mod, size);

    if (!verbose || (flags & SKIP_SHOW_FILENAME)) {
        if (!verbose && (flags & SKIP_VERBOSE_ONLY)) return;
        cout << context << ": ";
    }

    cout << "Skipping - " << msg << endl;
}
static void
skip_cmd_failed(const string & urlterm, const string & context,
                const string & cmd, off_t size, time_t last_mod)
{
    skip(urlterm, context, "\"" + cmd + "\" failed", size, last_mod);
}
static void
skip_meta_tag(const string & urlterm, const string & context,
              off_t size, time_t last_mod)
{
    skip(urlterm, context, "indexing disallowed by meta tag", size, last_mod);
}
static void
skip_unknown_mimetype(const string & urlterm, const string & context,
                      const string & mimetype, off_t size, time_t last_mod)
{
    skip(urlterm, context, "unknown MIME type '" + mimetype + "'", size,
         last_mod);
}
void
index_add_default_filters()
{
    index_command("application/msword", Filter("antiword -mUTF-8.txt", false));
    index_command("application/vnd.ms-excel",
                  Filter("xls2csv -c' ' -q0 -dutf-8", false));
    index_command("application/vnd.ms-powerpoint",
                  Filter("catppt -dutf-8", false));
    // Looking at the source of wpd2html and wpd2text I think both output
    // UTF-8, but it's hard to be sure without sample Unicode .wpd files
    // as they don't seem to be at all well documented.
    index_command("application/vnd.wordperfect", Filter("wpd2text", false));
    // wps2text produces UTF-8 output from the sample files I've tested.
    index_command("application/vnd.ms-works", Filter("wps2text", false));
    // Output is UTF-8 according to "man djvutxt".  Generally this seems to
    // be true, though some examples from djvu.org generate isolated byte
    // 0x95 in a context which suggests it might be intended to be a bullet
    // (as it is in CP1250).
    index_command("image/vnd.djvu", Filter("djvutxt", false));
    index_command("text/markdown", Filter("markdown", "text/html", false));
    // The --text option unhelpfully converts all non-ASCII characters to "?"
    // so we use --html instead, which produces HTML entities.  The --nopict
    // option suppresses exporting picture files as pictNNNN.wmf in the current
    // directory.  Note that this option was ignored in some older versions,
    // but it was fixed in unrtf 0.20.4.
    index_command("text/rtf",
                  Filter("unrtf --nopict --html 2>/dev/null", "text/html",
                         false));
    index_command("text/x-rst", Filter("rst2html", "text/html", false));
    index_command("application/x-mspublisher",
                  Filter("pub2xhtml", "text/html", false));
    index_command("application/vnd.ms-outlook",
                  Filter(get_pkglibbindir() + "/outlookmsg2html", "text/html",
                         false));
    // pod2text's output character set doesn't seem to be documented, but from
    // inspecting the source it looks like it's probably iso-8859-1.
    index_command("text/x-perl",
                  Filter("pod2text", "text/plain", "iso-8859-1", false));
    // FIXME: -e0 means "UTF-8", but that results in "fi", "ff", "ffi", etc
    // appearing as single ligatures.  For European languages, it's actually
    // better to use -e2 (ISO-8859-1) and then convert, so let's do that for
    // now until we handle Unicode "compatibility decompositions".
    index_command("application/x-dvi",
                  Filter("catdvi -e2 -s", "text/plain", "iso-8859-1", false));
    // Simplistic - ought to look in index.rdf files for filename and character
    // set.
    index_command("application/x-maff",
                  Filter("unzip -p %f '*/*.*htm*'", "text/html", "iso-8859-1",
                         false));
    index_command("application/x-mimearchive",
                  Filter(get_pkglibbindir() + "/mhtml2html", "text/html",
                         false));
    index_command("message/news",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("message/rfc822",
                  Filter(get_pkglibbindir() + "/rfc822tohtml", "text/html",
                         false));
    index_command("text/vcard",
                  Filter(get_pkglibbindir() + "/vcard2text", false));
}
void
index_init(const string & dbpath, const Xapian::Stem & stemmer,
           const string & root_, const string & site_term_,
           const string & host_term_,
           empty_body_type empty_body_, dup_action_type dup_action_,
           size_t sample_size_, size_t title_size_, size_t max_ext_len_,
           bool overwrite, bool retry_failed_,
           bool delete_removed_documents, bool verbose_, bool use_ctime_,
           bool spelling, bool ignore_exclusions_, bool description_as_sample_,
           bool date_terms_)
{
    root = root_;
    site_term = site_term_;
    host_term = host_term_;
    empty_body = empty_body_;
    dup_action = dup_action_;
    sample_size = sample_size_;
    title_size = title_size_;
    max_ext_len = max_ext_len_;
    verbose = verbose_;
    use_ctime = use_ctime_;
    ignore_exclusions = ignore_exclusions_;
    description_as_sample = description_as_sample_;
    date_terms = date_terms_;

    if (!overwrite) {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OPEN);
        old_docs_not_seen = db.get_doccount();
        old_lastdocid = db.get_lastdocid();
        if (delete_removed_documents) {
            // + 1 so that old_lastdocid is a valid subscript.
            updated.resize(old_lastdocid + 1);
        }
        try {
            Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
            string ubound = db.get_value_upper_bound(slot);
            if (!ubound.empty())
                last_altered_max = binary_string_to_int(ubound);
        } catch (const Xapian::UnimplementedError &) {
            numeric_limits<time_t> n;
            last_altered_max = n.max();
        }
    } else {
        db = Xapian::WritableDatabase(dbpath, Xapian::DB_CREATE_OR_OVERWRITE);
    }

    if (spelling) {
        indexer.set_database(db);
        indexer.set_flags(indexer.FLAG_SPELLING);
    }
    indexer.set_stemmer(stemmer);

    failed.init(db);

    if (overwrite) {
        // There are no failures to retry, so setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = true;
    } else if (retry_failed_) {
        failed.clear();
        retry_failed = true;
    } else {
        // If there are no existing failures, setting this flag doesn't
        // change the outcome, but does mean we avoid the overhead of
        // checking for a previous failure.
        retry_failed = failed.empty();
    }
}
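// Note: index_init() leaves last_altered_max holding the newest last-modified
// (or ctime) value already present in the database.  With DUP_CHECK_LAZILY, a
// file whose timestamp is newer than last_altered_max must therefore be new or
// changed, and index_check_existing() below can tell that without doing a
// postlist lookup.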
static void
parse_pdfinfo_field(const char * p, const char * end, string & out,
                    const char * field, size_t len)
{
    if (size_t(end - p) > len && memcmp(p, field, len) == 0) {
        p += len;
        while (p != end && *p == ' ')
            ++p;
        if (p != end && (end[-1] != '\r' || --end != p))
            out.assign(p, end - p);
    }
}
#define PARSE_PDFINFO_FIELD(P, END, OUT, FIELD) \
    parse_pdfinfo_field((P), (END), (OUT), FIELD":", CONST_STRLEN(FIELD) + 1)
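// For example, given a line of pdfinfo output such as
//
//     Author:         Jane Example
//
// PARSE_PDFINFO_FIELD(start, eol, author, "Author") checks for the "Author:"
// prefix, skips the padding spaces, and copies the rest of the line (minus
// any trailing '\r') into author.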
static void
get_pdf_metainfo(const string & file, string &author, string &title,
                 string &keywords, string &topic)
{
    try {
        string cmd = "pdfinfo -enc UTF-8";
        append_filename_argument(cmd, file);
        string pdfinfo = stdout_to_string(cmd, false);

        const char * p = pdfinfo.data();
        const char * end = p + pdfinfo.size();
        while (p != end) {
            const char * start = p;
            p = static_cast<const char *>(memchr(p, '\n', end - p));
            const char * eol;
            if (p) {
                eol = p;
                ++p;
            } else {
                p = eol = end;
            }
            PARSE_PDFINFO_FIELD(start, eol, author, "Author");
            PARSE_PDFINFO_FIELD(start, eol, keywords, "Keywords");
            PARSE_PDFINFO_FIELD(start, eol, topic, "Subject");
            PARSE_PDFINFO_FIELD(start, eol, title, "Title");
        }
    } catch (ReadError) {
        // It's probably best to index the document even if pdfinfo fails.
    }
}
static void
generate_sample_from_csv(const string & csv_data, string & sample)
{
    // Add 3 to allow for a 4 byte utf-8 sequence being appended when
    // output is sample_size - 1 bytes long.  Use csv_data.size() if smaller
    // since the user might reasonably set sample_size really high.
    sample.reserve(min(sample_size + 3, csv_data.size()));
    size_t last_word_end = 0;
    bool in_space = true;
    bool in_quotes = false;
    for (Xapian::Utf8Iterator i(csv_data); i != Xapian::Utf8Iterator(); ++i) {
        unsigned ch = *i;

        if (!in_quotes) {
            // If not already in double quotes, '"' starts quoting and
            // ',' starts a new field.
            if (ch == '"') {
                in_quotes = true;
                continue;
            }
            if (ch == ',')
                ch = ' ';
        } else if (ch == '"') {
            // In double quotes, '"' either ends double quotes, or
            // if followed by another '"', means a literal '"'.
            if (++i == Xapian::Utf8Iterator())
                break;
            ch = *i;
            if (ch != '"') {
                in_quotes = false;
                if (ch == ',')
                    ch = ' ';
            }
        }

        if (ch <= ' ' || ch == 0xa0) {
            // FIXME: if all the whitespace characters between two
            // words are 0xa0 (non-breaking space) then perhaps we
            // should output 0xa0.
            if (!in_space) {
                in_space = true;
                last_word_end = sample.size();
                sample += ' ';
            }
        } else {
            in_space = false;
            Xapian::Unicode::append_utf8(sample, ch);
        }

        if (sample.size() >= sample_size) {
            // Need to truncate sample.
            if (last_word_end <= sample_size / 2) {
                // Monster word!  We'll have to just split it.
                sample.replace(sample_size - 3, string::npos, "...", 3);
            } else {
                sample.replace(last_word_end, string::npos, " ...", 4);
            }
            break;
        }
    }
}
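// For example, CSV data along the lines of
//
//     "Name","Notes"
//     "Alice","Works on ""indexing"""
//
// produces a sample roughly like: Name Notes Alice Works on "indexing"
// - quoting is stripped, whitespace and field separators collapse to single
// spaces, and the result is truncated with "..." once sample_size is reached.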
static bool
index_check_existing(const string & urlterm, time_t last_altered,
                     Xapian::docid & did)
{
    switch (dup_action) {
        case DUP_SKIP: {
            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                if (verbose)
                    cout << "already indexed, not updating" << endl;
                did = *p;
                mark_as_seen(did);
                return true;
            }
            break;
        }
        case DUP_CHECK_LAZILY: {
            // If last_altered > last_altered_max, we know for sure that the
            // file is new or updated.
            if (last_altered > last_altered_max) {
                return false;
            }

            Xapian::PostingIterator p = db.postlist_begin(urlterm);
            if (p != db.postlist_end(urlterm)) {
                did = *p;
                Xapian::Document doc = db.get_document(did);
                Xapian::valueno slot = use_ctime ? VALUE_CTIME : VALUE_LASTMOD;
                string value = doc.get_value(slot);
                time_t old_last_altered = binary_string_to_int(value);
                if (last_altered <= old_last_altered) {
                    if (verbose)
                        cout << "already indexed" << endl;
                    // The docid should be in updated - the only valid
                    // exception is if the URL was long and hashed to the
                    // same URL as an existing document indexed in the same
                    // batch.
                    mark_as_seen(did);
                    return true;
                }
            }
            break;
        }
    }
    return false;
}
static void
index_add_document(const string & urlterm, time_t last_altered,
                   Xapian::docid did, const Xapian::Document & doc)
{
    if (dup_action != DUP_SKIP) {
        // If this document has already been indexed, update the existing
        // entry.
        if (did) {
            // We already found out the document id above.
            db.replace_document(did, doc);
        } else if (last_altered <= last_altered_max) {
            // We checked for the UID term and didn't find it.
            did = db.add_document(doc);
        } else {
            did = db.replace_document(urlterm, doc);
        }
        mark_as_seen(did);
        if (verbose) {
            if (did <= old_lastdocid) {
                cout << "updated" << endl;
            } else {
                cout << "added" << endl;
            }
        }
    } else {
        // If this were a duplicate, we'd have skipped it above.
        db.add_document(doc);
        if (verbose)
            cout << "added" << endl;
    }
}
void
index_mimetype(const string & file, const string & urlterm, const string & url,
               const string & ext,
               const string & mimetype, DirectoryIterator & d,
               Xapian::Document & newdocument,
               string record)
{
    string context(file, root.size(), string::npos);

    // FIXME: We could be cleverer here and check mtime too when use_ctime is
    // set - if the ctime has changed but the mtime is unchanged, we can just
    // update the existing Document and avoid having to re-extract text, etc.
    time_t last_altered = use_ctime ? d.get_ctime() : d.get_mtime();

    Xapian::docid did = 0;
    if (index_check_existing(urlterm, last_altered, did))
        return;

    if (!retry_failed) {
        // We only store and check the mtime (last modified) - a change to the
        // metadata won't generally cause a previous failure to now work
        // (FIXME: except permissions).
        time_t failed_last_mod;
        off_t failed_size;
        if (failed.contains(urlterm, failed_last_mod, failed_size)) {
            if (d.get_mtime() <= failed_last_mod &&
                d.get_size() == failed_size) {
                if (verbose)
                    cout << "failed to extract text on earlier run" << endl;
                return;
            }
            // The file has changed, so remove the entry for it.  If it fails
            // again on this attempt, we'll add a new one.
            failed.del(urlterm);
        }
    }

    if (verbose) cout << flush;

    string author, title, sample, keywords, topic, dump;
    string md5;
    time_t created = time_t(-1);

    map<string, Filter>::const_iterator cmd_it = commands.find(mimetype);
    if (cmd_it == commands.end()) {
        size_t slash = mimetype.find('/');
        if (slash != string::npos) {
            string wildtype(mimetype, 0, slash + 2);
            wildtype[slash + 1] = '*';
            cmd_it = commands.find(wildtype);
            if (cmd_it == commands.end()) {
                cmd_it = commands.find("*/*");
            }
        }
        if (cmd_it == commands.end()) {
            cmd_it = commands.find("*");
        }
    }
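    // So for, say, "text/x-foo" the lookup order is: "text/x-foo", then
    // "text/*", then "*/*", then "*"; if none of those is configured, the
    // built-in handlers below deal with the MIME types they know about.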
    try {
        if (cmd_it != commands.end()) {
            // Easy "run a command and read text or HTML from stdout or a
            // temporary file" cases.
            string cmd = cmd_it->second.cmd;
            if (cmd.empty()) {
                skip(urlterm, context, "required filter not installed",
                     d.get_size(), d.get_mtime(), SKIP_VERBOSE_ONLY);
                return;
            }
            if (cmd == "false") {
                // Allow setting 'false' as a filter to mean that a MIME type
                // should be quietly ignored.
                string m = "ignoring MIME type '";
                m += cmd_it->first;
                m += "'";
                skip(urlterm, context, m, d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY);
                return;
            }
            bool use_shell = cmd_it->second.use_shell();
            bool substituted = false;
            string tmpout;
            size_t pcent = 0;
            while (true) {
                pcent = cmd.find('%', pcent);
                if (pcent >= cmd.size() - 1)
                    break;
                switch (cmd[pcent + 1]) {
                    case '%': // %% -> %.
                        cmd.erase(++pcent, 1);
                        break;
                    case 'f': { // %f -> escaped filename.
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, file);
                        // Remove the space append_filename_argument() adds before
                        // the argument - the command string either includes one,
                        // or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    case 't': { // %t -> temporary output file.
                        if (tmpout.empty()) {
                            // Use a temporary file with a suitable extension
                            // in case the command cares, and for more helpful
                            // error messages from the command.
                            if (cmd_it->second.output_type == "text/html") {
                                tmpout = get_tmpfile("tmp.html");
                            } else {
                                tmpout = get_tmpfile("tmp.txt");
                            }
                        }
                        substituted = true;
                        string tail(cmd, pcent + 2);
                        cmd.resize(pcent);
                        append_filename_argument(cmd, tmpout);
                        // Remove the space append_filename_argument() adds before
                        // the argument - the command string either includes one,
                        // or won't expect one (e.g. --input=%f).
                        cmd.erase(pcent, 1);
                        pcent = cmd.size();
                        cmd += tail;
                        break;
                    }
                    default:
                        // Leave anything else alone for now.
                        pcent += 2;
                        break;
                }
            }
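            // For example, the MAFF filter registered above uses
            // "unzip -p %f '*/*.*htm*'", so %f gets replaced by the escaped
            // input filename here; a command using %t instead writes to a
            // temporary tmp.html/tmp.txt file, which is read back in below
            // rather than captured from stdout.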
            if (!substituted && cmd != "true") {
                // If no %f, append the filename to the command.
                append_filename_argument(cmd, file);
            }

            try {
                if (!tmpout.empty()) {
                    // Output in temporary file.
                    (void)stdout_to_string(cmd, use_shell);
                    if (!load_file(tmpout, dump)) {
                        throw ReadError("Couldn't read output file");
                    }
                    unlink(tmpout.c_str());
                } else if (cmd == "true") {
                    // Ignore the file's contents, just index metadata from the
                    // filesystem.
                } else {
                    dump = stdout_to_string(cmd, use_shell);
                }
                const string & charset = cmd_it->second.output_charset;
                if (cmd_it->second.output_type == "text/html") {
                    MyHtmlParser p;
                    p.ignore_metarobots();
                    p.description_as_sample = description_as_sample;
                    try {
                        p.parse_html(dump, charset, false);
                    } catch (const string & newcharset) {
                        p.ignore_metarobots();
                        p.description_as_sample = description_as_sample;
                        p.parse_html(dump, newcharset, true);
                    } catch (ReadError) {
                        skip_cmd_failed(urlterm, context, cmd,
                                        d.get_size(), d.get_mtime());
                        return;
                    }
                    dump = p.dump;
                    title = p.title;
                    keywords = p.keywords;
                    topic = p.topic;
                    sample = p.sample;
                    author = p.author;
                    created = p.created;
                } else if (!charset.empty()) {
                    convert_to_utf8(dump, charset);
                }
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/html" || mimetype == "text/x-php") {
            const string & text = d.file_to_string();
            MyHtmlParser p;
            if (ignore_exclusions) p.ignore_metarobots();
            p.description_as_sample = description_as_sample;
            try {
                // Default HTML character set is latin 1, though not specifying
                // one is deprecated these days.
                p.parse_html(text, "iso-8859-1", false);
            } catch (const string & newcharset) {
                if (ignore_exclusions) p.ignore_metarobots();
                p.description_as_sample = description_as_sample;
                p.parse_html(text, newcharset, true);
            }
            if (!p.indexing_allowed) {
                skip_meta_tag(urlterm, context,
                              d.get_size(), d.get_mtime());
                return;
            }
            dump = p.dump;
            title = p.title;
            keywords = p.keywords;
            topic = p.topic;
            sample = p.sample;
            author = p.author;
            created = p.created;
            md5_string(text, md5);
        } else if (mimetype == "text/plain") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }
        } else if (mimetype == "application/pdf") {
            string cmd = "pdftotext -enc UTF-8";
            append_filename_argument(cmd, file);
            cmd += " -";
            try {
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
            get_pdf_metainfo(file, author, title, keywords, topic);
        } else if (mimetype == "application/postscript") {
            // There simply doesn't seem to be a Unicode capable PostScript to
            // text converter (e.g. pstotext always outputs ISO-8859-1).  The
            // only solution seems to be to convert via PDF using ps2pdf and
            // then pdftotext.  This gives plausible looking UTF-8 output for
            // some Chinese PostScript files I found using Google.  It also has
            // the benefit of allowing us to extract meta information from
            // the PDF.
            string tmpfile = get_tmpfile("tmp.pdf");
            if (tmpfile.empty()) {
                // FIXME: should this be fatal?  Or disable indexing postscript?
                string msg = "Couldn't create temporary directory (";
                msg += strerror(errno);
                msg += ")";
                skip(urlterm, context, msg,
                     d.get_size(), d.get_mtime());
                return;
            }
            string cmd = "ps2pdf";
            append_filename_argument(cmd, file);
            append_filename_argument(cmd, tmpfile);
            try {
                (void)stdout_to_string(cmd, false);
                cmd = "pdftotext -enc UTF-8";
                append_filename_argument(cmd, tmpfile);
                cmd += " -";
                dump = stdout_to_string(cmd, false);
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                unlink(tmpfile.c_str());
                return;
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            try {
                get_pdf_metainfo(tmpfile, author, title, keywords, topic);
            } catch (...) {
                unlink(tmpfile.c_str());
                throw;
            }
            unlink(tmpfile.c_str());
        } else if (startswith(mimetype, "application/vnd.sun.xml.") ||
                   startswith(mimetype, "application/vnd.oasis.opendocument."))
        {
            // Inspired by http://mjr.towers.org.uk/comp/sxw2text
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " content.xml ; unzip -p";
            append_filename_argument(cmd, file);
            cmd += " styles.xml";
            try {
                OpenDocParser parser;
                parser.parse(stdout_to_string(cmd, true));
                dump = parser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }

            cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " meta.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (startswith(mimetype, "application/vnd.openxmlformats-officedocument.")) {
            const char * args = NULL;
            string tail(mimetype, 46);
            if (startswith(tail, "wordprocessingml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no headers or
                // footers.
                args = " word/document.xml 'word/header*.xml' 'word/footer*.xml' 2>/dev/null";
            } else if (startswith(tail, "spreadsheetml.")) {
                // Extract the shared string table first, so our parser can
                // grab those ready for parsing the sheets which will reference
                // the shared strings.
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/styles.xml xl/workbook.xml xl/sharedStrings.xml ; unzip -p";
                append_filename_argument(cmd, file);
                cmd += " xl/worksheets/sheet\\*.xml";
                try {
                    XlsxParser parser;
                    parser.parse(stdout_to_string(cmd, true));
                    dump = parser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            } else if (startswith(tail, "presentationml.")) {
                // unzip returns exit code 11 if a file to extract wasn't found
                // which we want to ignore, because there may be no notesSlides
                // or comments.
                args = " 'ppt/slides/slide*.xml' 'ppt/notesSlides/notesSlide*.xml' 'ppt/comments/comment*.xml' 2>/dev/null";
            } else {
                // Don't know how to index this type.
                skip_unknown_mimetype(urlterm, context, mimetype,
                                      d.get_size(), d.get_mtime());
                return;
            }

            if (args) {
                string cmd = "unzip -p";
                append_filename_argument(cmd, file);
                cmd += args;
                try {
                    MSXmlParser xmlparser;
                    // Treat exit status 11 from unzip as success - this is
                    // what we get if one of the listed filenames to extract
                    // doesn't match anything in the zip file.
                    xmlparser.parse_xml(stdout_to_string(cmd, false, 11));
                    dump = xmlparser.dump;
                } catch (ReadError) {
                    skip_cmd_failed(urlterm, context, cmd,
                                    d.get_size(), d.get_mtime());
                    return;
                }
            }

            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " docProps/core.xml";
            try {
                MetaXmlParser metaxmlparser;
                metaxmlparser.parse(stdout_to_string(cmd, false));
                title = metaxmlparser.title;
                keywords = metaxmlparser.keywords;
                // FIXME: topic = metaxmlparser.topic;
                sample = metaxmlparser.sample;
                author = metaxmlparser.author;
            } catch (ReadError) {
                // It's probably best to index the document even if this fails.
            }
        } else if (mimetype == "application/x-abiword") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            const string & text = d.file_to_string();
            xmlparser.parse_xml(text);
            dump = xmlparser.dump;
            md5_string(text, md5);
        } else if (mimetype == "application/x-abiword-compressed") {
            // FIXME: Implement support for metadata.
            XmlParser xmlparser;
            xmlparser.parse_xml(d.gzfile_to_string());
            dump = xmlparser.dump;
        } else if (mimetype == "application/vnd.ms-xpsdocument") {
            string cmd = "unzip -p";
            append_filename_argument(cmd, file);
            cmd += " 'Documents/1/Pages/*.fpage'";
            try {
                XpsXmlParser xpsparser;
                dump = stdout_to_string(cmd, false);
                // Look for Byte-Order Mark (BOM).
                if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                    // UTF-16 in big-endian/little-endian order - we just
                    // convert it as "UTF-16" and let the conversion handle the
                    // BOM as that way we avoid the copying overhead of erasing
                    // 2 bytes from the start of dump.
                    convert_to_utf8(dump, "UTF-16");
                }
                xpsparser.parse(dump);
                dump = xpsparser.dump;
            } catch (ReadError) {
                skip_cmd_failed(urlterm, context, cmd,
                                d.get_size(), d.get_mtime());
                return;
            }
        } else if (mimetype == "text/csv") {
            // Currently we assume that text files are UTF-8 unless they have a
            // byte-order mark.
            dump = d.file_to_string();
            md5_string(dump, md5);

            // Look for Byte-Order Mark (BOM).
            if (startswith(dump, "\xfe\xff") || startswith(dump, "\xff\xfe")) {
                // UTF-16 in big-endian/little-endian order - we just convert
                // it as "UTF-16" and let the conversion handle the BOM as that
                // way we avoid the copying overhead of erasing 2 bytes from
                // the start of dump.
                convert_to_utf8(dump, "UTF-16");
            } else if (startswith(dump, "\xef\xbb\xbf")) {
                // UTF-8 with stupid Windows not-the-byte-order mark.
                dump.erase(0, 3);
            } else {
                // FIXME: What charset is the file?  Look at contents?
            }

            generate_sample_from_csv(dump, sample);
        } else if (mimetype == "image/svg+xml") {
            SvgParser svgparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            svgparser.parse(text);
            dump = svgparser.dump;
            title = svgparser.title;
            keywords = svgparser.keywords;
            // FIXME: topic = svgparser.topic;
            author = svgparser.author;
        } else if (mimetype == "application/vnd.debian.binary-package" ||
                   mimetype == "application/x-debian-package") {
            string cmd("dpkg-deb -f");
            append_filename_argument(cmd, file);
            cmd += " Description";
            const string & desc = stdout_to_string(cmd, false);
            // First line is short description, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/x-redhat-package-manager" ||
                   mimetype == "application/x-rpm") {
            string cmd("rpm -q --qf '%{SUMMARY}\\n%{DESCRIPTION}' -p");
            append_filename_argument(cmd, file);
            const string & desc = stdout_to_string(cmd, false);
            // First line is summary, which we use as the title.
            string::size_type idx = desc.find('\n');
            title.assign(desc, 0, idx);
            if (idx != string::npos) {
                dump.assign(desc, idx + 1, string::npos);
            }
        } else if (mimetype == "application/atom+xml") {
            AtomParser atomparser;
            const string & text = d.file_to_string();
            md5_string(text, md5);
            atomparser.parse(text);
            dump = atomparser.dump;
            title = atomparser.title;
            keywords = atomparser.keywords;
            // FIXME: topic = atomparser.topic;
            author = atomparser.author;
        } else {
            // Don't know how to index this type.
            skip_unknown_mimetype(urlterm, context, mimetype,
                                  d.get_size(), d.get_mtime());
            return;
        }
        // Compute the MD5 of the file if we haven't already.
        if (md5.empty() && md5_file(file, md5, d.try_noatime()) == 0) {
            if (errno == ENOENT || errno == ENOTDIR) {
                skip(urlterm, context, "File removed during indexing",
                     d.get_size(), d.get_mtime(),
                     SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
            } else {
                skip(urlterm, context,
                     "failed to read file to calculate MD5 checksum",
                     d.get_size(), d.get_mtime());
            }
            return;
        }

        // Remove any trailing formfeeds, so we don't consider them when
        // considering if we extracted any text (e.g. pdftotext outputs a
        // formfeed between each page, even for blank pages).
        //
        // If dump contains only formfeeds, then trim_end will be string::npos
        // and ++trim_end will be 0, which is the correct new size.
        string::size_type trim_end = dump.find_last_not_of('\f');
        if (++trim_end != dump.size())
            dump.resize(trim_end);
        if (dump.empty()) {
            switch (empty_body) {
                case EMPTY_BODY_INDEX:
                    break;
                case EMPTY_BODY_WARN:
                    cout << "no text extracted from document body, "
                            "but indexing metadata anyway" << endl;
                    break;
                case EMPTY_BODY_SKIP:
                    skip(urlterm, context,
                         "no text extracted from document body",
                         d.get_size(), d.get_mtime());
                    return;
            }
        }

        if (sample.empty()) {
            sample = generate_sample(dump, sample_size, "...", " ...");
        } else {
            sample = generate_sample(sample, sample_size, "...", " ...");
        }
        // Put the data in the document
        if (record.empty()) {
            record = "url=";
        } else {
            record += "\nurl=";
        }
        record += url;
        record += "\nsample=";
        record += sample;
        if (!title.empty()) {
            record += "\ncaption=";
            record += generate_sample(title, title_size, "...", " ...");
        }
        if (!author.empty()) {
            record += "\nauthor=";
            record += author;
        }
        record += "\ntype=";
        record += mimetype;
        time_t mtime = d.get_mtime();
        if (mtime != static_cast<time_t>(-1)) {
            record += "\nmodtime=";
            record += str(mtime);
        }
        if (created != static_cast<time_t>(-1)) {
            record += "\ncreated=";
            record += str(created);
        }
        off_t size = d.get_size();
        record += "\nsize=";
        record += str(size);
        newdocument.set_data(record);
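        // So the stored document data ends up as a newline-separated list of
        // "field=value" entries, something like:
        //
        //     url=/docs/report.pdf
        //     sample=Quarterly results were ...
        //     caption=Quarterly Report
        //     type=application/pdf
        //     modtime=1489414219
        //     size=123456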
        // Index the title, document text, keywords and topic.
        indexer.set_document(newdocument);
        if (!title.empty()) {
            indexer.index_text(title, 5, "S");
            indexer.increase_termpos(100);
        }
        if (!dump.empty()) {
            indexer.index_text(dump);
        }
        if (!keywords.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(keywords);
        }
        if (!topic.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(topic, 1, "B");
        }
        // Index the leafname of the file.
        indexer.increase_termpos(100);
        string leaf = d.leafname();
        string::size_type dot = leaf.find_last_of('.');
        if (dot != string::npos && leaf.size() - dot - 1 <= max_ext_len)
            leaf.resize(dot);
        indexer.index_text(leaf, 1, "F");

        // Also index with underscores and ampersands replaced by spaces.
        bool modified = false;
        string::size_type rep = 0;
        while ((rep = leaf.find_first_of("_&", rep)) != string::npos) {
            leaf[rep++] = ' ';
            modified = true;
        }
        if (modified) {
            indexer.increase_termpos(100);
            indexer.index_text(leaf, 1, "F");
        }
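        // For example, "monthly_report.pdf" is indexed under the F prefix as
        // "monthly_report" (the ".pdf" extension is dropped since it is within
        // max_ext_len), and then again as "monthly report" with the underscore
        // replaced by a space.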
        if (!author.empty()) {
            indexer.increase_termpos(100);
            indexer.index_text(author, 1, "A");
        }

        newdocument.add_boolean_term("T" + mimetype);

        newdocument.add_boolean_term(site_term);

        if (!host_term.empty())
            newdocument.add_boolean_term(host_term);
        if (date_terms) {
            struct tm *tm = localtime(&mtime);
            string date_term = "D";
            date_term += date_to_string(tm->tm_year + 1900,
                                        tm->tm_mon + 1,
                                        tm->tm_mday);
            newdocument.add_boolean_term(date_term); // Date (YYYYMMDD)
            date_term.resize(7);
            date_term[0] = 'M';
            newdocument.add_boolean_term(date_term); // Month (YYYYMM)
            date_term.resize(5);
            date_term[0] = 'Y';
            newdocument.add_boolean_term(date_term); // Year (YYYY)
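            // For example, a file last modified on 2016-09-08 gets the
            // boolean terms D20160908, M201609 and Y2016.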
        }

        newdocument.add_boolean_term(urlterm); // Url

        // Add mtime as a value to allow "sort by date".
        newdocument.add_value(VALUE_LASTMOD,
                              int_to_binary_string(uint32_t(mtime)));

        if (use_ctime) {
            // Add ctime as a value to track modifications.
            time_t ctime = d.get_ctime();
            newdocument.add_value(VALUE_CTIME,
                                  int_to_binary_string(uint32_t(ctime)));
        }

        // Add MD5 as a value to allow duplicate documents to be collapsed
        // together.
        newdocument.add_value(VALUE_MD5, md5);

        // Add the file size as a value to allow "sort by size" and size
        // ranges.
        newdocument.add_value(VALUE_SIZE,
                              Xapian::sortable_serialise(size));
        bool inc_tag_added = false;
        if (d.is_other_readable()) {
            inc_tag_added = true;
            newdocument.add_boolean_term("I*");
        } else if (d.is_group_readable()) {
            const char * group = d.get_group();
            if (group) {
                newdocument.add_boolean_term(string("I#") + group);
            }
        }
        const char * owner = d.get_owner();
        if (owner) {
            newdocument.add_boolean_term(string("O") + owner);
            if (!inc_tag_added && d.is_owner_readable())
                newdocument.add_boolean_term(string("I@") + owner);
        }

        string ext_term("E");
        for (string::const_iterator i = ext.begin(); i != ext.end(); ++i) {
            char ch = *i;
            if (ch >= 'A' && ch <= 'Z')
                ch |= 32;
            ext_term += ch;
        }
        newdocument.add_boolean_term(ext_term);
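        // For example, a file with extension "PDF" gets the boolean term
        // "Epdf" - the extension is lowercased before the E-prefixed term is
        // added.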
        index_add_document(urlterm, last_altered, did, newdocument);
    } catch (ReadError) {
        skip(urlterm, context, string("can't read file: ") + strerror(errno),
             d.get_size(), d.get_mtime());
    } catch (NoSuchFilter) {
        string filter_entry;
        if (cmd_it != commands.end()) {
            filter_entry = cmd_it->first;
        } else {
            filter_entry = mimetype;
        }
        string m = "Filter for \"";
        m += filter_entry;
        m += "\" not installed";
        skip(urlterm, context, m, d.get_size(), d.get_mtime());
        commands[filter_entry] = Filter();
    } catch (FileNotFound) {
        skip(urlterm, context, "File removed during indexing",
             d.get_size(), d.get_mtime(),
             SKIP_VERBOSE_ONLY | SKIP_SHOW_FILENAME);
    } catch (const std::string & error) {
        skip(urlterm, context, error, d.get_size(), d.get_mtime());
    }
}
void
index_handle_deletion()
{
    if (updated.empty() || old_docs_not_seen == 0) return;

    if (verbose) {
        cout << "Deleting " << old_docs_not_seen
             << " old documents which weren't found" << endl;
    }
    Xapian::PostingIterator alldocs = db.postlist_begin(string());
    Xapian::docid did = *alldocs;
    while (did < updated.size()) {
        if (!updated[did]) {
            alldocs.skip_to(did);
            if (alldocs == db.postlist_end(string()))
                break;
            if (*alldocs != did) {
                // Document #did didn't exist before we started.
                did = *alldocs;
                continue;
            }
            db.delete_document(did);
            if (--old_docs_not_seen == 0)
                break;
        }
        ++did;
    }
}

// If we created a temporary directory then delete it.