-
- # Remove any html from text to be indexed.
- # TODO: This removes html that is in eg, a markdown pre,
- # which should not be removed, really.
- if (! defined $scrubber) {
- 	eval q{use HTML::Scrubber};
- 	if (! $@) {
- 		$scrubber=HTML::Scrubber->new(allow => []);
- 	}
+ }
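+ # Strip newlines: the document data set below is one field per line,
+ # so an embedded newline would split the sample across fields.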
+ $sample=~s/\n/ /g;
+
+ # Data used by omega.
+ # Decode html entities in it, since omega re-encodes them.
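+ # omega reads the data as name=value fields, one per line, which its
+ # templates can access with $field.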
+ eval q{use HTML::Entities};
+ $doc->set_data(
+ "url=".urlto($params{page}, "")."\n".
+ "sample=".decode_entities($sample)."\n".
+ "caption=".decode_entities($caption)."\n".
+ "modtime=$IkiWiki::pagemtime{$params{page}}\n".
+ "size=".length($params{content})."\n"
+ );
+
+ # Index document and add terms for other metadata.
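+ # The TermGenerator tokenizes any text it is given into terms and adds
+ # them to the document, stemming them once a stemmer is set.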
+ my $tg=Search::Xapian::TermGenerator->new();
+ if (! $stemmer) {
+ 	my $langcode=$ENV{LANG} || "en";
+ 	$langcode=~s/_.*//;
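+ 	# e.g. LANG="de_DE.UTF-8" is trimmed to the language code "de".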
+
+ 	# This whitelist works around a xapian bug (#486138).
+ 	my @whitelist=qw{da de en es fi fr hu it no pt ru ro sv tr};
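+ 	# (Assumption: Stem->new() misbehaves for languages outside
+ 	# this list, so only known-good codes are passed to it.)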
+
+ 	if (grep { $_ eq $langcode } @whitelist) {
+ 		$stemmer=Search::Xapian::Stem->new($langcode);