-} #}}}
-
-sub delete (@) { #{{{
- IkiWiki::debug("cleaning hyperestraier search index");
- IkiWiki::estcmd("purge -cl");
- IkiWiki::estcfg();
-} #}}}
-
-sub change (@) { #{{{
- IkiWiki::debug("updating hyperestraier search index");
- IkiWiki::estcmd("gather -cm -bc -cl -sd",
- map {
- Encode::encode_utf8($IkiWiki::config{destdir}."/".$IkiWiki::renderedfiles{IkiWiki::pagename($_)})
- } @_
+}
+
+my $scrubber;
+my $stemmer;
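+# Index a page's content in xapian. Takes named "page" and "content"
+# parameters and returns the content.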
+sub index (@) {
+	my %params=@_;
+
+	setupfiles();
+
+	# A unique pageterm is used to identify the document for a page.
+	my $pageterm=pageterm($params{page});
+	return $params{content} unless defined $pageterm;
+
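+	# Open the xapian database and create a document for this page.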
+	my $db=xapiandb();
+	my $doc=Search::Xapian::Document->new();
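+	# Prefer a title set by the meta plugin; fall back to the page name.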
+	my $caption=pagetitle($params{page});
+	my $title;
+	if (exists $pagestate{$params{page}}{meta} &&
+	    exists $pagestate{$params{page}}{meta}{title}) {
+		$title=$pagestate{$params{page}}{meta}{title};
+	}
+	else {
+		$title=$caption;
+	}
+
+	# Remove html from text to be indexed.
+	if (! defined $scrubber) {
+		eval q{use HTML::Scrubber};
+		if (! $@) {
+			$scrubber=HTML::Scrubber->new(allow => []);
+		}
+	}
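+	# If HTML::Scrubber could not be loaded, index the raw html.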
+	my $toindex=defined $scrubber ? $scrubber->scrub($params{content}) : $params{content};
+
+	# Take 512 characters for a sample, then extend it out
+	# if it stopped in the middle of a word.
+	my $size=512;
+	my $sample=substr($toindex, 0, $size);
+	if (length($sample) == $size) {
+		my $max=length($toindex);
+		my $next;
+		while ($size < $max &&
+		       ($next=substr($toindex, $size++, 1)) !~ /\s/) {
+			$sample.=$next;
+		}
+	}
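+	# The sample is stored on a single line of the document data,
+	# so flatten any newlines in it.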
+	$sample=~s/\n/ /g;
+
+	# data used by omega
+	# Decode html entities in it, since omega re-encodes them.
+	eval q{use HTML::Entities};
+	$doc->set_data(
+		"url=".urlto($params{page}, "")."\n".
+		"sample=".decode_entities($sample)."\n".
+		"caption=".decode_entities($caption)."\n".
+		"modtime=$IkiWiki::pagemtime{$params{page}}\n".
+		"size=".length($params{content})."\n"