git.vanrenterghem.biz Git - git.ikiwiki.info.git/commitdiff
Merge branch 'master' of ssh://git.ikiwiki.info/srv/git/ikiwiki.info
authorJoey Hess <joey@kodama.kitenet.net>
Thu, 8 May 2008 19:51:41 +0000 (15:51 -0400)
committerJoey Hess <joey@kodama.kitenet.net>
Thu, 8 May 2008 19:51:41 +0000 (15:51 -0400)
IkiWiki/Plugin/amazon_s3.pm
doc/bugs/Search_results_should_point_to_dir__44___not_index.html__44___when_use__95__dirs_is_enabled.mdwn
doc/ikiwiki.setup
doc/plugins/amazon_s3.mdwn

index cc15fbdb1bd2a967ed7b2aca4e4979b9fd81ffdb..6652b9fba6a424512f95db2234160721e24c7881 100644 (file)
@@ -71,6 +71,29 @@ sub getbucket { #{{{
 } #}}}
 }
 
+# Given a file, return any S3 keys associated with it.
+sub file2keys ($) { #{{{
+       my $file=shift;
+
+       my @keys;
+       if ($file =~ /^\Q$config{destdir}\/\E(.*)/) {
+               push @keys, $config{amazon_s3_prefix}.$1;
+
+               # Munge foo/index.html to foo/
+               if ($keys[0]=~/(^|.*\/)index.$config{htmlext}$/) {
+                       # A duplicate might need to be stored under the
+                       # unmunged name too.
+                       if (!$config{usedirs} || $config{amazon_s3_dupindex}) {
+                               push @keys, $1;
+                       }
+                       else {
+                               @keys=($1);
+                       }
+               }
+       }
+       return @keys;
+} #}}}
+
 package IkiWiki;
 use File::MimeInfo;
 use Encode;
@@ -85,21 +108,11 @@ sub writefile ($$$;$$) { #{{{
 
        # First, write the file to disk.
        my $ret=$IkiWiki::Plugin::amazon_s3::subs{'IkiWiki::writefile'}->($file, $destdir, $content, $binary, $writer);
-
-       # Now, determine if the file was written to the destdir.
-       # writefile might be used for writing files elsewhere.
-       # Also, $destdir might be set to a subdirectory of the destdir.
-       my $key;
-       if ($destdir eq $config{destdir}) {
-               $key=$file;
-       }
-       elsif ("$destdir/$file" =~ /^\Q$config{destdir}\/\E(.*)/) {
-               $key=$1;
-       }
+               
+       my @keys=IkiWiki::Plugin::amazon_s3::file2keys("$destdir/$file");
 
        # Store the data in S3.
-       if (defined $key) {
-               $key=$config{amazon_s3_prefix}.$key;
+       if (@keys) {
                my $bucket=IkiWiki::Plugin::amazon_s3::getbucket();
 
                # The http layer tries to downgrade utf-8
@@ -108,42 +121,37 @@ sub writefile ($$$;$$) { #{{{
                # so force convert it to bytes.
                $content=encode_utf8($content) if defined $content;
 
-               if (defined $content && ! length $content) {
-                       # S3 doesn't allow storing empty files!
-                       $content=" ";
-               }
-               
                my %opts=(
                        acl_short => 'public-read',
                        content_type => mimetype("$destdir/$file"),
                );
-               my $res;
-               if (! $writer) {
-                       $res=$bucket->add_key($key, $content, \%opts);
-               }
-               else {
-                       # read back in the file that the writer emitted
-                       $res=$bucket->add_key_filename($key, "$destdir/$file", \%opts);
-               }
-               if ($res && $key=~/(^|.*\/)index.$config{htmlext}$/) {
-                       # index.html files are a special case. Since S3 is
-                       # not a normal web server, it won't serve up
-                       # foo/index.html when foo/ is requested. So the
-                       # file has to be stored twice. (This is bad news
-                       # when usedirs is enabled!)
-                       # TODO: invesitgate using the new copy operation.
-                       #       (It may not be robust enough.)
-                       my $base=$1;
+
+               # If there are multiple keys to write, data is sent
+               # multiple times.
+               # TODO: investigate using the new copy operation.
+               #       (It may not be robust enough.)
+               foreach my $key (@keys) {
+                       debug("storing $key");
+                       my $res;
                        if (! $writer) {
-                               $res=$bucket->add_key($base, $content, \%opts);
+                               $res=$bucket->add_key($key, $content, \%opts);
                        }
                        else {
-                               $res=$bucket->add_key_filename($base, "$destdir/$file", \%opts);
+                               # This test for empty files is a workaround
+                               # for this bug:
+                               # http://rt.cpan.org//Ticket/Display.html?id=35731
+                               if (-z "$destdir/$file") {
+                                       $res=$bucket->add_key($key, "", \%opts);
+                               }
+                               else {
+                                       # read back in the file that the writer emitted
+                                       $res=$bucket->add_key_filename($key, "$destdir/$file", \%opts);
+                               }
+                       }
+                       if (! $res) {
+                               error(gettext("Failed to save file to S3: ").
+                                       $bucket->err.": ".$bucket->errstr."\n");
                        }
-               }
-               if (! $res) {
-                       error(gettext("Failed to save file to S3: ").
-                               $bucket->err.": ".$bucket->errstr."\n");
                }
        }
 
@@ -154,19 +162,19 @@ sub writefile ($$$;$$) { #{{{
 sub prune ($) { #{{{
        my $file=shift;
 
-       # If a file in the destdir is being pruned, need to delete it out
-       # of S3 as well.
-       if ($file =~ /^\Q$config{destdir}\/\E(.*)/) {
-               my $key=$config{amazon_s3_prefix}.$1;
+       my @keys=IkiWiki::Plugin::amazon_s3::file2keys($file);
+
+       # Prune files out of S3 too.
+       if (@keys) {
                my $bucket=IkiWiki::Plugin::amazon_s3::getbucket();
-               my $res=$bucket->delete_key($key);
-               if ($res && $key=~/(^|.*\/)index.$config{htmlext}$/) {
-                       # index.html special case: Delete other file too
-                       $res=$bucket->delete_key($1);
-               }
-               if (! $res) {
-                       error(gettext("Failed to delete file from S3: ").
-                               $bucket->err.": ".$bucket->errstr."\n");
+
+               foreach my $key (@keys) {
+                       debug("deleting $key");
+                       my $res=$bucket->delete_key($key);
+                       if (! $res) {
+                               error(gettext("Failed to delete file from S3: ").
+                                       $bucket->err.": ".$bucket->errstr."\n");
+                       }
                }
        }
 
index 91dca9548b6e3dd718d423bc52bc1951314dce06..45a8f0abd791f62020f5cdf1efda5126a7eb51da 100644 (file)
@@ -7,3 +7,7 @@ point to `foo/bar/` instead.
 > --[Jason Blevins](http://jblevins.org/)
 
 >> Even with `usedirs`, there is no reason why the `index.html` should be called directly, and it might break content negotiation. Please just direct to the directory. --[[madduck]]
+
+> This bug affects the [[plugins/amazon_s3]] plugin -- when using that
+> plugin plus the search plugin, you need to enable `amazon_s3_dupindex`.
+> So this definitely should be fixed. --[[Joey]]
index 997dcd18e84d3a4cc0919812d28692edcdc28db6..e85518849874c313a30e9bacb0cabf6eab7319ae 100644 (file)
@@ -183,4 +183,6 @@ use IkiWiki::Setup::Standard {
        #amazon_s3_prefix => "wiki/",
        # Uncomment to use the S3 European datacenter.
        #amazon_s3_location => "EU",
+       # Uncomment if you need to store each index file twice.
+       #amazon_s3_dupindex => 1,
 }
index f1887d97072258bf0f29917a94ae338426499112..66042bfbeeaa2e85eb0dc7819c9412bd500dc19e 100644 (file)
@@ -11,11 +11,6 @@ modules and an Amazon S3 account to use this plugin.
 
 ## configuration
 
-Important note: You should seriously consider turning off `usedirs` before
-enabling this plugin. If `usedirs` is enabled, every page has to be stored
-in S3 *twice*, as "page/index.html" and as "page/". That will cost you
-money and bandwidth.
-
 This plugin uses the following settings in the setup file:
 
 * `amazon_s3_key_id` - Set to your public access key id.
@@ -32,6 +27,13 @@ This plugin uses the following settings in the setup file:
   empty string.
 * `amazon_s3_location` - Optionally, this can be set to control which
   datacenter to use. For example, set it to "EU" to for Europe.
+* `amazon_s3_dupindex` - Normally, when `usedirs` is enabled,
+  "foo/index.html" is stored in S3 as a key named "foo/", and all links
+  between pages use that name. If you also need links that include
+  "index.html" in their names to work, you can enable this option. Then
+  each index.html file will be stored in S3 *twice*, under both names. This
+  will use more disk and bandwidth, and is not recommended unless you really
+  need it for some reason.
 
 Note that you should still set `destdir` in the setup file. The files that
 are uploaded to Amazon S3 will still be written to the destdir, too.