author     Joey Hess <joey@kitenet.net>  2012-10-09 17:03:52 -0400
committer  Joey Hess <joey@kitenet.net>  2012-10-09 17:12:04 -0400
commit     6bd8c6732e7ccda0013a3373c3618fca91c91d36 (patch)
tree       e4677c50f7b3cf805cb352c39623af3bcb0cb3f5
parent     4c1582c11eb85bf356c943abf1b31dde3453e73d (diff)
add cgi_overload_delay tunable
Try to avoid a situation in which so many ikiwiki CGI wrapper programs are running, all waiting on some long-running thing like a site rebuild, that the web server is prevented from doing anything else. The current approach only avoids this problem for GET requests; if multiple CGIs run GETs on a site at the same time, one will display a "please wait" page for a configurable number of seconds, which then redirects to retry. To enable this protection, set cgi_overload_delay to the number of seconds to wait. This is not enabled by default.
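In outline, the mechanism the generated wrapper ends up with reads like the following minimal standalone C program. This is a sketch of the pattern only: the lock path, the fixed delay, and the fallback URL are illustrative stand-ins for values the wrapper actually wires in from the wiki's configuration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

/* Stand-in for the configured cgi_overload_delay value. */
#define CGI_OVERLOAD_DELAY 10

int main(void) {
	/* Illustrative lock path; ikiwiki uses $wikistatedir/cgilock. */
	int lockfd = open("/tmp/cgilock", O_CREAT | O_RDWR, 0666);
	if (lockfd == -1)
		return 1;

	char *request_method = getenv("REQUEST_METHOD");
	if (request_method && strcmp(request_method, "GET") == 0) {
		/* F_TLOCK fails at once if another process holds the lock,
		 * instead of blocking like F_LOCK. */
		if (lockf(lockfd, F_TLOCK, 0) != 0) {
			char *uri = getenv("REQUEST_URI");
			printf("Content-Type: text/html\nRefresh: %i; URL=%s\n\n"
			       "<html><head><title>please wait...</title></head>"
			       "<body><p>Please wait ...</p></body></html>",
			       CGI_OVERLOAD_DELAY, uri ? uri : "/");
			return 0; /* the client retries after the delay */
		}
	}
	else if (lockf(lockfd, F_LOCK, 0) != 0) {
		return 1; /* POSTs and other methods still block for the lock */
	}

	/* ... lock held here; serve the request, then exit, which
	 * releases the lock automatically. */
	return 0;
}

The Refresh header does the retrying: the browser re-requests the same URL after the delay, by which time the lock may be free.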
-rw-r--r--  IkiWiki.pm           8
-rw-r--r--  IkiWiki/Wrapper.pm  47
-rw-r--r--  debian/changelog     8
3 files changed, 58 insertions, 5 deletions
diff --git a/IkiWiki.pm b/IkiWiki.pm
index f68797ae3..a7dc6b36b 100644
--- a/IkiWiki.pm
+++ b/IkiWiki.pm
@@ -118,6 +118,14 @@ sub getsetup () {
safe => 0,
rebuild => 0,
},
+ cgi_overload_delay => {
+ type => "string",
+ default => '',
+ example => "10",
+ description => "number of seconds to delay CGI requests when overloaded",
+ safe => 1,
+ rebuild => 0,
+ },
rcs => {
type => "string",
default => '',
diff --git a/IkiWiki/Wrapper.pm b/IkiWiki/Wrapper.pm
index 769540d29..0855a3ba2 100644
--- a/IkiWiki/Wrapper.pm
+++ b/IkiWiki/Wrapper.pm
@@ -93,12 +93,43 @@ EOF
# memory, a pile up of processes could cause thrashing
# otherwise. The fd of the lock is stored in
# IKIWIKI_CGILOCK_FD so unlockwiki can close it.
- $pre_exec=<<"EOF";
+ #
+ # A lot of cgi wrapper processes can potentially build
+ # up and clog an otherwise unloaded web server. To
+ # partially avoid this, when a GET comes in and the lock
+ # is already held, rather than blocking, an HTML page is
+ # constructed that retries. This is enabled by setting
+ # cgi_overload_delay.
+ if (defined $config{cgi_overload_delay} &&
+ $config{cgi_overload_delay} =~/^[0-9]+/) {
+ my $i=int($config{cgi_overload_delay});
+ $pre_exec.="#define CGI_OVERLOAD_DELAY $i\n"
+ if $i > 0;
+ }
+ $pre_exec.=<<"EOF";
lockfd=open("$config{wikistatedir}/cgilock", O_CREAT | O_RDWR, 0666);
- if (lockfd != -1 && lockf(lockfd, F_LOCK, 0) == 0) {
- char *fd_s=malloc(8);
- sprintf(fd_s, "%i", lockfd);
- setenv("IKIWIKI_CGILOCK_FD", fd_s, 1);
+ if (lockfd != -1) {
+#ifdef CGI_OVERLOAD_DELAY
+ char *request_method = getenv("REQUEST_METHOD");
+ if (request_method && strcmp(request_method, "GET") == 0) {
+ if (lockf(lockfd, F_TLOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+ else {
+ printf("Content-Type: text/html\\nRefresh: %i; URL=%s\\n\\n<html><head><title>please wait...</title><head><body><p>Please wait ...</p></body></html>",
+ CGI_OVERLOAD_DELAY,
+ getenv("REQUEST_URI"));
+ exit(0);
+ }
+ }
+ else if (lockf(lockfd, F_LOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+#else
+ if (lockf(lockfd, F_LOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+#endif
}
EOF
}
@@ -140,6 +171,12 @@ void addenv(char *var, char *val) {
newenviron[i++]=s;
}
+void set_cgilock_fd (int lockfd) {
+ char *fd_s=malloc(8);
+ sprintf(fd_s, "%i", lockfd);
+ setenv("IKIWIKI_CGILOCK_FD", fd_s, 1);
+}
+
int main (int argc, char **argv) {
int lockfd=-1;
char *s;
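The IKIWIKI_CGILOCK_FD environment variable set by set_cgilock_fd exists so the wrapped program can release the lock before starting long-running work; in ikiwiki itself that is done by unlockwiki, in Perl. A sketch of that side of the handshake, written here in C with an illustrative function name:

#include <stdlib.h>
#include <unistd.h>

/* Close the inherited lock fd, which releases the wrapper's lockf()
 * lock early. Mirrors what ikiwiki's unlockwiki does in Perl; the
 * name and standalone framing are illustrative. */
static void unlock_wrapper(void) {
	char *fd_s = getenv("IKIWIKI_CGILOCK_FD");
	if (fd_s != NULL)
		close(atoi(fd_s));
}

int main(void) {
	unlock_wrapper();
	/* ... long-running work (e.g. a site rebuild) proceeds without
	 * serializing every other CGI request behind this process ... */
	return 0;
}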
diff --git a/debian/changelog b/debian/changelog
index 8d7618753..842eb6806 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -3,6 +3,14 @@ ikiwiki (3.20120726) UNRELEASED; urgency=low
* monochrome: New theme, contributed by Jon Dowland.
* rst: Ported to python 3, while still also being valid python 2.
Thanks, W. Trevor King
+ * Try to avoid a situation in which so many ikiwiki CGI wrapper programs
+ are running, all waiting on some long-running thing like a site rebuild,
+ that the web server is prevented from doing anything else. The current
+ approach only avoids this problem for GET requests; if multiple CGIs
+ run GETs on a site at the same time, one will display a "please wait"
+ page for a configurable number of seconds, which then redirects to retry.
+ To enable this protection, set cgi_overload_delay to the number of
+ seconds to wait. This is not enabled by default.
-- Joey Hess <joeyh@debian.org> Thu, 30 Aug 2012 11:56:12 -0400