aboutsummaryrefslogtreecommitdiff
path: root/src/or/directory.c
diff options
context:
space:
mode:
authorNick Mathewson <nickm@torproject.org>2005-09-22 06:34:29 +0000
committerNick Mathewson <nickm@torproject.org>2005-09-22 06:34:29 +0000
commitcdc912714eeee2d72f72e1f4446daaa8804e12db (patch)
treef209e15c65bd3cce88e382b3338e61bd9ba6c21b /src/or/directory.c
parent66930319473ff63796672f26d52890f60666e770 (diff)
downloadtor-cdc912714eeee2d72f72e1f4446daaa8804e12db.tar
tor-cdc912714eeee2d72f72e1f4446daaa8804e12db.tar.gz
I love the smell of C in the morning. Make router-download rules smarter (download more so long as we don't duplicate existing requests; relaunch at staggered intervals); relaunch once a minute or on failure; reset after 60 minutes; always open 3 requests if we can; add authority opinion to networkstatus; make naming rule correct. There is a remaining bug where we retry servers too quickly; We need to look at that harder.
svn:r5110
Diffstat (limited to 'src/or/directory.c')
-rw-r--r--src/or/directory.c37
1 file changed, 35 insertions, 2 deletions
diff --git a/src/or/directory.c b/src/or/directory.c
index 8dade1a44..8ca14c5f5 100644
--- a/src/or/directory.c
+++ b/src/or/directory.c
@@ -320,8 +320,8 @@ connection_dir_download_networkstatus_failed(connection_t *conn)
static void
connection_dir_download_routerdesc_failed(connection_t *conn)
{
- /* Try again. */
- /*XXXX011 plays poorly with multiple conns. */
+ /* Try again. No need to increment the failure count for routerdescs, since
+ * it's not their fault.*/
update_router_descriptor_downloads(time(NULL));
}
@@ -1558,6 +1558,8 @@ dir_routerdesc_download_failed(smartlist_t *failed)
{
char digest[DIGEST_LEN];
local_routerstatus_t *rs;
+ time_t now = time(NULL);
+ int server = server_mode(get_options()) && get_options()->DirPort;
SMARTLIST_FOREACH(failed, const char *, cp,
{
base16_decode(digest, DIGEST_LEN, cp, strlen(cp));
@@ -1565,7 +1567,38 @@ dir_routerdesc_download_failed(smartlist_t *failed)
if (!rs || rs->n_download_failures >= MAX_ROUTERDESC_DOWNLOAD_FAILURES)
continue;
++rs->n_download_failures;
+ if (server) {
+ switch (rs->n_download_failures) {
+ case 1: rs->next_attempt_at = 0; break;
+ case 2: rs->next_attempt_at = 0; break;
+ case 3: rs->next_attempt_at = now+60; break;
+ case 4: rs->next_attempt_at = now+60; break;
+ case 5: rs->next_attempt_at = now+60*2; break;
+ case 6: rs->next_attempt_at = now+60*5; break;
+ case 7: rs->next_attempt_at = now+60*15; break;
+ default: rs->next_attempt_at = TIME_MAX; break;
+ }
+ } else {
+ switch (rs->n_download_failures) {
+ case 1: rs->next_attempt_at = 0; break;
+ case 2: rs->next_attempt_at = now+60; break;
+ case 3: rs->next_attempt_at = now+60*5; break;
+ case 4: rs->next_attempt_at = now+60*10; break;
+ default: rs->next_attempt_at = TIME_MAX; break;
+ }
+ }
+ if (rs->next_attempt_at == 0)
+ log_fn(LOG_NOTICE, "%s failed %d time(s); I'll try again immediately.",
+ cp, (int)rs->n_download_failures);
+ else if (rs->next_attempt_at < TIME_MAX)
+ log_fn(LOG_NOTICE, "%s failed %d time(s); I'll try again in %d seconds.",
+ cp, (int)rs->n_download_failures, (int)(rs->next_attempt_at-now));
+ else
+ log_fn(LOG_NOTICE, "%s failed %d time(s); Giving up for a while.",
+ cp, (int)rs->n_download_failures);
});
+
+ update_router_descriptor_downloads(time(NULL));
}
/* DOCDOC */