aboutsummaryrefslogtreecommitdiff
path: root/src/common/memarea.c
diff options
context:
space:
mode:
authorNick Mathewson <nickm@torproject.org>2008-04-08 17:29:05 +0000
committerNick Mathewson <nickm@torproject.org>2008-04-08 17:29:05 +0000
commit31153d6374317b4bfaba83fafdcde5a0f3f885de (patch)
tree7967910e0d5735b051734b375e7b3f819430bab3 /src/common/memarea.c
parenta627407fcba1d5b1671e5789f420e4b5f8b63f99 (diff)
downloadtor-31153d6374317b4bfaba83fafdcde5a0f3f885de.tar
tor-31153d6374317b4bfaba83fafdcde5a0f3f885de.tar.gz
r19243@catbus: nickm | 2008-04-08 13:28:59 -0400
Use a freelist to hold a few recent memarea chunks. We do a kazillion memarea allocs and frees; that cant be good for us. svn:r14319
Diffstat (limited to 'src/common/memarea.c')
-rw-r--r--src/common/memarea.c63
1 file changed, 51 insertions, 12 deletions
diff --git a/src/common/memarea.c b/src/common/memarea.c
index 743a451a8..d4657b702 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -53,6 +53,8 @@ typedef struct memarea_chunk_t {
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)
+#define CHUNK_SIZE 8192
+
/** A memarea_t is an allocation region for a set of small memory requests
* that will all be freed at once. */
struct memarea_t {
@@ -60,25 +62,49 @@ struct memarea_t {
size_t chunk_size; /**<Size to use when allocating chunks.*/
};
+#define MAX_FREELIST_LEN 4
+int freelist_len=0;
+static memarea_chunk_t *freelist = NULL;
+
/** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
static memarea_chunk_t *
-alloc_chunk(size_t chunk_size)
+alloc_chunk(size_t sz)
{
- memarea_chunk_t *res = tor_malloc_roundup(&chunk_size);
- res->next_chunk = NULL;
- res->mem_size = chunk_size - CHUNK_HEADER_SIZE;
- res->next_mem = res->u.mem;
- return res;
+ (void)sz; /*XXXX021 remove this argument. */
+ if (freelist) {
+ memarea_chunk_t *res = freelist;
+ freelist = res->next_chunk;
+ --freelist_len;
+ return res;
+ } else {
+ size_t chunk_size = CHUNK_SIZE;
+ memarea_chunk_t *res = tor_malloc_roundup(&chunk_size);
+ res->next_chunk = NULL;
+ res->mem_size = chunk_size - CHUNK_HEADER_SIZE;
+ res->next_mem = res->u.mem;
+ return res;
+ }
}
-/** Allocate and return new memarea, with chunks of approximately
- * <b>chunk_size</b> bytes. (There is indeed some overhead.) */
+/** Helper: release <b>chunk</b>.  If the freelist has room, stash the
+ * chunk there for reuse by alloc_chunk(); otherwise deallocate it. */
+static void
+chunk_free(memarea_chunk_t *chunk)
+{
+  /* BUG FIX: the original condition was inverted (>=), so the push branch
+   * was unreachable: freelist_len starts at 0 and was only incremented
+   * inside that branch, meaning every chunk was tor_free()d and the
+   * freelist never held anything.  Cache the chunk only while the list
+   * is still below its cap. */
+  if (freelist_len < MAX_FREELIST_LEN) {
+    ++freelist_len;
+    chunk->next_chunk = freelist;
+    freelist = chunk;
+  } else {
+    tor_free(chunk);
+  }
+}
+
+/** Allocate and return new memarea. */
memarea_t *
-memarea_new(size_t chunk_size)
+memarea_new(size_t chunk_size)/*XXXX021 remove this argument.*/
{
memarea_t *head = tor_malloc(sizeof(memarea_t));
head->first = alloc_chunk(chunk_size);
- head->chunk_size = chunk_size;
+ (void)chunk_size;
return head;
}
@@ -90,7 +116,7 @@ memarea_drop_all(memarea_t *area)
memarea_chunk_t *chunk, *next;
for (chunk = area->first; chunk; chunk = next) {
next = chunk->next_chunk;
- tor_free(chunk);
+ chunk_free(chunk);
}
area->first = NULL; /*fail fast on */
tor_free(area);
@@ -106,13 +132,26 @@ memarea_clear(memarea_t *area)
if (area->first->next_chunk) {
for (chunk = area->first->next_chunk; chunk; chunk = next) {
next = chunk->next_chunk;
- tor_free(chunk);
+ chunk_free(chunk);
}
area->first->next_chunk = NULL;
}
area->first->next_mem = area->first->u.mem;
}
+/** Remove all chunks from the internal freelist and release their memory
+ * back to the heap, leaving the freelist empty.  (Chunks still owned by
+ * live memarea_t objects are unaffected.) */
+void
+memarea_clear_freelist(void)
+{
+  memarea_chunk_t *chunk, *next;
+  /* Reset the count first; the list head is cleared after the walk. */
+  freelist_len = 0;
+  for (chunk = freelist; chunk; chunk = next) {
+    next = chunk->next_chunk;
+    tor_free(chunk);
+  }
+  freelist = NULL;
+}
+
/** Return true iff <b>p</b> is in a range that has been returned by an
* allocation from <b>area</b>. */
int