[Barrelfish-users] [New release] free() error
Kornilios Kourtis
kornilios.kourtis at inf.ethz.ch
Mon Apr 22 17:35:00 CEST 2013
Hi Georgios,
On Fri, Apr 12, 2013 at 12:54:25PM +0000, Georgios Varisteas wrote:
> Hi Kornilios,
>
> To be honest I didn't actually read the printed stuff so I missed the
> "call stack" portion; I was carried away by being able to finally do my
> test-runs properly.
>
> Here you go, hope it helps:
[snip]
Thanks!
The final warnings should be fixed with the patch below. Unless it leads
to problems in our tests, it will be a part of our next release.
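For context: the warnings come from callers that size the slab buffer by hand as
sizeof(struct cnode_meta) * nslots / 2, which ignores the slab bookkeeping overhead,
so buflen does not match what single_slot_alloc_init_raw() actually needs. The patch
centralizes that computation in a new SINGLE_SLOT_ALLOC_BUFLEN() macro and lets the
slab allocator keep its free-list pointer inside the free block itself instead of in
a separate per-block header. The arithmetic is roughly the following standalone
sketch (the two struct definitions are stand-ins whose only job is to have a sizeof;
the real Barrelfish layouts differ):

/* illustration only -- stand-in structs, real sizes differ */
#include <stdio.h>
#include <stddef.h>

struct slab_head  { void *next, *blocks; unsigned free, total; }; /* stand-in */
struct cnode_meta { unsigned long slot, space; void *next; };     /* stand-in */

#define SLAB_BLOCK_HDRSIZE (sizeof(void *))
#define SLAB_REAL_BLOCKSIZE(blocksize) \
    (((blocksize) > SLAB_BLOCK_HDRSIZE) ? (blocksize) : SLAB_BLOCK_HDRSIZE)
#define SLAB_STATIC_SIZE(nblocks, blocksize) \
    ((nblocks) * SLAB_REAL_BLOCKSIZE(blocksize) + sizeof(struct slab_head))
#define SINGLE_SLOT_ALLOC_BUFLEN(nslots) \
    (SLAB_STATIC_SIZE(nslots / 2, sizeof(struct cnode_meta)))

int main(void)
{
    unsigned nslots = 256;
    /* what callers used to allocate by hand ... */
    size_t old_guess = sizeof(struct cnode_meta) * nslots / 2;
    /* ... versus what the slab really needs to hold nslots/2 blocks */
    size_t proper = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    printf("old guess = %zu bytes, proper = %zu bytes\n", old_guess, proper);
    return 0;
}
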
diff --git a/include/barrelfish/core_state.h b/include/barrelfish/core_state.h
index 6072471..a766a8f 100644
--- a/include/barrelfish/core_state.h
+++ b/include/barrelfish/core_state.h
@@ -72,12 +72,12 @@ struct slot_alloc_state {
struct slot_allocator_list head;
struct slot_allocator_list reserve;
- struct cnode_meta top_buf[SLOT_ALLOC_CNODE_SLOTS / 2];
- struct cnode_meta head_buf[SLOT_ALLOC_CNODE_SLOTS / 2];
- struct cnode_meta reserve_buf[SLOT_ALLOC_CNODE_SLOTS / 2];
+ char top_buf[SINGLE_SLOT_ALLOC_BUFLEN(SLOT_ALLOC_CNODE_SLOTS)];
+ char head_buf[SINGLE_SLOT_ALLOC_BUFLEN(SLOT_ALLOC_CNODE_SLOTS)];
+ char reserve_buf[SINGLE_SLOT_ALLOC_BUFLEN(SLOT_ALLOC_CNODE_SLOTS)];
+ char root_buf[SINGLE_SLOT_ALLOC_BUFLEN(DEFAULT_CNODE_SLOTS)];
struct single_slot_allocator rootca;
- struct cnode_meta root_buf[DEFAULT_CNODE_SLOTS / 2];
};
struct terminal_state;
diff --git a/include/barrelfish/slab.h b/include/barrelfish/slab.h
index 2f3ffff..f1aecb8 100644
--- a/include/barrelfish/slab.h
+++ b/include/barrelfish/slab.h
@@ -47,9 +47,15 @@ void slab_free(struct slab_alloc *slabs, void *block);
size_t slab_freecount(struct slab_alloc *slabs);
errval_t slab_default_refill(struct slab_alloc *slabs);
+// size of block header
+#define SLAB_BLOCK_HDRSIZE (sizeof(void *))
+// should be able to fit the header into the block
+#define SLAB_REAL_BLOCKSIZE(blocksize) \
+ (((blocksize) > SLAB_BLOCK_HDRSIZE) ? (blocksize) : SLAB_BLOCK_HDRSIZE)
+
/// Macro to compute the static buffer size required for a given allocation
#define SLAB_STATIC_SIZE(nblocks, blocksize) \
- ((nblocks) * ((blocksize) + sizeof(void *)) + sizeof(struct slab_head))
+ ((nblocks) * SLAB_REAL_BLOCKSIZE(blocksize) + sizeof(struct slab_head))
__END_DECLS
diff --git a/include/barrelfish/slot_alloc.h b/include/barrelfish/slot_alloc.h
index cceea58..da25dd1 100644
--- a/include/barrelfish/slot_alloc.h
+++ b/include/barrelfish/slot_alloc.h
@@ -71,6 +71,10 @@ struct range_slot_allocator {
struct thread_mutex mutex; ///< Mutex for thread safety
};
+// single_slot_alloc_init_raw() requires a specific buflen
+#define SINGLE_SLOT_ALLOC_BUFLEN(nslots) \
+ (SLAB_STATIC_SIZE(nslots / 2, sizeof(struct cnode_meta)))
+
errval_t single_slot_alloc_init(struct single_slot_allocator *ret,
cslot_t nslots, cslot_t *retslots);
errval_t single_slot_alloc_init_raw(struct single_slot_allocator *ret,
diff --git a/lib/barrelfish/slab.c b/lib/barrelfish/slab.c
index a8a9345..1a39385 100644
--- a/lib/barrelfish/slab.c
+++ b/lib/barrelfish/slab.c
@@ -17,12 +17,14 @@
#include <barrelfish/barrelfish.h>
#include <barrelfish/slab.h>
+#include <barrelfish/static_assert.h>
struct block_head {
struct block_head *next;///< Pointer to next block in free list
- char buf[0]; ///< Start of actual block data
};
+STATIC_ASSERT_SIZEOF(struct block_head, SLAB_BLOCK_HDRSIZE);
+
/**
* \brief Initialise a new slab allocator
*
@@ -34,7 +36,7 @@ void slab_init(struct slab_alloc *slabs, size_t blocksize,
slab_refill_func_t refill_func)
{
slabs->slabs = NULL;
- slabs->blocksize = blocksize;
+ slabs->blocksize = SLAB_REAL_BLOCKSIZE(blocksize);
slabs->refill_func = refill_func;
}
@@ -55,15 +57,15 @@ void slab_grow(struct slab_alloc *slabs, void *buf, size_t buflen)
buf = (char *)buf + sizeof(struct slab_head);
/* calculate number of blocks in buffer */
- size_t headed_blocksize = slabs->blocksize + sizeof(struct block_head);
- assert(buflen / headed_blocksize <= UINT32_MAX);
- head->free = head->total = buflen / headed_blocksize;
+ size_t blocksize = slabs->blocksize;
+ assert(buflen / blocksize <= UINT32_MAX);
+ head->free = head->total = buflen / blocksize;
assert(head->total > 0);
/* enqueue blocks in freelist */
struct block_head *bh = head->blocks = buf;
for (uint32_t i = head->total; i > 1; i--) {
- buf = (char *)buf + headed_blocksize;
+ buf = (char *)buf + blocksize;
bh->next = buf;
bh = buf;
}
@@ -111,7 +113,7 @@ void *slab_alloc(struct slab_alloc *slabs)
sh->blocks = bh->next;
sh->free--;
- return &bh->buf[0];
+ return bh;
}
/**
@@ -126,15 +128,15 @@ void slab_free(struct slab_alloc *slabs, void *block)
return;
}
- struct block_head *bh = (struct block_head *)block - 1;
+ struct block_head *bh = (struct block_head *)block;
/* find matching slab */
struct slab_head *sh;
- size_t headed_blocksize = slabs->blocksize + sizeof(struct block_head);
+ size_t blocksize = slabs->blocksize;
for (sh = slabs->slabs; sh != NULL; sh = sh->next) {
/* check if block falls inside this slab */
uintptr_t slab_limit = (uintptr_t)sh + sizeof(struct slab_head)
- + headed_blocksize * sh->total;
+ + blocksize * sh->total;
if ((uintptr_t)bh > (uintptr_t)sh && (uintptr_t)bh < slab_limit) {
break;
}
diff --git a/lib/barrelfish/slot_alloc/multi_slot_alloc.c b/lib/barrelfish/slot_alloc/multi_slot_alloc.c
index a12ab48..b6e264d 100644
--- a/lib/barrelfish/slot_alloc/multi_slot_alloc.c
+++ b/lib/barrelfish/slot_alloc/multi_slot_alloc.c
@@ -239,7 +239,7 @@ errval_t multi_slot_alloc_init_raw(struct multi_slot_allocator *ret,
/* Slab */
size_t allocation_unit = sizeof(struct slot_allocator_list) +
- sizeof(struct cnode_meta) * nslots / 2;
+ SINGLE_SLOT_ALLOC_BUFLEN(nslots);
slab_init(&ret->slab, allocation_unit, NULL);
return SYS_ERR_OK;
@@ -253,7 +253,7 @@ errval_t multi_slot_alloc_init(struct multi_slot_allocator *ret,
{
errval_t err;
nslots = ROUND_UP(nslots, DEFAULT_CNODE_SLOTS);
- size_t bufsize = sizeof(struct cnode_meta) * nslots / 2; // XXX: ?
+ size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(nslots); // XXX?
ret->top = malloc(sizeof(struct single_slot_allocator));
if (!ret->top) {
diff --git a/lib/barrelfish/slot_alloc/single_slot_alloc.c b/lib/barrelfish/slot_alloc/single_slot_alloc.c
index 56e4d70..c4dd033 100644
--- a/lib/barrelfish/slot_alloc/single_slot_alloc.c
+++ b/lib/barrelfish/slot_alloc/single_slot_alloc.c
@@ -92,6 +92,15 @@ static errval_t sfree(struct slot_allocator *ca, struct capref cap)
// Freeing at the edge of walk
if (cap.slot == walk->slot + walk->space) {
walk->space++;
+
+ // check if we can merge walk to next
+ struct cnode_meta *next = walk->next;
+ if (next && next->slot == walk->slot + walk->space) {
+ walk->space += next->space;
+ walk->next = next->next;
+ slab_free(&sca->slab, next);
+ }
+
goto finish;
}
else if (cap.slot < walk->slot + walk->space) {
@@ -99,6 +108,13 @@ static errval_t sfree(struct slot_allocator *ca, struct capref cap)
goto unlock;
}
+ // Freeing just before walk->next
+ if (walk->next && cap.slot + 1 == walk->next->slot) {
+ walk->next->slot = cap.slot;
+ walk->next->space++;
+ goto finish;
+ }
+
// Freeing after walk and before walk->next
if (walk->next && cap.slot < walk->next->slot) {
struct cnode_meta *new = walk->next;
@@ -143,6 +159,17 @@ errval_t single_slot_alloc_init_raw(struct single_slot_allocator *ret,
slab_init(&ret->slab, sizeof(struct cnode_meta), NULL);
if (buflen > 0) {
+ // check for callers that do not provide enough buffer space
+ #if !defined(NDEBUG)
+ size_t buflen_proper = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
+ if (buflen != buflen_proper) {
+ debug_printf("******* FIXME: %s buflen=%lu != buflen_proper=%lu "
+ "call stack: %p %p\n",
+ __FUNCTION__, buflen, buflen_proper,
+ __builtin_return_address(0),
+ __builtin_return_address(1));
+ }
+ #endif
slab_grow(&ret->slab, buf, buflen);
}
@@ -166,7 +193,7 @@ errval_t single_slot_alloc_init(struct single_slot_allocator *ret,
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
- size_t buflen = sizeof(struct cnode_meta) * nslots / 2; // worst case
+ size_t buflen = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
void *buf = malloc(buflen);
if (!buf) {
return LIB_ERR_MALLOC_FAIL;
diff --git a/lib/barrelfish/slot_alloc/slot_alloc.c b/lib/barrelfish/slot_alloc/slot_alloc.c
index 44e0cf2..1b683e0 100644
--- a/lib/barrelfish/slot_alloc/slot_alloc.c
+++ b/lib/barrelfish/slot_alloc/slot_alloc.c
@@ -161,7 +161,7 @@ errval_t slot_alloc_init(void)
// Slab
size_t allocation_unit = sizeof(struct slot_allocator_list) +
- sizeof(struct cnode_meta) * SLOT_ALLOC_CNODE_SLOTS / 2;
+ SINGLE_SLOT_ALLOC_BUFLEN(SLOT_ALLOC_CNODE_SLOTS);
slab_init(&def->slab, allocation_unit, NULL);
// Vspace mgmt
diff --git a/lib/spawndomain/spawn.c b/lib/spawndomain/spawn.c
index 2e9ea56..6a549c5 100644
--- a/lib/spawndomain/spawn.c
+++ b/lib/spawndomain/spawn.c
@@ -167,7 +167,7 @@ static errval_t spawn_setup_vspace(struct spawninfo *si)
/* Init pagecn's slot allocator */
// XXX: satisfy a peculiarity of the single_slot_alloc_init_raw API
- size_t bufsize = sizeof(struct cnode_meta) * PAGE_CNODE_SLOTS / 2;
+ size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(PAGE_CNODE_SLOTS);
void *buf = malloc(bufsize);
assert(buf != NULL);
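
The slab change itself is small but worth spelling out: a free block now holds the
free-list 'next' pointer in its own first bytes, so there is no separate block_head
in front of every block any more, and each block costs
SLAB_REAL_BLOCKSIZE(blocksize) = max(blocksize, sizeof(void *)) bytes instead of
blocksize + sizeof(void *). A stripped-down standalone sketch of that layout (toy
code, not the lib/barrelfish implementation):

#include <assert.h>
#include <stddef.h>

struct block_head { struct block_head *next; };   /* lives inside a free block */

struct mini_slab {
    struct block_head *freelist;
    size_t blocksize;      /* already clamped to >= sizeof(struct block_head) */
};

static void mini_grow(struct mini_slab *s, char *buf, size_t buflen)
{
    /* carve the buffer into blocks and push each one onto the free list */
    for (size_t off = 0; off + s->blocksize <= buflen; off += s->blocksize) {
        struct block_head *bh = (struct block_head *)(buf + off);
        bh->next = s->freelist;
        s->freelist = bh;
    }
}

static void *mini_alloc(struct mini_slab *s)
{
    struct block_head *bh = s->freelist;
    if (bh) s->freelist = bh->next;    /* pop the head; hand out the block itself */
    return bh;
}

static void mini_free(struct mini_slab *s, void *block)
{
    struct block_head *bh = block;     /* reuse the block's memory for the link */
    bh->next = s->freelist;
    s->freelist = bh;
}

int main(void)
{
    static _Alignas(void *) char buf[4 * 32];
    struct mini_slab s = { NULL, 32 };
    mini_grow(&s, buf, sizeof(buf));
    void *a = mini_alloc(&s), *b = mini_alloc(&s);
    assert(a && b && a != b);
    mini_free(&s, a);
    assert(mini_alloc(&s) == a);       /* a freed block is handed out again */
    return 0;
}

The sfree() hunks are independent of this: they merge a freed slot into the adjacent
cnode_meta ranges (the range it extends and the one starting right after it), so the
free list no longer fragments into single-slot entries and the redundant cnode_meta
block is returned to the slab.
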
cheers,
Kornilios.
--
Kornilios Kourtis