diff --git a/arch/sparc/kernel/bootmem.c b/arch/sparc/kernel/bootmem.c
index b5a9eb07bfde16915ab3b97fa5768fa2c8a124c6..8d622552d44ddd034109035bf9eace82000cf195 100644
--- a/arch/sparc/kernel/bootmem.c
+++ b/arch/sparc/kernel/bootmem.c
@@ -41,7 +41,7 @@ static struct chunk_pool phys_mem_pool;
 
 static void *bootmem_alloc_internal(size_t size)
 {
-#if (CONFIG_SPARC_BOOTMEM_REQUEST_NEW_ON_DEMAND)
+#ifndef CONFIG_SPARC_BOOTMEM_REQUEST_NEW_ON_DEMAND
 	static int blocked;
 
 	if (blocked) {
@@ -94,6 +94,9 @@ void *bootmem_alloc(size_t size)
 {
 	void *ptr;
 
+	if (!size)
+		return NULL;
+
 	ptr = chunk_alloc(&phys_mem_pool, size);
 
 	if (!ptr) {
@@ -247,7 +250,6 @@ void bootmem_init(void)
 		if (base_pfn == sp_banks[i].base_addr)
 			continue;
 
-	
 		pg_node = MEM_PAGE_NODE(node);
 		node++;
 
@@ -259,25 +261,18 @@ void bootmem_init(void)
 
 		/* let's assume we always have enough memory, because if we
	 * don't, there is a serious configuration problem anyway
+		 *
+		 * XXX this should be a function...
 		 */
 
-		(*pg_node) = (struct page_map_node *)
-				bootmem_alloc(sizeof(struct page_map_node));
-
-		(*pg_node)->pool = (struct mm_pool *)
-					bootmem_alloc(sizeof(struct mm_pool));
+		(*pg_node)		      = (struct page_map_node *) bootmem_alloc(sizeof(struct page_map_node));
+		(*pg_node)->pool	      = (struct mm_pool *)       bootmem_alloc(sizeof(struct mm_pool));
 
 		bzero((*pg_node)->pool, sizeof(struct mm_pool));
 
-		(*pg_node)->pool->block_order = (struct list_head *)
-					bootmem_alloc(sizeof(struct list_head)
-						* MM_BLOCK_ORDER_MAX);
-		(*pg_node)->pool->alloc_order = (unsigned char *)
-					bootmem_alloc(MM_INIT_NUM_BLOCKS);
-
-		(*pg_node)->pool->blk_free = (unsigned long *)
-					bootmem_alloc(MM_INIT_LEN_BITMAP
-						* sizeof(unsigned long));
+		(*pg_node)->pool->block_order = (struct list_head *)     bootmem_alloc(sizeof(struct list_head) * (MM_BLOCK_ORDER_MAX + 1));
+		(*pg_node)->pool->alloc_order = (unsigned char *)        bootmem_alloc(MM_INIT_NUM_BLOCKS);
+		(*pg_node)->pool->blk_free    = (unsigned long *)        bootmem_alloc(MM_INIT_LEN_BITMAP * sizeof(unsigned long));
 
 		ret = page_map_add(sp_banks[i].base_addr,
 				sp_banks[i].base_addr + sp_banks[i].num_bytes,
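
Note on the bootmem.c hunks above: they carry three behavioural fixes. The
preprocessor guard around the one-shot "blocked" fallback was inverted (that
path belongs to builds without CONFIG_SPARC_BOOTMEM_REQUEST_NEW_ON_DEMAND),
zero-size requests now return NULL before reaching the chunk allocator, and
block_order is sized MM_BLOCK_ORDER_MAX + 1 because it is indexed by orders
0 through MM_BLOCK_ORDER_MAX inclusive. A minimal standalone sketch of that
last off-by-one follows; ORDER_MAX and the stub list_head are illustrative
stand-ins, not the kernel's definitions.

#include <assert.h>
#include <stdlib.h>

#define ORDER_MAX 8	/* stand-in for MM_BLOCK_ORDER_MAX */

struct list_head { struct list_head *next, *prev; };

int main(void)
{
	/* orders run 0..ORDER_MAX inclusive, i.e. ORDER_MAX + 1 lists */
	struct list_head *block_order =
		malloc(sizeof(struct list_head) * (ORDER_MAX + 1));

	assert(block_order);

	/* had only ORDER_MAX entries been allocated, initialising the
	 * list head of the largest order would write past the array */
	block_order[ORDER_MAX].next = &block_order[ORDER_MAX];
	block_order[ORDER_MAX].prev = &block_order[ORDER_MAX];

	free(block_order);

	return 0;
}
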
diff --git a/kernel/kmem.c b/kernel/kmem.c
index 8a2a3789d4f33840cd5520ae7d55574f571ebe42..46dcf8dd9b58ba6929461e596c7c9e17faa9c31b 100644
--- a/kernel/kmem.c
+++ b/kernel/kmem.c
@@ -10,14 +10,19 @@
 
 #include <list.h>
 #include <kernel/kmem.h>
-#include <kernel/sbrk.h>
 #include <kernel/kernel.h>
 #include <kernel/printk.h>
 
+#ifdef CONFIG_MMU
+#include <kernel/sbrk.h>
+#else
+#include <kernel/bootmem.h>
+#endif /* CONFIG_MMU */
 
-#define WORD_ALIGN(x)	ALIGN((x), sizeof(unsigned long))
 
+#define WORD_ALIGN(x)	ALIGN((x), sizeof(unsigned long))
 
+#ifdef CONFIG_MMU
 struct kmem {
 	void *data;
 	size_t size;
@@ -91,15 +96,31 @@ static void kmem_split(struct kmem *k, size_t size)
 }
 
 
+/**
+ * @brief merge a chunk with its neighbour
+ */
+
+static void kmem_merge(struct kmem *k)
+{
+	k->size = k->size + k->next->size + sizeof(struct kmem);
+
+	k->next = k->next->next;
+
+	if (k->next)
+		k->next->prev = k;
+}
+#endif /* CONFIG_MMU */
+
 
 /**
  * @brief returns the initial kmem chunk
  *
- * @note call this once kernel_sbrk() works
+ * @returns pointer or NULL on error
  */
 
 void *kmem_init(void)
 {
+#ifdef CONFIG_MMU
 	if (likely(_kmem_init))
 		return _kmem_init;
 
@@ -122,22 +143,9 @@ void *kmem_init(void)
 	_kmem_last = _kmem_init;
 
 	return _kmem_init;
-}
-
-
-
-/**
- * @brief merge a chunk with its neighbour
- */
-
-static void kmem_merge(struct kmem *k)
-{
-	k->size = k->size + k->next->size + sizeof(struct kmem);
-
-	k->next = k->next->next;
-
-	if (k->next)
-		k->next->prev = k;
+#else
+	return (void *) -1; /* nothing to set up; return a non-NULL token */
+#endif
 }
 
 
@@ -152,6 +160,7 @@ static void kmem_merge(struct kmem *k)
 
 void *kmalloc(size_t size)
 {
+#ifdef CONFIG_MMU
 	size_t len;
 
 	struct kmem *k_new;
@@ -199,6 +208,9 @@ void *kmalloc(size_t size)
 	_kmem_last = k_new;
 
 	return k_new->data;
+#else
+	return bootmem_alloc(size);
+#endif /* CONFIG_MMU */
 }
 
 
@@ -258,13 +271,15 @@ void *krealloc(void *ptr, size_t size)
 	char *src;
 
 	void *ptr_new;
-	struct kmem *k;
-
 
+#ifdef CONFIG_MMU
+	struct kmem *k;
+#endif /* CONFIG_MMU */
 
 	if (!ptr)
 		return kmalloc(size);
 
+#ifdef CONFIG_MMU
 	if (ptr < kmem_init()) {
 		pr_warning("KMEM: invalid krealloc() of addr %p below lower "
 			   "bound of trackable memory in call from %p\n",
@@ -289,17 +304,22 @@ void *krealloc(void *ptr, size_t size)
 		return NULL;
 	}
 
+#endif /* CONFIG_MMU */
 
 	ptr_new = kmalloc(size);
 
 	if (!ptr_new)
 		return NULL;
 
-
+#ifdef CONFIG_MMU
 	if (k->size > size)
 		len = size;
 	else
 		len = k->size;
+#else
+	/* this WILL copy out of bounds if size exceeds the original allocation */
+	len = size;
+#endif /* CONFIG_MMU */
 
 	src = ptr;
 	dst = ptr_new;
@@ -323,6 +343,7 @@ void *krealloc(void *ptr, size_t size)
 
 void kfree(void *ptr)
 {
+#ifdef CONFIG_MMU
 	struct kmem *k;
 
 
@@ -354,10 +375,9 @@ void kfree(void *ptr)
 
 	k->free = 1;
 
-	if (k->next && k->next->free)
+	if (k->next && k->next->free) {
 		kmem_merge(k);
-
-	if (k->prev->free) {
+	} else if (k->prev->free) {
 		k = k->prev;
 		kmem_merge(k);
 	}
@@ -371,4 +391,7 @@ void kfree(void *ptr)
 	} else {
 		list_add_tail(&k->node, &_kmem_init->node);
 	}
+#else
+	bootmem_free(ptr);
+#endif /* CONFIG_MMU */
 }
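
Note on the kmem.c hunks above: with the CONFIG_MMU split, kmalloc() and
kfree() reduce to bootmem_alloc()/bootmem_free() wrappers on MMU-less builds,
and krealloc() loses the length bookkeeping carried by the struct kmem
header, which is what the new out-of-bounds comment warns about. A standalone
sketch of that hazard, using plain libc stand-ins rather than the kernel's
API (realloc_no_header() is illustrative only):

#include <stdlib.h>
#include <string.h>

/* reallocation without per-block size records, as in the non-MMU
 * krealloc() path */
static void *realloc_no_header(void *old, size_t new_size)
{
	void *new = malloc(new_size);

	if (!new)
		return NULL;

	/* the true size of 'old' is unknown; if new_size exceeds the
	 * original allocation, this read runs out of bounds */
	memcpy(new, old, new_size);

	return new;
}

int main(void)
{
	char *p = calloc(8, 1);
	char *q;

	if (!p)
		return 1;

	/* copying at or below the original size stays in bounds;
	 * growing the allocation would not */
	q = realloc_no_header(p, 8);

	free(q);
	free(p);

	return 0;
}
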
diff --git a/lib/chunk.c b/lib/chunk.c
index d23f883f0ef52c544211baa570d1b507bacd00d2..f661003eab54b31827c7723d0511b638db563519 100644
--- a/lib/chunk.c
+++ b/lib/chunk.c
@@ -23,10 +23,10 @@
  * Note that the address verification in chunk_free() is weak, as it only
 * checks if its start address and size are within the chunk of the parent.
  *
- * @todo add a function chunk_alloc_aligned() that allows aligned allocations 
+ * @todo add a function chunk_alloc_aligned() that allows aligned allocations
  *	 without wasting space. the function just grabs some memory and creates
 *	 a single chunk up to the alignment boundary and adds it to the "full"
- *	 pool, then returns the following (aligned) chunk. 
+ *	 pool, then returns the following (aligned) chunk.
  *
  */
 
@@ -74,7 +74,7 @@ struct chunk {
 
 static inline void *chunk_align(struct chunk_pool *pool, void *p)
 {
-	return (void *) (((unsigned long) p + pool->align) & ~pool->align);
+	return ALIGN_PTR(p, pool->align);
 }
 
 
@@ -108,8 +108,6 @@ static void chunk_setup(struct chunk_pool *pool, struct chunk *c)
 	c->mem = chunk_align(pool, (void *) (c + 1));
 	/* set the allocatable size of the chunk */
 	c->free = ((size_t) c + c->size) - (size_t) c->mem;
-
-	chunk_classify(pool, c);
 }
 
 
@@ -158,10 +156,11 @@ static struct chunk *chunk_grab_new(struct chunk_pool *pool, size_t size)
 	/* we have no references yet */
 	c->refcnt = 0;
 
+	chunk_setup(pool, c);
+
 	/* add new parent to full list by default */
 	list_add_tail(&c->node, &pool->full);
 
-	chunk_setup(pool, c);
 
 
 	return c;
@@ -181,12 +180,12 @@ static struct chunk *chunk_grab_new(struct chunk_pool *pool, size_t size)
  */
 
 static struct chunk *chunk_split(struct chunk_pool *pool,
-				 struct chunk *c, size_t size)
+				 struct chunk *c, size_t alloc_sz)
 {
 	struct chunk *new;
 
 
-	if (c->free < size)
+	if (c->free < alloc_sz)
 		return NULL;
 
 	/* this chunk is now a child of a higher-order chunk */
@@ -195,23 +194,26 @@ static struct chunk *chunk_split(struct chunk_pool *pool,
 	new->child   = NULL;
 	new->sibling = c->child;
 	new->refcnt  = 1;
-	new->free    = 0;
-	new->size    = size;
+	new->size    = alloc_sz;
+
+	chunk_setup(pool, new);
 
 	/* the new node will be in use, add to empty list */
+	new->free = 0;
 	list_add_tail(&new->node, &pool->empty);
 
-	chunk_setup(pool, new);
 
 	/* track the youngest child in the parent */
 	c->child = new;
 	c->refcnt++;
 
 	/* align parent chunk to start of new memory subsegment */
-	c->mem = chunk_align(pool, (void *) ((size_t) c->mem + size));
+	c->mem = chunk_align(pool, (void *) ((size_t) c->mem + alloc_sz));
 
 	/* update free bytes with regard to actual alignment */
-	c->free = ((size_t) c + c->size) - ((size_t) new  + new->size);
+	c->free = ((size_t) c + c->size - (size_t) c->mem);
+
+	chunk_classify(pool, c);
 
 	return new;
 }
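
Note on the chunk_split() hunk above: the child now runs through
chunk_setup() before being published, and the parent's free byte count is
recomputed from its own realigned ->mem rather than from the child's end,
after which chunk_classify() lets a drained parent leave the full list. The
replacement expression, checked in isolation with plain size_t stand-ins for
the chunk fields:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t base = 0x1000;	/* (size_t) c, start of the parent chunk */
	size_t size = 0x400;	/* c->size */
	size_t mem  = 0x1040;	/* c->mem after header and alignment */

	/* carve out a 0x100-byte child: the parent's payload pointer
	 * advances past it (realignment omitted for brevity) */
	mem += 0x100;

	/* free bytes are whatever remains up to the chunk's end, as in
	 * c->free = ((size_t) c + c->size) - (size_t) c->mem */
	assert(base + size - mem == 0x2c0);

	return 0;
}
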
@@ -233,7 +235,6 @@ void *chunk_alloc(struct chunk_pool *pool, size_t size)
 	struct chunk *c = NULL;
 
 	struct chunk *p_elem;
-	struct chunk *p_tmp;
 
 
 
@@ -251,7 +252,7 @@ void *chunk_alloc(struct chunk_pool *pool, size_t size)
 	alloc_sz = (size_t) chunk_align(pool,
 					(void *) (size + sizeof(struct chunk)));
 
-	list_for_each_entry_safe(p_elem, p_tmp, &pool->full, node) {
+	list_for_each_entry(p_elem, &pool->full, node) {
 
 		if (p_elem->free >= alloc_sz) {
 			c = p_elem;
@@ -396,7 +398,7 @@ void chunk_pool_init(struct chunk_pool *pool,
 	INIT_LIST_HEAD(&pool->full);
 	INIT_LIST_HEAD(&pool->empty);
 
-	pool->align = align - 1;
+	pool->align = align;
 
 	pool->alloc = alloc;
 	pool->free  = free;
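
Note on chunk_align(): it used to rely on pool->align holding align - 1, a
mask that the subtraction in chunk_pool_init() quietly prepared; with
ALIGN_PTR() the pool stores the alignment itself and the macro derives the
mask. Both forms reduce to the usual round-up-to-a-power-of-two arithmetic,
sketched below with an illustrative ALIGN_UP (the tree's actual ALIGN_PTR
definition may differ):

#include <assert.h>

/* illustrative round-up; assumes the alignment is a power of two */
#define ALIGN_UP(x, a) \
	(((unsigned long) (x) + ((a) - 1)) & ~((unsigned long) (a) - 1))

int main(void)
{
	/* old form: pool->align held (align - 1) as a ready-made mask */
	unsigned long mask = 8 - 1;

	assert(((13UL + mask) & ~mask) == 16);

	/* new form: the pool stores the alignment, the macro builds
	 * the mask on the fly */
	assert(ALIGN_UP(13, 8) == 16);
	assert(ALIGN_UP(16, 8) == 16);

	return 0;
}
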