diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 49e3e6cfebed0bfc66402e7163fb80c8e534a8c5..9a0a4aa017699bacd0400439d2e3d6db8b92fd25 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -16,13 +16,12 @@ config EXTRA_SPARC_PHYS_BANKS
 	help
 	 Set number of additional physical memory banks if the machine has more
 	 than one.
-endmenu
 
 menu "Memory Management Settings"
 	depends on MM
 
-config MM_BLOCK_ORDER_MAX
-	int "Memory block order upper limit"
+config SPARC_MM_BLOCK_ORDER_MAX
+	int "Initial memory block order upper limit"
 	depends on MM
 	default 26
 	range 6 32
@@ -32,8 +31,8 @@ config MM_BLOCK_ORDER_MAX
 	  memory bank.
-	  Allowed order range is 6 to 31.
+	  Allowed order range is 6 to 32.
 
-config MM_BLOCK_ORDER_MIN
-	int "Memory block order lower limit"
+config SPARC_MM_BLOCK_ORDER_MIN
+	int "Initial memory block order lower limit"
 	depends on MM
 	default 12
 	range 5 30
@@ -42,10 +41,22 @@ config MM_BLOCK_ORDER_MIN
 	  memory block. If unsure, choose the page size of the platform.
 	  Allowed order range is 5 to 30.
 
+config SPARC_INIT_PAGE_MAP_MAX_ENTRIES
+	int "Maximum number of mappings in initial page map"
+	depends on MM
+	default 8
+	range EXTRA_SPARC_PHYS_BANKS 32
+	help
+	  Configures the storage space for the initial page map. Since each
+	  memory bank needs its own mapping, say at least
+	  EXTRA_SPARC_PHYS_BANKS here.
+
 endmenu
 
 
 
+endmenu
+
+
 # other
 
 source "init/Kconfig"
diff --git a/arch/sparc/include/mm.h b/arch/sparc/include/mm.h
index b82d241c4af7f68e701a81379c4e6a0892acb7b7..1fa7d116e2ec48bdcd0e7e92bfea57fd8c510f36 100644
--- a/arch/sparc/include/mm.h
+++ b/arch/sparc/include/mm.h
@@ -6,6 +6,7 @@
 #define _SPARC_MM_H_
 
 #include <kernel/mm.h>
+#include <kernel/page.h>
 
 /* The following structure is used to hold the physical
  * memory configuration of the machine.  This is filled in
@@ -26,13 +27,13 @@ struct sparc_physical_banks {
 #define SPARC_PHYS_BANKS 0
 #endif
 
-#define MEM_PAGE_NODE(x)	(&page_mem[(x)])
+extern struct sparc_physical_banks sp_banks[SPARC_PHYS_BANKS + 1];
 
-extern struct sparc_physical_banks sp_banks[SPARC_PHYS_BANKS+1];
 
+/* linker symbol marking the start of the program (image) */
+extern char start[];
-/* linker symbol marking the the end of the program */
+/* linker symbol marking the end of the program */
-extern char _end[];
-
+extern char end[];
 
 
 /* The default configuration allows for at most 32 MiB sized blocks
@@ -40,30 +41,34 @@ extern char _end[];
  * all blocks.
  */
 
-#if defined(CONFIG_MM_BLOCK_ORDER_MAX)
-#define MM_BLOCK_ORDER_MAX CONFIG_MM_BLOCK_ORDER_MAX
+#if defined(CONFIG_SPARC_MM_BLOCK_ORDER_MAX)
+#define MM_BLOCK_ORDER_MAX CONFIG_SPARC_MM_BLOCK_ORDER_MAX
 #else
 #define MM_BLOCK_ORDER_MAX 26
 #endif
 
-#if defined(CONFIG_MM_BLOCK_ORDER_MIN)
-#define MM_BLOCK_ORDER_MIN CONFIG_MM_BLOCK_ORDER_MIN
+#if defined(CONFIG_SPARC_MM_BLOCK_ORDER_MIN)
+#define MM_BLOCK_ORDER_MIN CONFIG_SPARC_MM_BLOCK_ORDER_MIN
 #else
-#define MM_BLOCK_ORDER_MIN 12
+#define MM_BLOCK_ORDER_MIN 12 /* SRMMU page size */
 #endif
 
 compile_time_assert(MM_BLOCK_ORDER_MIN < MM_BLOCK_ORDER_MAX,\
 		    MM_BLOCK_LIMITS_INVALID);
 
 
-extern
-unsigned long mm_init_bitmap[MM_BITMAP_LEN(MM_BLOCK_ORDER_MAX,
-					   MM_BLOCK_ORDER_MIN)];
-extern
-unsigned char mm_init_alloc_order[MM_NUM_BLOCKS_TRACKABLE(MM_BLOCK_ORDER_MAX,
-							  MM_BLOCK_ORDER_MIN)];
-extern
-struct list_head mm_init_block_order[(MM_BLOCK_ORDER_MAX + 1)];
+#define MM_INIT_NUM_BLOCKS MM_NUM_BLOCKS_TRACKABLE(MM_BLOCK_ORDER_MAX,\
+						   MM_BLOCK_ORDER_MIN)
+
+#define MM_INIT_LEN_BITMAP MM_BITMAP_LEN(MM_BLOCK_ORDER_MAX,\
+					 MM_BLOCK_ORDER_MIN)
+
+/* initial block tracking bitmap */
+extern unsigned long mm_init_bitmap[MM_INIT_LEN_BITMAP];
+/* initial block allocation size tracking */
+extern unsigned char mm_init_alloc_order[MM_INIT_NUM_BLOCKS];
+/* initial block order anchor */
+extern struct list_head mm_init_block_order[MM_BLOCK_ORDER_MAX + 1];
 
 
 
diff --git a/arch/sparc/include/page.h b/arch/sparc/include/page.h
index c2296ed91b82ec1543b2b9a8e90e488c4db24271..255bff4e2d4de847c98178d18c27975c952f94a6 100644
--- a/arch/sparc/include/page.h
+++ b/arch/sparc/include/page.h
@@ -4,10 +4,12 @@
  * @brief page memory definitions for MMU operation
  */
 
-#ifndef _SPARC_PAGE_H_ 
-#define _SPARC_PAGE_H_ 
+#ifndef _SPARC_PAGE_H_
+#define _SPARC_PAGE_H_
 
 #include <kernel/kernel.h>
+#include <kernel/page.h>
+#include <kernel/mm.h>
 
 #include <mm.h>
 
@@ -38,17 +40,34 @@ extern unsigned long pfn_base;
 #define __va(x)		(x)
 #endif /* CONFIG_PAGE_OFFSET */
 
-struct pg_data {
-	int x;
-};
 
+#if defined(CONFIG_SPARC_INIT_PAGE_MAP_MAX_ENTRIES)
+#define INIT_PAGE_MAP_MAX_ENTRIES CONFIG_SPARC_INIT_PAGE_MAP_MAX_ENTRIES
+#else
+#define INIT_PAGE_MAP_MAX_ENTRIES 1
+#endif
+
+extern struct mm_pool  mm_init_page_pool;
+extern struct page_map_node  mm_init_page_node;
+extern struct page_map_node *mm_init_page_map[INIT_PAGE_MAP_MAX_ENTRIES + 1];
+
+#define MEM_PAGE_MAP		(mm_init_page_map)
+#define MEM_PAGE_NODE(x)	(((x) < INIT_PAGE_MAP_MAX_ENTRIES) ? \
+				 &MEM_PAGE_MAP[(x)] : NULL)
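+
+/* usage sketch: MEM_PAGE_NODE() yields a pointer to a map slot, or NULL
+ * once the index would reach the map's NULL terminator slot:
+ *
+ *	struct page_map_node **pg_node = MEM_PAGE_NODE(1);
+ *
+ *	if (!pg_node)
+ *		... out of map slots ...
+ */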
+
+int page_map_init(struct page_map_node **pg,
+		  unsigned long start, unsigned long end,
+		  unsigned long page_size);
+
+void page_map_set_map(struct page_map_node **pg);
 
-extern struct pg_data page_mem[SPARC_PHYS_BANKS+1];
+int page_map_add(unsigned long start, unsigned long end,
+		 unsigned long page_size);
 
+void *page_map_reserve_chunk(size_t size);
 
-unsigned long init_page_map(struct pg_data *pg,
-			    unsigned long start_pfn,
-			    unsigned long end_pfn);
+void *page_alloc(void);
+void page_free(void *page);
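+
+/* boot-time call sequence, a sketch (cf. bootmem_init(); mem_start, mem_end
+ * and image_bytes are placeholder names):
+ *
+ *	page_map_init(MEM_PAGE_MAP, mem_start, mem_end, PAGE_SIZE);
+ *	page_map_reserve_chunk(image_bytes);
+ *	p = page_alloc();
+ *	...
+ *	page_free(p);
+ */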
 
 
 #endif /*_SPARC_PAGE_H_*/
diff --git a/arch/sparc/kernel/bootmem.c b/arch/sparc/kernel/bootmem.c
index 28e0a5155a25558d3695fa641af714de47769a5e..688c30bc416785e2fb3615cc2e6540e69de01ceb 100644
--- a/arch/sparc/kernel/bootmem.c
+++ b/arch/sparc/kernel/bootmem.c
@@ -4,27 +4,50 @@
 
 #include <page.h>
 
+#include <string.h>
+
 #include <kernel/printk.h>
+#include <kernel/kernel.h>
 
 
+/* TODO still demo code */
 void bootmem_init(void)
 {
 	int i;
 
+	unsigned long base_pfn;
 	unsigned long start_pfn;
+	unsigned long start_img_pfn;
 	unsigned long end_pfn = 0UL;
+	unsigned long mem_size;
+
+	void *pages[2048];
+
+	int t = 0;
+
+	struct page_map_node **pg_node;
+
+
+	pr_info("BOOTMEM: start of program image at %p\n", start);
+	pr_info("BOOTMEM:   end of program image at %p\n", end);
 
 
-	pr_notice("End of program at: %lx\n", (unsigned long) _end);
+	/* The lowest page frame number coincides with the page aligned
+	 * address of the start symbol in the image, which hopefully
+	 * coincides with the start of the RAM we are running from.
+	 */
+	start_img_pfn  = (unsigned long) PAGE_ALIGN((unsigned long) &start);
 
 	/* start allocatable memory with page aligned address of last symbol in
-	 * image
+	 * image, everything before it will be reserved
 	 */
-	start_pfn  = (unsigned long) PAGE_ALIGN((unsigned long) &_end);
+	start_pfn  = (unsigned long) PAGE_ALIGN((unsigned long) &end);
 
-	/* locate the memory bank we're in and start the mapping from
-	 * the first free page after the image.
+	/* locate the memory bank we're in
 	 */
+
+	base_pfn = start_img_pfn;
+
 	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
 
 		if (start_pfn < sp_banks[i].base_addr)
@@ -32,9 +55,19 @@ void bootmem_init(void)
 
 		end_pfn = sp_banks[i].base_addr + sp_banks[i].num_bytes;
 
-		if (start_pfn < end_pfn)
+		if (start_pfn < end_pfn) {
+			if (start_img_pfn != sp_banks[i].base_addr) {
+				pr_warn("BOOTMEM: image start (0x%lx) does not "
+					"coincide with start of memory "
+					"bank (0x%lx), using start of bank.\n",
+					start_img_pfn, sp_banks[i].base_addr);
+
+				base_pfn = sp_banks[i].base_addr;
+			}
+
 			break;
-		
+		}
+
 		end_pfn = 0UL;
 	}
 
@@ -47,22 +80,131 @@ void bootmem_init(void)
 	 */
 
 	start_pfn = (unsigned long) __pa(start_pfn);
-	
-	/* Now shift down to get the real physical page frame number. */
-	start_pfn >>= PAGE_SHIFT;
-	
+	/* start_pfn = PHYS_PFN(start_pfn); */
+
 	end_pfn = (unsigned long) __pa(end_pfn);
+	/* end_pfn = PHYS_PFN(end_pfn); */
+
+	pr_info("BOOTMEM: start page frame number: 0x%lx\n"
+		"BOOTMEM:   end page frame number: 0x%lx\n",
+	       start_pfn, end_pfn);
+
+
+	/* The initial page map and its bookkeeping are statically allocated
+	 * in the image (see kernel/page.c). We add the first pool mapping
+	 * here; everything after that has to be allocated on top of it.
+	 */
+
+	pg_node = MEM_PAGE_NODE(0);
+
+	(*pg_node) = &mm_init_page_node;
+
+	(*pg_node)->pool              = &mm_init_page_pool;
+	(*pg_node)->pool->block_order = &mm_init_block_order[0];
+	(*pg_node)->pool->alloc_order = &mm_init_alloc_order[0];
+	(*pg_node)->pool->blk_free    = &mm_init_bitmap[0];
+
+
+	mem_size = end_pfn - base_pfn;
+
+	/* we are misconfigured */
+	BUG_ON(mem_size  > (1UL << MM_BLOCK_ORDER_MAX));
+	BUG_ON(PAGE_SIZE < (1UL << MM_BLOCK_ORDER_MIN));
+
+	BUG_ON(page_map_init(mm_init_page_map, base_pfn, end_pfn, PAGE_SIZE));
+
+
+	/* reserve all space up to the end of the image, so the mapping starts
+	 * from the first free page following that section
+	 */
+	BUG_ON(!page_map_reserve_chunk(start_pfn - base_pfn));
+
+
+	mm_dump_stats((*pg_node)->pool);
+
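+	/* demo: pull a few pages from the initial pool and compare the stats */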
+	page_alloc();
+	page_alloc();
+	page_alloc();
+	page_alloc();
+	page_alloc();
+
+	mm_dump_stats((*pg_node)->pool);
+
+
+
+	pg_node = MEM_PAGE_NODE(1);
+
+	BUG_ON(!pg_node);
+
+	(*pg_node)		      = page_map_reserve_chunk(
+					sizeof(struct page_map_node));
+	BUG_ON(!(*pg_node));
+
+	(*pg_node)->pool	      = page_map_reserve_chunk(
+					sizeof(struct mm_pool));
+	BUG_ON(!(*pg_node)->pool);
+
+	bzero((*pg_node)->pool, sizeof(struct mm_pool));
+
+	(*pg_node)->pool->block_order = page_map_reserve_chunk(
+					sizeof(struct list_head) *
+					(MM_BLOCK_ORDER_MAX + 1));
+	(*pg_node)->pool->alloc_order = page_map_reserve_chunk(
+					MM_INIT_NUM_BLOCKS);
+	(*pg_node)->pool->blk_free    = page_map_reserve_chunk(
+					MM_INIT_LEN_BITMAP *
+					sizeof(unsigned long));
+
+
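+	/* demo: reserve a 16 KiB chunk from the existing pools and register
+	 * it with the mapper as the backing store of this new node
+	 */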
+	base_pfn = (unsigned long) page_map_reserve_chunk(1024*4*4);
+	page_map_add(base_pfn, base_pfn + 1024*4*4, PAGE_SIZE);
+
+
+
+	pg_node = MEM_PAGE_NODE(2);
+	BUG_ON(!pg_node);
+
+	(*pg_node)		      = page_map_reserve_chunk(
+					sizeof(struct page_map_node));
+	BUG_ON(!(*pg_node));
+
+	(*pg_node)->pool	      = page_map_reserve_chunk(
+					sizeof(struct mm_pool));
+	BUG_ON(!(*pg_node)->pool);
+
+	bzero((*pg_node)->pool, sizeof(struct mm_pool));
+	(*pg_node)->pool->block_order = page_map_reserve_chunk(
+					sizeof(struct list_head) *
+					(MM_BLOCK_ORDER_MAX + 1));
+	(*pg_node)->pool->alloc_order = page_map_reserve_chunk(
+					MM_INIT_NUM_BLOCKS);
+	(*pg_node)->pool->blk_free    = page_map_reserve_chunk(
+					MM_INIT_LEN_BITMAP *
+					sizeof(unsigned long));
+
 
-	end_pfn = end_pfn >> PAGE_SHIFT;
-	
-	pr_notice("start_pfn: %lx\n", start_pfn);
-	pr_notice("end_pfn:   %lx\n", end_pfn);
+	base_pfn = (unsigned long) page_map_reserve_chunk(1024*4*4);
+	page_map_add(base_pfn, base_pfn + 1024*4*4, PAGE_SIZE);
 
 
-	init_page_map(MEM_PAGE_NODE(0), start_pfn, end_pfn);
+	/* demo: allocate pages until the pools run dry */
+	while (t < 1749)
+		pages[t++] = page_alloc();
 
+	/* NULL */
+	pages[t++] = page_alloc();
 
+	page_free(pages[--t]);
+	page_free(pages[--t]);
+	page_free(pages[--t]);
+	page_free(pages[--t]);
+	page_free(pages[--t]);
+	page_free(pages[--t]);
 
 
 }
diff --git a/arch/sparc/kernel/mm.c b/arch/sparc/kernel/mm.c
index 167785a334c2a84b60f91b32c067ba9ad2dfbce1..42d5a9f96a3e8d3fdb52cab0d4b490c3c4e15a8b 100644
--- a/arch/sparc/kernel/mm.c
+++ b/arch/sparc/kernel/mm.c
@@ -3,22 +3,14 @@
  */
 
 #include <mm.h>
-#include <mm.h>
-
-
-unsigned long phys_base;
-unsigned long pfn_base;
 
 
-#define SPARC_MM_INIT_NUM_BLOCKS MM_NUM_BLOCKS_TRACKABLE(MM_BLOCK_ORDER_MAX,\
-							 MM_BLOCK_ORDER_MIN)
-
-#define SPARC_MM_INIT_LEN_BITMAP MM_BITMAP_LEN(MM_BLOCK_ORDER_MAX,\
-					       MM_BLOCK_ORDER_MIN)
 
+/* things we need statically allocated in the image (i.e. in .bss)
+ * at boot
+ */
 
-struct sparc_physical_banks sp_banks[SPARC_PHYS_BANKS+1];
+unsigned long phys_base;
+unsigned long pfn_base;
 
-unsigned long mm_init_bitmap[SPARC_MM_INIT_LEN_BITMAP];
-unsigned char mm_init_alloc_order[SPARC_MM_INIT_NUM_BLOCKS];
-struct list_head mm_init_block_order[MM_BLOCK_ORDER_MAX + 1];
+struct sparc_physical_banks sp_banks[SPARC_PHYS_BANKS + 1];
diff --git a/arch/sparc/kernel/page.c b/arch/sparc/kernel/page.c
index 4dfd728791b07589d2388529cc63e0e8ebb809b2..47c28333535c6d497560b27bf6e4375dcaf5dad7 100644
--- a/arch/sparc/kernel/page.c
+++ b/arch/sparc/kernel/page.c
@@ -4,33 +4,17 @@
 
 #include <page.h>
 
-struct pg_data page_mem[SPARC_PHYS_BANKS+1];
 
 
+/* things we need statically allocated in the image (i.e. in .bss)
+ * at boot
+ */
 
-unsigned long init_page_map(struct pg_data *pg,
-			    unsigned long start_pfn,
-			    unsigned long end_pfn)
-{
-	unsigned long mapsize = 0;
-#if 0
-	pg->bdata->node_map_mem = __va(PFN_PHYS(start_pfn));
-	pg->bdata->node_mem_map = __va(PFN_PHYS(start_pfn));
-	pg->bdata->node_min_pfn = start_pfn;
-	pg->bdata->node_low_pfn = end_pfn;
-	link_bootmem(pg->bdata);
-
-	/*
-	 * Initially all pages are reserved - setup_arch() has to
-	 * register free RAM areas explicitly.
-	 */
-	mapsize = bootmap_bytes(end - start);
-	memset(bdata->node_bootmem_map, 0xff, mapsize);
-
-	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
-		bdata - bootmem_node_data, start, mapstart, end, mapsize);
+unsigned long mm_init_bitmap[MM_INIT_LEN_BITMAP];
+unsigned char mm_init_alloc_order[MM_INIT_NUM_BLOCKS];
+struct list_head mm_init_block_order[MM_BLOCK_ORDER_MAX + 1];
 
-#endif
-	return mapsize;
-}
+struct mm_pool  mm_init_page_pool;
+struct page_map_node  mm_init_page_node;
+struct page_map_node *mm_init_page_map[INIT_PAGE_MAP_MAX_ENTRIES + 1];
 
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index ca335e448faa80e14f001049d8f2e86fc0a2297a..3787f6039865e7d582e58c9faecd170f107fd053 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -11,7 +11,7 @@
 
 /**
  * @brief configure available memory banks
- * 
+ *
  * TODO the memory layout should either be presented in a separate
  *	board configuration file or, preferably, be derived from an AMBA
  *	bus scan.
@@ -21,10 +21,10 @@ static void mem_init(void)
 {
-	memset(&sp_banks, 0x0, ARRAY_SIZE(sp_banks));
+	memset(&sp_banks, 0x0, sizeof(sp_banks));
 
-	sp_banks[0].base_addr = 0x40000000; 
+	sp_banks[0].base_addr = 0x40000000;
 	sp_banks[0].num_bytes = 0x00800000;
-#if 0	
-	sp_banks[1].base_addr = 0x60000000; 
+#if 0
+	sp_banks[1].base_addr = 0x60000000;
 	sp_banks[1].num_bytes = 0x04000000;
 #endif
 }
diff --git a/include/kernel/mm.h b/include/kernel/mm.h
index 9fa53064a083b98984a1633fca216a1d0b213554..54284daf118f4f2e25769724fef355a61e68bd9f 100644
--- a/include/kernel/mm.h
+++ b/include/kernel/mm.h
@@ -6,6 +6,7 @@
 #define _KERNEL_MM_H_
 
 #include <stddef.h>
+#include <stdbool.h>
 
 #include <list.h>
 #include <compiler.h>
@@ -23,6 +24,7 @@ struct mm_pool {
 	unsigned long    max_order;	/** maximum order (i.e. pool size)  */
 	unsigned long    min_order;	/** block granularity		    */
 	unsigned long    n_blks;	/** number of managed blocks	    */
+	unsigned long    alloc_blks;	/** number of allocated blocks	    */
 	unsigned char    *alloc_order;	/** the allocated order of a block  */
 	unsigned long    *blk_free;	/** per-block allocation bitmap	    */
 	struct list_head *block_order;  /** anchor for unused blocks	    */
@@ -33,6 +35,11 @@ void *mm_alloc(struct mm_pool *mp, size_t size);
 
 void mm_free(struct mm_pool *mp, const void *addr);
 
+unsigned long mm_unallocated_blocks(struct mm_pool *mp);
+unsigned long mm_allocated_blocks(struct mm_pool *mp);
+
+bool mm_addr_in_pool(struct mm_pool *mp, void *addr);
+
 int mm_init(struct mm_pool *mp, void *base,
 	    size_t pool_size, size_t granularity);
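+
+/* a minimal usage sketch (demo addresses as in arch/sparc/kernel/setup.c;
+ * the pool's alloc_order, blk_free and block_order storage must be
+ * configured before the call):
+ *
+ *	static struct mm_pool pool;
+ *
+ *	mm_init(&pool, (void *) 0x40000000, 0x00800000, 4096);
+ *	p = mm_alloc(&pool, 4096);
+ *	mm_free(&pool, p);
+ */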
 
diff --git a/lib/Kconfig b/lib/Kconfig
index e7a894d1ad8afdd4f6a140a7691d835c07572ff2..1fbbde01e989968accfe7c9d9ad15f7571fe3e33 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -48,7 +48,38 @@ config MM_DEBUG_DUMP_BLOCK_STATS
 	default n
 	help
 	  Dump the allocation statistics for each block order.
+endmenu
+
+
+config PAGE_MAP
+	bool "Page Map"
+	depends on MM
+	default y
+	help
+	  Enable page map management. A page map can hold multiple nodes and
+	  returns references to pages as configured for each node.
 
+menu "Page Map Options"
+	depends on PAGE_MAP
+
+config PAGE_MAP_CHECK_PAGE_ALIGNMENT
+	depends on PAGE_MAP
+	bool "Extra check for alignment in page address"
+	default n
+	help
+	  Make sure the address of the allocated page is really aligned
+	  to the page size of the underlying memory manager. This does not
+	  prevent misconfiguration, just adds a functional verification step.
+
+config PAGE_MAP_MOVE_NODE_AVAIL_THRESH
+	depends on PAGE_MAP
+	int "Threshold for empty page node to be considered filled again"
+	default 1
+	range 1 1024
+	help
+	  When a page map node runs out of pages, it is moved to a list of
+	  empty nodes until the number of pages given by this threshold has
+	  been freed. If unsure, use a threshold of 1.
 
 endmenu
 
diff --git a/lib/Makefile b/lib/Makefile
index c9916c03e523c5326cc1dcb48089d906bd2ec14b..7e73f3a1c25e3eccafaefade80a75d9fd20be178 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -1,2 +1,3 @@
 lib-$(CONFIG_SYSCTL)	+= sysctl.o
 lib-$(CONFIG_MM)	+= mm.o
+lib-$(CONFIG_PAGE_MAP)	+= page.o
diff --git a/lib/mm.c b/lib/mm.c
index 21c17175b2dc0fcfbc71718067642e4a252d08cd..126f8079c0b22859bb4f75a286d68a90fe4da1a8 100644
--- a/lib/mm.c
+++ b/lib/mm.c
@@ -340,7 +340,7 @@ static unsigned long mm_fixup_validate(struct mm_pool *mp,
 
 	if (order < mp->min_order) {
-		pr_info("MM: requested order (%d) smaller than minimum pool"
-			"order (%d) in call from %p.\n",
+		pr_info("MM: requested order (%lu) smaller than minimum pool "
+			"order (%lu) in call from %p, readjusting\n",
 			order, mp->min_order, __caller(1));
 		order = mp->min_order;
 	}
@@ -368,7 +368,7 @@ void *mm_alloc(struct mm_pool *mp, size_t size)
 	unsigned long i;
 	unsigned long order;
 
-	struct mm_blk_lnk  *blk  = NULL;
+	struct mm_blk_lnk *blk  = NULL;
 	struct list_head *list = NULL;
 
 
@@ -409,13 +409,14 @@ void *mm_alloc(struct mm_pool *mp, size_t size)
 			break;
 	}
 
-
-	BUG_ON(list_empty(list));
+	if (list_empty(list)) {
+		pr_debug("MM: pool %p out of blocks for order %lu\n",
+			 mp, order);
+		goto exit;
+	}
 
 	blk = list_entry(list->next, struct mm_blk_lnk, link);
 
-
-
 	list_del(&blk->link);
 
 	mm_mark_alloc(mp, blk);
@@ -426,6 +427,9 @@ void *mm_alloc(struct mm_pool *mp, size_t size)
 		mm_split_blk(mp, blk, i);
 
 
+	mp->alloc_blks += (1UL << (order - mp->min_order));
+
+exit:
 	return blk;
 }
 
@@ -436,6 +440,7 @@ void *mm_alloc(struct mm_pool *mp, size_t size)
  * @param mp    a struct mm_pool
  * @param addr  the address of the block
- * @param order the order of the block
+ *
  */
 
 void mm_free(struct mm_pool *mp, const void *addr)
@@ -462,17 +467,74 @@ void mm_free(struct mm_pool *mp, const void *addr)
 
 	if (!IS_ERR_VALUE(order)) {
 		mm_upmerge_blks(mp, (struct mm_blk_lnk *) addr);
+		mp->alloc_blks -= (1UL << (order - mp->min_order));
 		goto exit;
 	}
 
 error:
-	pr_err("MM: double free, invalid size or untracked block %p in call "
+	pr_info("MM: double free, invalid size or untracked block %p in call "
 	       "from %p\n", addr, __caller(0));
+
 exit:
 	return;
 }
 
 
+/**
+ * @brief returns number of free blocks at block granularity
+ *
+ * @param mp	a struct mm_pool
+ *
+ * @return number of free blocks at block granularity
+ */
+
+unsigned long mm_unallocated_blocks(struct mm_pool *mp)
+{
+	return mp->n_blks - mp->alloc_blks;
+}
+
+
+/**
+ * @brief returns number of allocated blocks at block granularity
+ *
+ * @param mp	a struct mm_pool
+ *
+ * @return number of allocated blocks at block granularity
+ */
+
+unsigned long mm_allocated_blocks(struct mm_pool *mp)
+{
+	return mp->alloc_blks;
+}
+
+
+/**
+ * @brief check if an address refers to an allocated block within the pool
+ *
+ * @param mp	a struct mm_pool
+ * @param addr	an address pointer
+ *
+ * @return true or false
+ */
+
+bool mm_addr_in_pool(struct mm_pool *mp, void *addr)
+{
+	if (mm_blk_idx(mp, (struct mm_blk_lnk *) addr) > mp->n_blks)
+		goto nope;
+
+	if (!mm_blk_addr_valid(mp, (struct mm_blk_lnk *) addr))
+		goto nope;
+
+	if (!mm_blk_get_alloc_order(mp, (struct mm_blk_lnk *) addr))
+		goto nope;
+
+	return true;
+
+nope:
+	return false;
+}
+
+
 /**
  *
  * @brief initialise the memory allocator instance
@@ -570,6 +632,8 @@ int mm_init(struct mm_pool *mp, void *base,
 	/* fake initial allocation */
 	mp->alloc_order[0] = (typeof(mp->alloc_order[0])) mp->max_order;
 
+	mp->alloc_blks = mp->n_blks;
+
 	/* we start by dividing the highest order block, mark it as available */
 	mm_free(mp, base);
 
diff --git a/lib/page.c b/lib/page.c
new file mode 100644
index 0000000000000000000000000000000000000000..e293617f85753e2064452e54f473f8285876bb71
--- /dev/null
+++ b/lib/page.c
@@ -0,0 +1,313 @@
+/**
+ * @file lib/page.c
+ *
+ *
+ * @note this only ever uses one page map at a time. If you want to remap
+ *	 boot memory, define a new map or expand the number of nodes in the
+ *	 page map: first copy the old map, then call page_map_set_map()
+ */
+
+
+#include <page.h>
+
+#include <kernel/err.h>
+#include <kernel/mm.h>
+#include <kernel/printk.h>
+#include <kernel/kernel.h>
+
+#include <string.h>
+
+
+#define PG_SIZE(map)    (0x1UL << (map)->pool->min_order)
+
+/* align address to the (next) page boundary */
+#define PG_ALIGN(addr, map)	ALIGN(addr, PG_SIZE(map))
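+
+/* mirror the Kconfig option, falling back to a threshold of 1 if unset;
+ * this follows the CONFIG_ wrapper pattern used for
+ * INIT_PAGE_MAP_MAX_ENTRIES in arch/sparc/include/page.h
+ */
+#if defined(CONFIG_PAGE_MAP_MOVE_NODE_AVAIL_THRESH)
+#define PAGE_MAP_MOVE_NODE_AVAIL_THRESH CONFIG_PAGE_MAP_MOVE_NODE_AVAIL_THRESH
+#else
+#define PAGE_MAP_MOVE_NODE_AVAIL_THRESH 1
+#endif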
+
+/* reference to the page map, last entry must be NULL */
+static struct page_map_node **page_mem;
+
+/* empty/busy pool lists */
+struct list_head page_map_list_full;
+struct list_head page_map_list_empty;
+
+
+/**
+ * @brief set the map that is used by page_alloc() and page_free() etc.
+ *
+ * @param pg		a page map
+ *
+ */
+
+void page_map_set_map(struct page_map_node **pg)
+{
+	page_mem = pg;
+
+	INIT_LIST_HEAD(&page_map_list_full);
+	INIT_LIST_HEAD(&page_map_list_empty);
+
+
+	/* consider all as full at the beginning */
+	while ((*pg))
+		list_add_tail(&(*pg++)->node, &page_map_list_full);
+}
+
+
+/**
+ * @brief add a new mapping
+ *
+ * @param start		the start address of the memory section
+ * @param end		the end address of the memory section
+ * @param page_size	the page size granularity
+ *
+ * @return 0 on success, -ENOMEM on error
+ *
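+ * @note usage sketch, assuming a second memory bank is configured in
+ *	 sp_banks[] and the target node's pool storage is already set up:
+ *
+ *	page_map_add(sp_banks[1].base_addr,
+ *		     sp_banks[1].base_addr + sp_banks[1].num_bytes,
+ *		     PAGE_SIZE);
+ *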
+ */
+
+int page_map_add(unsigned long start, unsigned long end,
+		 unsigned long page_size)
+{
+	size_t mem_size;
+
+	struct page_map_node **pg = page_mem;
+
+
+	if (!pg) {
+		pr_err("PAGE MEM: %s(): no page map configured\n", __func__);
+		goto error;
+	}
+
+	if (end < start)
+		goto error;
+
+
+	mem_size = (size_t) (end - start);
+
+	/* search for the first empty pool entry (max_order == 0) */
+	while ((*pg)) {
+		if (!(*pg)->pool->max_order)
+			break;
+
+		pg++;
+	}
+
+	if (!(*pg)) {
+		pr_err("PAGE MEM: map space exceeded, cannot add map\n");
+		goto error;
+	}
+
+	if (mm_init((*pg)->pool, (void *) start, mem_size, page_size))
+		goto error;
+
+	list_add_tail(&(*pg)->node, &page_map_list_full);
+
+	return 0;
+
+error:
+	return -ENOMEM;
+}
+
+
+
+/**
+ * @brief initialise a page map
+ *
+ * @param pg_map	a page map
+ * @param start		the start address of the initial memory section
+ * @param end		the end address of the initial memory section
+ * @param page_size	the page size granularity
+ *
+ * @return 0 on success, -ENOMEM on error
+ *
+ * @note the page map store is assumed to be set to zero
+ *
+ * @warning do not forget to configure all the pool storage pointers...
+ */
+
+int page_map_init(struct page_map_node **pg_map,
+		  unsigned long start, unsigned long end,
+		  unsigned long page_size)
+{
+	size_t mem_size;
+
+	struct page_map_node *pg = pg_map[0];
+
+
+	if (!pg)
+		goto error;
+
+	if (end < start)
+		goto error;
+
+	mem_size = (size_t) (end - start);
+
+	if (mm_init(pg->pool, (void *) start, mem_size, page_size))
+		goto error;
+
+
+	page_map_set_map(pg_map);
+
+
+	return 0;
+
+error:
+	return -ENOMEM;
+}
+
+
+/**
+ * @brief reserve an arbitrarily sized chunk of memory, but track it in the
+ *	  page mapper
+ *
+ * @param size the size to allocate
+ *
+ * @return void pointer on success, NULL on error
+ *
+ * @note this function can only return a valid pointer if enough contiguous
+ *	 blocks are available in the memory manager!
+ *
+ * @note To reserve your boot memory/image, call this function before any page
+ *	 in the initial memory segment has been allocated, because only then the
+ *	 free block header has not yet been written and the segment is
+ *	 completely untouched. See notes in mm.c for more info.
+ *	 This is of course inefficient if your image/boot memory does not
+ *	 coincide with the start of the memory bank. In this case you might
+ *	 want to consider adding the free segment preceding your boot memory
+ *	 back to the page map. In that case, make sure the allocation is
+ *	 never released. Make sure you configure extra RAM banks if needed.
+ */
+
+void *page_map_reserve_chunk(size_t size)
+{
+	void *mem = NULL;
+
+	struct page_map_node **pg = page_mem;
+
+	if (!page_mem) {
+		pr_err("PAGE MEM: %s no page map configured\n", __func__);
+		goto exit;
+	}
+
+	/* do NOT care for the empty/full lists, just find the first pool with
+	 * a sufficiently large block
+	 */
+	while ((*pg)) {
+		mem = mm_alloc((*pg)->pool, size);
+
+		if (mem)
+			break;
+
+		pg++;
+	}
+
+exit:
+	return mem;
+}
+
+
+/**
+ * @brief allocates a page by trying all configured banks until one is found
+ *
+ * @return NULL on error, address to page on success
+ */
+
+void *page_alloc(void)
+{
+	void *page = NULL;
+
+	struct page_map_node *p_elem;
+	struct page_map_node *p_tmp;
+
+
+	if (!page_mem) {
+		pr_err("PAGE MEM: %s no page map configured\n", __func__);
+		goto exit;
+	}
+
+	list_for_each_entry_safe(p_elem, p_tmp, &page_map_list_full, node) {
+		page = mm_alloc(p_elem->pool, PG_SIZE(p_elem));
+
+		if (!page) {
+			list_move_tail(&p_elem->node, &page_map_list_empty);
+			pr_debug("PAGE MEM: mapping %p move to empty list\n",
+				 p_elem);
+		} else {
+			break;
+		}
+	}
+
+
+#ifdef CONFIG_PAGE_MAP_CHECK_PAGE_ALIGNMENT
+
+	if (!page)
+		goto exit;
+
+	if ((unsigned long) page != PG_ALIGN((unsigned long) page, p_elem)) {
+		pr_err("PAGE MAP: page at %p allocated from memory manager %p "
+		       "is not aligned to the configured page size\n",
+		       page, p_elem->pool);
+		mm_free(p_elem->pool, page);
+		page = NULL;
+	}
+#endif
+
+exit:
+	return page;
+}
+
+
+/**
+ * @brief free a page
+ *
+ * @param page the page address pointer
+ *
+ * @note nested mappings should be caught by mm_addr_in_pool() check
+ */
+
+void page_free(void *page)
+{
+	struct page_map_node *p_elem;
+	struct page_map_node *p_tmp;
+
+
+	if (!page_mem) {
+		pr_err("PAGE MEM: %s no page map configured\n", __func__);
+		return;
+	}
+
+	if (!page) {
+		pr_info("PAGE MEM: NULL pointer in call to page_free from %p\n",
+			__caller(0));
+		return;
+	}
+
+	/* first check empty list */
+	list_for_each_entry_safe(p_elem, p_tmp, &page_map_list_empty, node) {
+
+		if (mm_addr_in_pool(p_elem->pool, page)) {
+
+			mm_free(p_elem->pool, page);
+
+			/* move the node back to the tail of the full list
+			 * once at least the threshold number of blocks has
+			 * been freed
+			 */
+			if (mm_unallocated_blocks(p_elem->pool)
+			    >= PAGE_MAP_MOVE_NODE_AVAIL_THRESH) {
+				pr_debug("PAGE MEM: mapping %p move to full "
+					 "list\n", p_elem);
+				list_move_tail(&p_elem->node,
+					       &page_map_list_full);
+			}
+			}
+
+			return;
+		}
+	}
+
+	list_for_each_entry_safe(p_elem, p_tmp, &page_map_list_full, node) {
+		if (mm_addr_in_pool(p_elem->pool, page)) {
+			mm_free(p_elem->pool, page);
+			return;
+		}
+	}
+}
+
+