#include <internal/debug.h>
/* Enable strict checking of the nonpaged pool on every allocation */
-//#define ENABLE_VALIDATE_POOL
+/*#define ENABLE_VALIDATE_POOL*/
/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING
* end of the range so any accesses beyond the end of block are to invalid
* memory locations.
*/
-//#define WHOLE_PAGE_ALLOCATIONS
+/*#define WHOLE_PAGE_ALLOCATIONS*/
#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#define POOL_TRACE(args...)
#endif
+/* avl types ****************************************************************/
+
+/* FIXME:
+ * These declarations should be moved into a separate header file.
+ */
+
+typedef struct _NODE
+{
+ struct _NODE* link[2];
+ struct _NODE* parent;
+ signed char balance;
+} NODE, *PNODE;
+
/* TYPES *******************************************************************/
#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
{
ULONG Magic;
ULONG Size;
- LIST_ENTRY ListEntry;
- ULONG Tag;
- PVOID Caller;
- struct _BLOCK_HDR* tag_next;
- BOOLEAN Dumped;
+ struct _BLOCK_HDR* previous;
+ union
+ {
+ struct
+ {
+ LIST_ENTRY ListEntry;
+ ULONG Tag;
+ PVOID Caller;
+ LIST_ENTRY TagListEntry;
+ BOOLEAN Dumped;
+ } Used;
+ struct
+ {
+ NODE Node;
+ } Free;
+ };
} BLOCK_HDR;
PVOID STDCALL
/* GLOBALS *****************************************************************/
-/*
- * Memory managment initalized symbol for the base of the pool
- */
-static unsigned int kernel_pool_base = 0;
+extern PVOID MiNonPagedPoolStart;
+extern ULONG MiNonPagedPoolLength;
/*
* Head of the list of free blocks
*/
-static LIST_ENTRY FreeBlockListHead;
+static PNODE FreeBlockListRoot = NULL;
/*
* Head of the list of in use block
*/
static LIST_ENTRY UsedBlockListHead;
+static LIST_ENTRY AddressListHead;
+
#ifndef WHOLE_PAGE_ALLOCATIONS
/*
* Count of free blocks
#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE (1024)
-static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
+static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
-/* FUNCTIONS ***************************************************************/
+#ifdef WHOLE_PAGE_ALLOCATIONS
+static RTL_BITMAP NonPagedPoolAllocMap;
+static ULONG NonPagedPoolAllocMapHint;
+static ULONG MiCurrentNonPagedPoolLength = 0;
+#else
+static PULONG MiNonPagedPoolAllocMap;
+static ULONG MiNonPagedPoolNrOfPages;
+#endif /* WHOLE_PAGE_ALLOCATIONS */
-#ifdef TAG_STATISTICS_TRACKING
-VOID
-MiRemoveFromTagHashTable(BLOCK_HDR* block)
- /*
- * Remove a block from the tag hash table
- */
+/* avl helper functions ****************************************************/
+
+void DumpFreeBlockNode(PNODE p)
{
- BLOCK_HDR* previous;
- BLOCK_HDR* current;
- ULONG hash;
+ static int count = 0;
+ BLOCK_HDR* blk;
+
+ count++;
+
+ if (p)
+ {
+ DumpFreeBlockNode(p->link[0]);
+ blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
+ DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
+ DumpFreeBlockNode(p->link[1]);
+ }
+
+ count--;
+}
+void DumpFreeBlockTree(void)
+{
+ DbgPrint("--- Begin tree ------------------\n");
+ DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
+ DumpFreeBlockNode(FreeBlockListRoot);
+ DbgPrint("--- End tree --------------------\n");
+}
+
+int compare_node(PNODE p1, PNODE p2)
+{
+ BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
+ BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
+
+ if (blk1->Size == blk2->Size)
+ {
+ if (blk1 < blk2)
+ {
+ return -1;
+ }
+ if (blk1 > blk2)
+ {
+ return 1;
+ }
+ }
+ else
+ {
+ if (blk1->Size < blk2->Size)
+ {
+ return -1;
+ }
+ if (blk1->Size > blk2->Size)
+ {
+ return 1;
+ }
+ }
+ return 0;
+
+}
+
+int compare_value(PVOID value, PNODE p)
+{
+ BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
+ ULONG v = *(PULONG)value;
+
+ if (v < blk->Size)
+ {
+ return -1;
+ }
+ if (v > blk->Size)
+ {
+ return 1;
+ }
+ return 0;
+}
+
+/* avl functions **********************************************************/
+
+/* FIXME:
+ * The avl functions should be moved into a separate file.
+ */
+
+/* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
+
+void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
+{
+ PNODE y; /* Top node to update balance factor, and parent. */
+ PNODE p, q; /* Iterator, and parent. */
+ PNODE w; /* New root of rebalanced subtree. */
+ int dir; /* Direction to descend. */
+
+ n->link[0] = n->link[1] = n->parent = NULL;
+ n->balance = 0;
+
+ y = *root;
+ for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
+ {
+ dir = compare(n, p) > 0;
+ if (p->balance != 0)
+ {
+ y = p;
+ }
+ }
- if (block->Tag == 0)
+ n->parent = q;
+ if (q != NULL)
+ {
+ q->link[dir] = n;
+ }
+ else
+ {
+ *root = n;
+ }
+
+ if (*root == n)
{
return;
}
- hash = block->Tag % TAG_HASH_TABLE_SIZE;
-
- previous = NULL;
- current = tag_hash_table[hash];
- while (current != NULL)
+ for (p = n; p != y; p = q)
{
- if (current == block)
- {
- if (previous == NULL)
+ q = p->parent;
+ dir = q->link[0] != p;
+ if (dir == 0)
+ {
+ q->balance--;
+ }
+ else
+ {
+ q->balance++;
+ }
+ }
+
+ if (y->balance == -2)
+ {
+ PNODE x = y->link[0];
+ if (x->balance == -1)
+ {
+ w = x;
+ y->link[0] = x->link[1];
+ x->link[1] = y;
+ x->balance = y->balance = 0;
+ x->parent = y->parent;
+ y->parent = x;
+ if (y->link[0] != NULL)
{
- tag_hash_table[hash] = block->tag_next;
+ y->link[0]->parent = y;
}
- else
+ }
+ else
+ {
+ assert (x->balance == +1);
+ w = x->link[1];
+ x->link[1] = w->link[0];
+ w->link[0] = x;
+ y->link[0] = w->link[1];
+ w->link[1] = y;
+ if (w->balance == -1)
{
- previous->tag_next = block->tag_next;
+ x->balance = 0;
+ y->balance = +1;
}
- return;
- }
- previous = current;
- current = current->tag_next;
+ else if (w->balance == 0)
+ {
+ x->balance = y->balance = 0;
+ }
+ else /* |w->balance == +1| */
+ {
+ x->balance = -1;
+ y->balance = 0;
+ }
+ w->balance = 0;
+ w->parent = y->parent;
+ x->parent = y->parent = w;
+ if (x->link[1] != NULL)
+ {
+ x->link[1]->parent = x;
+ }
+ if (y->link[0] != NULL)
+ {
+ y->link[0]->parent = y;
+ }
+ }
}
- DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
- block->Tag, block->Caller);
- KeBugCheck(0);
+ else if (y->balance == +2)
+ {
+ PNODE x = y->link[1];
+ if (x->balance == +1)
+ {
+ w = x;
+ y->link[1] = x->link[0];
+ x->link[0] = y;
+ x->balance = y->balance = 0;
+ x->parent = y->parent;
+ y->parent = x;
+ if (y->link[1] != NULL)
+ {
+ y->link[1]->parent = y;
+ }
+ }
+ else
+ {
+ assert (x->balance == -1);
+ w = x->link[0];
+ x->link[0] = w->link[1];
+ w->link[1] = x;
+ y->link[1] = w->link[0];
+ w->link[0] = y;
+ if (w->balance == 1)
+ {
+ x->balance = 0;
+ y->balance = -1;
+ }
+ else if (w->balance == 0)
+ {
+ x->balance = y->balance = 0;
+ }
+ else /* |w->balance == -1| */
+ {
+ x->balance = +1;
+ y->balance = 0;
+ }
+ w->balance = 0;
+ w->parent = y->parent;
+ x->parent = y->parent = w;
+ if (x->link[0] != NULL)
+ {
+ x->link[0]->parent = x;
+ }
+ if (y->link[1] != NULL)
+ {
+ y->link[1]->parent = y;
+ }
+ }
+ }
+ else
+ {
+ return;
+ }
+ if (w->parent != NULL)
+ {
+ w->parent->link[y != w->parent->link[0]] = w;
+ }
+ else
+ {
+ *root = w;
+ }
+
+ return;
}
-VOID
-MiAddToTagHashTable(BLOCK_HDR* block)
- /*
- * Add a block to the tag hash table
- */
+void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
{
- ULONG hash;
- BLOCK_HDR* current;
- BLOCK_HDR* previous;
+ PNODE p; /* Traverses tree to find node to delete. */
+ PNODE q; /* Parent of |p|. */
+ int dir; /* Side of |q| on which |p| is linked. */
- if (block->Tag == 0)
+ if (root == NULL || *root == NULL)
{
- return;
+ return ;
}
- hash = block->Tag % TAG_HASH_TABLE_SIZE;
+ p = item;
+ q = p->parent;
+ if (q == NULL)
+ {
+ q = (PNODE) root;
+ dir = 0;
+ }
+ else
+ {
+ dir = compare(p, q) > 0;
+ }
- previous = NULL;
- current = tag_hash_table[hash];
- while (current != NULL)
+ if (p->link[1] == NULL)
{
- if (current->Tag == block->Tag)
- {
- block->tag_next = current->tag_next;
- current->tag_next = block;
- return;
+ q->link[dir] = p->link[0];
+ if (q->link[dir] != NULL)
+ {
+ q->link[dir]->parent = p->parent;
}
- previous = current;
- if (current->tag_next &&((PVOID)current->tag_next >= (PVOID)kernel_pool_base + NONPAGED_POOL_SIZE || (PVOID)current->tag_next < (PVOID)kernel_pool_base))
- {
- DbgPrint("previous %x\n", previous);
+ }
+ else
+ {
+ PNODE r = p->link[1];
+ if (r->link[0] == NULL)
+ {
+ r->link[0] = p->link[0];
+ q->link[dir] = r;
+ r->parent = p->parent;
+ if (r->link[0] != NULL)
+ {
+ r->link[0]->parent = r;
+ }
+ r->balance = p->balance;
+ q = r;
+ dir = 1;
+ }
+ else
+ {
+ PNODE s = r->link[0];
+ while (s->link[0] != NULL)
+ {
+ s = s->link[0];
+ }
+ r = s->parent;
+ r->link[0] = s->link[1];
+ s->link[0] = p->link[0];
+ s->link[1] = p->link[1];
+ q->link[dir] = s;
+ if (s->link[0] != NULL)
+ {
+ s->link[0]->parent = s;
+ }
+ s->link[1]->parent = s;
+ s->parent = p->parent;
+ if (r->link[0] != NULL)
+ {
+ r->link[0]->parent = r;
+ }
+ s->balance = p->balance;
+ q = r;
+ dir = 0;
+ }
+ }
+
+ item->link[0] = item->link[1] = item->parent = NULL;
+ item->balance = 0;
+
+ while (q != (PNODE) root)
+ {
+ PNODE y = q;
+
+ if (y->parent != NULL)
+ {
+ q = y->parent;
+ }
+ else
+ {
+ q = (PNODE) root;
}
- current = current->tag_next;
+
+ if (dir == 0)
+ {
+ dir = q->link[0] != y;
+ y->balance++;
+ if (y->balance == +1)
+ {
+ break;
+ }
+ else if (y->balance == +2)
+ {
+ PNODE x = y->link[1];
+ if (x->balance == -1)
+ {
+ PNODE w;
+
+ assert (x->balance == -1);
+ w = x->link[0];
+ x->link[0] = w->link[1];
+ w->link[1] = x;
+ y->link[1] = w->link[0];
+ w->link[0] = y;
+ if (w->balance == +1)
+ {
+ x->balance = 0;
+ y->balance = -1;
+ }
+ else if (w->balance == 0)
+ {
+ x->balance = y->balance = 0;
+ }
+ else /* |w->balance == -1| */
+ {
+ x->balance = +1;
+ y->balance = 0;
+ }
+ w->balance = 0;
+ w->parent = y->parent;
+ x->parent = y->parent = w;
+ if (x->link[0] != NULL)
+ {
+ x->link[0]->parent = x;
+ }
+ if (y->link[1] != NULL)
+ {
+ y->link[1]->parent = y;
+ }
+ q->link[dir] = w;
+ }
+ else
+ {
+ y->link[1] = x->link[0];
+ x->link[0] = y;
+ x->parent = y->parent;
+ y->parent = x;
+ if (y->link[1] != NULL)
+ {
+ y->link[1]->parent = y;
+ }
+ q->link[dir] = x;
+ if (x->balance == 0)
+ {
+ x->balance = -1;
+ y->balance = +1;
+ break;
+ }
+ else
+ {
+ x->balance = y->balance = 0;
+ y = x;
+ }
+ }
+ }
+ }
+ else
+ {
+ dir = q->link[0] != y;
+ y->balance--;
+ if (y->balance == -1)
+ {
+ break;
+ }
+ else if (y->balance == -2)
+ {
+ PNODE x = y->link[0];
+ if (x->balance == +1)
+ {
+ PNODE w;
+ assert (x->balance == +1);
+ w = x->link[1];
+ x->link[1] = w->link[0];
+ w->link[0] = x;
+ y->link[0] = w->link[1];
+ w->link[1] = y;
+ if (w->balance == -1)
+ {
+ x->balance = 0;
+ y->balance = +1;
+ }
+ else if (w->balance == 0)
+ {
+ x->balance = y->balance = 0;
+ }
+ else /* |w->balance == +1| */
+ {
+ x->balance = -1;
+ y->balance = 0;
+ }
+ w->balance = 0;
+ w->parent = y->parent;
+ x->parent = y->parent = w;
+ if (x->link[1] != NULL)
+ {
+ x->link[1]->parent = x;
+ }
+ if (y->link[0] != NULL)
+ {
+ y->link[0]->parent = y;
+ }
+ q->link[dir] = w;
+ }
+ else
+ {
+ y->link[0] = x->link[1];
+ x->link[1] = y;
+ x->parent = y->parent;
+ y->parent = x;
+ if (y->link[0] != NULL)
+ {
+ y->link[0]->parent = y;
+ }
+ q->link[dir] = x;
+ if (x->balance == 0)
+ {
+ x->balance = +1;
+ y->balance = -1;
+ break;
+ }
+ else
+ {
+ x->balance = y->balance = 0;
+ y = x;
+ }
+ }
+ }
+ }
}
- block->tag_next = NULL;
- if (previous == NULL)
+
+}
+
+PNODE _cdecl avl_get_first(PNODE root)
+{
+ PNODE p;
+ if (root == NULL)
+ {
+ return NULL;
+ }
+ p = root;
+ while (p->link[0])
+ {
+ p = p->link[0];
+ }
+ return p;
+}
+
+PNODE avl_get_next(PNODE root, PNODE p)
+{
+ PNODE q;
+ if (p->link[1])
{
- tag_hash_table[hash] = block;
+ p = p->link[1];
+ while(p->link[0])
+ {
+ p = p->link[0];
+ }
+ return p;
}
else
{
- previous->tag_next = block;
+ q = p->parent;
+ while (q && q->link[1] == p)
+ {
+ p = q;
+ q = q->parent;
+ }
+ if (q == NULL)
+ {
+ return NULL;
+ }
+ else
+ {
+ return q;
+ }
}
}
-#endif /* TAG_STATISTICS_TRACKING */
-VOID
-ExInitNonPagedPool(ULONG BaseAddress)
+PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
{
- kernel_pool_base = BaseAddress;
- KeInitializeSpinLock(&MmNpoolLock);
- MmInitKernelMap((PVOID)BaseAddress);
- memset(tag_hash_table, 0, sizeof(tag_hash_table));
- InitializeListHead(&FreeBlockListHead);
- InitializeListHead(&UsedBlockListHead);
+ PNODE p;
+ PNODE prev = NULL;
+ int cmp;
+
+ for (p = root; p != NULL;)
+ {
+ cmp = compare((PVOID)&size, p);
+ if (cmp < 0)
+ {
+ prev = p;
+ p = p->link[0];
+ }
+ else if (cmp > 0)
+ {
+ p = p->link[1];
+ }
+ else
+ {
+ while (p->link[0])
+ {
+ cmp = compare((PVOID)&size, p->link[0]);
+ if (cmp != 0)
+ {
+ break;
+ }
+ p = p->link[0];
+ }
+ return p;
+ }
+ }
+ return prev;
}
+/* non paged pool functions ************************************************/
+
#ifdef TAG_STATISTICS_TRACKING
+VOID
+MiRemoveFromTagHashTable(BLOCK_HDR* block)
+ /*
+ * Remove a block from the tag hash table
+ */
+{
+ if (block->Used.Tag == 0)
+ {
+ return;
+ }
+
+ RemoveEntryList(&block->Used.TagListEntry);
+}
+
+VOID
+MiAddToTagHashTable(BLOCK_HDR* block)
+ /*
+ * Add a block to the tag hash table
+ */
+{
+ ULONG hash;
+
+ if (block->Used.Tag == 0)
+ {
+ return;
+ }
+
+ hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
+
+ InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
+}
+#endif /* TAG_STATISTICS_TRACKING */
+
+#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
CurrentSize / CurrentNrBlocks);
}
}
-#endif /* TAG_STATISTICS_TRACKING */
+#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
-#ifdef TAG_STATISTICS_TRACKING
+#if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
ULONG i;
BLOCK_HDR* current;
ULONG CurrentTag;
ULONG CurrentSize;
ULONG TotalBlocks;
ULONG TotalSize;
+ LIST_ENTRY tmpListHead;
+ PLIST_ENTRY current_entry;
DbgPrint("******* Dumping non paging pool stats ******\n");
TotalBlocks = 0;
TotalSize = 0;
for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
{
- CurrentTag = 0;
- CurrentNrBlocks = 0;
- CurrentSize = 0;
- current = tag_hash_table[i];
- while (current != NULL)
- {
- if (current->Tag != CurrentTag)
+ InitializeListHead(&tmpListHead);
+
+ while (!IsListEmpty(&tag_hash_table[i]))
+ {
+ CurrentTag = 0;
+
+ current_entry = tag_hash_table[i].Flink;
+ while (current_entry != &tag_hash_table[i])
{
- if (CurrentTag != 0 && CurrentNrBlocks != 0)
- {
- MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
+ current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
+ current_entry = current_entry->Flink;
+ if (CurrentTag == 0)
+ {
+ CurrentTag = current->Used.Tag;
+ CurrentNrBlocks = 0;
+ CurrentSize = 0;
+ }
+ if (current->Used.Tag == CurrentTag)
+ {
+ RemoveEntryList(¤t->Used.TagListEntry);
+ InsertHeadList(&tmpListHead, ¤t->Used.TagListEntry);
+ if (!NewOnly || !current->Used.Dumped)
+ {
+ CurrentNrBlocks++;
+ TotalBlocks++;
+ CurrentSize += current->Size;
+ TotalSize += current->Size;
+ current->Used.Dumped = TRUE;
+ }
}
- CurrentTag = current->Tag;
- CurrentNrBlocks = 0;
- CurrentSize = 0;
}
-
- if (!NewOnly || !current->Dumped)
+ if (CurrentTag != 0 && CurrentNrBlocks != 0)
{
- CurrentNrBlocks++;
- TotalBlocks++;
- CurrentSize = CurrentSize + current->Size;
- TotalSize = TotalSize + current->Size;
- current->Dumped = TRUE;
+ MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
}
- current = current->tag_next;
}
- if (CurrentTag != 0 && CurrentNrBlocks != 0)
- {
- MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
+ if (!IsListEmpty(&tmpListHead))
+ {
+ tag_hash_table[i].Flink = tmpListHead.Flink;
+ tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
+ tag_hash_table[i].Blink = tmpListHead.Blink;
+ tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
}
}
if (TotalBlocks != 0)
DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
DbgPrint("***************** Dump Complete ***************\n");
-#endif /* TAG_STATISTICS_TRACKING */
+#endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
}
VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
+#ifndef WHOLE_PAGE_ALLOCATIONS
BLOCK_HDR* current;
PLIST_ENTRY current_entry;
KIRQL oldIrql;
current_entry = UsedBlockListHead.Flink;
while (current_entry != &UsedBlockListHead)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
- if (!NewOnly || !current->Dumped)
+ current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
+ if (!NewOnly || !current->Used.Dumped)
{
CHAR c1, c2, c3, c4;
- c1 = (current->Tag >> 24) & 0xFF;
- c2 = (current->Tag >> 16) & 0xFF;
- c3 = (current->Tag >> 8) & 0xFF;
- c4 = current->Tag & 0xFF;
+ c1 = (current->Used.Tag >> 24) & 0xFF;
+ c2 = (current->Used.Tag >> 16) & 0xFF;
+ c3 = (current->Used.Tag >> 8) & 0xFF;
+ c4 = current->Used.Tag & 0xFF;
if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
{
DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
- current->Size, current->Tag, c4, c3, c2, c1,
- current->Caller);
+ current->Size, current->Used.Tag, c4, c3, c2, c1,
+ current->Used.Caller);
}
else
{
DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
- current->Size, current->Tag, current->Caller);
+ current->Size, current->Used.Tag, current->Used.Caller);
}
- current->Dumped = TRUE;
+ current->Used.Dumped = TRUE;
}
current_entry = current_entry->Flink;
}
DbgPrint("***************** Dump Complete ***************\n");
KeReleaseSpinLock(&MmNpoolLock, oldIrql);
+#endif /* not WHOLE_PAGE_ALLOCATIONS */
}
#ifndef WHOLE_PAGE_ALLOCATIONS
*/
{
BLOCK_HDR* current;
- PLIST_ENTRY current_entry;
unsigned int blocks_seen=0;
-
- current_entry = FreeBlockListHead.Flink;
- while (current_entry != &FreeBlockListHead)
+ PNODE p;
+
+ p = avl_get_first(FreeBlockListRoot);
+
+ while(p)
{
- unsigned int base_addr;
+ PVOID base_addr;
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
- base_addr = (int)current;
+ current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
+ base_addr = (PVOID)current;
if (current->Magic != BLOCK_HDR_FREE_MAGIC)
{
DbgPrint("Bad block magic (probable pool corruption) at %x\n",
current);
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if (base_addr < (kernel_pool_base) ||
- (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
+ if (base_addr < MiNonPagedPoolStart ||
+ base_addr + sizeof(BLOCK_HDR) + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
{
DbgPrint("Block %x found outside pool area\n",current);
DbgPrint("Size %d\n",current->Size);
- DbgPrint("Limits are %x %x\n",kernel_pool_base,
- kernel_pool_base+NONPAGED_POOL_SIZE);
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
+ DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
+ MiNonPagedPoolStart+MiNonPagedPoolLength);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
blocks_seen++;
if (blocks_seen > EiNrFreeBlocks)
{
DbgPrint("Too many blocks on free list\n");
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if (current->ListEntry.Flink != &FreeBlockListHead &&
- current->ListEntry.Flink->Blink != ¤t->ListEntry)
- {
- DbgPrint("%s:%d:Break in list (current %x next %x "
- "current->next->previous %x)\n",
- __FILE__,__LINE__,current, current->ListEntry.Flink,
- current->ListEntry.Flink->Blink);
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
- }
-
- current_entry = current_entry->Flink;
+ p = avl_get_next(FreeBlockListRoot, p);
}
}
current_entry = UsedBlockListHead.Flink;
while (current_entry != &UsedBlockListHead)
{
- unsigned int base_addr;
+ PVOID base_addr;
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
- base_addr = (int)current;
+ current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
+ base_addr = (PVOID)current;
if (current->Magic != BLOCK_HDR_USED_MAGIC)
{
DbgPrint("Bad block magic (probable pool corruption) at %x\n",
current);
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if (base_addr < (kernel_pool_base) ||
+ if (base_addr < MiNonPagedPoolStart ||
(base_addr+current->Size) >
- (kernel_pool_base)+NONPAGED_POOL_SIZE)
+ MiNonPagedPoolStart+MiNonPagedPoolLength)
{
DbgPrint("Block %x found outside pool area\n",current);
- for(;;);
+ DbgPrint("Size %d\n",current->Size);
+ DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
+ MiNonPagedPoolStart+MiNonPagedPoolLength);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
blocks_seen++;
if (blocks_seen > EiNrUsedBlocks)
{
DbgPrint("Too many blocks on used list\n");
- for(;;);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if (current->ListEntry.Flink != &UsedBlockListHead &&
- current->ListEntry.Flink->Blink != ¤t->ListEntry)
+ if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
+ current->Used.ListEntry.Flink->Blink != ¤t->Used.ListEntry)
{
- DbgPrint("Break in list (current %x next %x)\n",
- current, current->ListEntry.Flink);
- for(;;);
+ DbgPrint("%s:%d:Break in list (current %x next %x "
+ "current->next->previous %x)\n",
+ __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
+ current->Used.ListEntry.Flink->Blink);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
current_entry = current_entry->Flink;
* NOTE: Bug checks if duplicates are found
*/
{
- unsigned int base = (int)blk;
- unsigned int last = ((int)blk) + +sizeof(BLOCK_HDR) + blk->Size;
+ char* base = (char*)blk;
+ char* last = ((char*)blk) + sizeof(BLOCK_HDR) + blk->Size;
BLOCK_HDR* current;
PLIST_ENTRY current_entry;
-
- current_entry = FreeBlockListHead.Flink;
- while (current_entry != &FreeBlockListHead)
+ PNODE p;
+
+ p = avl_get_first(FreeBlockListRoot);
+
+ while (p)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
+ current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
if (current->Magic != BLOCK_HDR_FREE_MAGIC)
{
DbgPrint("Bad block magic (probable pool corruption) at %x\n",
current);
- KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if ( (int)current > base && (int)current < last )
+ if ( (char*)current > base && (char*)current < last )
{
DbgPrint("intersecting blocks on list\n");
- for(;;);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if ( (int)current < base &&
- ((int)current + current->Size + sizeof(BLOCK_HDR))
+ if ( (char*)current < base &&
+ ((char*)current + current->Size + sizeof(BLOCK_HDR))
> base )
{
DbgPrint("intersecting blocks on list\n");
- for(;;);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
-
- current_entry = current_entry->Flink;
+ p = avl_get_next(FreeBlockListRoot, p);
}
current_entry = UsedBlockListHead.Flink;
while (current_entry != &UsedBlockListHead)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
+ current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
- if ( (int)current > base && (int)current < last )
+ if ( (char*)current > base && (char*)current < last )
{
DbgPrint("intersecting blocks on list\n");
- for(;;);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
- if ( (int)current < base &&
- ((int)current + current->Size + sizeof(BLOCK_HDR))
+ if ( (char*)current < base &&
+ ((char*)current + current->Size + sizeof(BLOCK_HDR))
> base )
{
DbgPrint("intersecting blocks on list\n");
- for(;;);
+ KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
}
current_entry = current_entry->Flink;
{
BLOCK_HDR* current;
PLIST_ENTRY current_entry;
+ PNODE p;
validate_free_list();
validate_used_list();
- current_entry = FreeBlockListHead.Flink;
- while (current_entry != &FreeBlockListHead)
+ p = avl_get_first(FreeBlockListRoot);
+ while (p)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
+ current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
check_duplicates(current);
- current_entry = current_entry->Flink;
+ p = avl_get_next(FreeBlockListRoot, p);
}
current_entry = UsedBlockListHead.Flink;
while (current_entry != &UsedBlockListHead)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
+ current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
check_duplicates(current);
current_entry = current_entry->Flink;
}
}
#endif
-STATIC VOID
-merge_free_block(BLOCK_HDR* blk)
+static void remove_from_used_list(BLOCK_HDR* current)
{
- PLIST_ENTRY next_entry;
- BLOCK_HDR* next;
- PLIST_ENTRY previous_entry;
- BLOCK_HDR* previous;
+ RemoveEntryList(¤t->Used.ListEntry);
+ EiUsedNonPagedPool -= current->Size;
+ EiNrUsedBlocks--;
+}
- next_entry = blk->ListEntry.Flink;
- if (next_entry != &FreeBlockListHead)
- {
- next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
- if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
- (unsigned int)next)
- {
- RemoveEntryList(&next->ListEntry);
- blk->Size = blk->Size + next->Size;
- memset(next, 0xcc, sizeof(BLOCK_HDR));
- EiFreeNonPagedPool += sizeof(BLOCK_HDR);
- EiNrFreeBlocks--;
- }
- }
+static void remove_from_free_list(BLOCK_HDR* current)
+{
+ DPRINT("remove_from_free_list %d\n", current->Size);
- previous_entry = blk->ListEntry.Blink;
- if (previous_entry != &FreeBlockListHead)
- {
- previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
- if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
- (unsigned int)blk)
- {
- RemoveEntryList(&blk->ListEntry);
- previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
- memset(blk, 0xcc, sizeof(BLOCK_HDR));
- EiFreeNonPagedPool += sizeof(BLOCK_HDR);
- EiNrFreeBlocks--;
- }
- }
+ avl_remove(&FreeBlockListRoot, ¤t->Free.Node, compare_node);
+
+ EiFreeNonPagedPool -= current->Size;
+ EiNrFreeBlocks--;
+ DPRINT("remove_from_free_list done\n");
+#ifdef DUMP_AVL
+ DumpFreeBlockTree();
+#endif
}
-STATIC VOID
+static void
add_to_free_list(BLOCK_HDR* blk)
/*
* FUNCTION: add the block to the free list (internal)
*/
{
- PLIST_ENTRY current_entry;
BLOCK_HDR* current;
- current_entry = FreeBlockListHead.Flink;
- while (current_entry != &FreeBlockListHead)
+ DPRINT("add_to_free_list (%d)\n", blk->Size);
+
+ EiNrFreeBlocks++;
+
+ current = blk->previous;
+ if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
{
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
+ remove_from_free_list(current);
+ current->Size = current->Size + sizeof(BLOCK_HDR) + blk->Size;
+ current->Magic = BLOCK_HDR_USED_MAGIC;
+ memset(blk, 0xcc, sizeof(BLOCK_HDR));
+ blk = current;
+ }
- if ((unsigned int)current > (unsigned int)blk)
- {
- blk->ListEntry.Flink = current_entry;
- blk->ListEntry.Blink = current_entry->Blink;
- current_entry->Blink->Flink = &blk->ListEntry;
- current_entry->Blink = &blk->ListEntry;
- EiFreeNonPagedPool += blk->Size;
- EiNrFreeBlocks++;
- return;
+ current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
+ if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength &&
+ current->Magic == BLOCK_HDR_FREE_MAGIC)
+ {
+ remove_from_free_list(current);
+ blk->Size += sizeof(BLOCK_HDR) + current->Size;
+ memset(current, 0xcc, sizeof(BLOCK_HDR));
+ current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
+ if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength)
+ {
+ current->previous = blk;
}
-
- current_entry = current_entry->Flink;
}
- InsertTailList(&FreeBlockListHead, &blk->ListEntry);
+
+ DPRINT("%d\n", blk->Size);
+ blk->Magic = BLOCK_HDR_FREE_MAGIC;
EiFreeNonPagedPool += blk->Size;
- EiNrFreeBlocks++;
+ avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);
+
+ DPRINT("add_to_free_list done\n");
+#ifdef DUMP_AVL
+ DumpFreeBlockTree();
+#endif
}
static void add_to_used_list(BLOCK_HDR* blk)
* FUNCTION: add the block to the used list (internal)
*/
{
- InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
+ InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
EiUsedNonPagedPool += blk->Size;
EiNrUsedBlocks++;
}
-
-static void remove_from_free_list(BLOCK_HDR* current)
-{
- RemoveEntryList(&current->ListEntry);
- EiFreeNonPagedPool -= current->Size;
- EiNrFreeBlocks--;
-}
-
-
-static void remove_from_used_list(BLOCK_HDR* current)
-{
- RemoveEntryList(&current->ListEntry);
- EiUsedNonPagedPool -= current->Size;
- EiNrUsedBlocks--;
-}
-
-
inline static void* block_to_address(BLOCK_HDR* blk)
/*
* FUNCTION: Translate a block header address to the corresponding block
* address (internal)
*/
{
- return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
+ return ( (void *) ((char*)blk + sizeof(BLOCK_HDR)) );
}
inline static BLOCK_HDR* address_to_block(void* addr)
{
return (BLOCK_HDR *)
- ( ((int)addr) - sizeof(BLOCK_HDR) );
+ ( ((char*)addr) - sizeof(BLOCK_HDR) );
}
-static BLOCK_HDR* lookup_block(unsigned int size)
+static BOOLEAN
+grow_block(BLOCK_HDR* blk, PVOID end)
{
- PLIST_ENTRY current_entry;
- BLOCK_HDR* current;
- BLOCK_HDR* best = NULL;
- ULONG new_size;
- PVOID block, block_boundary;
-
- current_entry = FreeBlockListHead.Flink;
- if (size < PAGE_SIZE)
- {
- while (current_entry != &FreeBlockListHead)
- {
- DPRINT("current %x size %x tag_next %x\n",
- current, current->Size, current->tag_next);
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
- if (current->Size >= size &&
- (best == NULL || current->Size < best->Size))
- {
- best = current;
- if (best->Size == size)
- {
+ NTSTATUS Status;
+ PHYSICAL_ADDRESS Page;
+ BOOLEAN result = TRUE;
+ ULONG index;
+
+ PVOID start = (PVOID)PAGE_ROUND_UP((ULONG)(blk + 1));
+ end = (PVOID)PAGE_ROUND_UP(end);
+ index = (ULONG)(start - MiNonPagedPoolStart) / PAGE_SIZE;
+ while (start < end)
+ {
+ if (!(MiNonPagedPoolAllocMap[index / 32] & (1 << (index % 32))))
+ {
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
+ if (!NT_SUCCESS(Status))
+ {
+ result = FALSE;
break;
- }
- }
- current_entry = current_entry->Flink;
- }
- }
- else
- {
- while (current_entry != &FreeBlockListHead)
- {
- DPRINT("current %x size %x tag_next %x\n",
- current, current->Size, current->tag_next);
- current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
-
- block = block_to_address(current);
- block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
- new_size = (ULONG)block_boundary - (ULONG)block + size;
- if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
- {
- new_size += PAGE_SIZE;
- }
- if (current->Size >= new_size &&
- (best == NULL || current->Size < best->Size))
- {
- best = current;
+ }
+ Status = MmCreateVirtualMapping(NULL,
+ start,
+ PAGE_READWRITE|PAGE_SYSTEM,
+ Page,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
+ result = FALSE;
+ break;
+ }
+ MiNonPagedPoolAllocMap[index / 32] |= (1 << (index % 32));
+ memset(start, 0xcc, PAGE_SIZE);
+ MiNonPagedPoolNrOfPages++;
}
- current_entry = current_entry->Flink;
- }
- }
- return best;
+ index++;
+ start += PAGE_SIZE;
+ }
+ return result;
}
-static void* take_block(BLOCK_HDR* current, unsigned int size,
- ULONG Tag, PVOID Caller)
-/*
- * FUNCTION: Allocate a used block of least 'size' from the specified
- * free block
- * RETURNS: The address of the created memory block
- */
+static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
{
+ BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
+ ULONG previous_size, current_size, next_size, new_size;
+ PVOID end;
+ PVOID addr, aligned_addr;
+ PNODE p;
+
+ DPRINT("get_block %d\n", size);
- BLOCK_HDR* blk;
- if (size >= PAGE_SIZE)
- {
- blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address (current)));
- if (blk != current)
- {
- if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
- {
- (ULONG)blk += PAGE_SIZE;
- }
- assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));
-
- memset(blk, 0, sizeof(BLOCK_HDR));
- blk->Magic = BLOCK_HDR_FREE_MAGIC;
- blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
- current->Size -= (blk->Size + sizeof(BLOCK_HDR));
- InsertHeadList(&current->ListEntry, &blk->ListEntry);
- EiFreeNonPagedPool -= sizeof(BLOCK_HDR);
- EiNrFreeBlocks++;
- current = blk;
- }
- }
- /*
- * If the block is much bigger than required then split it and
- * return a pointer to the allocated section. If the difference
- * between the sizes is marginal it makes no sense to have the
- * extra overhead
- */
- if (current->Size > size + sizeof(BLOCK_HDR))
+ if (alignment == 0)
{
- BLOCK_HDR* free_blk;
-
- EiFreeNonPagedPool -= current->Size;
-
- /*
- * Replace the bigger block with a smaller block in the
- * same position in the list
- */
- free_blk = (BLOCK_HDR *)(((int)current)
- + sizeof(BLOCK_HDR) + size);
- free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
- InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
- free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);
-
- current->Size=size;
- RemoveEntryList(&current->ListEntry);
- add_to_used_list(current);
- current->Magic = BLOCK_HDR_USED_MAGIC;
- current->Tag = Tag;
- current->Caller = Caller;
- current->Dumped = FALSE;
-#ifdef TAG_STATISTICS_TRACKING
- MiAddToTagHashTable(current);
-#endif /* TAG_STATISTICS_TRACKING */
-
- EiFreeNonPagedPool += free_blk->Size;
-
- VALIDATE_POOL;
- return(block_to_address(current));
+ p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
+ if (p)
+ {
+ best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
+ addr = block_to_address(best);
+ }
}
-
+ else
+ {
+ p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
+
+ while(p)
+ {
+ current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
+ addr = block_to_address(current);
+ /* calculate first aligned address available within this block */
+ aligned_addr = MM_ROUND_UP(addr, alignment);
+ /* check to see if this address is already aligned */
+ if (addr == aligned_addr)
+ {
+ if (current->Size >= size &&
+ (best == NULL || current->Size < best->Size))
+ {
+ best = current;
+ }
+ }
+ else
+ {
+ /* make sure there's enough room to make a free block by the space skipped
+ * from alignment. If not, calculate forward to the next alignment
+ * and see if we allocate there...
+ */
+ new_size = (ULONG)aligned_addr - (ULONG)addr + size;
+ if ((ULONG)aligned_addr - (ULONG)addr < sizeof(BLOCK_HDR))
+ {
+ /* not enough room for a free block header, add some more bytes */
+ aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
+ new_size = (ULONG)aligned_addr - (ULONG)addr + size;
+ }
+ if (current->Size >= new_size &&
+ (best == NULL || current->Size < best->Size))
+ {
+ best = current;
+ }
+ }
+ if (best && current->Size >= size + alignment + 2 * sizeof(BLOCK_HDR))
+ {
+ break;
+ }
+ p = avl_get_next(FreeBlockListRoot, p);
+
+ }
+ }
+
/*
- * Otherwise allocate the whole block
+ * We didn't find anything suitable at all.
*/
- remove_from_free_list(current);
- add_to_used_list(current);
-
- current->Magic = BLOCK_HDR_USED_MAGIC;
- current->Tag = Tag;
- current->Caller = Caller;
- current->Dumped = FALSE;
-#ifdef TAG_STATISTICS_TRACKING
- MiAddToTagHashTable(current);
-#endif /* TAG_STATISTICS_TRACKING */
+ if (best == NULL)
+ {
+ return NULL;
+ }
- VALIDATE_POOL;
- return(block_to_address(current));
-}
+ current = best;
+ current_size = current->Size;
-static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
-/*
- * FUNCTION: Grow the executive heap to accomodate a block of at least 'size'
- * bytes
- */
-{
- ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
- ULONG start;
- BLOCK_HDR* blk=NULL;
- ULONG i;
- KIRQL oldIrql;
- NTSTATUS Status;
- PVOID block = NULL;
+ if (alignment > 0)
+ {
+ addr = block_to_address(current);
+ aligned_addr = MM_ROUND_UP(addr, alignment);
+ if (addr != aligned_addr)
+ {
+ blk = address_to_block(aligned_addr);
+ if (blk < current + 1)
+ {
+ aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
+ blk = address_to_block(aligned_addr);
+ }
+ /*
+ * if size-aligned, break off the preceding bytes into their own block...
+ */
+ previous = current;
+ previous_size = (ULONG)blk - (ULONG)previous - sizeof(BLOCK_HDR);
+ current = blk;
+ current_size -= ((ULONG)current - (ULONG)previous);
+ }
+ }
- if (size >= PAGE_SIZE)
- {
- nr_pages++;
- }
+ end = (PVOID)current + sizeof(BLOCK_HDR) + size;
+
+ if (current_size > size + sizeof(BLOCK_HDR) + 4)
+ {
+ /* create a new free block after our block, if the memory size is >= 4 byte for this block */
+ next = (BLOCK_HDR*)((ULONG)current + size + sizeof(BLOCK_HDR));
+ next_size = current_size - size - sizeof(BLOCK_HDR);
+ current_size = size;
+ end = (PVOID)next + sizeof(BLOCK_HDR);
+ }
- start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
-
- DPRINT("growing heap for block size %d, ",size);
- DPRINT("start %x\n",start);
-
- for (i=0;i<nr_pages;i++)
+ if (previous)
{
- PHYSICAL_ADDRESS Page;
- /* FIXME: Check whether we can really wait here. */
- Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
- if (!NT_SUCCESS(Status))
- {
- KeBugCheck(0);
- return(NULL);
+ remove_from_free_list(previous);
+ if (!grow_block(previous, end))
+ {
+ add_to_free_list(previous);
+ return NULL;
}
- Status = MmCreateVirtualMapping(NULL,
- (PVOID)(start + (i*PAGE_SIZE)),
- PAGE_READWRITE|PAGE_SYSTEM,
- Page,
- FALSE);
- if (!NT_SUCCESS(Status))
- {
- DbgPrint("Unable to create virtual mapping\n");
- KeBugCheck(0);
- }
+ memset(current, 0, sizeof(BLOCK_HDR));
+ current->Size = current_size;
+ current->Magic = BLOCK_HDR_USED_MAGIC;
+ current->previous = previous;
+ previous->Size = previous_size;
+ if (next == NULL)
+ {
+ blk = (BLOCK_HDR*)((PVOID)(current + 1) + current->Size);
+ if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
+ {
+ blk->previous = current;
+ }
+ }
+
+ add_to_free_list(previous);
}
+ else
+ {
+ remove_from_free_list(current);
- blk = (struct _BLOCK_HDR *)start;
- memset(blk, 0, sizeof(BLOCK_HDR));
- blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
- blk->Magic = BLOCK_HDR_FREE_MAGIC;
- memset(block_to_address(blk), 0xcc, blk->Size);
+ if (!grow_block(current, end))
+ {
+ add_to_free_list(current);
+ return NULL;
+ }
- KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
- add_to_free_list(blk);
- merge_free_block(blk);
+ current->Magic = BLOCK_HDR_USED_MAGIC;
+ if (next)
+ {
+ current->Size = current_size;
+ }
+ }
+
+ if (next)
+ {
+ memset(next, 0, sizeof(BLOCK_HDR));
+
+ next->Size = next_size;
+ next->Magic = BLOCK_HDR_FREE_MAGIC;
+ next->previous = current;
+ blk = (BLOCK_HDR*)((PVOID)(next + 1) + next->Size);
+ if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
+ {
+ blk->previous = next;
+ }
+ add_to_free_list(next);
+ }
- blk = lookup_block(size);
- if (blk)
- {
- block = take_block(blk, size, Tag, Caller);
- VALIDATE_POOL;
- }
- KeReleaseSpinLock(&MmNpoolLock, oldIrql);
- if (block == NULL)
- {
- CHECKPOINT1;
- }
- return block;
+ add_to_used_list(current);
+ VALIDATE_POOL;
+ return current;
}
#endif /* not WHOLE_PAGE_ALLOCATIONS */
DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
block, blk->Magic);
}
- KeBugCheck(0);
+ KEBUGCHECK(0);
return;
}
memset(block, 0xcc, blk->Size);
-#ifdef TAG_STATISTICS_TRACKING
- MiRemoveFromTagHashTable(blk);
-#endif /* TAG_STATISTICS_TRACKING */
remove_from_used_list(blk);
blk->Magic = BLOCK_HDR_FREE_MAGIC;
- blk->Tag = 0;
- blk->Caller = NULL;
- blk->tag_next = NULL;
add_to_free_list(blk);
- merge_free_block(blk);
-
VALIDATE_POOL;
KeReleaseSpinLock(&MmNpoolLock, oldIrql);
PVOID block;
BLOCK_HDR* best = NULL;
KIRQL oldIrql;
+ ULONG alignment;
POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
Size,Caller);
return(NULL);
}
/* Make the size dword alligned, this makes the block dword alligned */
- Size = ROUND_UP(Size, 4);
- /*
- * Look for an already created block of sufficent size
- */
- best = lookup_block(Size);
- if (best == NULL)
- {
- KeReleaseSpinLock(&MmNpoolLock, oldIrql);
- block = grow_kernel_pool(Size, Tag, Caller);
- assert(block != NULL);
- memset(block,0,Size);
- }
+ Size = ROUND_UP(Size, sizeof(DWORD));
+
+ if (Size >= PAGE_SIZE)
+ {
+ alignment = PAGE_SIZE;
+ }
+ else if (Type == NonPagedPoolCacheAligned ||
+ Type == NonPagedPoolCacheAlignedMustS)
+ {
+ alignment = MM_CACHE_LINE_SIZE;
+ }
else
- {
- block=take_block(best, Size, Tag, Caller);
- VALIDATE_POOL;
- KeReleaseSpinLock(&MmNpoolLock, oldIrql);
- memset(block,0,Size);
- }
+ {
+ alignment = 0;
+ }
+
+ best = get_block(Size, alignment);
+ if (best == NULL)
+ {
+ KeReleaseSpinLock(&MmNpoolLock, oldIrql);
+ return NULL;
+ }
+ best->Used.Tag = Tag;
+ best->Used.Caller = Caller;
+ best->Used.Dumped = FALSE;
+ best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
+ KeReleaseSpinLock(&MmNpoolLock, oldIrql);
+ block = block_to_address(best);
+ memset(block,0,Size);
return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}
#ifdef WHOLE_PAGE_ALLOCATIONS
PVOID STDCALL
-ExAllocateWholePageBlock(ULONG UserSize)
+ExAllocateWholePageBlock(ULONG Size)
{
PVOID Address;
PHYSICAL_ADDRESS Page;
ULONG i;
- ULONG Size;
ULONG NrPages;
+ ULONG Base;
- Size = sizeof(ULONG) + UserSize;
NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
- Address = MiAllocNonPagedPoolRegion(NrPages + 1);
+ Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
+ if (Base == 0xffffffff)
+ {
+ DbgPrint("Out of non paged pool space.\n");
+ KEBUGCHECK(0);
+ }
+ if (NonPagedPoolAllocMapHint == Base)
+ {
+ NonPagedPoolAllocMapHint += (NrPages + 1);
+ }
+ Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
for (i = 0; i < NrPages; i++)
{
Page = MmAllocPage(MC_NPPOOL, 0);
if (Page.QuadPart == 0LL)
{
- KeBugCheck(0);
+ KEBUGCHECK(0);
}
MmCreateVirtualMapping(NULL,
Address + (i * PAGE_SIZE),
TRUE);
}
- *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
- return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
+ MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
+ return((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
}
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
- ULONG NrPages;
+ ULONG Base;
- if ((ULONG)Addr < kernel_pool_base ||
- (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
+ if (Addr < MiNonPagedPoolStart ||
+ Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
{
DbgPrint("Block %x found outside pool area\n", Addr);
- KeBugCheck(0);
+ KEBUGCHECK(0);
}
- NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
- MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
+ Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
+ NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
+ while (MmIsPagePresent(NULL, Addr))
+ {
+ MmDeleteVirtualMapping(NULL,
+ Addr,
+ TRUE,
+ NULL,
+ NULL);
+ RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
+ Base++;
+ Addr += PAGE_SIZE;
+ }
}
#endif /* WHOLE_PAGE_ALLOCATIONS */
+VOID
+MiInitializeNonPagedPool(VOID)
+{
+ NTSTATUS Status;
+ PHYSICAL_ADDRESS Page;
+ ULONG i;
+ PVOID Address;
+#ifdef WHOLE_PAGE_ALLOCATIONS
+#else
+ BLOCK_HDR* blk;
+#endif
+#ifdef TAG_STATISTICS_TRACKING
+ for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
+ {
+ InitializeListHead(&tag_hash_table[i]);
+ }
+#endif
+ KeInitializeSpinLock(&MmNpoolLock);
+ InitializeListHead(&UsedBlockListHead);
+ InitializeListHead(&AddressListHead);
+ FreeBlockListRoot = NULL;
+#ifdef WHOLE_PAGE_ALLOCATIONS
+ NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE;
+ MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
+ Address = MiNonPagedPoolStart;
+ for (i = 0; i < NonPagedPoolAllocMapHint; i++)
+ {
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to allocate a page\n");
+ KEBUGCHECK(0);
+ }
+ Status = MmCreateVirtualMapping(NULL,
+ Address,
+ PAGE_READWRITE|PAGE_SYSTEM,
+ Page,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ KEBUGCHECK(0);
+ }
+ Address += PAGE_SIZE;
+ }
+ RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart, MM_NONPAGED_POOL_SIZE / PAGE_SIZE);
+ RtlClearAllBits(&NonPagedPoolAllocMap);
+ RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
+#else
+ MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
+ MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * sizeof(BLOCK_HDR));
+ Address = MiNonPagedPoolStart;
+ for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
+ {
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to allocate a page\n");
+ KEBUGCHECK(0);
+ }
+ Status = MmCreateVirtualMapping(NULL,
+ Address,
+ PAGE_READWRITE|PAGE_SYSTEM,
+ Page,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ KEBUGCHECK(0);
+ }
+ MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
+ Address += PAGE_SIZE;
+ }
+ /* the first block contains the non paged pool bitmap */
+ blk = (BLOCK_HDR*)MiNonPagedPoolStart;
+ memset(blk, 0, sizeof(BLOCK_HDR));
+ blk->Magic = BLOCK_HDR_USED_MAGIC;
+ blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
+ blk->previous = NULL;
+ blk->Used.Tag = 0xffffffff;
+ blk->Used.Caller = 0;
+ blk->Used.Dumped = FALSE;
+ add_to_used_list(blk);
+ /* the second block is the first free block */
+ blk = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
+ memset(blk, 0, sizeof(BLOCK_HDR));
+ memset(blk + 1, 0x0cc, MiNonPagedPoolNrOfPages * PAGE_SIZE - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart));
+ blk->Magic = BLOCK_HDR_FREE_MAGIC;
+ blk->Size = MiNonPagedPoolLength - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart);
+ blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
+ add_to_free_list(blk);
+#endif
+}
+
/* EOF */