3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
25 #include <internal/debug.h>
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 /*#define ENABLE_VALIDATE_POOL*/
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
34 * Put each block in its own range of pages and position the block at the
35 * end of the range so any accesses beyond the end of block are to invalid
38 /*#define WHOLE_PAGE_ALLOCATIONS*/
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
49 #define POOL_TRACE(args...)
52 /* avl types ****************************************************************/
55 * This declarations should be moved into a separate header file.
60 struct _NODE* link[2];
65 /* TYPES *******************************************************************/
67 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
68 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
71 * fields present at the start of a block (this is for internal use only)
73 typedef struct _BLOCK_HDR
77 struct _BLOCK_HDR* previous;
85 LIST_ENTRY TagListEntry;
96 ExAllocateWholePageBlock(ULONG Size);
98 ExFreeWholePageBlock(PVOID Addr);
100 /* GLOBALS *****************************************************************/
102 extern PVOID MiNonPagedPoolStart;
103 extern ULONG MiNonPagedPoolLength;
106 * Head of the list of free blocks
108 static PNODE FreeBlockListRoot = NULL;
111 * Head of the list of in use block
113 static LIST_ENTRY UsedBlockListHead;
115 static LIST_ENTRY AddressListHead;
117 #ifndef WHOLE_PAGE_ALLOCATIONS
119 * Count of free blocks
121 static ULONG EiNrFreeBlocks = 0;
124 * Count of used blocks
126 static ULONG EiNrUsedBlocks = 0;
130 * Lock that protects the non-paged pool data structures
132 static KSPIN_LOCK MmNpoolLock;
135 * Total memory used for free nonpaged pool blocks
137 ULONG EiFreeNonPagedPool = 0;
140 * Total memory used for nonpaged pool blocks
142 ULONG EiUsedNonPagedPool = 0;
145 * Allocate a range of memory in the nonpaged pool
148 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
151 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
153 #ifdef TAG_STATISTICS_TRACKING
154 #define TAG_HASH_TABLE_SIZE (1024)
155 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
156 #endif /* TAG_STATISTICS_TRACKING */
158 #ifdef WHOLE_PAGE_ALLOCATIONS
159 static RTL_BITMAP NonPagedPoolAllocMap;
160 static ULONG NonPagedPoolAllocMapHint;
161 static ULONG MiCurrentNonPagedPoolLength = 0;
163 static PULONG MiNonPagedPoolAllocMap;
164 static ULONG MiNonPagedPoolNrOfPages;
165 #endif /* WHOLE_PAGE_ALLOCATIONS */
167 /* avl helper functions ****************************************************/
/*
 * Debug helper: in-order recursive dump of the free-block AVL tree rooted
 * at p. Prints one line per free block: header address, size, and a static
 * visit counter.
 * NOTE(review): this chunk is a partial extraction — the NULL check,
 * braces and counter increment between the fragments below are missing,
 * and each line carries a stray original-line-number prefix.
 */
169 void DumpFreeBlockNode(PNODE p)
171    static int count = 0;
/* left subtree first (ascending order per compare_node) */
178    DumpFreeBlockNode(p->link[0]);
179    blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
180    DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
181    DumpFreeBlockNode(p->link[1]);
/*
 * Debug helper: print the whole free-block AVL tree (root header first,
 * then an in-order walk via DumpFreeBlockNode). Debug output only; no
 * pool state is modified.
 */
186 void DumpFreeBlockTree(void)
188    DbgPrint("--- Begin tree ------------------\n");
189    DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
190    DumpFreeBlockNode(FreeBlockListRoot);
191    DbgPrint("--- End tree --------------------\n");
/*
 * AVL ordering predicate for free blocks: primary key is block Size.
 * NOTE(review): original lines 200-211 are missing from this extraction;
 * they presumably break Size ties by header address so equal-sized blocks
 * can coexist in the tree — TODO confirm against the full source.
 */
194 int compare_node(PNODE p1, PNODE p2)
196    BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
197    BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
199    if (blk1->Size == blk2->Size)
212    if (blk1->Size < blk2->Size)
216    if (blk1->Size > blk2->Size)
/*
 * AVL search predicate: compares a requested size (passed by pointer in
 * 'value') against the Size of the free block held by tree node p.
 * Used by avl_find_equal_or_greater for best-fit lookup.
 * NOTE(review): the comparison/return lines are missing from this
 * extraction (original lines after 228).
 */
225 int compare_value(PVOID value, PNODE p)
227    BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
228    ULONG v = *(PULONG)value;
241 /* avl functions **********************************************************/
244 * The avl functions should be moved into a separate file.
247 /* The avl_insert and avl_remove are based on libavl (library for manipulation of binary trees). */
/*
 * Insert node n into the AVL tree at *root using 'compare' for ordering,
 * then rebalance. Derived from GNU libavl (see header comment above).
 * Parent pointers are maintained in addition to the usual child links.
 * NOTE(review): many interior lines (braces, descent body, balance-factor
 * updates) are missing from this extraction; the visible fragments follow
 * the standard libavl pavl_insert structure.
 */
249 void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
251   PNODE y;             /* Top node to update balance factor, and parent. */
252   PNODE p, q;          /* Iterator, and parent. */
253   PNODE w;             /* New root of rebalanced subtree. */
254   int dir;             /* Direction to descend. */
256   n->link[0] = n->link[1] = n->parent = NULL;
/* Phase 1: descend from the root to find the attachment point. */
260   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
262       dir = compare(n, p) > 0;
/* Phase 2: walk back up from n to y updating balance factors. */
284   for (p = n; p != y; p = q)
287       dir = q->link[0] != p;
/* Phase 3: rebalance at y if it became doubly left-heavy... */
298   if (y->balance == -2)
300       PNODE x = y->link[0];
301       if (x->balance == -1)
/* single right rotation */
304           y->link[0] = x->link[1];
306           x->balance = y->balance = 0;
307           x->parent = y->parent;
309           if (y->link[0] != NULL)
311               y->link[0]->parent = y;
/* double (left-right) rotation around w */
316           assert (x->balance == +1);
318           x->link[1] = w->link[0];
320           y->link[0] = w->link[1];
322           if (w->balance == -1)
327           else if (w->balance == 0)
329               x->balance = y->balance = 0;
331           else /* |w->pavl_balance == +1| */
337           w->parent = y->parent;
338           x->parent = y->parent = w;
339           if (x->link[1] != NULL)
341               x->link[1]->parent = x;
343           if (y->link[0] != NULL)
345               y->link[0]->parent = y;
/* ...or doubly right-heavy (mirror image of the above). */
349   else if (y->balance == +2)
351       PNODE x = y->link[1];
352       if (x->balance == +1)
/* single left rotation */
355           y->link[1] = x->link[0];
357           x->balance = y->balance = 0;
358           x->parent = y->parent;
360           if (y->link[1] != NULL)
362               y->link[1]->parent = y;
/* double (right-left) rotation around w */
367           assert (x->balance == -1);
369           x->link[0] = w->link[1];
371           y->link[1] = w->link[0];
378           else if (w->balance == 0)
380               x->balance = y->balance = 0;
382           else /* |w->pavl_balance == -1| */
388           w->parent = y->parent;
389           x->parent = y->parent = w;
390           if (x->link[0] != NULL)
392               x->link[0]->parent = x;
394           if (y->link[1] != NULL)
396               y->link[1]->parent = y;
/* Reattach the rebalanced subtree root w to y's former parent (or *root). */
404   if (w->parent != NULL)
406       w->parent->link[y != w->parent->link[0]] = w;
/*
 * Remove 'item' from the AVL tree at *root and rebalance. Derived from
 * GNU libavl pavl_delete. Handles the three deletion cases (no right
 * child / right child without left subtree / in-order successor splice),
 * then walks back toward the root restoring balance factors.
 * NOTE(review): heavily gapped extraction — search loop, several braces
 * and rotation bodies are missing between the visible fragments.
 */
416 void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
418   PNODE p; /* Traverses tree to find node to delete. */
419   PNODE q; /* Parent of |p|. */
420   int dir; /* Side of |q| on which |p| is linked. */
422   if (root == NULL || *root == NULL)
436   dir = compare(p, q) > 0;
/* Case 1: p has no right child — replace p by its left subtree. */
439   if (p->link[1] == NULL)
441       q->link[dir] = p->link[0];
442       if (q->link[dir] != NULL)
444           q->link[dir]->parent = p->parent;
/* Case 2: p's right child r has no left subtree — r replaces p. */
449       PNODE r = p->link[1];
450       if (r->link[0] == NULL)
452           r->link[0] = p->link[0];
454           r->parent = p->parent;
455           if (r->link[0] != NULL)
457               r->link[0]->parent = r;
459           r->balance = p->balance;
/* Case 3: splice in p's in-order successor s (leftmost node of r). */
465           PNODE s = r->link[0];
466           while (s->link[0] != NULL)
471           r->link[0] = s->link[1];
472           s->link[0] = p->link[0];
473           s->link[1] = p->link[1];
475           if (s->link[0] != NULL)
477               s->link[0]->parent = s;
479           s->link[1]->parent = s;
480           s->parent = p->parent;
481           if (r->link[0] != NULL)
483               r->link[0]->parent = r;
485           s->balance = p->balance;
/* Detach the removed node completely. */
491   item->link[0] = item->link[1] = item->parent = NULL;
/* Walk back up to the root, fixing balance factors and rotating. */
494   while (q != (PNODE) root)
498       if (y->parent != NULL)
509       dir = q->link[0] != y;
/* Deleted from y's left side: y leans right. */
511       if (y->balance == +1)
515       else if (y->balance == +2)
517           PNODE x = y->link[1];
518           if (x->balance == -1)
/* double (right-left) rotation */
522               assert (x->balance == -1);
524               x->link[0] = w->link[1];
526               y->link[1] = w->link[0];
528               if (w->balance == +1)
533               else if (w->balance == 0)
535                   x->balance = y->balance = 0;
537               else /* |w->pavl_balance == -1| */
543               w->parent = y->parent;
544               x->parent = y->parent = w;
545               if (x->link[0] != NULL)
547                   x->link[0]->parent = x;
549               if (y->link[1] != NULL)
551                   y->link[1]->parent = y;
/* single left rotation */
557               y->link[1] = x->link[0];
559               x->parent = y->parent;
561               if (y->link[1] != NULL)
563                   y->link[1]->parent = y;
574               x->balance = y->balance = 0;
/* Deleted from y's right side: mirror image of the above. */
582       dir = q->link[0] != y;
584       if (y->balance == -1)
588       else if (y->balance == -2)
590           PNODE x = y->link[0];
591           if (x->balance == +1)
594               assert (x->balance == +1);
596               x->link[1] = w->link[0];
598               y->link[0] = w->link[1];
600               if (w->balance == -1)
605               else if (w->balance == 0)
607                   x->balance = y->balance = 0;
609               else /* |w->pavl_balance == +1| */
615               w->parent = y->parent;
616               x->parent = y->parent = w;
617               if (x->link[1] != NULL)
619                   x->link[1]->parent = x;
621               if (y->link[0] != NULL)
623                   y->link[0]->parent = y;
/* single right rotation */
629               y->link[0] = x->link[1];
631               x->parent = y->parent;
633               if (y->link[0] != NULL)
635                   y->link[0]->parent = y;
646               x->balance = y->balance = 0;
/*
 * Return the leftmost (minimum per compare_node) node of the tree, or
 * NULL for an empty tree. Body missing from this extraction.
 */
656 PNODE _cdecl avl_get_first(PNODE root)
/*
 * Return the in-order successor of p within the tree at 'root', or NULL
 * when p is the maximum. The visible fragment is the ascend-while-right-
 * child step of the classic parent-pointer successor algorithm; the
 * descend-into-right-subtree branch is missing from this extraction.
 */
671 PNODE avl_get_next(PNODE root, PNODE p)
686   while (q && q->link[1] == p)
/*
 * Best-fit lookup: return the tree node whose block Size is the smallest
 * value >= 'size' (per the supplied compare predicate), or NULL if no
 * block is large enough. Descent/bookkeeping lines are missing from this
 * extraction.
 */
702 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
708   for (p = root; p != NULL;)
710       cmp = compare((PVOID)&size, p);
/* peek at the left child to decide whether a smaller fit exists */
724       cmp = compare((PVOID)&size, p->link[0]);
737 /* non paged pool functions ************************************************/
739 #ifdef TAG_STATISTICS_TRACKING
/*
 * Remove a used block from the per-tag statistics hash table.
 * Untagged blocks (Tag == 0) are never linked in, so they are skipped.
 * Caller must hold MmNpoolLock.
 */
741 MiRemoveFromTagHashTable(BLOCK_HDR* block)
743  * Remove a block from the tag hash table
746   if (block->Used.Tag == 0)
751   RemoveEntryList(&block->Used.TagListEntry);
/*
 * Link a used block into the tag statistics hash table, bucketed by
 * Tag modulo TAG_HASH_TABLE_SIZE. Untagged blocks (Tag == 0) are not
 * tracked. Caller must hold MmNpoolLock.
 */
755 MiAddToTagHashTable(BLOCK_HDR* block)
757  * Add a block to the tag hash table
762   if (block->Used.Tag == 0)
767   hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
769   InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
771 #endif /* TAG_STATISTICS_TRACKING */
773 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
/*
 * Print one summary line for a pool tag: the tag value (decoded to four
 * ASCII chars when printable), block count, total and average size.
 * NOTE: divides by CurrentNrBlocks — the caller (stats dump loop) only
 * invokes this when CurrentNrBlocks != 0.
 */
775 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
779   c1 = (CurrentTag >> 24) & 0xFF;
780   c2 = (CurrentTag >> 16) & 0xFF;
781   c3 = (CurrentTag >> 8) & 0xFF;
782   c4 = CurrentTag & 0xFF;
784   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
/* tag chars print low byte first, matching the 'abcd' literal convention */
786       DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
787                CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
788                CurrentSize, CurrentSize / CurrentNrBlocks);
792       DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
793                CurrentTag, CurrentNrBlocks, CurrentSize,
794                CurrentSize / CurrentNrBlocks);
797 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS); */
/*
 * Dump aggregate statistics for the nonpaged pool, grouped by tag.
 * For each hash bucket: repeatedly pull out all blocks sharing one tag
 * onto tmpListHead, accumulate their count/size, print via
 * MiDumpTagStats, then splice the bucket's contents back. When NewOnly
 * is TRUE only blocks not yet marked Dumped are counted (and are marked).
 * Compiled only with TAG_STATISTICS_TRACKING and without
 * WHOLE_PAGE_ALLOCATIONS.
 * NOTE(review): assumes the caller holds MmNpoolLock — TODO confirm;
 * and the '¤t' tokens below are mojibake for '&current' (encoding
 * corruption in this extraction) — must be fixed before this compiles.
 */
800 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
802 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
806    ULONG CurrentNrBlocks;
810    LIST_ENTRY tmpListHead;
811    PLIST_ENTRY current_entry;
813    DbgPrint("******* Dumping non paging pool stats ******\n");
816    for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
818        InitializeListHead(&tmpListHead);
/* each outer iteration consumes every block with one distinct tag */
820        while (!IsListEmpty(&tag_hash_table[i]))
824            current_entry = tag_hash_table[i].Flink;
825            while (current_entry != &tag_hash_table[i])
827                current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
828                current_entry = current_entry->Flink;
/* first block seen this pass defines the tag being accumulated */
831                CurrentTag = current->Used.Tag;
835                if (current->Used.Tag == CurrentTag)
/* NOTE(review): mojibake — should read &current->Used.TagListEntry */
837                    RemoveEntryList(&curren;t->Used.TagListEntry);
838                    InsertHeadList(&tmpListHead, &curren;t->Used.TagListEntry);
839                    if (!NewOnly || !current->Used.Dumped)
843                        CurrentSize += current->Size;
844                        TotalSize += current->Size;
845                        current->Used.Dumped = TRUE;
849            if (CurrentTag != 0 && CurrentNrBlocks != 0)
851                MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
/* splice the temporarily-removed blocks back into the bucket */
854        if (!IsListEmpty(&tmpListHead))
856            tag_hash_table[i].Flink = tmpListHead.Flink;
857            tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
858            tag_hash_table[i].Blink = tmpListHead.Blink;
859            tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
862    if (TotalBlocks != 0)
864        DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
865                 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
869        DbgPrint("TotalBlocks %d TotalSize %d\n",
870                 TotalBlocks, TotalSize);
872    DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
873             EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
874    DbgPrint("***************** Dump Complete ***************\n");
875 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
/*
 * Dump every block on the in-use list: size, tag (decoded to ASCII when
 * printable) and allocating caller address. With NewOnly == TRUE only
 * blocks not previously dumped are shown; every printed block is then
 * marked Dumped. Takes MmNpoolLock itself, so callers must NOT hold it.
 * Compiled out under WHOLE_PAGE_ALLOCATIONS.
 */
879 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
881 #ifndef WHOLE_PAGE_ALLOCATIONS
883    PLIST_ENTRY current_entry;
886    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
888    DbgPrint("******* Dumping non paging pool contents ******\n");
889    current_entry = UsedBlockListHead.Flink;
890    while (current_entry != &UsedBlockListHead)
892        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
893        if (!NewOnly || !current->Used.Dumped)
897            c1 = (current->Used.Tag >> 24) & 0xFF;
898            c2 = (current->Used.Tag >> 16) & 0xFF;
899            c3 = (current->Used.Tag >> 8) & 0xFF;
900            c4 = current->Used.Tag & 0xFF;
902            if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
904                DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
905                         current->Size, current->Used.Tag, c4, c3, c2, c1,
906                         current->Used.Caller);
910                DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
911                         current->Size, current->Used.Tag, current->Used.Caller);
913            current->Used.Dumped = TRUE;
915        current_entry = current_entry->Flink;
917    DbgPrint("***************** Dump Complete ***************\n");
918    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
919 #endif /* not WHOLE_PAGE_ALLOCATIONS */
922 #ifndef WHOLE_PAGE_ALLOCATIONS
924 #ifdef ENABLE_VALIDATE_POOL
/*
 * Walk the free-block AVL tree in order and bugcheck on any sign of
 * corruption: wrong magic, block extending outside the pool region, or
 * more nodes than the EiNrFreeBlocks counter claims. Debug aid compiled
 * only under ENABLE_VALIDATE_POOL.
 */
925 static void validate_free_list(void)
927  * FUNCTION: Validate the integrity of the list of free blocks
931   unsigned int blocks_seen=0;
934   p = avl_get_first(FreeBlockListRoot);
940       current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
941       base_addr = (PVOID)current;
943       if (current->Magic != BLOCK_HDR_FREE_MAGIC)
945           DbgPrint("Bad block magic (probable pool corruption) at %x\n",
947           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
/* header + payload must lie entirely inside the nonpaged pool range */
950       if (base_addr < MiNonPagedPoolStart ||
951           base_addr + sizeof(BLOCK_HDR) + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
953           DbgPrint("Block %x found outside pool area\n",current);
954           DbgPrint("Size %d\n",current->Size);
955           DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
956                    MiNonPagedPoolStart+MiNonPagedPoolLength);
957           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
/* cycle guard: a loop in the tree would exceed the block counter */
960       if (blocks_seen > EiNrFreeBlocks)
962           DbgPrint("Too many blocks on free list\n");
963           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
965       p = avl_get_next(FreeBlockListRoot, p);
/*
 * Walk the in-use block list and bugcheck on corruption: bad magic,
 * block outside the pool range, cycle (more nodes than EiNrUsedBlocks),
 * or broken Flink/Blink symmetry. Debug aid compiled only under
 * ENABLE_VALIDATE_POOL.
 * NOTE(review): '¤t' below is mojibake for '&current' — must be
 * repaired before this compiles.
 */
969 static void validate_used_list(void)
971  * FUNCTION: Validate the integrity of the list of used blocks
975   PLIST_ENTRY current_entry;
976   unsigned int blocks_seen=0;
978   current_entry = UsedBlockListHead.Flink;
979   while (current_entry != &UsedBlockListHead)
983       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
984       base_addr = (PVOID)current;
986       if (current->Magic != BLOCK_HDR_USED_MAGIC)
988           DbgPrint("Bad block magic (probable pool corruption) at %x\n",
990           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
992       if (base_addr < MiNonPagedPoolStart ||
993           (base_addr+current->Size) >
994           MiNonPagedPoolStart+MiNonPagedPoolLength)
996           DbgPrint("Block %x found outside pool area\n",current);
997           DbgPrint("Size %d\n",current->Size);
998           DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
999                    MiNonPagedPoolStart+MiNonPagedPoolLength);
1000          KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1003      if (blocks_seen > EiNrUsedBlocks)
1005          DbgPrint("Too many blocks on used list\n");
1006          KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
/* check next->Blink points back at us (list symmetry) */
1008      if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
1009          current->Used.ListEntry.Flink->Blink != &curren;t->Used.ListEntry)
1011          DbgPrint("%s:%d:Break in list (current %x next %x "
1012                   "current->next->previous %x)\n",
1013                   __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
1014                   current->Used.ListEntry.Flink->Blink);
1015          KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1018      current_entry = current_entry->Flink;
/*
 * Verify that no block on either the free tree or the used list overlaps
 * the byte range [base, last) occupied by blk (header + payload).
 * Bugchecks on any intersection. Debug aid compiled only under
 * ENABLE_VALIDATE_POOL.
 */
1022 static void check_duplicates(BLOCK_HDR* blk)
1024  * FUNCTION: Check a block has no duplicates
1026  * blk = block to check
1027  * NOTE: Bug checks if duplicates are found
1030   char* base = (char*)blk;
1031   char* last = ((char*)blk) + sizeof(BLOCK_HDR) + blk->Size;
1033   PLIST_ENTRY current_entry;
/* pass 1: free tree */
1036   p = avl_get_first(FreeBlockListRoot);
1040       current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1042       if (current->Magic != BLOCK_HDR_FREE_MAGIC)
1044           DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1046           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
/* another block starting inside blk's range */
1049       if ( (char*)current > base && (char*)current < last )
1051           DbgPrint("intersecting blocks on list\n");
1052           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
/* a block starting before blk but extending into it */
1054       if ( (char*)current < base &&
1055            ((char*)current + current->Size + sizeof(BLOCK_HDR))
1058           DbgPrint("intersecting blocks on list\n");
1059           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1061       p = avl_get_next(FreeBlockListRoot, p);
/* pass 2: used list, same two overlap checks */
1064   current_entry = UsedBlockListHead.Flink;
1065   while (current_entry != &UsedBlockListHead)
1067       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1069       if ( (char*)current > base && (char*)current < last )
1071           DbgPrint("intersecting blocks on list\n");
1072           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1074       if ( (char*)current < base &&
1075            ((char*)current + current->Size + sizeof(BLOCK_HDR))
1078           DbgPrint("intersecting blocks on list\n");
1079           KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1082       current_entry = current_entry->Flink;
/*
 * Full consistency check of the nonpaged pool: validate the free tree and
 * used list individually, then run the O(n^2) pairwise overlap check
 * (check_duplicates) for every block on both structures. Debug aid
 * compiled only under ENABLE_VALIDATE_POOL; caller must hold MmNpoolLock.
 */
1087 static void validate_kernel_pool(void)
1089  * FUNCTION: Checks the integrity of the kernel memory heap
1093   PLIST_ENTRY current_entry;
1096   validate_free_list();
1097   validate_used_list();
1099   p = avl_get_first(FreeBlockListRoot);
1102       current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1103       check_duplicates(current);
1104       p = avl_get_next(FreeBlockListRoot, p);
1106   current_entry = UsedBlockListHead.Flink;
1107   while (current_entry != &UsedBlockListHead)
1109       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1110       check_duplicates(current);
1111       current_entry = current_entry->Flink;
/*
 * Release whole pages fully contained inside a free block back to the
 * system (the page-freeing loop itself is missing from this extraction).
 * A block smaller than a page-aligned span contains no complete page and
 * is left alone.
 */
1118 free_pages(BLOCK_HDR* blk)
1125   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;
1128  * If the block doesn't contain a whole page then there is nothing to do
1130   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
1137 static void remove_from_used_list(BLOCK_HDR* current)
1139 RemoveEntryList(¤t->Used.ListEntry);
1140 EiUsedNonPagedPool -= current->Size;
1144 static void remove_from_free_list(BLOCK_HDR* current)
1146 DPRINT("remove_from_free_list %d\n", current->Size);
1148 avl_remove(&FreeBlockListRoot, ¤t->Free.Node, compare_node);
1150 EiFreeNonPagedPool -= current->Size;
1152 DPRINT("remove_from_free_list done\n");
1154 DumpFreeBlockTree();
/*
 * Insert blk into the free tree, first coalescing it with an adjacent
 * free predecessor and/or successor block so the pool never holds two
 * touching free blocks. Stale headers are poisoned with 0xcc. Caller
 * must hold MmNpoolLock.
 */
1159 add_to_free_list(BLOCK_HDR* blk)
1161  * FUNCTION: add the block to the free list (internal)
1166   DPRINT("add_to_free_list (%d)\n", blk->Size);
/* merge backward: if the physically-previous block is free, absorb blk
 * into it (its magic is flipped to USED temporarily so the forward-merge
 * code below treats the grown block uniformly) */
1170   current = blk->previous;
1171   if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
1173       remove_from_free_list(current);
1174       current->Size = current->Size + sizeof(BLOCK_HDR) + blk->Size;
1175       current->Magic = BLOCK_HDR_USED_MAGIC;
1176       memset(blk, 0xcc, sizeof(BLOCK_HDR));
/* merge forward: if the physically-next block is free, absorb it */
1180   current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1181   if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength &&
1182       current->Magic == BLOCK_HDR_FREE_MAGIC)
1184       remove_from_free_list(current);
1185       blk->Size += sizeof(BLOCK_HDR) + current->Size;
1186       memset(current, 0xcc, sizeof(BLOCK_HDR));
/* re-link the (new) physical successor's back pointer */
1187       current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1188       if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength)
1190           current->previous = blk;
1194   DPRINT("%d\n", blk->Size);
1195   blk->Magic = BLOCK_HDR_FREE_MAGIC;
1196   EiFreeNonPagedPool += blk->Size;
1197   avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);
1199   DPRINT("add_to_free_list done\n");
/* debug-only tree dump (guard lines missing from this extraction) */
1201   DumpFreeBlockTree();
1205 static void add_to_used_list(BLOCK_HDR* blk)
1207 * FUNCTION: add the block to the used list (internal)
1210 InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
1211 EiUsedNonPagedPool += blk->Size;
1215 inline static void* block_to_address(BLOCK_HDR* blk)
1217 * FUNCTION: Translate a block header address to the corresponding block
1218 * address (internal)
1221 return ( (void *) ((char*)blk + sizeof(BLOCK_HDR)) );
1224 inline static BLOCK_HDR* address_to_block(void* addr)
1226 return (BLOCK_HDR *)
1227 ( ((char*)addr) - sizeof(BLOCK_HDR) );
/*
 * Commit physical pages for the page range spanned by blk's payload up to
 * 'end'. For each page not yet marked present in MiNonPagedPoolAllocMap,
 * request a page from the MC_NPPOOL consumer, map it, mark the bitmap bit
 * and poison the fresh page with 0xcc. Returns FALSE (via 'result') on
 * allocation/mapping failure. Caller must hold MmNpoolLock.
 * NOTE(review): the loop construct and failure-path lines between the
 * fragments below are missing from this extraction.
 */
1231 grow_block(BLOCK_HDR* blk, PVOID end)
1234   PHYSICAL_ADDRESS Page;
1235   BOOLEAN result = TRUE;
/* first whole page after the header; only full pages are committed */
1238   PVOID start = (PVOID)PAGE_ROUND_UP((ULONG)(blk + 1));
1239   end = (PVOID)PAGE_ROUND_UP(end);
1240   index = (ULONG)(start - MiNonPagedPoolStart) / PAGE_SIZE;
/* skip pages already committed (bit set in the alloc bitmap) */
1243   if (!(MiNonPagedPoolAllocMap[index / 32] & (1 << (index % 32))))
1245       Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1246       if (!NT_SUCCESS(Status))
1251       Status = MmCreateVirtualMapping(NULL,
1253                                       PAGE_READWRITE|PAGE_SYSTEM,
1256       if (!NT_SUCCESS(Status))
1258           DbgPrint("Unable to create virtual mapping\n");
/* give the page back so it is not leaked on mapping failure */
1259           MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
1263       MiNonPagedPoolAllocMap[index / 32] |= (1 << (index % 32));
1264       memset(start, 0xcc, PAGE_SIZE);
1265       MiNonPagedPoolNrOfPages++;
/*
 * Core allocator: find (best-fit) a free block able to hold 'size' bytes
 * at the requested 'alignment', carve it up, commit backing pages via
 * grow_block, and move the resulting block onto the used list.
 * Returns the BLOCK_HDR of the allocated block or NULL on failure.
 * Caller must hold MmNpoolLock. Too order-sensitive to restyle from this
 * gapped extraction — documented in place instead.
 */
1273 static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
1275    BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
1276    ULONG previous_size, current_size, next_size, new_size;
1278    PVOID addr, aligned_addr;
1281    DPRINT("get_block %d\n", size);
/* fast path (alignment == 0 branch, braces missing in extraction):
 * best-fit straight from the AVL tree */
1285        p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1288            best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1289            addr = block_to_address(best);
/* aligned path: scan candidates >= size for one that can satisfy the
 * alignment, preferring the smallest workable block */
1294        p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1298            current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1299            addr = block_to_address(current);
1300            /* calculate first aligned address available within this block */
1301            aligned_addr = MM_ROUND_UP(addr, alignment);
1302            /* check to see if this address is already aligned */
1303            if (addr == aligned_addr)
1305                if (current->Size >= size &&
1306                    (best == NULL || current->Size < best->Size))
1313            /* make sure there's enough room to make a free block by the space skipped
1314             * from alignment. If not, calculate forward to the next alignment
1315             * and see if we allocate there...
1317            new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1318            if ((ULONG)aligned_addr - (ULONG)addr < sizeof(BLOCK_HDR))
1320                /* not enough room for a free block header, add some more bytes */
1321                aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
1322                new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1324            if (current->Size >= new_size &&
1325                (best == NULL || current->Size < best->Size))
/* stop scanning once a comfortably-large candidate is in hand */
1330            if (best && current->Size >= size + alignment + 2 * sizeof(BLOCK_HDR))
1334            p = avl_get_next(FreeBlockListRoot, p);
1340     * We didn't find anything suitable at all.
1348    current_size = current->Size;
/* aligned case: split off a leading 'previous' free block so the payload
 * lands exactly on aligned_addr */
1352        addr = block_to_address(current);
1353        aligned_addr = MM_ROUND_UP(addr, alignment);
1354        if (addr != aligned_addr)
1356            blk = address_to_block(aligned_addr);
1357            if (blk < current + 1)
1359                aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
1360                blk = address_to_block(aligned_addr);
1363             * if size-aligned, break off the preceding bytes into their own block...
1366            previous_size = (ULONG)blk - (ULONG)previous - sizeof(BLOCK_HDR);
1368            current_size -= ((ULONG)current - (ULONG)previous);
1372    end = (PVOID)current + sizeof(BLOCK_HDR) + size;
1374    if (current_size > size + sizeof(BLOCK_HDR) + 4)
1376        /* create a new free block after our block, if the memory size is >= 4 byte for this block */
1377        next = (BLOCK_HDR*)((ULONG)current + size + sizeof(BLOCK_HDR));
1378        next_size = current_size - size - sizeof(BLOCK_HDR);
1379        current_size = size;
1380        end = (PVOID)next + sizeof(BLOCK_HDR);
/* alignment split: pull 'previous' off the tree, commit pages, then
 * rebuild the three headers (previous / current / trailing neighbor) */
1385        remove_from_free_list(previous);
1386        if (!grow_block(previous, end))
/* failure: return the untouched block to the free tree and bail */
1388            add_to_free_list(previous);
1391        memset(current, 0, sizeof(BLOCK_HDR));
1392        current->Size = current_size;
1393        current->Magic = BLOCK_HDR_USED_MAGIC;
1394        current->previous = previous;
1395        previous->Size = previous_size;
1398        blk = (BLOCK_HDR*)((PVOID)(current + 1) + current->Size);
1399        if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
1401            blk->previous = current;
1405        add_to_free_list(previous);
/* unaligned (or already-aligned) case: take the block directly */
1409        remove_from_free_list(current);
1411        if (!grow_block(current, end))
1413            add_to_free_list(current);
1417        current->Magic = BLOCK_HDR_USED_MAGIC;
1420            current->Size = current_size;
/* if a remainder was carved off, publish it as a new free block */
1426        memset(next, 0, sizeof(BLOCK_HDR));
1428        next->Size = next_size;
1429        next->Magic = BLOCK_HDR_FREE_MAGIC;
1430        next->previous = current;
1431        blk = (BLOCK_HDR*)((PVOID)(next + 1) + next->Size);
1432        if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
1434            blk->previous = next;
1436        add_to_free_list(next);
1439    add_to_used_list(current);
1444 #endif /* not WHOLE_PAGE_ALLOCATIONS */
/*
 * Free a nonpaged-pool allocation previously returned by
 * ExAllocateNonPagedPoolWithTag. Under WHOLE_PAGE_ALLOCATIONS the block
 * is handed to ExFreeWholePageBlock; otherwise the header is validated
 * (bugcheck paths for double-free / bogus pointer are on lines missing
 * from this extraction), the payload poisoned with 0xcc, and the block
 * moved from the used list to the free tree (coalescing there).
 * Takes MmNpoolLock itself; callers must not hold it.
 */
1446 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1448  * FUNCTION: Releases previously allocated memory
1450  * block = block to free
1453 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1461    DPRINT("freeing block %x\n",blk);
1463    POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1464               ((PULONG)&block)[-1]);
1466    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1468    ExFreeWholePageBlock(block);
1469    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1471 #else /* not WHOLE_PAGE_ALLOCATIONS */
1473    BLOCK_HDR* blk=address_to_block(block);
1481    DPRINT("freeing block %x\n",blk);
1483    POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
1484               ((PULONG)&block)[-1]);
1486    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
/* sanity: distinguish double-free from a pointer we never issued */
1490    if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1492        if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1494            DbgPrint("ExFreePool of already freed address %x\n", block);
1498            DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
/* poison the payload to catch use-after-free */
1505    memset(block, 0xcc, blk->Size);
1507    remove_from_used_list(blk);
1508    blk->Magic = BLOCK_HDR_FREE_MAGIC;
1509    add_to_free_list(blk);
1511    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1513 #endif /* WHOLE_PAGE_ALLOCATIONS */
/*
 * Allocate Size bytes of nonpaged pool. Type selects alignment
 * (cache-aligned variants / page alignment for >= PAGE_SIZE requests);
 * Tag feeds the statistics hash table; Caller records the allocator's
 * return address for leak diagnostics. Returns NULL for Size == 0 or
 * when no block can be found. Takes MmNpoolLock itself. The returned
 * payload is zeroed.
 */
1517 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
1519 #ifdef WHOLE_PAGE_ALLOCATIONS
1523    POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1526    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
/* allocating zero bytes legally yields NULL... */
1529     * accomodate this useful idiom
1533        POOL_TRACE("= NULL\n");
1534        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1538    block = ExAllocateWholePageBlock(Size);
1539    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1542 #else /* not WHOLE_PAGE_ALLOCATIONS */
1544    BLOCK_HDR* best = NULL;
1548    POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1551    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1556    /* after some allocations print the npaged pool stats */
1557 #ifdef TAG_STATISTICS_TRACKING
1559        static ULONG counter = 0;
1560        if (counter++ % 100000 == 0)
1562            MiDebugDumpNonPagedPoolStats(FALSE);
1568     * accomodate this useful idiom
1572        POOL_TRACE("= NULL\n");
1573        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1576    /* Make the size dword alligned, this makes the block dword alligned */
1577    Size = ROUND_UP(Size, sizeof(DWORD));
1579    if (Size >= PAGE_SIZE)
1581        alignment = PAGE_SIZE;
1583    else if (Type == NonPagedPoolCacheAligned ||
1584             Type == NonPagedPoolCacheAlignedMustS)
1586        alignment = MM_CACHE_LINE_SIZE;
1593    best = get_block(Size, alignment);
/* allocation failed: drop the lock and return NULL (return on a line
 * missing from this extraction) */
1596        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
/* stamp tracking info into the header before publishing the block */
1599    best->Used.Tag = Tag;
1600    best->Used.Caller = Caller;
1601    best->Used.Dumped = FALSE;
1602    best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
1603    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1604    block = block_to_address(best);
1605    memset(block,0,Size);
1607 #endif /* WHOLE_PAGE_ALLOCATIONS */
1610 #ifdef WHOLE_PAGE_ALLOCATIONS
/*
 * WHOLE_PAGE_ALLOCATIONS allocator: reserve NrPages+1 consecutive bitmap
 * slots (the extra page acts as an unmapped guard page after the block),
 * map fresh pages, and return a pointer positioned so the allocation
 * ENDS exactly at the last mapped byte — overruns then fault on the
 * guard page. Caller must hold MmNpoolLock.
 */
1613 ExAllocateWholePageBlock(ULONG Size)
1616    PHYSICAL_ADDRESS Page;
1621    NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
/* +1: trailing guard page stays unmapped */
1623    Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
1624    if (Base == 0xffffffff)
1626        DbgPrint("Out of non paged pool space.\n");
1629    if (NonPagedPoolAllocMapHint == Base)
1631        NonPagedPoolAllocMapHint += (NrPages + 1);
1633    Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
1635    for (i = 0; i < NrPages; i++)
1637        Page = MmAllocPage(MC_NPPOOL, 0);
/* NOTE(review): the failure-handling branch body (line 1638+) is missing
 * from this extraction — confirm pages/bits are released on failure */
1638        if (Page.QuadPart == 0LL)
1642        MmCreateVirtualMapping(NULL,
1643                               Address + (i * PAGE_SIZE),
1644                               PAGE_READWRITE | PAGE_SYSTEM,
1649    MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
/* place the block flush against the end of the mapped range */
1650    return((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
/*
 * WHOLE_PAGE_ALLOCATIONS free path: validate Addr is inside the pool,
 * then walk forward unmapping each present page and clearing its bitmap
 * bit (loop advance lines missing from this extraction). The search hint
 * is pulled back so freed space is found quickly on the next allocation.
 * Caller must hold MmNpoolLock.
 */
1654 ExFreeWholePageBlock(PVOID Addr)
1658    if (Addr < MiNonPagedPoolStart ||
1659        Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
1661        DbgPrint("Block %x found outside pool area\n", Addr);
1664    Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
1665    NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
/* unmap until the guard page (first non-present page) is reached */
1666    while (MmIsPagePresent(NULL, Addr))
1668        MmDeleteVirtualMapping(NULL,
1673        RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
1679 #endif /* WHOLE_PAGE_ALLOCATIONS */
1682 MiInitializeNonPagedPool(VOID)
1685 PHYSICAL_ADDRESS Page;
1688 #ifdef WHOLE_PAGE_ALLOCATIONS
1692 #ifdef TAG_STATISTICS_TRACKING
1693 for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
1695 InitializeListHead(&tag_hash_table[i]);
1698 KeInitializeSpinLock(&MmNpoolLock);
1699 InitializeListHead(&UsedBlockListHead);
1700 InitializeListHead(&AddressListHead);
1701 FreeBlockListRoot = NULL;
1702 #ifdef WHOLE_PAGE_ALLOCATIONS
1703 NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE;
1704 MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
1705 Address = MiNonPagedPoolStart;
1706 for (i = 0; i < NonPagedPoolAllocMapHint; i++)
1708 Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1709 if (!NT_SUCCESS(Status))
1711 DbgPrint("Unable to allocate a page\n");
1714 Status = MmCreateVirtualMapping(NULL,
1716 PAGE_READWRITE|PAGE_SYSTEM,
1719 if (!NT_SUCCESS(Status))
1721 DbgPrint("Unable to create virtual mapping\n");
1724 Address += PAGE_SIZE;
1726 RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart, MM_NONPAGED_POOL_SIZE / PAGE_SIZE);
1727 RtlClearAllBits(&NonPagedPoolAllocMap);
1728 RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
1730 MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
1731 MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * sizeof(BLOCK_HDR));
1732 Address = MiNonPagedPoolStart;
1733 for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
1735 Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1736 if (!NT_SUCCESS(Status))
1738 DbgPrint("Unable to allocate a page\n");
1741 Status = MmCreateVirtualMapping(NULL,
1743 PAGE_READWRITE|PAGE_SYSTEM,
1746 if (!NT_SUCCESS(Status))
1748 DbgPrint("Unable to create virtual mapping\n");
1751 MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
1752 Address += PAGE_SIZE;
1754 /* the first block contains the non paged pool bitmap */
1755 blk = (BLOCK_HDR*)MiNonPagedPoolStart;
1756 memset(blk, 0, sizeof(BLOCK_HDR));
1757 blk->Magic = BLOCK_HDR_USED_MAGIC;
1758 blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
1759 blk->previous = NULL;
1760 blk->Used.Tag = 0xffffffff;
1761 blk->Used.Caller = 0;
1762 blk->Used.Dumped = FALSE;
1763 add_to_used_list(blk);
1764 /* the second block is the first free block */
1765 blk = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1766 memset(blk, 0, sizeof(BLOCK_HDR));
1767 memset(blk + 1, 0x0cc, MiNonPagedPoolNrOfPages * PAGE_SIZE - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart));
1768 blk->Magic = BLOCK_HDR_FREE_MAGIC;
1769 blk->Size = MiNonPagedPoolLength - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart);
1770 blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
1771 add_to_free_list(blk);