3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/npool.c
6 * PURPOSE: Implements the kernel memory pool
7 * PROGRAMMER: David Welch (welch@cwcom.net)
10 * 10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11 * in take_block (if current bigger than required)
12 * in remove_from_used_list
14 * 23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
17 /* INCLUDES ****************************************************************/
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
25 #include <internal/debug.h>
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 //#define ENABLE_VALIDATE_POOL
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
34 * Put each block in its own range of pages and position the block at the
35 * end of the range so any accesses beyond the end of block are to invalid
38 //#define WHOLE_PAGE_ALLOCATIONS
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
49 #define POOL_TRACE(args...)
52 /* TYPES *******************************************************************/
54 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
55 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
58 * fields present at the start of a block (this is for internal use only)
60 typedef struct _BLOCK_HDR
67 struct _BLOCK_HDR* tag_next;
72 ExAllocateWholePageBlock(ULONG Size);
74 ExFreeWholePageBlock(PVOID Addr);
76 /* GLOBALS *****************************************************************/
79 * Memory management initialized symbol for the base of the pool
81 static unsigned int kernel_pool_base = 0;
84 * Head of the list of free blocks
86 static LIST_ENTRY FreeBlockListHead;
89 * Head of the list of in use block
91 static LIST_ENTRY UsedBlockListHead;
93 #ifndef WHOLE_PAGE_ALLOCATIONS
95 * Count of free blocks
97 static ULONG EiNrFreeBlocks = 0;
100 * Count of used blocks
102 static ULONG EiNrUsedBlocks = 0;
106 * Lock that protects the non-paged pool data structures
108 static KSPIN_LOCK MmNpoolLock;
111 * Total memory used for free nonpaged pool blocks
113 ULONG EiFreeNonPagedPool = 0;
116 * Total memory used for nonpaged pool blocks
118 ULONG EiUsedNonPagedPool = 0;
121 * Allocate a range of memory in the nonpaged pool
124 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
127 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
129 #ifdef TAG_STATISTICS_TRACKING
130 #define TAG_HASH_TABLE_SIZE (1024)
131 static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
132 #endif /* TAG_STATISTICS_TRACKING */
134 /* FUNCTIONS ***************************************************************/
136 #ifdef TAG_STATISTICS_TRACKING
138 MiRemoveFromTagHashTable(BLOCK_HDR* block)
140 * Remove a block from the tag hash table
152 hash = block->Tag % TAG_HASH_TABLE_SIZE;
155 current = tag_hash_table[hash];
156 while (current != NULL)
158 if (current == block)
160 if (previous == NULL)
162 tag_hash_table[hash] = block->tag_next;
166 previous->tag_next = block->tag_next;
171 current = current->tag_next;
173 DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
174 block->Tag, block->Caller);
/*
 * MiAddToTagHashTable
 * Inserts 'block' into the tag statistics hash table.  If another block
 * with the same Tag already exists in the bucket, the new block is
 * chained immediately after it (keeping equal tags adjacent); otherwise
 * it is appended at the end of the bucket chain.
 */
179 MiAddToTagHashTable(BLOCK_HDR* block)
181 * Add a block to the tag hash table
193 hash = block->Tag % TAG_HASH_TABLE_SIZE;
196 current = tag_hash_table[hash];
197 while (current != NULL)
199 if (current->Tag == block->Tag)
/* Same tag found: insert the new block right after it. */
201 block->tag_next = current->tag_next;
202 current->tag_next = block;
/* Sanity check: a tag_next pointer outside the nonpaged pool range
 * indicates chain corruption. */
206 if (current->tag_next &&((PVOID)current->tag_next >= (PVOID)kernel_pool_base + NONPAGED_POOL_SIZE || (PVOID)current->tag_next < (PVOID)kernel_pool_base))
208 DbgPrint("previous %x\n", previous);
210 current = current->tag_next;
/* No block with this tag yet: append at the end of the chain
 * (or make it the bucket head if the chain was empty). */
212 block->tag_next = NULL;
213 if (previous == NULL)
215 tag_hash_table[hash] = block;
219 previous->tag_next = block;
/*
 * ExInitNonPagedPool
 * One-time initialization of the nonpaged pool: records the pool base
 * address, initializes the spinlock protecting the pool structures,
 * sets up the kernel address map, clears the tag statistics hash table
 * and initializes the free/used block lists.
 */
225 ExInitNonPagedPool(ULONG BaseAddress)
227 kernel_pool_base = BaseAddress;
228 KeInitializeSpinLock(&MmNpoolLock);
229 MmInitKernelMap((PVOID)BaseAddress);
230 memset(tag_hash_table, 0, sizeof(tag_hash_table));
231 InitializeListHead(&FreeBlockListHead);
232 InitializeListHead(&UsedBlockListHead);
235 #ifdef TAG_STATISTICS_TRACKING
237 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
241 c1 = (CurrentTag >> 24) & 0xFF;
242 c2 = (CurrentTag >> 16) & 0xFF;
243 c3 = (CurrentTag >> 8) & 0xFF;
244 c4 = CurrentTag & 0xFF;
246 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
248 DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
249 CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
250 CurrentSize, CurrentSize / CurrentNrBlocks);
254 DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
255 CurrentTag, CurrentNrBlocks, CurrentSize,
256 CurrentSize / CurrentNrBlocks);
259 #endif /* TAG_STATISTICS_TRACKING */
/*
 * MiDebugDumpNonPagedPoolStats
 * Walks every bucket of the tag hash table and prints per-tag block
 * counts/sizes (via MiDumpTagStats), followed by overall totals and
 * free-list statistics.  When NewOnly is TRUE only blocks not yet
 * marked Dumped are counted; each visited block is then marked Dumped.
 * Compiled out entirely unless TAG_STATISTICS_TRACKING is defined.
 */
262 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
264 #ifdef TAG_STATISTICS_TRACKING
268 ULONG CurrentNrBlocks;
273 DbgPrint("******* Dumping non paging pool stats ******\n");
276 for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
281 current = tag_hash_table[i];
282 while (current != NULL)
/* Chain keeps equal tags adjacent (see MiAddToTagHashTable), so a
 * tag change means the previous tag's run is complete — flush it. */
284 if (current->Tag != CurrentTag)
286 if (CurrentTag != 0 && CurrentNrBlocks != 0)
288 MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
290 CurrentTag = current->Tag;
295 if (!NewOnly || !current->Dumped)
299 CurrentSize = CurrentSize + current->Size;
300 TotalSize = TotalSize + current->Size;
301 current->Dumped = TRUE;
303 current = current->tag_next;
/* Flush the statistics for the final tag run of the bucket. */
305 if (CurrentTag != 0 && CurrentNrBlocks != 0)
307 MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
310 if (TotalBlocks != 0)
312 DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
313 TotalBlocks, TotalSize, TotalSize / TotalBlocks);
317 DbgPrint("TotalBlocks %d TotalSize %d\n",
318 TotalBlocks, TotalSize);
320 DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
321 EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
322 DbgPrint("***************** Dump Complete ***************\n");
323 #endif /* TAG_STATISTICS_TRACKING */
/*
 * MiDebugDumpNonPagedPool
 * Dumps every block on the used-block list (size, tag, allocator
 * address) under the pool spinlock.  When NewOnly is TRUE, blocks
 * already marked Dumped are skipped; each printed block is then
 * marked Dumped so subsequent NewOnly dumps show only new activity.
 */
327 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
330 PLIST_ENTRY current_entry;
333 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
335 DbgPrint("******* Dumping non paging pool contents ******\n");
336 current_entry = UsedBlockListHead.Flink;
337 while (current_entry != &UsedBlockListHead)
339 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
340 if (!NewOnly || !current->Dumped)
/* Decode the tag's four bytes for ASCII display (see MiDumpTagStats). */
344 c1 = (current->Tag >> 24) & 0xFF;
345 c2 = (current->Tag >> 16) & 0xFF;
346 c3 = (current->Tag >> 8) & 0xFF;
347 c4 = current->Tag & 0xFF;
349 if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
351 DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
352 current->Size, current->Tag, c4, c3, c2, c1,
357 DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
358 current->Size, current->Tag, current->Caller);
360 current->Dumped = TRUE;
362 current_entry = current_entry->Flink;
364 DbgPrint("***************** Dump Complete ***************\n");
365 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
368 #ifndef WHOLE_PAGE_ALLOCATIONS
370 #ifdef ENABLE_VALIDATE_POOL
/*
 * validate_free_list
 * Debug-only integrity check (ENABLE_VALIDATE_POOL) of the free-block
 * list: verifies each block's free magic, that the block lies entirely
 * inside the pool region, that the list is not longer than the free
 * block counter allows, and that forward/backward links are consistent.
 * Any failure bugchecks with KBUG_POOL_FREE_LIST_CORRUPT.
 */
371 static void validate_free_list(void)
373 * FUNCTION: Validate the integrity of the list of free blocks
377 PLIST_ENTRY current_entry;
378 unsigned int blocks_seen=0;
380 current_entry = FreeBlockListHead.Flink;
381 while (current_entry != &FreeBlockListHead)
383 unsigned int base_addr;
385 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
386 base_addr = (int)current;
388 if (current->Magic != BLOCK_HDR_FREE_MAGIC)
390 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
392 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
/* Block (header + payload) must lie wholly within the pool region. */
395 if (base_addr < (kernel_pool_base) ||
396 (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
398 DbgPrint("Block %x found outside pool area\n",current);
399 DbgPrint("Size %d\n",current->Size);
400 DbgPrint("Limits are %x %x\n",kernel_pool_base,
401 kernel_pool_base+NONPAGED_POOL_SIZE);
402 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
/* A cycle or stray link would make us see more blocks than counted. */
405 if (blocks_seen > EiNrFreeBlocks)
407 DbgPrint("Too many blocks on free list\n");
408 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
/* NOTE(review): "¤t" below is mojibake for "&current" (the
 * "&curren" prefix was swallowed as an HTML entity) — restore the
 * original "&current->ListEntry" when repairing this listing. */
410 if (current->ListEntry.Flink != &FreeBlockListHead &&
411 current->ListEntry.Flink->Blink != ¤t->ListEntry)
413 DbgPrint("%s:%d:Break in list (current %x next %x "
414 "current->next->previous %x)\n",
415 __FILE__,__LINE__,current, current->ListEntry.Flink,
416 current->ListEntry.Flink->Blink);
417 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
420 current_entry = current_entry->Flink;
/*
 * validate_used_list
 * Debug-only integrity check (ENABLE_VALIDATE_POOL) of the used-block
 * list, mirroring validate_free_list: checks the used magic, pool
 * bounds, the used-block counter and the list link consistency.
 */
424 static void validate_used_list(void)
426 * FUNCTION: Validate the integrity of the list of used blocks
430 PLIST_ENTRY current_entry;
431 unsigned int blocks_seen=0;
433 current_entry = UsedBlockListHead.Flink;
434 while (current_entry != &UsedBlockListHead)
436 unsigned int base_addr;
438 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
439 base_addr = (int)current;
441 if (current->Magic != BLOCK_HDR_USED_MAGIC)
443 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
445 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
/* Block must lie wholly within the pool region. */
447 if (base_addr < (kernel_pool_base) ||
448 (base_addr+current->Size) >
449 (kernel_pool_base)+NONPAGED_POOL_SIZE)
451 DbgPrint("Block %x found outside pool area\n",current);
455 if (blocks_seen > EiNrUsedBlocks)
457 DbgPrint("Too many blocks on used list\n");
/* NOTE(review): "¤t" below is mojibake for "&current" —
 * restore "&current->ListEntry" when repairing this listing. */
460 if (current->ListEntry.Flink != &UsedBlockListHead &&
461 current->ListEntry.Flink->Blink != ¤t->ListEntry)
463 DbgPrint("Break in list (current %x next %x)\n",
464 current, current->ListEntry.Flink);
468 current_entry = current_entry->Flink;
/*
 * check_duplicates
 * Debug-only check (ENABLE_VALIDATE_POOL) that the address range
 * [blk, blk + header + payload) does not intersect any other block on
 * either the free list or the used list.  'base'/'last' bound the
 * block being checked; any overlap indicates pool corruption.
 * NOTE(review): "+ +sizeof(BLOCK_HDR)" below has a stray unary plus —
 * harmless, but worth cleaning up in the real source.
 */
472 static void check_duplicates(BLOCK_HDR* blk)
474 * FUNCTION: Check a block has no duplicates
476 * blk = block to check
477 * NOTE: Bug checks if duplicates are found
480 unsigned int base = (int)blk;
481 unsigned int last = ((int)blk) + +sizeof(BLOCK_HDR) + blk->Size;
483 PLIST_ENTRY current_entry;
/* Pass 1: scan the free list for overlaps. */
485 current_entry = FreeBlockListHead.Flink;
486 while (current_entry != &FreeBlockListHead)
488 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
490 if (current->Magic != BLOCK_HDR_FREE_MAGIC)
492 DbgPrint("Bad block magic (probable pool corruption) at %x\n",
494 KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
/* Another block starting inside our range overlaps us. */
497 if ( (int)current > base && (int)current < last )
499 DbgPrint("intersecting blocks on list\n");
/* A block starting before us whose end reaches into our range. */
502 if ( (int)current < base &&
503 ((int)current + current->Size + sizeof(BLOCK_HDR))
506 DbgPrint("intersecting blocks on list\n");
510 current_entry = current_entry->Flink;
/* Pass 2: same overlap checks against the used list. */
513 current_entry = UsedBlockListHead.Flink;
514 while (current_entry != &UsedBlockListHead)
516 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
518 if ( (int)current > base && (int)current < last )
520 DbgPrint("intersecting blocks on list\n");
523 if ( (int)current < base &&
524 ((int)current + current->Size + sizeof(BLOCK_HDR))
527 DbgPrint("intersecting blocks on list\n");
531 current_entry = current_entry->Flink;
/*
 * validate_kernel_pool
 * Debug-only (ENABLE_VALIDATE_POOL) full-pool consistency check:
 * validates both lists, then runs check_duplicates on every block on
 * the free list and the used list (an O(n^2) scan — acceptable only
 * because this is a diagnostic build option).
 */
536 static void validate_kernel_pool(void)
538 * FUNCTION: Checks the integrity of the kernel memory heap
542 PLIST_ENTRY current_entry;
544 validate_free_list();
545 validate_used_list();
547 current_entry = FreeBlockListHead.Flink;
548 while (current_entry != &FreeBlockListHead)
550 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
551 check_duplicates(current);
552 current_entry = current_entry->Flink;
554 current_entry = UsedBlockListHead.Flink;
555 while (current_entry != &UsedBlockListHead)
557 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
558 check_duplicates(current);
559 current_entry = current_entry->Flink;
/*
 * free_pages
 * Computes the page-aligned span covered by a free block (header +
 * payload) so whole pages inside it can be released; if the block does
 * not contain at least one complete page there is nothing to free.
 * NOTE(review): only a fragment of this function is visible in this
 * listing (the 'start' computation and the body after the early-out
 * test are missing) — consult the full source before changing it.
 */
566 free_pages(BLOCK_HDR* blk)
573 end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;
576 * If the block doesn't contain a whole page then there is nothing to do
578 if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
/*
 * merge_free_block
 * Coalesces a newly freed block with its physical neighbours on the
 * (address-ordered) free list: first absorbs the following block if it
 * is adjacent, then lets the preceding block absorb 'blk' if that one
 * is adjacent.  Each merge reclaims one BLOCK_HDR worth of space,
 * which is why EiFreeNonPagedPool grows by sizeof(BLOCK_HDR).
 */
586 merge_free_block(BLOCK_HDR* blk)
588 PLIST_ENTRY next_entry;
590 PLIST_ENTRY previous_entry;
/* Forward merge: does 'blk' end exactly where the next block starts? */
593 next_entry = blk->ListEntry.Flink;
594 if (next_entry != &FreeBlockListHead)
596 next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
597 if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
600 RemoveEntryList(&next->ListEntry);
/* NOTE(review): absorbing 'next' should also absorb its header
 * (the counter update below assumes it does) — the expected form is
 * blk->Size + sizeof(BLOCK_HDR) + next->Size; verify against the
 * full source, as a line may be truncated in this listing. */
601 blk->Size = blk->Size + next->Size;
602 memset(next, 0xcc, sizeof(BLOCK_HDR));
603 EiFreeNonPagedPool += sizeof(BLOCK_HDR);
/* Backward merge: does the previous block end exactly at 'blk'? */
608 previous_entry = blk->ListEntry.Blink;
609 if (previous_entry != &FreeBlockListHead)
611 previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
612 if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
615 RemoveEntryList(&blk->ListEntry);
616 previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
617 memset(blk, 0xcc, sizeof(BLOCK_HDR));
618 EiFreeNonPagedPool += sizeof(BLOCK_HDR);
/*
 * add_to_free_list
 * Inserts 'blk' into the free list keeping the list sorted by block
 * address (required for merge_free_block's adjacency checks).  The
 * block is linked in front of the first block with a higher address,
 * or appended at the tail if no such block exists; the free-pool byte
 * counter is updated on either path.
 */
625 add_to_free_list(BLOCK_HDR* blk)
627 * FUNCTION: add the block to the free list (internal)
630 PLIST_ENTRY current_entry;
633 current_entry = FreeBlockListHead.Flink;
634 while (current_entry != &FreeBlockListHead)
636 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
638 if ((unsigned int)current > (unsigned int)blk)
/* Found the first block at a higher address: splice 'blk' before it. */
640 blk->ListEntry.Flink = current_entry;
641 blk->ListEntry.Blink = current_entry->Blink;
642 current_entry->Blink->Flink = &blk->ListEntry;
643 current_entry->Blink = &blk->ListEntry;
644 EiFreeNonPagedPool += blk->Size;
649 current_entry = current_entry->Flink;
/* 'blk' has the highest address on the list: append at the tail. */
651 InsertTailList(&FreeBlockListHead, &blk->ListEntry);
652 EiFreeNonPagedPool += blk->Size;
656 static void add_to_used_list(BLOCK_HDR* blk)
658 * FUNCTION: add the block to the used list (internal)
661 InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
662 EiUsedNonPagedPool += blk->Size;
667 static void remove_from_free_list(BLOCK_HDR* current)
669 RemoveEntryList(¤t->ListEntry);
670 EiFreeNonPagedPool -= current->Size;
675 static void remove_from_used_list(BLOCK_HDR* current)
677 RemoveEntryList(¤t->ListEntry);
678 EiUsedNonPagedPool -= current->Size;
683 inline static void* block_to_address(BLOCK_HDR* blk)
685 * FUNCTION: Translate a block header address to the corresponding block
689 return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
692 inline static BLOCK_HDR* address_to_block(void* addr)
695 ( ((int)addr) - sizeof(BLOCK_HDR) );
/*
 * lookup_block
 * Best-fit search of the free list for a block that can satisfy an
 * allocation of 'size' bytes.  Small requests (< PAGE_SIZE) use the
 * block size directly; large requests additionally require room to
 * place the payload on a page boundary (take_block will split there),
 * so the effective required size is padded accordingly.
 * Returns the smallest suitable block, or NULL if none fits.
 */
698 static BLOCK_HDR* lookup_block(unsigned int size)
700 PLIST_ENTRY current_entry;
702 BLOCK_HDR* best = NULL;
704 PVOID block, block_boundary;
706 current_entry = FreeBlockListHead.Flink;
707 if (size < PAGE_SIZE)
709 while (current_entry != &FreeBlockListHead)
/* NOTE(review): this DPRINT reads 'current' before the
 * CONTAINING_RECORD assignment below — on the first iteration it
 * prints a stale/uninitialized pointer.  Verify against the full
 * source; the trace likely belongs after the assignment. */
711 DPRINT("current %x size %x tag_next %x\n",
712 current, current->Size, current->tag_next);
713 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
714 if (current->Size >= size &&
715 (best == NULL || current->Size < best->Size))
/* An exact-size match cannot be beaten: stop searching. */
718 if (best->Size == size)
723 current_entry = current_entry->Flink;
/* Large request: the payload must start on a page boundary. */
728 while (current_entry != &FreeBlockListHead)
730 DPRINT("current %x size %x tag_next %x\n",
731 current, current->Size, current->tag_next);
732 current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
734 block = block_to_address(current);
735 block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
736 new_size = (ULONG)block_boundary - (ULONG)block + size;
/* If the gap before the boundary is too small to hold the header
 * of the split-off remainder, skip to the next page boundary. */
737 if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
739 new_size += PAGE_SIZE;
741 if (current->Size >= new_size &&
742 (best == NULL || current->Size < best->Size))
746 current_entry = current_entry->Flink;
/*
 * take_block
 * Carves an allocation of at least 'size' bytes out of free block
 * 'current' (found by lookup_block) and moves it to the used list.
 * For large (>= PAGE_SIZE) requests the payload is first aligned to a
 * page boundary by splitting off a leading free fragment.  If the
 * remaining tail is big enough to be useful it is split off as a new
 * free block; otherwise the whole block is handed out.  The block is
 * stamped with the caller's Tag/Caller for the tag statistics.
 * RETURNS: the user-visible payload address.
 * NOTE(review): several "¤t" tokens below are HTML-entity
 * mojibake for "&current" — restore "&current->ListEntry" when
 * repairing this listing.
 */
752 static void* take_block(BLOCK_HDR* current, unsigned int size,
753 ULONG Tag, PVOID Caller)
755 * FUNCTION: Allocate a used block of least 'size' from the specified
757 * RETURNS: The address of the created memory block
/* Page-align the payload for large requests by splitting the block. */
762 if (size >= PAGE_SIZE)
764 blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address (current)));
/* Leading gap too small for a header: move to the next page. */
767 if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
769 (ULONG)blk += PAGE_SIZE;
771 assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));
/* 'blk' becomes the aligned block; 'current' keeps the leading gap
 * as a (smaller) free block ahead of it on the list. */
773 memset(blk, 0, sizeof(BLOCK_HDR));
774 blk->Magic = BLOCK_HDR_FREE_MAGIC;
775 blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
776 current->Size -= (blk->Size + sizeof(BLOCK_HDR));
777 InsertHeadList(¤t->ListEntry, &blk->ListEntry);
778 EiFreeNonPagedPool -= sizeof(BLOCK_HDR);
784 * If the block is much bigger than required then split it and
785 * return a pointer to the allocated section. If the difference
786 * between the sizes is marginal it makes no sense to have the
789 if (current->Size > size + sizeof(BLOCK_HDR))
793 EiFreeNonPagedPool -= current->Size;
796 * Replace the bigger block with a smaller block in the
797 * same position in the list
799 free_blk = (BLOCK_HDR *)(((int)current)
800 + sizeof(BLOCK_HDR) + size);
801 free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
802 InsertHeadList(¤t->ListEntry, &free_blk->ListEntry);
803 free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);
/* The allocated front part moves to the used list. */
806 RemoveEntryList(¤t->ListEntry);
807 add_to_used_list(current);
808 current->Magic = BLOCK_HDR_USED_MAGIC;
810 current->Caller = Caller;
811 current->Dumped = FALSE;
812 #ifdef TAG_STATISTICS_TRACKING
813 MiAddToTagHashTable(current);
814 #endif /* TAG_STATISTICS_TRACKING */
816 EiFreeNonPagedPool += free_blk->Size;
819 return(block_to_address(current));
823 * Otherwise allocate the whole block
825 remove_from_free_list(current);
826 add_to_used_list(current);
828 current->Magic = BLOCK_HDR_USED_MAGIC;
830 current->Caller = Caller;
831 current->Dumped = FALSE;
832 #ifdef TAG_STATISTICS_TRACKING
833 MiAddToTagHashTable(current);
834 #endif /* TAG_STATISTICS_TRACKING */
837 return(block_to_address(current));
/*
 * grow_kernel_pool
 * Expands the nonpaged pool when lookup_block found nothing: reserves
 * a fresh region of virtual pages, commits physical pages to it one at
 * a time, turns the region into a single free block, links and merges
 * it into the free list under the pool lock, and immediately carves
 * the pending allocation out of it via lookup_block/take_block.
 * RETURNS: payload address for the new allocation.
 */
840 static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
842 * FUNCTION: Grow the executive heap to accommodate a block of at least 'size'
846 ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
854 if (size >= PAGE_SIZE)
859 start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
861 DPRINT("growing heap for block size %d, ",size);
862 DPRINT("start %x\n",start);
/* Commit one physical page per reserved virtual page. */
864 for (i=0;i<nr_pages;i++)
866 PHYSICAL_ADDRESS Page;
867 /* FIXME: Check whether we can really wait here. */
868 Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
869 if (!NT_SUCCESS(Status))
874 Status = MmCreateVirtualMapping(NULL,
875 (PVOID)(start + (i*PAGE_SIZE)),
876 PAGE_READWRITE|PAGE_SYSTEM,
879 if (!NT_SUCCESS(Status))
881 DbgPrint("Unable to create virtual mapping\n");
/* Turn the whole new region into one free block; 0xcc-fill the
 * payload to catch use of uninitialized pool memory. */
886 blk = (struct _BLOCK_HDR *)start;
887 memset(blk, 0, sizeof(BLOCK_HDR));
888 blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
889 blk->Magic = BLOCK_HDR_FREE_MAGIC;
890 memset(block_to_address(blk), 0xcc, blk->Size);
892 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
893 add_to_free_list(blk);
894 merge_free_block(blk);
/* Satisfy the pending request from the freshly grown pool. */
896 blk = lookup_block(size);
899 block = take_block(blk, size, Tag, Caller);
902 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
910 #endif /* not WHOLE_PAGE_ALLOCATIONS */
/*
 * ExFreeNonPagedPool
 * Releases a previously allocated nonpaged-pool block.  Under
 * WHOLE_PAGE_ALLOCATIONS the whole-page path is used; otherwise the
 * block header is recovered from the payload address, its magic is
 * verified (catching double frees and frees of non-pool addresses),
 * the payload is 0xcc-poisoned, the block is removed from the used
 * list and tag table, then returned to the free list and merged with
 * its neighbours.  All list manipulation happens under MmNpoolLock.
 */
912 VOID STDCALL ExFreeNonPagedPool (PVOID block)
914 * FUNCTION: Releases previously allocated memory
916 * block = block to free
919 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
927 DPRINT("freeing block %x\n",blk);
929 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
930 ((PULONG)&block)[-1]);
932 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
934 ExFreeWholePageBlock(block);
935 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
937 #else /* not WHOLE_PAGE_ALLOCATIONS */
/* Header sits immediately before the user payload. */
939 BLOCK_HDR* blk=address_to_block(block);
947 DPRINT("freeing block %x\n",blk);
949 POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->size,
950 ((PULONG)&block)[-1]);
952 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
/* Magic check distinguishes double-free from a bogus pointer. */
956 if (blk->Magic != BLOCK_HDR_USED_MAGIC)
958 if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
960 DbgPrint("ExFreePool of already freed address %x\n", block);
964 DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
/* Poison the payload to catch use-after-free. */
971 memset(block, 0xcc, blk->Size);
973 #ifdef TAG_STATISTICS_TRACKING
974 MiRemoveFromTagHashTable(blk);
975 #endif /* TAG_STATISTICS_TRACKING */
976 remove_from_used_list(blk);
977 blk->Magic = BLOCK_HDR_FREE_MAGIC;
980 blk->tag_next = NULL;
981 add_to_free_list(blk);
982 merge_free_block(blk);
985 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
987 #endif /* WHOLE_PAGE_ALLOCATIONS */
/*
 * ExAllocateNonPagedPoolWithTag
 * Main nonpaged-pool allocation entry point.  A zero-byte request
 * returns NULL (supporting the free(alloc(0)) idiom).  Otherwise the
 * size is rounded up to dword alignment, the free list is searched for
 * a best-fit block (lookup_block/take_block), and if nothing fits the
 * pool is grown (grow_kernel_pool).  The returned payload is zeroed.
 * Under WHOLE_PAGE_ALLOCATIONS each allocation instead gets its own
 * page range via ExAllocateWholePageBlock.
 */
991 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
993 #ifdef WHOLE_PAGE_ALLOCATIONS
997 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1000 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1003 * accommodate this useful idiom
1007 POOL_TRACE("= NULL\n");
1008 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1012 block = ExAllocateWholePageBlock(Size);
1013 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1016 #else /* not WHOLE_PAGE_ALLOCATIONS */
1018 BLOCK_HDR* best = NULL;
1021 POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1024 KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1029 /* after some allocations print the npaged pool stats */
1030 #ifdef TAG_STATISTICS_TRACKING
1032 static ULONG counter = 0;
1033 if (counter++ % 100000 == 0)
1035 MiDebugDumpNonPagedPoolStats(FALSE);
1041 * accommodate this useful idiom
1045 POOL_TRACE("= NULL\n");
1046 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1049 /* Make the size dword aligned, this makes the block dword aligned */
1050 Size = ROUND_UP(Size, 4);
1052 * Look for an already created block of sufficient size
1054 best = lookup_block(Size);
/* No fit: release the lock and grow the pool (which re-acquires it). */
1057 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1058 block = grow_kernel_pool(Size, Tag, Caller);
1059 assert(block != NULL);
1060 memset(block,0,Size);
/* Fit found: carve the allocation out of the best-fit block. */
1064 block=take_block(best, Size, Tag, Caller);
1066 KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1067 memset(block,0,Size);
1070 #endif /* WHOLE_PAGE_ALLOCATIONS */
1073 #ifdef WHOLE_PAGE_ALLOCATIONS
/*
 * ExAllocateWholePageBlock (WHOLE_PAGE_ALLOCATIONS builds only)
 * Debug allocator: gives each allocation its own page range, placing
 * the payload at the END of the range so any access past the end of
 * the block faults in the extra (unmapped) guard page reserved by the
 * "NrPages + 1" region.  The page count is stashed in the ULONG just
 * before the payload so ExFreeWholePageBlock can recover it.
 */
1076 ExAllocateWholePageBlock(ULONG UserSize)
1079 PHYSICAL_ADDRESS Page;
/* Reserve room for the hidden NrPages ULONG ahead of the payload. */
1084 Size = sizeof(ULONG) + UserSize;
1085 NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
/* "+ 1": trailing unmapped guard page to catch overruns. */
1087 Address = MiAllocNonPagedPoolRegion(NrPages + 1);
1089 for (i = 0; i < NrPages; i++)
1091 Page = MmAllocPage(MC_NPPOOL, 0);
1092 if (Page.QuadPart == 0LL)
1096 MmCreateVirtualMapping(NULL,
1097 Address + (i * PAGE_SIZE),
1098 PAGE_READWRITE | PAGE_SYSTEM,
/* Store the page count, then return the end-aligned payload. */
1103 *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
1104 return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
/*
 * ExFreeWholePageBlock (WHOLE_PAGE_ALLOCATIONS builds only)
 * Frees a block allocated by ExAllocateWholePageBlock: validates the
 * address is inside the pool, reads back the page count stored in the
 * ULONG preceding the payload, and releases the whole page region.
 */
1108 ExFreeWholePageBlock(PVOID Addr)
1112 if ((ULONG)Addr < kernel_pool_base ||
1113 (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
1115 DbgPrint("Block %x found outside pool area\n", Addr);
/* Recover the page count hidden just before the payload. */
1118 NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
1119 MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
1122 #endif /* WHOLE_PAGE_ALLOCATIONS */