/* $Id$
 *
 * COPYRIGHT:    See COPYING in the top level directory
 * PROJECT:      ReactOS kernel
 * FILE:         ntoskrnl/mm/npool.c
 * PURPOSE:      Implements the kernel memory pool
 * PROGRAMMER:   David Welch (welch@cwcom.net)
 * UPDATE HISTORY:
 *               27/05/98: Created
 *               10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
 *                         in take_block (if current bigger than required)
 *                         in remove_from_used_list
 *                         in ExFreePool
 *               23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
 */

/* INCLUDES ****************************************************************/

#include <ddk/ntddk.h>
#include <internal/mm.h>
#include <internal/ntoskrnl.h>
#include <internal/pool.h>

#define NDEBUG
#include <internal/debug.h>

/* Enable strict checking of the nonpaged pool on every allocation */
//#define ENABLE_VALIDATE_POOL

/* Enable tracking of statistics about the tagged blocks in the pool */
#define TAG_STATISTICS_TRACKING

/*
 * Put each block in its own range of pages and position the block at the
 * end of the range so that any access beyond the end of the block hits an
 * invalid memory location.
 */
//#define WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
#define VALIDATE_POOL validate_kernel_pool()
#else
#define VALIDATE_POOL
#endif

#if 0
/* No trailing semicolon here: the do/while swallows the caller's own. */
#define POOL_TRACE(args...) do { DbgPrint(args); } while(0)
#else
#define POOL_TRACE(args...)
#endif

/* TYPES *******************************************************************/

#define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
#define BLOCK_HDR_FREE_MAGIC (0xceadbeef)

/*
 * fields present at the start of a block (this is for internal use only)
 */
typedef struct _BLOCK_HDR
{
  ULONG Magic;                 /* BLOCK_HDR_USED_MAGIC or BLOCK_HDR_FREE_MAGIC */
  ULONG Size;                  /* size of the data area, excluding this header */
  LIST_ENTRY ListEntry;        /* entry in the free or used block list */
  ULONG Tag;                   /* four-character tag supplied by the caller */
  PVOID Caller;                /* address of the allocator, for debugging */
  struct _BLOCK_HDR* tag_next; /* next block in the same tag hash chain */
  BOOLEAN Dumped;              /* TRUE once reported by the dump routines */
} BLOCK_HDR;
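
/*
 * Pool memory is laid out as a header immediately followed by the data
 * area it describes:
 *
 *   [BLOCK_HDR][data (Size bytes)][BLOCK_HDR][data]...
 *
 * block_to_address() and address_to_block() below convert between the two
 * views by adding or subtracting sizeof(BLOCK_HDR).
 */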

PVOID STDCALL
ExAllocateWholePageBlock(ULONG Size);
VOID STDCALL
ExFreeWholePageBlock(PVOID Addr);

/* GLOBALS *****************************************************************/

/*
 * Memory management initialized symbol for the base of the pool
 */
static unsigned int kernel_pool_base = 0;

/*
 * Head of the list of free blocks
 */
static LIST_ENTRY FreeBlockListHead;

/*
 * Head of the list of in-use blocks
 */
static LIST_ENTRY UsedBlockListHead;

#ifndef WHOLE_PAGE_ALLOCATIONS
/*
 * Count of free blocks
 */
static ULONG EiNrFreeBlocks = 0;

/*
 * Count of used blocks
 */
static ULONG EiNrUsedBlocks = 0;
#endif

/*
 * Lock that protects the non-paged pool data structures
 */
static KSPIN_LOCK MmNpoolLock;

/*
 * Total memory used for free nonpaged pool blocks
 */
ULONG EiFreeNonPagedPool = 0;

/*
 * Total memory used for nonpaged pool blocks
 */
ULONG EiUsedNonPagedPool = 0;

/*
 * Allocate a range of memory in the nonpaged pool
 */
PVOID
MiAllocNonPagedPoolRegion(unsigned int nr_pages);

VOID
MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);

#ifdef TAG_STATISTICS_TRACKING
#define TAG_HASH_TABLE_SIZE       (1024)
static BLOCK_HDR* tag_hash_table[TAG_HASH_TABLE_SIZE];
#endif /* TAG_STATISTICS_TRACKING */
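
/*
 * Tag statistics are kept in a chained hash table: a used block hashes to
 * bucket (Tag % TAG_HASH_TABLE_SIZE) and is linked into that bucket's
 * singly-linked chain through its tag_next field. Blocks with the same tag
 * are kept adjacent in the chain so the dump code can total them in one
 * pass.
 */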

/* FUNCTIONS ***************************************************************/

#ifdef TAG_STATISTICS_TRACKING
VOID
MiRemoveFromTagHashTable(BLOCK_HDR* block)
     /*
      * Remove a block from the tag hash table
      */
{
  BLOCK_HDR* previous;
  BLOCK_HDR* current;
  ULONG hash;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current == block)
        {
          if (previous == NULL)
            {
              tag_hash_table[hash] = block->tag_next;
            }
          else
            {
              previous->tag_next = block->tag_next;
            }
          return;
        }
      previous = current;
      current = current->tag_next;
    }
  DPRINT1("Tagged block wasn't on hash table list (Tag %x Caller %x)\n",
          block->Tag, block->Caller);
  KeBugCheck(0);
}

VOID
MiAddToTagHashTable(BLOCK_HDR* block)
     /*
      * Add a block to the tag hash table
      */
{
  ULONG hash;
  BLOCK_HDR* current;
  BLOCK_HDR* previous;

  if (block->Tag == 0)
    {
      return;
    }

  hash = block->Tag % TAG_HASH_TABLE_SIZE;

  previous = NULL;
  current = tag_hash_table[hash];
  while (current != NULL)
    {
      if (current->Tag == block->Tag)
        {
          /* Keep blocks with the same tag adjacent in the chain. */
          block->tag_next = current->tag_next;
          current->tag_next = block;
          return;
        }
      previous = current;
      /* Sanity check: the chain must stay inside the pool area. */
      if (current->tag_next &&
          ((ULONG)current->tag_next >= kernel_pool_base + NONPAGED_POOL_SIZE ||
           (ULONG)current->tag_next < kernel_pool_base))
        {
          DbgPrint("tag hash chain corrupt (previous %x next %x)\n",
                   previous, current->tag_next);
        }
      current = current->tag_next;
    }
  block->tag_next = NULL;
  if (previous == NULL)
    {
      tag_hash_table[hash] = block;
    }
  else
    {
      previous->tag_next = block;
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
ExInitNonPagedPool(ULONG BaseAddress)
{
   kernel_pool_base = BaseAddress;
   KeInitializeSpinLock(&MmNpoolLock);
   MmInitKernelMap((PVOID)BaseAddress);
#ifdef TAG_STATISTICS_TRACKING
   memset(tag_hash_table, 0, sizeof(tag_hash_table));
#endif
   InitializeListHead(&FreeBlockListHead);
   InitializeListHead(&UsedBlockListHead);
}
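
/*
 * ExInitNonPagedPool is presumably called once during memory-manager
 * startup, before the first pool allocation; the list and lock setup
 * above is therefore done without holding MmNpoolLock.
 */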

#ifdef TAG_STATISTICS_TRACKING
VOID STATIC
MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
{
  CHAR c1, c2, c3, c4;

  c1 = (CurrentTag >> 24) & 0xFF;
  c2 = (CurrentTag >> 16) & 0xFF;
  c3 = (CurrentTag >> 8) & 0xFF;
  c4 = CurrentTag & 0xFF;

  if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
    {
      DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
               CurrentSize, CurrentSize / CurrentNrBlocks);
    }
  else
    {
      DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
               CurrentTag, CurrentNrBlocks, CurrentSize,
               CurrentSize / CurrentNrBlocks);
    }
}
#endif /* TAG_STATISTICS_TRACKING */

VOID
MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
{
#ifdef TAG_STATISTICS_TRACKING
  ULONG i;
  BLOCK_HDR* current;
  ULONG CurrentTag;
  ULONG CurrentNrBlocks;
  ULONG CurrentSize;
  ULONG TotalBlocks;
  ULONG TotalSize;

  DbgPrint("******* Dumping nonpaged pool stats ******\n");
  TotalBlocks = 0;
  TotalSize = 0;
  for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
    {
      CurrentTag = 0;
      CurrentNrBlocks = 0;
      CurrentSize = 0;
      current = tag_hash_table[i];
      while (current != NULL)
        {
          if (current->Tag != CurrentTag)
            {
              if (CurrentTag != 0 && CurrentNrBlocks != 0)
                {
                  MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
                }
              CurrentTag = current->Tag;
              CurrentNrBlocks = 0;
              CurrentSize = 0;
            }

          if (!NewOnly || !current->Dumped)
            {
              CurrentNrBlocks++;
              TotalBlocks++;
              CurrentSize = CurrentSize + current->Size;
              TotalSize = TotalSize + current->Size;
              current->Dumped = TRUE;
            }
          current = current->tag_next;
        }
      if (CurrentTag != 0 && CurrentNrBlocks != 0)
        {
          MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
        }
    }
  if (TotalBlocks != 0)
    {
      DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
               TotalBlocks, TotalSize, TotalSize / TotalBlocks);
    }
  else
    {
      DbgPrint("TotalBlocks %d TotalSize %d\n",
               TotalBlocks, TotalSize);
    }
#ifndef WHOLE_PAGE_ALLOCATIONS
  DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n",
          EiNrFreeBlocks, EiFreeNonPagedPool,
          EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
#endif
  DbgPrint("***************** Dump Complete ***************\n");
#endif /* TAG_STATISTICS_TRACKING */
}
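
/*
 * Passing NewOnly == TRUE reports only blocks allocated since the previous
 * dump: each block's Dumped flag is set as it is counted, so anything
 * already reported is skipped.
 */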

VOID
MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   KIRQL oldIrql;

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   DbgPrint("******* Dumping nonpaged pool contents ******\n");
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
       if (!NewOnly || !current->Dumped)
         {
           CHAR c1, c2, c3, c4;

           c1 = (current->Tag >> 24) & 0xFF;
           c2 = (current->Tag >> 16) & 0xFF;
           c3 = (current->Tag >> 8) & 0xFF;
           c4 = current->Tag & 0xFF;

           if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
             {
               DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
                        current->Size, current->Tag, c4, c3, c2, c1,
                        current->Caller);
             }
           else
             {
               DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
                        current->Size, current->Tag, current->Caller);
             }
           current->Dumped = TRUE;
         }
       current_entry = current_entry->Flink;
     }
   DbgPrint("***************** Dump Complete ***************\n");
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
}

#ifndef WHOLE_PAGE_ALLOCATIONS

#ifdef ENABLE_VALIDATE_POOL
static void validate_free_list(void)
/*
 * FUNCTION: Validate the integrity of the list of free blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (unsigned int)current;

        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->Size) > (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             DbgPrint("Size %d\n",current->Size);
             DbgPrint("Limits are %x %x\n",kernel_pool_base,
                    kernel_pool_base+NONPAGED_POOL_SIZE);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        blocks_seen++;
        if (blocks_seen > EiNrFreeBlocks)
          {
             DbgPrint("Too many blocks on free list\n");
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (current->ListEntry.Flink != &FreeBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("%s:%d:Break in list (current %x next %x "
                    "current->next->previous %x)\n",
                    __FILE__,__LINE__,current, current->ListEntry.Flink,
                    current->ListEntry.Flink->Blink);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }

        current_entry = current_entry->Flink;
     }
}

static void validate_used_list(void)
/*
 * FUNCTION: Validate the integrity of the list of used blocks
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;
   unsigned int blocks_seen=0;

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
        unsigned int base_addr;

        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
        base_addr = (unsigned int)current;

        if (current->Magic != BLOCK_HDR_USED_MAGIC)
          {
             DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                      current);
             KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
          }
        if (base_addr < (kernel_pool_base) ||
            (base_addr+current->Size) >
            (kernel_pool_base)+NONPAGED_POOL_SIZE)
          {
             DbgPrint("Block %x found outside pool area\n",current);
             for(;;);
          }
        blocks_seen++;
        if (blocks_seen > EiNrUsedBlocks)
          {
             DbgPrint("Too many blocks on used list\n");
             for(;;);
          }
        if (current->ListEntry.Flink != &UsedBlockListHead &&
            current->ListEntry.Flink->Blink != &current->ListEntry)
          {
             DbgPrint("Break in list (current %x next %x)\n",
                    current, current->ListEntry.Flink);
             for(;;);
          }

        current_entry = current_entry->Flink;
     }
}

static void check_duplicates(BLOCK_HDR* blk)
/*
 * FUNCTION: Check a block has no duplicates
 * ARGUMENTS:
 *           blk = block to check
 * NOTE: Bug checks if duplicates are found
 */
{
   unsigned int base = (unsigned int)blk;
   unsigned int last = (unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size;
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

       if (current->Magic != BLOCK_HDR_FREE_MAGIC)
         {
           DbgPrint("Bad block magic (probable pool corruption) at %x\n",
                    current);
           KeBugCheck(KBUG_POOL_FREE_LIST_CORRUPT);
         }

       if ( (unsigned int)current > base && (unsigned int)current < last )
         {
           DbgPrint("intersecting blocks on list\n");
           for(;;);
         }
       if  ( (unsigned int)current < base &&
             ((unsigned int)current + current->Size + sizeof(BLOCK_HDR))
             > base )
         {
           DbgPrint("intersecting blocks on list\n");
           for(;;);
         }

       current_entry = current_entry->Flink;
     }

   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

       if ( (unsigned int)current > base && (unsigned int)current < last )
         {
           DbgPrint("intersecting blocks on list\n");
           for(;;);
         }
       if  ( (unsigned int)current < base &&
             ((unsigned int)current + current->Size + sizeof(BLOCK_HDR))
             > base )
         {
           DbgPrint("intersecting blocks on list\n");
           for(;;);
         }

       current_entry = current_entry->Flink;
     }

}

static void validate_kernel_pool(void)
/*
 * FUNCTION: Checks the integrity of the kernel memory heap
 */
{
   BLOCK_HDR* current;
   PLIST_ENTRY current_entry;

   validate_free_list();
   validate_used_list();

   current_entry = FreeBlockListHead.Flink;
   while (current_entry != &FreeBlockListHead)
     {
       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
       check_duplicates(current);
       current_entry = current_entry->Flink;
     }
   current_entry = UsedBlockListHead.Flink;
   while (current_entry != &UsedBlockListHead)
     {
       current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
       check_duplicates(current);
       current_entry = current_entry->Flink;
     }
}
#endif

#if 0
STATIC VOID
free_pages(BLOCK_HDR* blk)
{
  ULONG start;
  ULONG end;
  ULONG i;

  start = (ULONG)blk;
  end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;

  /*
   * If the block doesn't contain a whole page then there is nothing to do
   */
  if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
    {
      return;
    }
}
#endif
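
/*
 * The free list is kept sorted by address (see add_to_free_list), so a
 * block's physical neighbours are also its list neighbours. That lets
 * merge_free_block coalesce a freed block with the blocks on either side
 * just by looking at its Flink and Blink, absorbing the neighbour's header
 * into the enlarged block.
 */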

STATIC VOID
merge_free_block(BLOCK_HDR* blk)
{
  PLIST_ENTRY next_entry;
  BLOCK_HDR* next;
  PLIST_ENTRY previous_entry;
  BLOCK_HDR* previous;

  next_entry = blk->ListEntry.Flink;
  if (next_entry != &FreeBlockListHead)
    {
      next = CONTAINING_RECORD(next_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)blk + sizeof(BLOCK_HDR) + blk->Size) ==
          (unsigned int)next)
        {
          RemoveEntryList(&next->ListEntry);
          /* The absorbed block's header becomes part of the data area. */
          blk->Size = blk->Size + sizeof(BLOCK_HDR) + next->Size;
          memset(next, 0xcc, sizeof(BLOCK_HDR));
          EiFreeNonPagedPool += sizeof(BLOCK_HDR);
          EiNrFreeBlocks--;
        }
    }

  previous_entry = blk->ListEntry.Blink;
  if (previous_entry != &FreeBlockListHead)
    {
      previous = CONTAINING_RECORD(previous_entry, BLOCK_HDR, ListEntry);
      if (((unsigned int)previous + sizeof(BLOCK_HDR) + previous->Size) ==
          (unsigned int)blk)
        {
          RemoveEntryList(&blk->ListEntry);
          previous->Size = previous->Size + sizeof(BLOCK_HDR) + blk->Size;
          memset(blk, 0xcc, sizeof(BLOCK_HDR));
          EiFreeNonPagedPool += sizeof(BLOCK_HDR);
          EiNrFreeBlocks--;
        }
    }
}

STATIC VOID
add_to_free_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the free list (internal)
 */
{
  PLIST_ENTRY current_entry;
  BLOCK_HDR* current;

  /* Insert in address order so adjacent blocks stay adjacent in the list. */
  current_entry = FreeBlockListHead.Flink;
  while (current_entry != &FreeBlockListHead)
    {
      current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);

      if ((unsigned int)current > (unsigned int)blk)
        {
          blk->ListEntry.Flink = current_entry;
          blk->ListEntry.Blink = current_entry->Blink;
          current_entry->Blink->Flink = &blk->ListEntry;
          current_entry->Blink = &blk->ListEntry;
          EiFreeNonPagedPool += blk->Size;
          EiNrFreeBlocks++;
          return;
        }

      current_entry = current_entry->Flink;
    }
  InsertTailList(&FreeBlockListHead, &blk->ListEntry);
  EiFreeNonPagedPool += blk->Size;
  EiNrFreeBlocks++;
}

static void add_to_used_list(BLOCK_HDR* blk)
/*
 * FUNCTION: add the block to the used list (internal)
 */
{
  InsertHeadList(&UsedBlockListHead, &blk->ListEntry);
  EiUsedNonPagedPool += blk->Size;
  EiNrUsedBlocks++;
}


static void remove_from_free_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiFreeNonPagedPool -= current->Size;
  EiNrFreeBlocks--;
}


static void remove_from_used_list(BLOCK_HDR* current)
{
  RemoveEntryList(&current->ListEntry);
  EiUsedNonPagedPool -= current->Size;
  EiNrUsedBlocks--;
}


inline static void* block_to_address(BLOCK_HDR* blk)
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
        return ( (void *) ((int)blk + sizeof(BLOCK_HDR)) );
}

inline static BLOCK_HDR* address_to_block(void* addr)
{
        return (BLOCK_HDR *)
               ( ((int)addr) - sizeof(BLOCK_HDR) );
}

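/*
 * lookup_block implements a best-fit search. For requests smaller than
 * PAGE_SIZE any free block of sufficient size is a candidate. For larger
 * requests the returned data area must start on a page boundary, so the
 * search computes, for each candidate, how much larger the block must be
 * to leave room for the header below an aligned data address.
 */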
static BLOCK_HDR* lookup_block(unsigned int size)
{
   PLIST_ENTRY current_entry;
   BLOCK_HDR* current;
   BLOCK_HDR* best = NULL;
   ULONG new_size;
   PVOID block, block_boundary;

   current_entry = FreeBlockListHead.Flink;
   if (size < PAGE_SIZE)
   {
      while (current_entry != &FreeBlockListHead)
      {
         current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
         DPRINT("current %x size %x tag_next %x\n",
                current, current->Size, current->tag_next);
         if (current->Size >= size &&
             (best == NULL || current->Size < best->Size))
         {
            best = current;
            if (best->Size == size)
            {
               break;
            }
         }
         current_entry = current_entry->Flink;
      }
   }
   else
   {
      while (current_entry != &FreeBlockListHead)
      {
         current = CONTAINING_RECORD(current_entry, BLOCK_HDR, ListEntry);
         DPRINT("current %x size %x tag_next %x\n",
                current, current->Size, current->tag_next);

         block = block_to_address(current);
         block_boundary = (PVOID)PAGE_ROUND_UP((ULONG)block);
         new_size = (ULONG)block_boundary - (ULONG)block + size;
         if (new_size != size && (ULONG)block_boundary - (ULONG)block < sizeof(BLOCK_HDR))
         {
             /* Not enough room for a header below the boundary; skip to
                the next page boundary. */
             new_size += PAGE_SIZE;
         }
         if (current->Size >= new_size &&
             (best == NULL || current->Size < best->Size))
         {
            best = current;
         }
         current_entry = current_entry->Flink;
      }
   }
   return best;
}

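/*
 * Worked example for the aligned case, assuming a 4096-byte page: for a
 * request of 8192 bytes, take_block carves the free block so that the
 * returned data area starts exactly on a page boundary, with its
 * BLOCK_HDR ending at that boundary. Any space left in front of the
 * header (at least sizeof(BLOCK_HDR) bytes) stays on the free list as a
 * separate block.
 */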
static void* take_block(BLOCK_HDR* current, unsigned int size,
                        ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Allocate a used block of at least 'size' from the specified
 * free block
 * RETURNS: The address of the created memory block
 */
{

    BLOCK_HDR* blk;
    if (size >= PAGE_SIZE)
    {
       /* Position the header so that the data area is page aligned. */
       blk = address_to_block((PVOID)PAGE_ROUND_UP(block_to_address(current)));
       if (blk != current)
       {
          if ((ULONG)blk - (ULONG)current < sizeof(BLOCK_HDR))
          {
             blk = (BLOCK_HDR*)((ULONG)blk + PAGE_SIZE);
          }
          assert((ULONG)blk - (ULONG)current + size <= current->Size && (ULONG)blk - (ULONG)current >= sizeof(BLOCK_HDR));

          memset(blk, 0, sizeof(BLOCK_HDR));
          blk->Magic = BLOCK_HDR_FREE_MAGIC;
          blk->Size = current->Size - ((ULONG)blk - (ULONG)current);
          current->Size -= (blk->Size + sizeof(BLOCK_HDR));
          InsertHeadList(&current->ListEntry, &blk->ListEntry);
          EiFreeNonPagedPool -= sizeof(BLOCK_HDR);
          EiNrFreeBlocks++;
          current = blk;
       }
   }
   /*
    * If the block is much bigger than required then split it and
    * return a pointer to the allocated section. If the difference
    * between the sizes is marginal it makes no sense to have the
    * extra overhead
    */
   if (current->Size > size + sizeof(BLOCK_HDR))
     {
        BLOCK_HDR* free_blk;

        EiFreeNonPagedPool -= current->Size;

        /*
         * Replace the bigger block with a smaller block in the
         * same position in the list
         */
        free_blk = (BLOCK_HDR *)(((int)current)
                                 + sizeof(BLOCK_HDR) + size);
        free_blk->Magic = BLOCK_HDR_FREE_MAGIC;
        InsertHeadList(&current->ListEntry, &free_blk->ListEntry);
        free_blk->Size = current->Size - (sizeof(BLOCK_HDR) + size);

        current->Size=size;
        RemoveEntryList(&current->ListEntry);
        add_to_used_list(current);
        current->Magic = BLOCK_HDR_USED_MAGIC;
        current->Tag = Tag;
        current->Caller = Caller;
        current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
        MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

        EiFreeNonPagedPool += free_blk->Size;

        VALIDATE_POOL;
        return(block_to_address(current));
     }

   /*
    * Otherwise allocate the whole block
    */
   remove_from_free_list(current);
   add_to_used_list(current);

   current->Magic = BLOCK_HDR_USED_MAGIC;
   current->Tag = Tag;
   current->Caller = Caller;
   current->Dumped = FALSE;
#ifdef TAG_STATISTICS_TRACKING
   MiAddToTagHashTable(current);
#endif /* TAG_STATISTICS_TRACKING */

   VALIDATE_POOL;
   return(block_to_address(current));
}

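/*
 * When no free block can satisfy a request the pool grows: reserve a
 * fresh virtual region, back it with pages from the nonpaged pool
 * consumer, publish it as one large free block, merge it with any
 * neighbour, and then retry the allocation from the enlarged free list.
 */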
static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
/*
 * FUNCTION: Grow the executive heap to accommodate a block of at least 'size'
 * bytes
 */
{
   ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
   ULONG start;
   BLOCK_HDR* blk=NULL;
   ULONG i;
   KIRQL oldIrql;
   NTSTATUS Status;
   PVOID block = NULL;

   if (size >= PAGE_SIZE)
   {
      /* Allow for the alignment padding take_block may need. */
      nr_pages++;
   }

   start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);

   DPRINT("growing heap for block size %d, ",size);
   DPRINT("start %x\n",start);

   for (i=0;i<nr_pages;i++)
     {
       PHYSICAL_ADDRESS Page;
       /* FIXME: Check whether we can really wait here. */
       Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
       if (!NT_SUCCESS(Status))
         {
           KeBugCheck(0);
           return(NULL);
         }
       Status = MmCreateVirtualMapping(NULL,
                                       (PVOID)(start + (i*PAGE_SIZE)),
                                       PAGE_READWRITE|PAGE_SYSTEM,
                                       Page,
                                       FALSE);
        if (!NT_SUCCESS(Status))
          {
             DbgPrint("Unable to create virtual mapping\n");
             KeBugCheck(0);
          }
     }

   blk = (struct _BLOCK_HDR *)start;
   memset(blk, 0, sizeof(BLOCK_HDR));
   blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   memset(block_to_address(blk), 0xcc, blk->Size);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
   add_to_free_list(blk);
   merge_free_block(blk);

   blk = lookup_block(size);
   if (blk)
   {
      block = take_block(blk, size, Tag, Caller);
      VALIDATE_POOL;
   }
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   if (block == NULL)
   {
       CHECKPOINT1;
   }
   return block;
}

#endif /* not WHOLE_PAGE_ALLOCATIONS */

VOID STDCALL ExFreeNonPagedPool (PVOID block)
/*
 * FUNCTION: Releases previously allocated memory
 * ARGUMENTS:
 *        block = block to free
 */
{
#ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
   KIRQL oldIrql;

   if (block == NULL)
     {
       return;
     }

   DPRINT("freeing block %x\n",block);

   POOL_TRACE("ExFreePool(block %x), caller %x\n",block,
            ((PULONG)&block)[-1]);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   ExFreeWholePageBlock(block);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#else /* not WHOLE_PAGE_ALLOCATIONS */

   BLOCK_HDR* blk=address_to_block(block);
   KIRQL oldIrql;

   if (block == NULL)
     {
       return;
     }

   DPRINT("freeing block %x\n",blk);

   POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
            ((PULONG)&block)[-1]);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

   if (blk->Magic != BLOCK_HDR_USED_MAGIC)
     {
       if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
         {
           DbgPrint("ExFreePool of already freed address %x\n", block);
         }
       else
         {
           DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n",
                    block, blk->Magic);
         }
        KeBugCheck(0);
        return;
     }

   memset(block, 0xcc, blk->Size);

#ifdef TAG_STATISTICS_TRACKING
   MiRemoveFromTagHashTable(blk);
#endif /* TAG_STATISTICS_TRACKING */
   remove_from_used_list(blk);
   blk->Magic = BLOCK_HDR_FREE_MAGIC;
   blk->Tag = 0;
   blk->Caller = NULL;
   blk->tag_next = NULL;
   add_to_free_list(blk);
   merge_free_block(blk);

   VALIDATE_POOL;
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);

#endif /* WHOLE_PAGE_ALLOCATIONS */
}

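/*
 * Note: in the list-based configuration below, both allocation paths zero
 * the returned block (see the memset calls); the whole-page configuration
 * returns the block without zeroing it.
 */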
PVOID STDCALL
ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
{
#ifdef WHOLE_PAGE_ALLOCATIONS
   PVOID block;
   KIRQL oldIrql;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   /*
    * accommodate this useful idiom
    */
   if (Size == 0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }

   block = ExAllocateWholePageBlock(Size);
   KeReleaseSpinLock(&MmNpoolLock, oldIrql);
   return(block);

#else /* not WHOLE_PAGE_ALLOCATIONS */
   PVOID block;
   BLOCK_HDR* best = NULL;
   KIRQL oldIrql;

   POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
              Size,Caller);

   KeAcquireSpinLock(&MmNpoolLock, &oldIrql);

   VALIDATE_POOL;

#if 0
   /* after some allocations print the npaged pool stats */
#ifdef TAG_STATISTICS_TRACKING
   {
       static ULONG counter = 0;
       if (counter++ % 100000 == 0)
       {
          MiDebugDumpNonPagedPoolStats(FALSE);
       }
   }
#endif
#endif
   /*
    * accommodate this useful idiom
    */
   if (Size == 0)
     {
        POOL_TRACE("= NULL\n");
        KeReleaseSpinLock(&MmNpoolLock, oldIrql);
        return(NULL);
     }
   /* Make the size dword aligned; this makes the block dword aligned */
   Size = ROUND_UP(Size, 4);
   /*
    * Look for an already created block of sufficient size
    */
   best = lookup_block(Size);
   if (best == NULL)
   {
       KeReleaseSpinLock(&MmNpoolLock, oldIrql);
       block = grow_kernel_pool(Size, Tag, Caller);
       assert(block != NULL);
       memset(block,0,Size);
   }
   else
   {
       block=take_block(best, Size, Tag, Caller);
       VALIDATE_POOL;
       KeReleaseSpinLock(&MmNpoolLock, oldIrql);
       memset(block,0,Size);
   }
   return(block);
#endif /* WHOLE_PAGE_ALLOCATIONS */
}

#ifdef WHOLE_PAGE_ALLOCATIONS

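/*
 * Whole-page blocks are laid out so that the block ends exactly at the
 * end of its last mapped page, with one extra unmapped page reserved
 * after it; an overrun therefore faults immediately. The page count is
 * stored in the ULONG just below the returned address:
 *
 *   [Address ... pad][NrPages][user block (UserSize bytes)][guard page]
 */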
PVOID STDCALL
ExAllocateWholePageBlock(ULONG UserSize)
{
  PVOID Address;
  PHYSICAL_ADDRESS Page;
  ULONG i;
  ULONG Size;
  ULONG NrPages;

  Size = sizeof(ULONG) + UserSize;
  NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;

  /* Reserve one page more than we map: the unmapped page is the guard. */
  Address = MiAllocNonPagedPoolRegion(NrPages + 1);

  for (i = 0; i < NrPages; i++)
    {
      Page = MmAllocPage(MC_NPPOOL, 0);
      if (Page.QuadPart == 0LL)
        {
          KeBugCheck(0);
        }
      MmCreateVirtualMapping(NULL,
                             (PVOID)((ULONG)Address + (i * PAGE_SIZE)),
                             PAGE_READWRITE | PAGE_SYSTEM,
                             Page,
                             TRUE);
    }

  *((PULONG)((ULONG)Address + (NrPages * PAGE_SIZE) - Size)) = NrPages;
  return((PVOID)((ULONG)Address + (NrPages * PAGE_SIZE) - UserSize));
}

VOID STDCALL
ExFreeWholePageBlock(PVOID Addr)
{
  ULONG NrPages;

  if ((ULONG)Addr < kernel_pool_base ||
      (ULONG)Addr >= (kernel_pool_base + NONPAGED_POOL_SIZE))
    {
      DbgPrint("Block %x found outside pool area\n", Addr);
      KeBugCheck(0);
    }
  NrPages = *(PULONG)((ULONG)Addr - sizeof(ULONG));
  MiFreeNonPagedPoolRegion((PVOID)PAGE_ROUND_DOWN((ULONG)Addr), NrPages, TRUE);
}

#endif /* WHOLE_PAGE_ALLOCATIONS */

/* EOF */