1 /* $Id$
2  *
3  * COPYRIGHT:    See COPYING in the top level directory
4  * PROJECT:      ReactOS kernel
5  * FILE:         ntoskrnl/mm/npool.c
6  * PURPOSE:      Implements the kernel memory pool
7  * PROGRAMMER:   David Welch (welch@cwcom.net)
8  * UPDATE HISTORY:
9  *               27/05/98: Created
10  *               10/06/98: Bug fixes by Iwan Fatahi (i_fatahi@hotmail.com)
11  *                         in take_block (if current bigger than required)
12  *                         in remove_from_used_list 
13  *                         in ExFreePool
14  *               23/08/98: Fixes from Robert Bergkvist (fragdance@hotmail.com)
15  */
16
17 /* INCLUDES ****************************************************************/
18
19 #include <ddk/ntddk.h>
20 #include <internal/mm.h>
21 #include <internal/ntoskrnl.h>
22 #include <internal/pool.h>
23
24 #define NDEBUG
25 #include <internal/debug.h>
26
27 /* Enable strict checking of the nonpaged pool on every allocation */
28 /*#define ENABLE_VALIDATE_POOL*/
29
30 /* Enable tracking of statistics about the tagged blocks in the pool */
31 #define TAG_STATISTICS_TRACKING
32
33 /* 
34  * Put each block in its own range of pages and position the block at the
35  * end of the range so any accesses beyond the end of the block are to invalid
36  * memory locations. 
37  */
38 /*#define WHOLE_PAGE_ALLOCATIONS*/
39
40 #ifdef ENABLE_VALIDATE_POOL
41 #define VALIDATE_POOL validate_kernel_pool()
42 #else
43 #define VALIDATE_POOL
44 #endif
45
46 #if 0
47 #define POOL_TRACE(args...) do { DbgPrint(args); } while(0);
48 #else
49 #define POOL_TRACE(args...)
50 #endif
51
52 /* avl types ****************************************************************/
53
54 /* FIXME:
55  *   These declarations should be moved into a separate header file.
56  */
57
58 typedef struct _NODE
59 {
60   struct _NODE* link[2];
61   struct _NODE* parent;
62   signed char balance;
63 } NODE, *PNODE;
64
65 /* TYPES *******************************************************************/
66
67 #define BLOCK_HDR_USED_MAGIC (0xdeadbeef)
68 #define BLOCK_HDR_FREE_MAGIC (0xceadbeef)
69
70 /*
71  * fields present at the start of a block (this is for internal use only)
72  */
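/*
 * The data area handed out to the caller starts immediately after the header
 * and 'previous' points to the header of the physically preceding block. The
 * Used members are valid while Magic is BLOCK_HDR_USED_MAGIC, the Free
 * members while it is BLOCK_HDR_FREE_MAGIC.
 */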
73 typedef struct _BLOCK_HDR
74 {
75   ULONG Magic;
76   ULONG Size;
77   struct _BLOCK_HDR* previous;
78   union
79   {
80     struct
81     {
82       LIST_ENTRY ListEntry;
83       ULONG Tag;
84       PVOID Caller;
85       LIST_ENTRY TagListEntry;
86       BOOLEAN Dumped;
87     } Used;
88     struct
89     {
90       NODE Node;
91     } Free;
92   };
93 } BLOCK_HDR;
94
95 PVOID STDCALL 
96 ExAllocateWholePageBlock(ULONG Size);
97 VOID STDCALL
98 ExFreeWholePageBlock(PVOID Addr);
99
100 /* GLOBALS *****************************************************************/
101
102 extern PVOID MiNonPagedPoolStart;
103 extern ULONG MiNonPagedPoolLength;
104
105 /*
106  * Root of the AVL tree of free blocks
107  */
108 static PNODE FreeBlockListRoot = NULL;
109
110 /*
111  * Head of the list of in-use blocks
112  */
113 static LIST_ENTRY UsedBlockListHead;
114
115 static LIST_ENTRY AddressListHead;
116
117 #ifndef WHOLE_PAGE_ALLOCATIONS
118 /*
119  * Count of free blocks
120  */
121 static ULONG EiNrFreeBlocks = 0;
122
123 /*
124  * Count of used blocks
125  */
126 static ULONG EiNrUsedBlocks = 0;
127 #endif
128
129 /*
130  * Lock that protects the non-paged pool data structures
131  */
132 static KSPIN_LOCK MmNpoolLock;
133
134 /*
135  * Total memory used for free nonpaged pool blocks
136  */
137 ULONG EiFreeNonPagedPool = 0;
138
139 /*
140  * Total memory used for nonpaged pool blocks
141  */
142 ULONG EiUsedNonPagedPool = 0;
143
144 /*
145  * Allocate a range of memory in the nonpaged pool
146  */
147 PVOID
148 MiAllocNonPagedPoolRegion(unsigned int nr_pages);
149
150 VOID
151 MiFreeNonPagedPoolRegion(PVOID Addr, ULONG Count, BOOLEAN Free);
152
153 #ifdef TAG_STATISTICS_TRACKING
154 #define TAG_HASH_TABLE_SIZE       (1024)
155 static LIST_ENTRY tag_hash_table[TAG_HASH_TABLE_SIZE];
156 #endif /* TAG_STATISTICS_TRACKING */
157
158 #ifdef WHOLE_PAGE_ALLOCATIONS
159 static RTL_BITMAP NonPagedPoolAllocMap;
160 static ULONG NonPagedPoolAllocMapHint;
161 static ULONG MiCurrentNonPagedPoolLength = 0;
162 #else
163 static PULONG MiNonPagedPoolAllocMap;
164 static ULONG MiNonPagedPoolNrOfPages;
165 #endif /* WHOLE_PAGE_ALLOCATIONS */
166
167 /* avl helper functions ****************************************************/
168
169 void DumpFreeBlockNode(PNODE p)
170 {
171   static int count = 0;
172   BLOCK_HDR* blk;
173
174   count++;
175
176   if (p)
177     {
178       DumpFreeBlockNode(p->link[0]);
179       blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
180       DbgPrint("%08x %8d (%d)\n", blk, blk->Size, count);
181       DumpFreeBlockNode(p->link[1]);
182     }
183
184   count--;
185 }
186 void DumpFreeBlockTree(void)
187 {
188   DbgPrint("--- Begin tree ------------------\n");
189   DbgPrint("%08x\n", CONTAINING_RECORD(FreeBlockListRoot, BLOCK_HDR, Free.Node));
190   DumpFreeBlockNode(FreeBlockListRoot);
191   DbgPrint("--- End tree --------------------\n");
192 }
193
194 int compare_node(PNODE p1, PNODE p2)
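/*
 * FUNCTION: Order two free blocks by size, then by address; this is the
 * ordering used for the tree of free blocks
 */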
195 {
196   BLOCK_HDR* blk1 = CONTAINING_RECORD(p1, BLOCK_HDR, Free.Node);
197   BLOCK_HDR* blk2 = CONTAINING_RECORD(p2, BLOCK_HDR, Free.Node);
198
199   if (blk1->Size == blk2->Size)
200     {
201       if (blk1 < blk2)
202         {
203           return -1;
204         }
205       if (blk1 > blk2)
206         {
207           return 1;
208         }
209     }
210   else
211     {
212       if (blk1->Size < blk2->Size)
213         {
214           return -1;
215         }
216       if (blk1->Size > blk2->Size)
217         {
218           return 1;
219         }
220     }
221   return 0;
222
223 }
224
225 int compare_value(PVOID value, PNODE p)
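/*
 * FUNCTION: Compare a requested size (passed via value) with the size of the
 * free block that contains node p
 */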
226 {
227   BLOCK_HDR* blk = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
228   ULONG v = *(PULONG)value;
229
230   if (v < blk->Size)
231     {
232       return -1;
233     }
234   if (v > blk->Size)
235     {
236       return 1;
237     }
238   return 0;
239 }
240
241 /* avl functions **********************************************************/
242
243 /* FIXME:
244  *   The avl functions should be moved into a separate file.
245  */
246
247 /* The avl_insert and avl_remove functions are based on libavl (a library for the manipulation of binary trees). */
248
249 void avl_insert (PNODE * root, PNODE n, int (*compare)(PNODE, PNODE))
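/*
 * FUNCTION: Insert the node n into the AVL tree rooted at *root, ordered by
 * the compare callback, and rebalance the tree with at most one single or
 * double rotation
 */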
250 {
251   PNODE y;     /* Top node to update balance factor, and parent. */
252   PNODE p, q;  /* Iterator, and parent. */
253   PNODE w;     /* New root of rebalanced subtree. */
254   int dir;     /* Direction to descend. */
255
256   n->link[0] = n->link[1] = n->parent = NULL;
257   n->balance = 0;
258
259   y = *root;
260   for (q = NULL, p = *root; p != NULL; q = p, p = p->link[dir])
261     {
262       dir = compare(n, p) > 0;
263       if (p->balance != 0)
264         {
265           y = p;
266         }
267     }
268
269   n->parent = q;
270   if (q != NULL)
271     {
272       q->link[dir] = n;
273     }
274   else
275     {
276       *root = n;
277     }
278
279   if (*root == n)
280     {
281       return;
282     }
283
284   for (p = n; p != y; p = q)
285     {
286       q = p->parent;
287       dir = q->link[0] != p;
288       if (dir == 0)
289         {
290           q->balance--;
291         }
292       else
293         {
294           q->balance++;
295         }
296     }
297
298   if (y->balance == -2)
299     {
300       PNODE x = y->link[0];
301       if (x->balance == -1)
302         {
303           w = x;
304           y->link[0] = x->link[1];
305           x->link[1] = y;
306           x->balance = y->balance = 0;
307           x->parent = y->parent;
308           y->parent = x;
309           if (y->link[0] != NULL)
310             {
311               y->link[0]->parent = y;
312             }
313         }
314       else
315         {
316           assert (x->balance == +1);
317           w = x->link[1];
318           x->link[1] = w->link[0];
319           w->link[0] = x;
320           y->link[0] = w->link[1];
321           w->link[1] = y;
322           if (w->balance == -1)
323             {
324               x->balance = 0;
325               y->balance = +1;
326             }
327           else if (w->balance == 0)
328             {
329               x->balance = y->balance = 0;
330             }
331           else /* w->balance == +1 */
332             {
333               x->balance = -1;
334               y->balance = 0;
335             }
336           w->balance = 0;
337           w->parent = y->parent;
338           x->parent = y->parent = w;
339           if (x->link[1] != NULL)
340             {
341               x->link[1]->parent = x;
342             }
343           if (y->link[0] != NULL)
344             {
345               y->link[0]->parent = y;
346             }
347         }
348     }
349   else if (y->balance == +2)
350     {
351       PNODE x = y->link[1];
352       if (x->balance == +1)
353         {
354           w = x;
355           y->link[1] = x->link[0];
356           x->link[0] = y;
357           x->balance = y->balance = 0;
358           x->parent = y->parent;
359           y->parent = x;
360           if (y->link[1] != NULL)
361             {
362               y->link[1]->parent = y;
363             }
364         }
365       else
366         {
367           assert (x->balance == -1);
368           w = x->link[0];
369           x->link[0] = w->link[1];
370           w->link[1] = x;
371           y->link[1] = w->link[0];
372           w->link[0] = y;
373           if (w->balance == 1)
374             {
375               x->balance = 0;
376               y->balance = -1;
377             }
378           else if (w->balance == 0)
379             {
380               x->balance = y->balance = 0;
381             }
382           else /* w->balance == -1 */
383             {
384               x->balance = +1;
385               y->balance = 0;
386             }
387           w->balance = 0;
388           w->parent = y->parent;
389           x->parent = y->parent = w;
390           if (x->link[0] != NULL)
391             {
392               x->link[0]->parent = x;
393             }
394           if (y->link[1] != NULL)
395             {
396               y->link[1]->parent = y;
397             }
398         }
399     }
400   else
401     {
402       return;
403     }
404   if (w->parent != NULL)
405     {
406       w->parent->link[y != w->parent->link[0]] = w;
407     }
408   else
409     {
410       *root = w;
411     }
412
413   return;
414 }
415
416 void avl_remove (PNODE *root, PNODE item, int (*compare)(PNODE, PNODE))
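/*
 * FUNCTION: Remove the node item from the AVL tree rooted at *root and
 * rebalance the tree on the way back up towards the root
 */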
417 {
418   PNODE p;  /* Traverses tree to find node to delete. */
419   PNODE q;  /* Parent of |p|. */
420   int dir;  /* Side of |q| on which |p| is linked. */
421
422   if (root == NULL || *root == NULL)
423     {
424       return ;
425     }
426
427   p = item;
428   q = p->parent;
429   if (q == NULL)
430     {
431       q = (PNODE) root;
432       dir = 0;
433     }
434   else
435     {
436       dir = compare(p, q) > 0;
437     }
438
439   if (p->link[1] == NULL)
440     {
441       q->link[dir] = p->link[0];
442       if (q->link[dir] != NULL)
443         {
444           q->link[dir]->parent = p->parent;
445         }
446     }
447   else
448     {
449       PNODE r = p->link[1];
450       if (r->link[0] == NULL)
451         {
452           r->link[0] = p->link[0];
453           q->link[dir] = r;
454           r->parent = p->parent;
455           if (r->link[0] != NULL)
456             {
457               r->link[0]->parent = r;
458             }
459           r->balance = p->balance;
460           q = r;
461           dir = 1;
462         }
463       else
464         {
465           PNODE s = r->link[0];
466           while (s->link[0] != NULL)
467             {
468               s = s->link[0];
469             }
470           r = s->parent;
471           r->link[0] = s->link[1];
472           s->link[0] = p->link[0];
473           s->link[1] = p->link[1];
474           q->link[dir] = s;
475           if (s->link[0] != NULL)
476             {
477               s->link[0]->parent = s;
478             }
479           s->link[1]->parent = s;
480           s->parent = p->parent;
481           if (r->link[0] != NULL)
482             {
483               r->link[0]->parent = r;
484             }
485           s->balance = p->balance;
486           q = r;
487           dir = 0;
488         }
489     }
490
491   item->link[0] = item->link[1] = item->parent = NULL;
492   item->balance = 0;
493
494   while (q != (PNODE) root)
495     {
496       PNODE y = q;
497
498       if (y->parent != NULL)
499         {
500           q = y->parent;
501         }
502       else
503         {
504           q = (PNODE) root;
505         }
506
507       if (dir == 0)
508         {
509           dir = q->link[0] != y;
510           y->balance++;
511           if (y->balance == +1)
512             {
513               break;
514             }
515           else if (y->balance == +2)
516             {
517               PNODE x = y->link[1];
518               if (x->balance == -1)
519                 {
520                   PNODE w;
521
522                   assert (x->balance == -1);
523                   w = x->link[0];
524                   x->link[0] = w->link[1];
525                   w->link[1] = x;
526                   y->link[1] = w->link[0];
527                   w->link[0] = y;
528                   if (w->balance == +1)
529                     {
530                       x->balance = 0;
531                       y->balance = -1;
532                     }
533                   else if (w->balance == 0)
534                     {
535                       x->balance = y->balance = 0;
536                     }
537                   else /* w->balance == -1 */
538                     {
539                       x->balance = +1;
540                       y->balance = 0;
541                     }
542                   w->balance = 0;
543                   w->parent = y->parent;
544                   x->parent = y->parent = w;
545                   if (x->link[0] != NULL)
546                     {
547                       x->link[0]->parent = x;
548                     }
549                   if (y->link[1] != NULL)
550                     {
551                       y->link[1]->parent = y;
552                     }
553                   q->link[dir] = w;
554                 }
555               else
556                 {
557                   y->link[1] = x->link[0];
558                   x->link[0] = y;
559                   x->parent = y->parent;
560                   y->parent = x;
561                   if (y->link[1] != NULL)
562                     {
563                       y->link[1]->parent = y;
564                     }
565                   q->link[dir] = x;
566                   if (x->balance == 0)
567                     {
568                       x->balance = -1;
569                       y->balance = +1;
570                       break;
571                     }
572                   else
573                     {
574                       x->balance = y->balance = 0;
575                       y = x;
576                     }
577                 }
578             }
579         }
580       else
581         {
582           dir = q->link[0] != y;
583           y->balance--;
584           if (y->balance == -1)
585             {
586               break;
587             }
588           else if (y->balance == -2)
589             {
590               PNODE x = y->link[0];
591               if (x->balance == +1)
592                 {
593                   PNODE w;
594                   assert (x->balance == +1);
595                   w = x->link[1];
596                   x->link[1] = w->link[0];
597                   w->link[0] = x;
598                   y->link[0] = w->link[1];
599                   w->link[1] = y;
600                   if (w->balance == -1)
601                     {
602                       x->balance = 0;
603                       y->balance = +1;
604                     }
605                   else if (w->balance == 0)
606                     {
607                       x->balance = y->balance = 0;
608                     }
609                   else /* w->balance == +1 */
610                     {
611                       x->balance = -1;
612                       y->balance = 0;
613                     }
614                   w->balance = 0;
615                   w->parent = y->parent;
616                   x->parent = y->parent = w;
617                   if (x->link[1] != NULL)
618                     {
619                       x->link[1]->parent = x;
620                     }
621                   if (y->link[0] != NULL)
622                     {
623                       y->link[0]->parent = y;
624                     }
625                   q->link[dir] = w;
626                 }
627               else
628                 {
629                   y->link[0] = x->link[1];
630                   x->link[1] = y;
631                   x->parent = y->parent;
632                   y->parent = x;
633                   if (y->link[0] != NULL)
634                     {
635                       y->link[0]->parent = y;
636                     }
637                   q->link[dir] = x;
638                   if (x->balance == 0)
639                     {
640                       x->balance = +1;
641                       y->balance = -1;
642                       break;
643                     }
644                   else
645                     {
646                       x->balance = y->balance = 0;
647                       y = x;
648                     }
649                 }
650             }
651         }
652     }
653
654 }
655
656 PNODE _cdecl avl_get_first(PNODE root)
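/*
 * FUNCTION: Return the leftmost (smallest) node of the tree, or NULL if the
 * tree is empty
 */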
657 {
658   PNODE p;
659   if (root == NULL)
660     {
661       return NULL;
662     }
663   p = root;
664   while (p->link[0])
665     {
666       p = p->link[0];
667     }
668   return p;
669 }
670
671 PNODE avl_get_next(PNODE root, PNODE p)
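/*
 * FUNCTION: Return the in-order successor of p, or NULL if p is the last
 * node of the tree
 */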
672 {
673   PNODE q;
674   if (p->link[1])
675     {
676       p = p->link[1];
677       while(p->link[0])
678         {
679           p = p->link[0];
680         }
681       return p;
682     }
683   else
684     {
685       q = p->parent;
686       while (q && q->link[1] == p)
687         {
688           p = q;
689           q = q->parent;
690         }
691       if (q == NULL)
692         {
693           return NULL;
694         }
695       else
696         {
697           return q;
698         }
699     }
700 }
701
702 PNODE avl_find_equal_or_greater(PNODE root, ULONG size, int (compare)(PVOID, PNODE))
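/*
 * FUNCTION: Return the node of the free block with the smallest size that is
 * greater than or equal to size, or NULL if no free block is large enough
 */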
703 {
704   PNODE p;
705   PNODE prev = NULL;
706   int cmp;
707
708   for (p = root; p != NULL;)
709     {
710       cmp = compare((PVOID)&size, p);
711       if (cmp < 0)
712         {
713           prev = p;
714           p = p->link[0];
715         }
716       else if (cmp > 0)
717         {
718           p = p->link[1];
719         }
720       else
721         {
722           while (p->link[0])
723             {
724               cmp = compare((PVOID)&size, p->link[0]);
725               if (cmp != 0)
726                 {
727                   break;
728                 }
729               p = p->link[0];
730             }
731           return p;
732         }
733     }
734   return prev;
735 }
736
737 /* non paged pool functions ************************************************/
738
739 #ifdef TAG_STATISTICS_TRACKING
740 VOID
741 MiRemoveFromTagHashTable(BLOCK_HDR* block)
742      /*
743       * Remove a block from the tag hash table
744       */
745 {
746   if (block->Used.Tag == 0)
747     {
748       return;
749     }
750
751   RemoveEntryList(&block->Used.TagListEntry);
752 }
753
754 VOID
755 MiAddToTagHashTable(BLOCK_HDR* block)
756      /*
757       * Add a block to the tag hash table
758       */
759 {
760   ULONG hash;
761
762   if (block->Used.Tag == 0)
763     {
764       return;
765     }
766
767   hash = block->Used.Tag % TAG_HASH_TABLE_SIZE;
768
769   InsertHeadList(&tag_hash_table[hash], &block->Used.TagListEntry);
770 }
771 #endif /* TAG_STATISTICS_TRACKING */
772
773 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
774 VOID STATIC
775 MiDumpTagStats(ULONG CurrentTag, ULONG CurrentNrBlocks, ULONG CurrentSize)
776 {
777   CHAR c1, c2, c3, c4;
778   
779   c1 = (CurrentTag >> 24) & 0xFF;
780   c2 = (CurrentTag >> 16) & 0xFF;
781   c3 = (CurrentTag >> 8) & 0xFF;
782   c4 = CurrentTag & 0xFF;
783   
784   if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
785     {
786       DbgPrint("Tag %x (%c%c%c%c) Blocks %d Total Size %d Average Size %d\n",
787                CurrentTag, c4, c3, c2, c1, CurrentNrBlocks,
788                CurrentSize, CurrentSize / CurrentNrBlocks);
789     }
790   else
791     {
792       DbgPrint("Tag %x Blocks %d Total Size %d Average Size %d\n",
793                CurrentTag, CurrentNrBlocks, CurrentSize,
794                CurrentSize / CurrentNrBlocks);
795     }
796 }
797 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
798
799 VOID
800 MiDebugDumpNonPagedPoolStats(BOOLEAN NewOnly)
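/*
 * FUNCTION: Print per-tag statistics (number of blocks, total and average
 * size) for the used blocks in the tag hash table; if NewOnly is TRUE only
 * blocks that have not been dumped before are counted
 */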
801 {
802 #if defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS)
803   ULONG i;
804   BLOCK_HDR* current;
805   ULONG CurrentTag;
806   ULONG CurrentNrBlocks;
807   ULONG CurrentSize;
808   ULONG TotalBlocks;
809   ULONG TotalSize;
810   LIST_ENTRY tmpListHead;
811   PLIST_ENTRY current_entry;
812
813   DbgPrint("******* Dumping non paged pool stats ******\n");
814   TotalBlocks = 0;
815   TotalSize = 0;
816   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
817     {
818       InitializeListHead(&tmpListHead);
819
820       while (!IsListEmpty(&tag_hash_table[i]))
821         {
822           CurrentTag = 0;
823
824           current_entry = tag_hash_table[i].Flink;
825           while (current_entry != &tag_hash_table[i])
826             {
827               current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.TagListEntry);
828               current_entry = current_entry->Flink;
829               if (CurrentTag == 0)
830                 {
831                   CurrentTag = current->Used.Tag;
832                   CurrentNrBlocks = 0;
833                   CurrentSize = 0;
834                 }
835               if (current->Used.Tag == CurrentTag)
836                 {
837                   RemoveEntryList(&current->Used.TagListEntry);
838                   InsertHeadList(&tmpListHead, &current->Used.TagListEntry);
839                   if (!NewOnly || !current->Used.Dumped)
840                     {
841                       CurrentNrBlocks++;
842                       TotalBlocks++;
843                       CurrentSize += current->Size;
844                       TotalSize += current->Size;
845                       current->Used.Dumped = TRUE;
846                     }
847                 }
848             }
849           if (CurrentTag != 0 && CurrentNrBlocks != 0)
850             {
851               MiDumpTagStats(CurrentTag, CurrentNrBlocks, CurrentSize);
852             }
853         }
854       if (!IsListEmpty(&tmpListHead))
855         {
856           tag_hash_table[i].Flink = tmpListHead.Flink;
857           tag_hash_table[i].Flink->Blink = &tag_hash_table[i];
858           tag_hash_table[i].Blink = tmpListHead.Blink;
859           tag_hash_table[i].Blink->Flink = &tag_hash_table[i];
860         }
861     }
862   if (TotalBlocks != 0)
863     {
864       DbgPrint("TotalBlocks %d TotalSize %d AverageSize %d\n",
865                TotalBlocks, TotalSize, TotalSize / TotalBlocks);
866     }
867   else
868     {
869       DbgPrint("TotalBlocks %d TotalSize %d\n",
870                TotalBlocks, TotalSize);
871     }
872   DbgPrint("Freeblocks %d TotalFreeSize %d AverageFreeSize %d\n", 
873           EiNrFreeBlocks, EiFreeNonPagedPool, EiNrFreeBlocks ? EiFreeNonPagedPool / EiNrFreeBlocks : 0);
874   DbgPrint("***************** Dump Complete ***************\n");
875 #endif /* defined(TAG_STATISTICS_TRACKING) && !defined(WHOLE_PAGE_ALLOCATIONS) */
876 }
877
878 VOID
879 MiDebugDumpNonPagedPool(BOOLEAN NewOnly)
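/*
 * FUNCTION: Print size, tag and allocator for every block on the used list;
 * if NewOnly is TRUE only blocks that have not been dumped before are printed
 */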
880 {
881 #ifndef WHOLE_PAGE_ALLOCATIONS
882    BLOCK_HDR* current;
883    PLIST_ENTRY current_entry;
884    KIRQL oldIrql;
885    
886    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
887
888    DbgPrint("******* Dumping non paged pool contents ******\n");
889    current_entry = UsedBlockListHead.Flink;
890    while (current_entry != &UsedBlockListHead)
891      {
892        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
893        if (!NewOnly || !current->Used.Dumped)
894          {
895            CHAR c1, c2, c3, c4;
896            
897            c1 = (current->Used.Tag >> 24) & 0xFF;
898            c2 = (current->Used.Tag >> 16) & 0xFF;
899            c3 = (current->Used.Tag >> 8) & 0xFF;
900            c4 = current->Used.Tag & 0xFF;
901            
902            if (isprint(c1) && isprint(c2) && isprint(c3) && isprint(c4))
903              {
904                DbgPrint("Size 0x%x Tag 0x%x (%c%c%c%c) Allocator 0x%x\n",
905                         current->Size, current->Used.Tag, c4, c3, c2, c1, 
906                         current->Used.Caller);
907              }
908            else
909              {
910                DbgPrint("Size 0x%x Tag 0x%x Allocator 0x%x\n",
911                         current->Size, current->Used.Tag, current->Used.Caller);
912              }
913            current->Used.Dumped = TRUE;
914          }
915        current_entry = current_entry->Flink;
916      }
917    DbgPrint("***************** Dump Complete ***************\n");
918    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
919 #endif /* not WHOLE_PAGE_ALLOCATIONS */
920 }
921
922 #ifndef WHOLE_PAGE_ALLOCATIONS
923
924 #ifdef ENABLE_VALIDATE_POOL
925 static void validate_free_list(void)
926 /*
927  * FUNCTION: Validate the integrity of the free block tree
928  */
929 {
930    BLOCK_HDR* current;
931    unsigned int blocks_seen=0;     
932    PNODE p;
933
934    p = avl_get_first(FreeBlockListRoot);
935
936    while(p)
937      {
938         PVOID base_addr;
939
940         current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
941         base_addr = (PVOID)current;
942
943         if (current->Magic != BLOCK_HDR_FREE_MAGIC)
944           {
945              DbgPrint("Bad block magic (probable pool corruption) at %x\n",
946                       current);
947              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
948           }
949         
950         if (base_addr < MiNonPagedPoolStart ||
951             base_addr + sizeof(BLOCK_HDR) + current->Size > MiNonPagedPoolStart + MiNonPagedPoolLength)
952           {                    
953              DbgPrint("Block %x found outside pool area\n",current);
954              DbgPrint("Size %d\n",current->Size);
955              DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
956                       MiNonPagedPoolStart+MiNonPagedPoolLength);
957              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
958           }
959         blocks_seen++;
960         if (blocks_seen > EiNrFreeBlocks)
961           {
962              DbgPrint("Too many blocks on free list\n");
963              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
964           }
965         p = avl_get_next(FreeBlockListRoot, p);
966      }
967 }
968
969 static void validate_used_list(void)
970 /*
971  * FUNCTION: Validate the integrity of the list of used blocks
972  */
973 {
974    BLOCK_HDR* current;
975    PLIST_ENTRY current_entry;
976    unsigned int blocks_seen=0;
977    
978    current_entry = UsedBlockListHead.Flink;
979    while (current_entry != &UsedBlockListHead)
980      {
981         PVOID base_addr;
982
983         current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
984         base_addr = (PVOID)current;
985         
986         if (current->Magic != BLOCK_HDR_USED_MAGIC)
987           {
988              DbgPrint("Bad block magic (probable pool corruption) at %x\n",
989                       current);
990              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
991           }
992         if (base_addr < MiNonPagedPoolStart ||
993             (base_addr+current->Size) >
994             MiNonPagedPoolStart+MiNonPagedPoolLength)
995           {
996              DbgPrint("Block %x found outside pool area\n",current);
997              DbgPrint("Size %d\n",current->Size);
998              DbgPrint("Limits are %x %x\n",MiNonPagedPoolStart,
999                       MiNonPagedPoolStart+MiNonPagedPoolLength);
1000              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1001           }
1002         blocks_seen++;
1003         if (blocks_seen > EiNrUsedBlocks)
1004           {
1005              DbgPrint("Too many blocks on used list\n");
1006              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1007           }
1008         if (current->Used.ListEntry.Flink != &UsedBlockListHead &&
1009             current->Used.ListEntry.Flink->Blink != &current->Used.ListEntry)
1010           {
1011              DbgPrint("%s:%d:Break in list (current %x next %x "
1012                       "current->next->previous %x)\n",
1013                       __FILE__,__LINE__,current, current->Used.ListEntry.Flink,
1014                       current->Used.ListEntry.Flink->Blink);
1015              KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1016           }
1017
1018         current_entry = current_entry->Flink;
1019      }
1020 }
1021
1022 static void check_duplicates(BLOCK_HDR* blk)
1023 /*
1024  * FUNCTION: Check a block has no duplicates
1025  * ARGUMENTS:
1026  *           blk = block to check
1027  * NOTE: Bug checks if duplicates are found
1028  */
1029 {
1030    char* base = (char*)blk;
1031    char* last = ((char*)blk) + sizeof(BLOCK_HDR) + blk->Size;
1032    BLOCK_HDR* current;
1033    PLIST_ENTRY current_entry;
1034    PNODE p;
1035
1036    p = avl_get_first(FreeBlockListRoot);
1037
1038    while (p)
1039      {
1040        current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1041
1042        if (current->Magic != BLOCK_HDR_FREE_MAGIC)
1043          {
1044            DbgPrint("Bad block magic (probable pool corruption) at %x\n",
1045                     current);
1046            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1047          }
1048        
1049        if ( (char*)current > base && (char*)current < last ) 
1050          {
1051            DbgPrint("intersecting blocks on list\n");
1052            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1053          }
1054        if  ( (char*)current < base &&
1055              ((char*)current + current->Size + sizeof(BLOCK_HDR))
1056              > base )
1057          {
1058            DbgPrint("intersecting blocks on list\n");
1059            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1060           }
1061        p = avl_get_next(FreeBlockListRoot, p);
1062      }
1063
1064    current_entry = UsedBlockListHead.Flink;
1065    while (current_entry != &UsedBlockListHead)
1066      {
1067        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1068
1069        if ( (char*)current > base && (char*)current < last ) 
1070          {
1071            DbgPrint("intersecting blocks on list\n");
1072            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1073          }
1074        if  ( (char*)current < base &&
1075              ((char*)current + current->Size + sizeof(BLOCK_HDR))
1076              > base )
1077          {
1078            DbgPrint("intersecting blocks on list\n");
1079            KEBUGCHECK(/*KBUG_POOL_FREE_LIST_CORRUPT*/0);
1080          }
1081        
1082        current_entry = current_entry->Flink;
1083      }
1084    
1085 }
1086
1087 static void validate_kernel_pool(void)
1088 /*
1089  * FUNCTION: Checks the integrity of the kernel memory heap
1090  */
1091 {
1092    BLOCK_HDR* current;
1093    PLIST_ENTRY current_entry;
1094    PNODE p;
1095    
1096    validate_free_list();
1097    validate_used_list();
1098
1099    p = avl_get_first(FreeBlockListRoot);
1100    while (p)
1101      {
1102        current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1103        check_duplicates(current);
1104        p = avl_get_next(FreeBlockListRoot, p);
1105      }
1106    current_entry = UsedBlockListHead.Flink;
1107    while (current_entry != &UsedBlockListHead)
1108      {
1109        current = CONTAINING_RECORD(current_entry, BLOCK_HDR, Used.ListEntry);
1110        check_duplicates(current);
1111        current_entry = current_entry->Flink;
1112      }
1113 }
1114 #endif
1115
1116 #if 0
1117 STATIC VOID
1118 free_pages(BLOCK_HDR* blk)
1119 {
1120   ULONG start;
1121   ULONG end;
1122   ULONG i;
1123
1124   start = (ULONG)blk;
1125   end = (ULONG)blk + sizeof(BLOCK_HDR) + blk->Size;
1126
1127   /*
1128    * If the block doesn't contain a whole page then there is nothing to do
1129    */
1130   if (PAGE_ROUND_UP(start) >= PAGE_ROUND_DOWN(end))
1131     {
1132       return;
1133     }
1134 }
1135 #endif
1136
1137 static void remove_from_used_list(BLOCK_HDR* current)
1138 {
1139   RemoveEntryList(&current->Used.ListEntry);
1140   EiUsedNonPagedPool -= current->Size;
1141   EiNrUsedBlocks--;
1142 }
1143
1144 static void remove_from_free_list(BLOCK_HDR* current)
1145 {
1146   DPRINT("remove_from_free_list %d\n", current->Size);
1147
1148   avl_remove(&FreeBlockListRoot, &current->Free.Node, compare_node);
1149
1150   EiFreeNonPagedPool -= current->Size;
1151   EiNrFreeBlocks--;
1152   DPRINT("remove_from_free_list done\n");
1153 #ifdef DUMP_AVL
1154   DumpFreeBlockTree();
1155 #endif
1156 }
1157
1158 static void 
1159 add_to_free_list(BLOCK_HDR* blk)
1160 /*
1161  * FUNCTION: add the block to the free list (internal)
1162  */
1163 {
1164   BLOCK_HDR* current;
1165
1166   DPRINT("add_to_free_list (%d)\n", blk->Size);
1167
1168   EiNrFreeBlocks++;
1169   
1170   current = blk->previous;
1171   if (current && current->Magic == BLOCK_HDR_FREE_MAGIC)
1172     {
1173       remove_from_free_list(current);
1174       current->Size = current->Size + sizeof(BLOCK_HDR) + blk->Size;
1175       current->Magic = BLOCK_HDR_USED_MAGIC;
1176       memset(blk, 0xcc, sizeof(BLOCK_HDR));
1177       blk = current;
1178     }
1179
1180   current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1181   if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength &&
1182       current->Magic == BLOCK_HDR_FREE_MAGIC)
1183     {
1184       remove_from_free_list(current);
1185       blk->Size += sizeof(BLOCK_HDR) + current->Size;
1186       memset(current, 0xcc, sizeof(BLOCK_HDR));
1187       current = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1188       if ((PVOID)current < MiNonPagedPoolStart + MiNonPagedPoolLength)
1189         {
1190           current->previous = blk;
1191         }
1192     }
1193
1194   DPRINT("%d\n", blk->Size);
1195   blk->Magic = BLOCK_HDR_FREE_MAGIC;
1196   EiFreeNonPagedPool += blk->Size;
1197   avl_insert(&FreeBlockListRoot, &blk->Free.Node, compare_node);
1198
1199   DPRINT("add_to_free_list done\n");
1200 #ifdef DUMP_AVL
1201   DumpFreeBlockTree();
1202 #endif
1203 }
1204
1205 static void add_to_used_list(BLOCK_HDR* blk)
1206 /*
1207  * FUNCTION: add the block to the used list (internal)
1208  */
1209 {
1210   InsertHeadList(&UsedBlockListHead, &blk->Used.ListEntry);
1211   EiUsedNonPagedPool += blk->Size;
1212   EiNrUsedBlocks++;
1213 }
1214
1215 inline static void* block_to_address(BLOCK_HDR* blk)
1216 /*
1217  * FUNCTION: Translate a block header address to the corresponding block
1218  * address (internal)
1219  */
1220 {
1221         return ( (void *) ((char*)blk + sizeof(BLOCK_HDR)) );
1222 }
1223
1224 inline static BLOCK_HDR* address_to_block(void* addr)
1225 {
1226         return (BLOCK_HDR *)
1227                ( ((char*)addr) - sizeof(BLOCK_HDR) );
1228 }
1229
1230 static BOOLEAN
1231 grow_block(BLOCK_HDR* blk, PVOID end)
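/*
 * FUNCTION: Commit the pages backing the block, from the first page boundary
 * at or after the data area up to end: pages not yet marked in
 * MiNonPagedPoolAllocMap are allocated, mapped PAGE_READWRITE|PAGE_SYSTEM and
 * filled with 0xcc. Returns FALSE if a page cannot be allocated or mapped.
 */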
1232 {
1233    NTSTATUS Status;
1234    PHYSICAL_ADDRESS Page;
1235    BOOLEAN result = TRUE;
1236    ULONG index;
1237
1238    PVOID start = (PVOID)PAGE_ROUND_UP((ULONG)(blk + 1));
1239    end = (PVOID)PAGE_ROUND_UP(end);
1240    index = (ULONG)(start - MiNonPagedPoolStart) / PAGE_SIZE;
1241    while (start < end)
1242      {
1243        if (!(MiNonPagedPoolAllocMap[index / 32] & (1 << (index % 32))))
1244          {
1245            Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1246            if (!NT_SUCCESS(Status))
1247              {
1248                result = FALSE;
1249                break;
1250              }
1251            Status = MmCreateVirtualMapping(NULL,
1252                                            start,
1253                                            PAGE_READWRITE|PAGE_SYSTEM,
1254                                            Page,
1255                                            FALSE);
1256            if (!NT_SUCCESS(Status))
1257              {
1258                DbgPrint("Unable to create virtual mapping\n");
1259                MmReleasePageMemoryConsumer(MC_NPPOOL, Page);
1260                result = FALSE;
1261                break;
1262              }
1263            MiNonPagedPoolAllocMap[index / 32] |= (1 << (index % 32));
1264            memset(start, 0xcc, PAGE_SIZE);
1265            MiNonPagedPoolNrOfPages++;
1266          }
1267        index++;
1268        start += PAGE_SIZE;
1269      }
1270    return result;
1271 }
1272
1273 static BLOCK_HDR* get_block(unsigned int size, unsigned long alignment)
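/*
 * FUNCTION: Find a free block with at least size bytes of data area, using a
 * best-fit search of the free block tree. For aligned requests any leading
 * slack is split off into its own free block, and a sufficiently large
 * remainder after the allocation becomes a new free block. The backing pages
 * are committed with grow_block. Returns the header of the block, now on the
 * used list, or NULL on failure.
 */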
1274 {
1275    BLOCK_HDR *blk, *current, *previous = NULL, *next = NULL, *best = NULL;
1276    ULONG previous_size, current_size, next_size, new_size;
1277    PVOID end;
1278    PVOID addr, aligned_addr;
1279    PNODE p;
1280  
1281    DPRINT("get_block %d\n", size);
1282
1283    if (alignment == 0)
1284      {
1285        p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1286        if (p)
1287          { 
1288            best = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1289            addr = block_to_address(best);
1290          }
1291      }
1292    else
1293      {
1294        p = avl_find_equal_or_greater(FreeBlockListRoot, size, compare_value);
1295
1296        while(p)
1297          {
1298            current = CONTAINING_RECORD(p, BLOCK_HDR, Free.Node);
1299            addr = block_to_address(current);
1300            /* calculate first aligned address available within this block */
1301            aligned_addr = MM_ROUND_UP(addr, alignment);
1302            /* check to see if this address is already aligned */
1303            if (addr == aligned_addr)
1304              {
1305                if (current->Size >= size && 
1306                  (best == NULL || current->Size < best->Size))
1307                {
1308                  best = current;
1309                }
1310              }
1311            else
1312              {
1313                /* make sure there is enough room to create a free block out of the space
1314                 * skipped for alignment. If not, advance to the next aligned address that
1315                 * leaves room for a block header and see if we can allocate there...
1316                 */
1317                new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1318                if ((ULONG)aligned_addr - (ULONG)addr < sizeof(BLOCK_HDR))
1319                  {
1320                    /* not enough room for a free block header, add some more bytes */
1321                    aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
1322                    new_size = (ULONG)aligned_addr - (ULONG)addr + size;
1323                  }
1324                if (current->Size >= new_size && 
1325                  (best == NULL || current->Size < best->Size))
1326                  {
1327                    best = current;
1328                  }
1329              }
1330            if (best && current->Size >= size + alignment + 2 * sizeof(BLOCK_HDR))
1331              {
1332                break;
1333              }
1334            p = avl_get_next(FreeBlockListRoot, p);
1335
1336          }
1337      }
1338
1339    /*
1340     * We didn't find anything suitable at all.
1341     */
1342    if (best == NULL)
1343      {
1344        return NULL;
1345      }
1346
1347    current = best;
1348    current_size = current->Size;
1349
1350    if (alignment > 0)
1351      {
1352        addr = block_to_address(current);
1353        aligned_addr = MM_ROUND_UP(addr, alignment);
1354        if (addr != aligned_addr)
1355          {
1356            blk = address_to_block(aligned_addr);
1357            if (blk < current + 1)
1358              {
1359                aligned_addr = MM_ROUND_UP(block_to_address(current + 1), alignment);
1360                blk = address_to_block(aligned_addr);
1361              }
1362            /*
1363             * the block is not aligned; break off the preceding bytes into their own block...
1364             */
1365            previous = current;
1366            previous_size = (ULONG)blk - (ULONG)previous - sizeof(BLOCK_HDR);
1367            current = blk;
1368            current_size -= ((ULONG)current - (ULONG)previous);
1369          }
1370      }
1371
1372    end = (PVOID)current + sizeof(BLOCK_HDR) + size;
1373    
1374    if (current_size > size + sizeof(BLOCK_HDR) + 4)
1375      {
1376        /* split off a new free block after our block if more than 4 bytes of data area would remain for it */
1377        next = (BLOCK_HDR*)((ULONG)current + size + sizeof(BLOCK_HDR));
1378        next_size = current_size - size - sizeof(BLOCK_HDR);
1379        current_size = size;
1380        end = (PVOID)next + sizeof(BLOCK_HDR);
1381      }
1382
1383    if (previous)
1384      {
1385        remove_from_free_list(previous);
1386        if (!grow_block(previous, end))
1387          {
1388            add_to_free_list(previous);
1389            return NULL;
1390          }
1391        memset(current, 0, sizeof(BLOCK_HDR));
1392        current->Size = current_size;
1393        current->Magic = BLOCK_HDR_USED_MAGIC;
1394        current->previous = previous;
1395        previous->Size = previous_size;
1396        if (next == NULL)
1397          {
1398            blk = (BLOCK_HDR*)((PVOID)(current + 1) + current->Size);
1399            if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
1400              {
1401                blk->previous = current;
1402              }
1403          }
1404
1405        add_to_free_list(previous);
1406      }
1407    else
1408      {
1409        remove_from_free_list(current);
1410
1411        if (!grow_block(current, end))
1412          {
1413            add_to_free_list(current);
1414            return NULL;
1415          }
1416
1417        current->Magic = BLOCK_HDR_USED_MAGIC;
1418        if (next)
1419          {
1420            current->Size = current_size;
1421          }
1422      }
1423         
1424    if (next)
1425      {
1426        memset(next, 0, sizeof(BLOCK_HDR));
1427
1428        next->Size = next_size;
1429        next->Magic = BLOCK_HDR_FREE_MAGIC;
1430        next->previous = current;
1431        blk = (BLOCK_HDR*)((PVOID)(next + 1) + next->Size);
1432        if ((PVOID)blk < MiNonPagedPoolStart + MiNonPagedPoolLength)
1433          {
1434            blk->previous = next;
1435          }
1436        add_to_free_list(next);
1437      }
1438
1439    add_to_used_list(current);
1440    VALIDATE_POOL;
1441    return current;
1442 }
1443
1444 #endif /* not WHOLE_PAGE_ALLOCATIONS */
1445
1446 VOID STDCALL ExFreeNonPagedPool (PVOID block)
1447 /*
1448  * FUNCTION: Releases previously allocated memory
1449  * ARGUMENTS:
1450  *        block = block to free
1451  */
1452 {
1453 #ifdef WHOLE_PAGE_ALLOCATIONS /* WHOLE_PAGE_ALLOCATIONS */
1454    KIRQL oldIrql;
1455
1456    if (block == NULL)
1457      {
1458        return;
1459      }
1460
1461    DPRINT("freeing block %x\n",block);
1462    
1463    POOL_TRACE("ExFreePool(block %x), caller %x\n",block,
1464             ((PULONG)&block)[-1]);
1465    
1466    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1467
1468    ExFreeWholePageBlock(block);
1469    KeReleaseSpinLock(&MmNpoolLock, oldIrql);      
1470
1471 #else /* not WHOLE_PAGE_ALLOCATIONS */
1472
1473    BLOCK_HDR* blk=address_to_block(block);
1474    KIRQL oldIrql;
1475
1476    if (block == NULL)
1477      {
1478        return;
1479      }
1480
1481    DPRINT("freeing block %x\n",blk);
1482    
1483    POOL_TRACE("ExFreePool(block %x), size %d, caller %x\n",block,blk->Size,
1484             ((PULONG)&block)[-1]);
1485    
1486    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1487
1488    VALIDATE_POOL;
1489    
1490    if (blk->Magic != BLOCK_HDR_USED_MAGIC)
1491      {
1492        if (blk->Magic == BLOCK_HDR_FREE_MAGIC)
1493          {
1494            DbgPrint("ExFreePool of already freed address %x\n", block);
1495          }
1496        else
1497          {
1498            DbgPrint("ExFreePool of non-allocated address %x (magic %x)\n", 
1499                     block, blk->Magic);
1500          }
1501         KEBUGCHECK(0);
1502         return;
1503      }
1504    
1505    memset(block, 0xcc, blk->Size);
1506    
1507    remove_from_used_list(blk);
1508    blk->Magic = BLOCK_HDR_FREE_MAGIC;
1509    add_to_free_list(blk);
1510    VALIDATE_POOL;
1511    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1512
1513 #endif /* WHOLE_PAGE_ALLOCATIONS */
1514 }
1515
1516 PVOID STDCALL 
1517 ExAllocateNonPagedPoolWithTag(ULONG Type, ULONG Size, ULONG Tag, PVOID Caller)
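/*
 * FUNCTION: Allocate Size bytes from the nonpaged pool. Requests of a page or
 * more are page aligned; cache-aligned pool types are aligned to
 * MM_CACHE_LINE_SIZE. Returns NULL for zero-sized requests or when no block
 * can be found. In the normal (non whole-page) build the block records Tag
 * and Caller and the returned memory is zeroed.
 */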
1518 {
1519 #ifdef WHOLE_PAGE_ALLOCATIONS
1520    PVOID block;
1521    KIRQL oldIrql;
1522    
1523    POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1524               Size,Caller);
1525    
1526    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1527    
1528    /*
1529     * accommodate this useful idiom
1530     */
1531    if (Size == 0)
1532      {
1533         POOL_TRACE("= NULL\n");
1534         KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1535         return(NULL);
1536      }
1537
1538    block = ExAllocateWholePageBlock(Size);
1539    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1540    return(block);
1541
1542 #else /* not WHOLE_PAGE_ALLOCATIONS */
1543    PVOID block;
1544    BLOCK_HDR* best = NULL;
1545    KIRQL oldIrql;
1546    ULONG alignment;
1547    
1548    POOL_TRACE("ExAllocatePool(NumberOfBytes %d) caller %x ",
1549               Size,Caller);
1550    
1551    KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
1552
1553    VALIDATE_POOL;
1554
1555 #if 0
1556    /* after some allocations print the npaged pool stats */
1557 #ifdef TAG_STATISTICS_TRACKING
1558    {
1559        static ULONG counter = 0;
1560        if (counter++ % 100000 == 0)
1561        {
1562           MiDebugDumpNonPagedPoolStats(FALSE);   
1563        }
1564    }
1565 #endif
1566 #endif
1567    /*
1568     * accommodate this useful idiom
1569     */
1570    if (Size == 0)
1571      {
1572         POOL_TRACE("= NULL\n");
1573         KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1574         return(NULL);
1575      }
1576    /* Make the size dword aligned; this makes the block dword aligned */
1577    Size = ROUND_UP(Size, sizeof(DWORD));
1578
1579    if (Size >= PAGE_SIZE)
1580      {
1581        alignment = PAGE_SIZE;
1582      }
1583    else if (Type == NonPagedPoolCacheAligned ||
1584             Type == NonPagedPoolCacheAlignedMustS)
1585      {
1586        alignment = MM_CACHE_LINE_SIZE;
1587      }
1588    else
1589      {
1590        alignment = 0;
1591      }
1592
1593    best = get_block(Size, alignment);
1594    if (best == NULL)
1595      {
1596       KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1597       return NULL;
1598      }
1599    best->Used.Tag = Tag;
1600    best->Used.Caller = Caller;
1601    best->Used.Dumped = FALSE;
1602    best->Used.TagListEntry.Flink = best->Used.TagListEntry.Blink = NULL;
1603    KeReleaseSpinLock(&MmNpoolLock, oldIrql);
1604    block = block_to_address(best);
1605    memset(block,0,Size);
1606    return(block);
1607 #endif /* WHOLE_PAGE_ALLOCATIONS */
1608 }
1609
1610 #ifdef WHOLE_PAGE_ALLOCATIONS
1611
1612 PVOID STDCALL 
1613 ExAllocateWholePageBlock(ULONG Size)
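/*
 * FUNCTION: Reserve NrPages + 1 entries in the allocation bitmap but map only
 * NrPages pages, so the page following the block stays unmapped and acts as a
 * guard page. The returned address is placed so that the block ends exactly
 * at the guard page, turning overruns into faults.
 */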
1614 {
1615   PVOID Address;
1616   PHYSICAL_ADDRESS Page;
1617   ULONG i;
1618   ULONG NrPages;
1619   ULONG Base;
1620
1621   NrPages = ROUND_UP(Size, PAGE_SIZE) / PAGE_SIZE;
1622
1623   Base = RtlFindClearBitsAndSet(&NonPagedPoolAllocMap, NrPages + 1, NonPagedPoolAllocMapHint);
1624   if (Base == 0xffffffff)
1625     {
1626       DbgPrint("Out of non paged pool space.\n");
1627       KEBUGCHECK(0);
1628     }
1629   if (NonPagedPoolAllocMapHint == Base)
1630     {
1631       NonPagedPoolAllocMapHint += (NrPages + 1);
1632     }
1633   Address = MiNonPagedPoolStart + Base * PAGE_SIZE;
1634
1635   for (i = 0; i < NrPages; i++)
1636     {
1637       Page = MmAllocPage(MC_NPPOOL, 0);
1638       if (Page.QuadPart == 0LL)
1639         {
1640           KEBUGCHECK(0);
1641         }
1642       MmCreateVirtualMapping(NULL, 
1643                              Address + (i * PAGE_SIZE),
1644                              PAGE_READWRITE | PAGE_SYSTEM,
1645                              Page,
1646                              TRUE);
1647     }
1648
1649   MiCurrentNonPagedPoolLength = max(MiCurrentNonPagedPoolLength, (Base + NrPages) * PAGE_SIZE);
1650   return((PVOID)((PUCHAR)Address + (NrPages * PAGE_SIZE) - Size));
1651 }
1652
1653 VOID STDCALL
1654 ExFreeWholePageBlock(PVOID Addr)
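/*
 * FUNCTION: Release a block allocated by ExAllocateWholePageBlock: delete the
 * virtual mappings and clear the bitmap bits page by page until the unmapped
 * guard page is reached
 */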
1655 {
1656   ULONG Base;
1657   
1658   if (Addr < MiNonPagedPoolStart ||
1659       Addr >= (MiNonPagedPoolStart + MiCurrentNonPagedPoolLength))
1660     {
1661       DbgPrint("Block %x found outside pool area\n", Addr);
1662       KEBUGCHECK(0);
1663     }
1664   Base = (Addr - MiNonPagedPoolStart) / PAGE_SIZE;
1665   NonPagedPoolAllocMapHint = min(NonPagedPoolAllocMapHint, Base);
1666   while (MmIsPagePresent(NULL, Addr))
1667     {
1668       MmDeleteVirtualMapping(NULL,
1669                              Addr,
1670                              TRUE,
1671                              NULL,
1672                              NULL);
1673       RtlClearBits(&NonPagedPoolAllocMap, Base, 1);
1674       Base++;
1675       Addr += PAGE_SIZE;
1676     }  
1677 }
1678
1679 #endif /* WHOLE_PAGE_ALLOCATIONS */
1680
1681 VOID 
1682 MiInitializeNonPagedPool(VOID)
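/*
 * FUNCTION: Initialize the nonpaged pool: set up the pool lock, the used
 * block list and the tag hash table, commit the initial pages and create
 * either the whole-page allocation bitmap or a permanently used first block
 * holding the page bitmap followed by a single free block covering the rest
 * of the pool
 */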
1683 {
1684   NTSTATUS Status;
1685   PHYSICAL_ADDRESS Page;
1686   ULONG i;
1687   PVOID Address;
1688 #ifdef WHOLE_PAGE_ALLOCATIONS
1689 #else
1690   BLOCK_HDR* blk;
1691 #endif
1692 #ifdef TAG_STATISTICS_TRACKING
1693   for (i = 0; i < TAG_HASH_TABLE_SIZE; i++)
1694     {
1695       InitializeListHead(&tag_hash_table[i]);
1696     }
1697 #endif
1698    KeInitializeSpinLock(&MmNpoolLock);
1699    InitializeListHead(&UsedBlockListHead);
1700    InitializeListHead(&AddressListHead);
1701    FreeBlockListRoot = NULL;
1702 #ifdef WHOLE_PAGE_ALLOCATIONS
1703    NonPagedPoolAllocMapHint = PAGE_ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE / 8) / PAGE_SIZE;
1704    MiCurrentNonPagedPoolLength = NonPagedPoolAllocMapHint * PAGE_SIZE;
1705    Address = MiNonPagedPoolStart;
1706    for (i = 0; i < NonPagedPoolAllocMapHint; i++)
1707      {
1708        Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1709        if (!NT_SUCCESS(Status))
1710          {
1711            DbgPrint("Unable to allocate a page\n");
1712            KEBUGCHECK(0);
1713          }
1714        Status = MmCreateVirtualMapping(NULL,
1715                                        Address,
1716                                        PAGE_READWRITE|PAGE_SYSTEM,
1717                                        Page,
1718                                        FALSE);
1719        if (!NT_SUCCESS(Status))
1720          {
1721            DbgPrint("Unable to create virtual mapping\n");
1722            KEBUGCHECK(0);
1723          }
1724        Address += PAGE_SIZE;
1725      }
1726    RtlInitializeBitMap(&NonPagedPoolAllocMap, MiNonPagedPoolStart, MM_NONPAGED_POOL_SIZE / PAGE_SIZE);
1727    RtlClearAllBits(&NonPagedPoolAllocMap);  
1728    RtlSetBits(&NonPagedPoolAllocMap, 0, NonPagedPoolAllocMapHint);
1729 #else
1730    MiNonPagedPoolAllocMap = block_to_address((BLOCK_HDR*)MiNonPagedPoolStart);
1731    MiNonPagedPoolNrOfPages = PAGE_ROUND_UP(ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8 + 2 * sizeof(BLOCK_HDR));
1732    Address = MiNonPagedPoolStart;
1733    for (i = 0; i < MiNonPagedPoolNrOfPages; i++)
1734      {
1735        Status = MmRequestPageMemoryConsumer(MC_NPPOOL, FALSE, &Page);
1736        if (!NT_SUCCESS(Status))
1737          {
1738            DbgPrint("Unable to allocate a page\n");
1739            KEBUGCHECK(0);
1740          }
1741        Status = MmCreateVirtualMapping(NULL,
1742                                        Address,
1743                                        PAGE_READWRITE|PAGE_SYSTEM,
1744                                        Page,
1745                                        FALSE);
1746        if (!NT_SUCCESS(Status))
1747          {
1748            DbgPrint("Unable to create virtual mapping\n");
1749            KEBUGCHECK(0);
1750          }
1751        MiNonPagedPoolAllocMap[i / 32] |= (1 << (i % 32));
1752        Address += PAGE_SIZE;
1753      }
1754    /* the first block contains the non paged pool bitmap */
1755    blk = (BLOCK_HDR*)MiNonPagedPoolStart;
1756    memset(blk, 0, sizeof(BLOCK_HDR));
1757    blk->Magic = BLOCK_HDR_USED_MAGIC;
1758    blk->Size = ROUND_UP(MiNonPagedPoolLength / PAGE_SIZE, 32) / 8;
1759    blk->previous = NULL;
1760    blk->Used.Tag = 0xffffffff;
1761    blk->Used.Caller = 0;
1762    blk->Used.Dumped = FALSE;
1763    add_to_used_list(blk);
1764    /* the second block is the first free block */
1765    blk = (BLOCK_HDR*)((PVOID)(blk + 1) + blk->Size);
1766    memset(blk, 0, sizeof(BLOCK_HDR));
1767    memset(blk + 1, 0xcc, MiNonPagedPoolNrOfPages * PAGE_SIZE - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart));
1768    blk->Magic = BLOCK_HDR_FREE_MAGIC;
1769    blk->Size = MiNonPagedPoolLength - (ULONG)((PVOID)(blk + 1) - MiNonPagedPoolStart);
1770    blk->previous = (BLOCK_HDR*)MiNonPagedPoolStart;
1771    add_to_free_list(blk);
1772 #endif
1773 }
1774
1775 /* EOF */