+static void* grow_kernel_pool(unsigned int size, ULONG Tag, PVOID Caller)
+/*
+ * FUNCTION: Grow the executive (nonpaged) heap to accommodate a block of
+ * at least 'size' bytes, then try to allocate that block from the newly
+ * added region.
+ * ARGUMENTS:
+ *   size   = Minimum usable payload size of the block required
+ *   Tag    = Pool tag passed through to take_block for the allocation
+ *   Caller = Caller address passed through to take_block (debug tracking)
+ * RETURNS: Pointer to the usable area of the allocated block, or NULL if
+ *          no suitable block could be produced even after growing.
+ * NOTE(review): callers appear to invoke this without MmNpoolLock held;
+ * the lock is acquired internally below — confirm against call sites.
+ */
+{
+ /* Number of whole pages needed for the payload plus its block header. */
+ ULONG nr_pages = PAGE_ROUND_UP(size + sizeof(BLOCK_HDR)) / PAGE_SIZE;
+ ULONG start;
+ BLOCK_HDR* blk=NULL;
+ /* NOTE(review): 'i' is signed but compared against the unsigned
+  * 'nr_pages' in the loop below — harmless for sane sizes, but an
+  * unsigned index would be cleaner. */
+ int i;
+ KIRQL oldIrql;
+ NTSTATUS Status;
+ PVOID block = NULL;
+
+ /* For large requests add one slack page — presumably so the region
+  * still has room for a header plus alignment; TODO confirm rationale. */
+ if (size >= PAGE_SIZE)
+ {
+ nr_pages++;
+ }
+
+ /* Reserve a contiguous virtual address range in the nonpaged pool area.
+  * NOTE(review): the return value is not checked; if the region
+  * allocation can fail this will fault in the mapping loop below. */
+ start = (ULONG)MiAllocNonPagedPoolRegion(nr_pages);
+
+ DPRINT("growing heap for block size %d, ",size);
+ DPRINT("start %x\n",start);
+
+ /* Commit a physical page and map it at each page of the new region. */
+ for (i=0;i<nr_pages;i++)
+ {
+ PHYSICAL_ADDRESS Page;
+ /* FIXME: Check whether we can really wait here. */
+ Status = MmRequestPageMemoryConsumer(MC_NPPOOL, TRUE, &Page);
+ if (!NT_SUCCESS(Status))
+ {
+ /* Out of physical memory for the nonpaged pool is fatal.
+  * NOTE(review): the return below is unreachable if KeBugCheck
+  * does not return; it likely only silences a compiler warning. */
+ KeBugCheck(0);
+ return(NULL);
+ }
+ Status = MmCreateVirtualMapping(NULL,
+ (PVOID)(start + (i*PAGE_SIZE)),
+ PAGE_READWRITE|PAGE_SYSTEM,
+ Page,
+ FALSE);
+ if (!NT_SUCCESS(Status))
+ {
+ DbgPrint("Unable to create virtual mapping\n");
+ KeBugCheck(0);
+ }
+ }
+
+ /* Turn the whole new region into a single free block: header at the
+  * start, Size covering everything after the header. */
+ blk = (struct _BLOCK_HDR *)start;
+ memset(blk, 0, sizeof(BLOCK_HDR));
+ blk->Size = (nr_pages * PAGE_SIZE) - sizeof(BLOCK_HDR);
+ blk->Magic = BLOCK_HDR_FREE_MAGIC;
+ /* Poison the payload with 0xcc so use of uninitialized pool memory is
+  * easier to catch in debugging. */
+ memset(block_to_address(blk), 0xcc, blk->Size);
+
+ /* Under the pool lock: publish the new free block, coalesce it with any
+  * adjacent free blocks, then retry the lookup that forced the grow. */
+ KeAcquireSpinLock(&MmNpoolLock, &oldIrql);
+ add_to_free_list(blk);
+ merge_free_block(blk);
+
+ blk = lookup_block(size);
+ if (blk)
+ {
+ block = take_block(blk, size, Tag, Caller);
+ VALIDATE_POOL;
+ }
+ KeReleaseSpinLock(&MmNpoolLock, oldIrql);
+ /* Still no block after growing by nr_pages — leave a debug breadcrumb. */
+ if (block == NULL)
+ {
+ CHECKPOINT1;
+ }
+ return block;
+}
+