#include <internal/mm.h>
#include <internal/ps.h>
#include <internal/pool.h>
+#include <ntos/minmax.h>
#define NDEBUG
#include <internal/debug.h>
#define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
static PVOID MiMdlMappingRegionBase = NULL;
-static PULONG MiMdlMappingRegionAllocMap = NULL;
-static ULONG MiMdlMappingRegionHighWaterMark = 0;
+static RTL_BITMAP MiMdlMappingRegionAllocMap;
+static ULONG MiMdlMappingRegionHint;
static KSPIN_LOCK MiMdlMappingRegionLock;
/* FUNCTIONS *****************************************************************/
{
MEMORY_AREA* Result;
NTSTATUS Status;
+ PVOID Buffer;
+ MiMdlMappingRegionHint = 0;
MiMdlMappingRegionBase = NULL;
MmLockAddressSpace(MmGetKernelAddressSpace());
MI_MDL_MAPPING_REGION_SIZE,
0,
&Result,
+ FALSE,
FALSE);
if (!NT_SUCCESS(Status))
{
MmUnlockAddressSpace(MmGetKernelAddressSpace());
- KeBugCheck(0);
+ KEBUGCHECK(0);
}
MmUnlockAddressSpace(MmGetKernelAddressSpace());
- MiMdlMappingRegionAllocMap =
- ExAllocatePool(NonPagedPool,
- MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 32));
- MiMdlMappingRegionHighWaterMark = 0;
+ Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
+ if (Buffer == NULL)
+ {
+ KEBUGCHECK(0);
+ }
+
+ RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
+ RtlClearAllBits(&MiMdlMappingRegionAllocMap);
+
KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
}
+/*
+ * @unimplemented
+ */
VOID STDCALL
MmUnlockPages(PMDL Mdl)
/*
Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
}
+/*
+ * @implemented
+ */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
PULONG MdlPages;
KIRQL oldIrql;
ULONG RegionSize;
+ ULONG StartingOffset;
DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
if (AccessMode == UserMode)
{
DPRINT1("MDL mapping to user-mode not yet handled.\n");
- KeBugCheck(0);
+ KEBUGCHECK(0);
}
/* Calculate the number of pages required. */
/* Allocate that number of pages from the mdl mapping region. */
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
- Base = MiMdlMappingRegionBase + MiMdlMappingRegionHighWaterMark * PAGE_SIZE;
- for (i = 0; i < RegionSize; i++)
- {
- ULONG Offset = MiMdlMappingRegionHighWaterMark + i;
- MiMdlMappingRegionAllocMap[Offset / 32] |= (1 << (Offset % 32));
- }
- MiMdlMappingRegionHighWaterMark += RegionSize;
+
+ StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, RegionSize, MiMdlMappingRegionHint);
+
+ if (StartingOffset == 0xffffffff)
+ {
+ DPRINT1("Out of MDL mapping space\n");
+ KEBUGCHECK(0);
+ }
+
+ Base = MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE;
+
+ if (MiMdlMappingRegionHint == StartingOffset)
+ {
+ MiMdlMappingRegionHint += RegionSize;
+ }
+
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
/* Set the virtual mappings for the MDL pages. */
if (!NT_SUCCESS(Status))
{
DbgPrint("Unable to create virtual mapping\n");
- KeBugCheck(0);
+ KEBUGCHECK(0);
}
}
return(Base + Mdl->ByteOffset);
}
+/*
+ * @implemented
+ */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
ULONG RegionSize;
ULONG Base;
- DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", Mdl, BaseAddress);
+ DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);
/*
* In this case, the MDL has the same system address as the base address
/* Calculate the number of pages we mapped. */
RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
+ BaseAddress -= Mdl->ByteOffset;
/* Unmap all the pages. */
for (i = 0; i < RegionSize; i++)
KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
/* Deallocate all the pages used. */
- Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase - Mdl->ByteOffset);
- Base = Base / PAGE_SIZE;
- for (i = 0; i < RegionSize; i++)
- {
- ULONG Offset = Base + i;
- MiMdlMappingRegionAllocMap[Offset / 32] &= ~(1 << (Offset % 32));
- }
- /* If all the pages below the high-water mark are free then move it down. */
- if ((Base + RegionSize) == MiMdlMappingRegionHighWaterMark)
- {
- MiMdlMappingRegionHighWaterMark = Base;
- }
+ Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase) / PAGE_SIZE;
+
+ RtlClearBits(&MiMdlMappingRegionAllocMap, Base, RegionSize);
+
+ MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base);
+
KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
/* Reset the MDL state. */
}
}
+/*
+ * @unimplemented
+ */
VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
KPROCESSOR_MODE AccessMode,
LOCK_OPERATION Operation)
ULONG NrPages;
NTSTATUS Status;
KPROCESSOR_MODE Mode;
- PEPROCESS CurrentProcess;
+ PEPROCESS CurrentProcess = NULL;
DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
return;
}
- CurrentProcess = PsGetCurrentProcess();
- if (Mdl->Process != CurrentProcess)
- {
- KeAttachProcess(Mdl->Process);
- }
if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
{
else
{
Mode = UserMode;
+ CurrentProcess = PsGetCurrentProcess();
+ if (Mdl->Process != CurrentProcess)
+ {
+ KeAttachProcess(Mdl->Process);
+ }
}
/*
MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
}
MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
- if (Mdl->Process != CurrentProcess)
+ if (Mode == UserMode && Mdl->Process != CurrentProcess)
{
KeDetachProcess();
}
}
+/*
+ * @implemented
+ */
ULONG STDCALL MmSizeOfMdl (PVOID Base,
ULONG Length)
/*
}
+/*
+ * @implemented
+ */
VOID STDCALL
MmBuildMdlForNonPagedPool (PMDL Mdl)
/*
* byte offset and length
*/
{
- int va;
+ ULONG va;
Mdl->MdlFlags = Mdl->MdlFlags |
(MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
}
+/*
+ * @implemented
+ */
PMDL STDCALL
MmCreateMdl (PMDL MemoryDescriptorList,
PVOID Base,
return(MemoryDescriptorList);
}
+/*
+ * @unimplemented
+ */
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*