3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
12 /* INCLUDES ****************************************************************/
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
18 #include <ntos/minmax.h>
21 #include <internal/debug.h>
23 /* GLOBALS *******************************************************************/

/* Pool tag ('MMDL') used for MDLs allocated by MmCreateMdl. */
25 #define TAG_MDL TAG('M', 'M', 'D', 'L')

/* Size (256 MB) of the kernel virtual-address region reserved for
   mapping MDL-described pages (used by MmMapLockedPages). */
27 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)

/* Base of the reserved MDL mapping region; set once in
   MmInitializeMdlImplementation. */
29 static PVOID MiMdlMappingRegionBase = NULL;
/* One bit per page of the mapping region; a set bit means the page slot
   is in use. */
30 static RTL_BITMAP MiMdlMappingRegionAllocMap;
/* Search-start hint for the bitmap: lowest page index that may be free. */
31 static ULONG MiMdlMappingRegionHint;
/* Protects MiMdlMappingRegionAllocMap and MiMdlMappingRegionHint. */
32 static KSPIN_LOCK MiMdlMappingRegionLock;
34 /* FUNCTIONS *****************************************************************/
/*
 * Initializes the MDL mapping facility: reserves a dedicated kernel
 * memory area of MI_MDL_MAPPING_REGION_SIZE bytes, allocates and clears
 * the per-page allocation bitmap, and initializes the protecting
 * spinlock.
 * NOTE(review): the return type, braces, local declarations (Status,
 * Buffer) and trailing MmCreateMemoryArea arguments are elided from this
 * excerpt; the error path after the failed-creation check is also not
 * visible.
 */
37 MmInitializeMdlImplementation(VOID)
43 MiMdlMappingRegionHint = 0;
44 MiMdlMappingRegionBase = NULL;
/* Reserve the mapping region inside the kernel address space. */
46 MmLockAddressSpace(MmGetKernelAddressSpace());
47 Status = MmCreateMemoryArea(NULL,
48 MmGetKernelAddressSpace(),
49 MEMORY_AREA_MDL_MAPPING,
50 &MiMdlMappingRegionBase,
51 MI_MDL_MAPPING_REGION_SIZE,
55 if (!NT_SUCCESS(Status))
57 MmUnlockAddressSpace(MmGetKernelAddressSpace());
60 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* One bit per page: region-size / PAGE_SIZE bits, i.e. / (PAGE_SIZE * 8)
   bytes of bitmap storage.
   NOTE(review): the ExAllocatePool result is not checked in the visible
   lines — confirm a NULL check exists in the elided code. */
62 Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
64 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
65 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
67 KeInitializeSpinLock(&MiMdlMappingRegionLock);
/*
 * Returns the entry from the MDL's page array that corresponds to the
 * given byte Offset into the described buffer.
 * The page array is stored immediately after the MDL header (Mdl + 1).
 * NOTE(review): return type and braces are elided from this excerpt;
 * the entry returned here holds a physical page number/address as
 * stored by MmProbeAndLockPages or MmBuildMdlFromPages.
 */
71 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
75 MdlPages = (PULONG)(Mdl + 1);
77 return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
81 MmUnlockPages(PMDL Mdl)
83 * FUNCTION: Unlocks the physical pages described by a given MDL
85 * MemoryDescriptorList = MDL describing the buffer to be unlocked
86 * NOTES: The memory described by the specified MDL must have been locked
87 * previously by a call to MmProbeAndLockPages. As the pages are unlocked, the
/* Skip MDLs whose pages were never individually locked/referenced:
   nonpaged-pool buffers and I/O page reads. */
95 * FIXME: I don't know whether this is right, but it looks sensible
97 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
98 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
/* Nothing to do if the pages are not currently locked. */
104 * FIXME: Seems sensible
106 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
/* Walk the page array (stored after the MDL header) and drop the lock
   and reference taken by MmProbeAndLockPages on each page. */
111 MdlPages = (PULONG)(Mdl + 1);
112 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
114 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
115 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
/* Clear the locked flag now that every page has been released. */
117 Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
121 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
123 * FUNCTION: Maps the physical pages described by a given MDL
125 * Mdl = Points to an MDL updated by MmProbeAndLockPages
126 * AccessMode = Specifies the portion of the address space to map the
128 * RETURNS: The base virtual address that maps the locked pages for the
129 * range described by the MDL
137 ULONG StartingOffset;
139 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
/* Nonpaged-pool buffers are already permanently mapped in system space;
   just return the existing system VA. */
141 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
143 return(Mdl->MappedSystemVa);
/* Only kernel-mode mapping is implemented here. */
146 if (AccessMode == UserMode)
148 DPRINT1("MDL mapping to user-mode not yet handled.\n");
152 /* Calculate the number of pages required. */
153 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
155 /* Allocate that number of pages from the mdl mapping region. */
156 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
158 StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, RegionSize, MiMdlMappingRegionHint);
/* RtlFindClearBitsAndSet returns 0xffffffff when no run of RegionSize
   clear bits exists.
   NOTE(review): the code handling this failure is elided from this
   excerpt — confirm it releases MiMdlMappingRegionLock (still held
   here) before returning or bugchecking. */
160 if (StartingOffset == 0xffffffff)
162 DPRINT1("Out of MDL mapping space\n");
166 Base = MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE;
/* If we allocated exactly at the hint, advance it past our allocation
   so the next search starts beyond us. */
168 if (MiMdlMappingRegionHint == StartingOffset)
170 MiMdlMappingRegionHint +=RegionSize;
173 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
175 /* Set the virtual mappings for the MDL pages. */
176 MdlPages = (PULONG)(Mdl + 1);
177 for (i = 0; i < RegionSize; i++)
180 Status = MmCreateVirtualMapping(NULL,
181 (PVOID)((ULONG)Base+(i*PAGE_SIZE)),
183 (LARGE_INTEGER)(LONGLONG)MdlPages[i],
185 if (!NT_SUCCESS(Status))
187 DbgPrint("Unable to create virtual mapping\n");
192 /* Mark the MDL as having been mapped. */
193 Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
194 Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
/* The caller's data starts ByteOffset bytes into the first mapped page. */
195 return(Base + Mdl->ByteOffset);
199 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
201 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
203 * BaseAddress = Base virtual address to which the pages were mapped
204 * MemoryDescriptorList = MDL describing the mapped pages
212 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);
215 * In this case, the MDL has the same system address as the base address
216 * so there is no need to free it
218 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
223 /* Calculate the number of pages we mapped. */
224 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
/* MmMapLockedPages returned Base + ByteOffset; step back to the
   page-aligned base of the mapping. */
225 BaseAddress -= Mdl->ByteOffset;
227 /* Unmap all the pages. */
228 for (i = 0; i < RegionSize; i++)
230 MmDeleteVirtualMapping(NULL,
231 BaseAddress + (i * PAGE_SIZE),
237 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
238 /* Deallocate all the pages used. */
239 Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase) / PAGE_SIZE;
241 RtlClearBits(&MiMdlMappingRegionAllocMap, Base, RegionSize);
/* Pull the search hint back so freed slots are found by the next
   allocation. */
243 MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);
245 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
247 /* Reset the MDL state. */
248 Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
249 Mdl->MappedSystemVa = NULL;
/*
 * Fills the MDL's page array (stored after the MDL header) from the
 * caller-supplied Pages array, one entry per page spanned by
 * ByteOffset + ByteCount, and marks the MDL as locked / I/O page read.
 * NOTE(review): braces and the declaration of i are elided from this
 * excerpt; no validation of the Pages array length is visible.
 */
254 MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
259 Mdl->MdlFlags = Mdl->MdlFlags |
260 (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
262 MdlPages = (PULONG)(Mdl + 1);
264 for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE);i++)
266 MdlPages[i] = Pages[i];
270 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
271 KPROCESSOR_MODE AccessMode,
272 LOCK_OPERATION Operation)
274 * FUNCTION: Probes the specified pages, makes them resident and locks them
277 * AccessMode = Access at which to probe the buffer
278 * Operation = Operation to probe for
285 KPROCESSOR_MODE Mode;
286 PEPROCESS CurrentProcess;
288 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
/* Already locked: nothing to do (the action taken here is elided). */
291 * FIXME: Check behaviour against NT
293 if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
/* The MDL's buffer lives in Mdl->Process' address space; attach to that
   process if it is not the current one so the VAs are valid. */
298 CurrentProcess = PsGetCurrentProcess();
300 if (Mdl->Process != CurrentProcess)
302 KeAttachProcess(Mdl->Process);
/* Kernel addresses are handled differently (handling elided from this
   excerpt). NOTE(review): Mode is declared above but no assignment is
   visible here — confirm it is set in the elided lines before the
   fault calls below use it. */
305 if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
318 MmLockAddressSpace(&Mdl->Process->AddressSpace);
319 MdlPages = (ULONG *)(Mdl + 1);
320 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
321 for (i = 0; i < NrPages; i++)
325 Address = Mdl->StartVa + (i*PAGE_SIZE);
/* Fault the page in if it is not resident; on failure, undo the
   lock/reference taken on every page processed so far, then raise. */
327 if (!MmIsPagePresent(NULL, Address))
329 Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
330 if (!NT_SUCCESS(Status))
332 for (j = 0; j < i; j++)
334 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
335 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
337 ExRaiseStatus(Status);
/* Page is resident: pin it. */
342 MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
/* For write/modify operations, force the page writable via an access
   fault if it is currently read-only; unwind and raise on failure. */
344 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
345 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
347 Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
348 if (!NT_SUCCESS(Status))
350 for (j = 0; j < i; j++)
352 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
/* NOTE(review): the line below is the argument of an elided
   MmDereferencePage(...) call (mirrors the unwind loop above). */
354 (LARGE_INTEGER)(LONGLONG)MdlPages[j]);
356 ExRaiseStatus(Status);
/* Record the page's physical page number and take a reference on it. */
359 MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address).u.LowPart;
360 MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
362 MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
/* Detach again if we attached above (KeDetachProcess call elided). */
363 if (Mdl->Process != CurrentProcess)
367 Mdl->MdlFlags = Mdl->MdlFlags | MDL_PAGES_LOCKED;
371 ULONG STDCALL MmSizeOfMdl (PVOID Base,
374 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
375 * the given address range
377 * Base = base virtual address
378 * Length = number of bytes to map
/* Header plus one ULONG page-array entry per page the range spans
   (ADDRESS_AND_SIZE_TO_SPAN_PAGES accounts for the page offset of Base). */
383 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
385 return(sizeof(MDL)+(len*sizeof(ULONG)));
390 MmBuildMdlForNonPagedPool (PMDL Mdl)
392 * FUNCTION: Fills in the corresponding physical page array of a given
393 * MDL for a buffer in nonpaged system space
395 * Mdl = Points to an MDL that supplies a virtual address,
396 * byte offset and length
/* Nonpaged pool is always resident, so the pages count as locked. */
400 Mdl->MdlFlags = Mdl->MdlFlags |
401 (MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
/* Fill every slot of the page array; the array length is recovered from
   Mdl->Size as set up by MmSizeOfMdl/MmInitializeMdl. */
402 for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
404 ((PULONG)(Mdl + 1))[va] =
405 (MmGetPhysicalAddress(Mdl->StartVa + (va * PAGE_SIZE))).u.LowPart;
/* Nonpaged pool is already mapped in system space, so the system VA is
   just the buffer's own address. */
407 Mdl->MappedSystemVa = Mdl->StartVa + Mdl->ByteOffset;
412 MmCreateMdl (PMDL MemoryDescriptorList,
416 * FUNCTION: Allocates and initializes an MDL
418 * MemoryDescriptorList = Points to MDL to initialize. If this is
419 * NULL then one is allocated
420 * Base = Base virtual address of the buffer
421 * Length = Length in bytes of the buffer
422 * RETURNS: A pointer to initialized MDL
/* Caller did not supply storage: allocate a correctly sized MDL from
   nonpaged pool (freed by the caller; the failure path after the second
   NULL check is elided from this excerpt). */
425 if (MemoryDescriptorList == NULL)
429 Size = MmSizeOfMdl(Base,Length);
430 MemoryDescriptorList =
431 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
432 if (MemoryDescriptorList == NULL)
438 MmInitializeMdl(MemoryDescriptorList,Base,Length);
440 return(MemoryDescriptorList);
444 MmMapMemoryDumpMdl (PVOID Unknown0)
446 * FIXME: Has something to do with crash dumps. Do we want to implement