3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
12 /* INCLUDES ****************************************************************/
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
20 #include <internal/debug.h>
22 /* GLOBALS *******************************************************************/
/* Pool tag used when allocating MDL structures from nonpaged pool. */
24 #define TAG_MDL TAG('M', 'M', 'D', 'L')
/* Size in bytes of the kernel VA region reserved for mapping MDL pages (256MB). */
26 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base virtual address of the MDL mapping region; set during
   MmInitializeMdlImplementation. */
28 static PVOID MiMdlMappingRegionBase = NULL;
/* Bitmap with one bit per page of the mapping region, tracking which
   pages are currently allocated to a mapping. */
29 static PULONG MiMdlMappingRegionAllocMap = NULL;
/* Page index from which new mappings are handed out.
   NOTE(review): freed ranges below the mark are only reclaimed when the
   freed range ends exactly at the mark (see MmUnmapLockedPages), so the
   region can fragment over time -- confirm against intended design. */
30 static ULONG MiMdlMappingRegionHighWaterMark = 0;
/* Protects MiMdlMappingRegionAllocMap and the high-water mark. */
31 static KSPIN_LOCK MiMdlMappingRegionLock;
33 /* FUNCTIONS *****************************************************************/
/*
 * Reserves the kernel virtual address region used by MmMapLockedPages and
 * initializes the allocation bitmap and spin lock that manage it.
 * Called once at memory-manager startup.
 */
36 MmInitializeMdlImplementation(VOID)
41 MiMdlMappingRegionBase = NULL;
/* Create a dedicated memory area in the kernel address space; the base
   address is chosen by MmCreateMemoryArea and returned through
   MiMdlMappingRegionBase. */
43 MmLockAddressSpace(MmGetKernelAddressSpace());
44 Status = MmCreateMemoryArea(NULL,
45 MmGetKernelAddressSpace(),
46 MEMORY_AREA_MDL_MAPPING,
47 &MiMdlMappingRegionBase,
48 MI_MDL_MAPPING_REGION_SIZE,
52 if (!NT_SUCCESS(Status))
54 MmUnlockAddressSpace(MmGetKernelAddressSpace());
57 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* One bit per page of the region => region_size / (PAGE_SIZE * 32) ULONGs.
   NOTE(review): the allocation result is not checked here; a failure
   would fault on first use of the bitmap -- confirm and add a check. */
59 MiMdlMappingRegionAllocMap =
60 ExAllocatePool(NonPagedPool,
61 MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 32));
62 MiMdlMappingRegionHighWaterMark = 0;
63 KeInitializeSpinLock(&MiMdlMappingRegionLock);
/*
 * Returns the entry of the MDL's physical page array corresponding to the
 * given byte Offset into the described buffer.  The page array is stored
 * immediately after the MDL header (Mdl + 1).
 */
67 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
71 MdlPages = (PULONG)(Mdl + 1);
73 return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
77 MmUnlockPages(PMDL Mdl)
79 * FUNCTION: Unlocks the physical pages described by a given MDL
81 * MemoryDescriptorList = MDL describing the buffer to be unlocked
82 * NOTES: The memory described by the specified MDL must have been locked
83 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
/* Nonpaged-pool buffers and I/O read pages were never lock-counted by
   MmProbeAndLockPages, so there is nothing to undo for them. */
91 * FIXME: I don't know whether this right, but it looks sensible
93 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
94 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
/* Ignore MDLs whose pages were never locked in the first place. */
100 * FIXME: Seems sensible
102 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
/* Walk the page array that follows the MDL header and release one lock
   and one reference per page spanned by ByteOffset..ByteOffset+ByteCount. */
107 MdlPages = (PULONG)(Mdl + 1);
108 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
110 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
111 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
/* The MDL no longer holds locked pages. */
113 Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
117 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
119 * FUNCTION: Maps the physical pages described by a given MDL
121 * Mdl = Points to an MDL updated by MmProbeAndLockPages
122 * AccessMode = Specifies the portion of the address space to map the
124 * RETURNS: The base virtual address that maps the locked pages for the
125 * range described by the MDL
134 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
/* Nonpaged pool already has a permanent system mapping; reuse it. */
136 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
138 return(Mdl->MappedSystemVa);
/* Only kernel-mode mappings are implemented in this version. */
141 if (AccessMode == UserMode)
143 DPRINT1("MDL mapping to user-mode not yet handled.\n");
147 /* Calculate the number of pages required. */
148 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
150 /* Allocate that number of pages from the mdl mapping region. */
/* NOTE(review): there is no check that HighWaterMark + RegionSize stays
   within MI_MDL_MAPPING_REGION_SIZE, nor any search for freed slots below
   the mark -- exhaustion would silently map past the region. Confirm. */
151 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
152 Base = MiMdlMappingRegionBase + MiMdlMappingRegionHighWaterMark * PAGE_SIZE;
153 for (i = 0; i < RegionSize; i++)
155 ULONG Offset = MiMdlMappingRegionHighWaterMark + i;
156 MiMdlMappingRegionAllocMap[Offset / 32] |= (1 << (Offset % 32));
158 MiMdlMappingRegionHighWaterMark += RegionSize;
159 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
161 /* Set the virtual mappings for the MDL pages. */
162 MdlPages = (PULONG)(Mdl + 1);
163 for (i = 0; i < RegionSize; i++)
166 Status = MmCreateVirtualMapping(NULL,
167 (PVOID)((ULONG)Base+(i*PAGE_SIZE)),
169 (LARGE_INTEGER)(LONGLONG)MdlPages[i],
171 if (!NT_SUCCESS(Status))
173 DbgPrint("Unable to create virtual mapping\n");
178 /* Mark the MDL has having being mapped. */
179 Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
/* The returned VA preserves the buffer's offset within its first page. */
180 Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
181 return(Base + Mdl->ByteOffset);
185 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
187 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
189 * BaseAddress = Base virtual address to which the pages were mapped
190 * MemoryDescriptorList = MDL describing the mapped pages
198 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", Mdl, BaseAddress);
201 * In this case, the MDL has the same system address as the base address
202 * so there is no need to free it
204 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
209 /* Calculate the number of pages we mapped. */
210 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
212 /* Unmap all the pages. */
213 for (i = 0; i < RegionSize; i++)
215 MmDeleteVirtualMapping(NULL,
216 BaseAddress + (i * PAGE_SIZE),
222 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
223 /* Deallocate all the pages used. */
/* Convert the mapped VA back to a page index within the mapping region
   (undoing the ByteOffset added by MmMapLockedPages). */
224 Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase - Mdl->ByteOffset);
225 Base = Base / PAGE_SIZE;
226 for (i = 0; i < RegionSize; i++)
228 ULONG Offset = Base + i;
229 MiMdlMappingRegionAllocMap[Offset / 32] &= ~(1 << (Offset % 32));
231 /* If all the pages below the high-water mark are free then move it down. */
/* NOTE(review): only a free that ends exactly at the mark lowers it;
   interior frees leave the mark untouched (potential fragmentation). */
232 if ((Base + RegionSize) == MiMdlMappingRegionHighWaterMark)
234 MiMdlMappingRegionHighWaterMark = Base;
236 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
238 /* Reset the MDL state. */
239 Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
240 Mdl->MappedSystemVa = NULL;
/*
 * Fills the MDL's physical page array directly from a caller-supplied
 * array of page numbers and marks the MDL as locked for an I/O page read.
 * The caller must supply enough entries to cover ByteOffset + ByteCount.
 */
245 MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
250 Mdl->MdlFlags = Mdl->MdlFlags |
251 (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
/* Copy one page entry per page spanned by the described buffer into the
   array that follows the MDL header. */
253 MdlPages = (PULONG)(Mdl + 1);
255 for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE);i++)
257 MdlPages[i] = Pages[i];
261 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
262 KPROCESSOR_MODE AccessMode,
263 LOCK_OPERATION Operation)
265 * FUNCTION: Probes the specified pages, makes them resident and locks them
268 * AccessMode = Access at which to probe the buffer
269 * Operation = Operation to probe for
276 KPROCESSOR_MODE Mode;
277 PEPROCESS CurrentProcess;
279 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
/* Already-locked MDLs are left alone rather than double-locked. */
282 * FIXME: Check behaviour against NT
284 if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
/* Fault handling and page queries below operate on the current address
   space, so attach to the MDL's owning process if it is not ours. */
289 CurrentProcess = PsGetCurrentProcess();
291 if (Mdl->Process != CurrentProcess)
293 KeAttachProcess(Mdl->Process);
296 if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
309 MmLockAddressSpace(&Mdl->Process->AddressSpace);
310 MdlPages = (ULONG *)(Mdl + 1);
311 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
312 for (i = 0; i < NrPages; i++)
316 Address = Mdl->StartVa + (i*PAGE_SIZE);
/* Fault the page in if it is not resident; on failure, undo the locks
   already taken on earlier pages before raising. */
318 if (!MmIsPagePresent(NULL, Address))
320 Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
321 if (!NT_SUCCESS(Status))
323 for (j = 0; j < i; j++)
325 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
326 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
328 ExRaiseStatus(Status);
333 MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
/* For write/modify operations, the page must actually be writable;
   simulate a write fault (e.g. to break copy-on-write) if it is not. */
335 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
336 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
338 Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
339 if (!NT_SUCCESS(Status))
341 for (j = 0; j < i; j++)
343 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
345 (LARGE_INTEGER)(LONGLONG)MdlPages[j]);
347 ExRaiseStatus(Status);
/* Record the physical page and pin it with an extra reference. */
350 MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address).u.LowPart;
351 MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
353 MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
354 if (Mdl->Process != CurrentProcess)
358 Mdl->MdlFlags = Mdl->MdlFlags | MDL_PAGES_LOCKED;
362 ULONG STDCALL MmSizeOfMdl (PVOID Base,
365 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
366 * the given address range
368 * Base = base virtual address
369 * Length = number of bytes to map
/* Header plus one ULONG page entry per page the range spans (the span
   accounts for Base's offset within its first page). */
374 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
376 return(sizeof(MDL)+(len*sizeof(ULONG)));
381 MmBuildMdlForNonPagedPool (PMDL Mdl)
383 * FUNCTION: Fills in the corresponding physical page array of a given
384 * MDL for a buffer in nonpaged system space
386 * Mdl = Points to an MDL that supplies a virtual address,
387 * byte offset and length
/* Nonpaged pool is permanently resident, so the pages count as locked
   and the physical addresses can be read straight from the mappings. */
391 Mdl->MdlFlags = Mdl->MdlFlags |
392 (MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
/* Fill every page-array slot the MDL was sized for (Mdl->Size includes
   the header plus the page entries). */
393 for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
395 ((PULONG)(Mdl + 1))[va] =
396 (MmGetPhysicalAddress(Mdl->StartVa + (va * PAGE_SIZE))).u.LowPart;
/* Nonpaged pool already lives in system space; no extra mapping needed. */
398 Mdl->MappedSystemVa = Mdl->StartVa + Mdl->ByteOffset;
403 MmCreateMdl (PMDL MemoryDescriptorList,
407 * FUNCTION: Allocates and initalizes an MDL
409 * MemoryDescriptorList = Points to MDL to initalize. If this is
410 * NULL then one is allocated
411 * Base = Base virtual address of the buffer
412 * Length = Length in bytes of the buffer
413 * RETURNS: A pointer to initalized MDL
/* Allocate the MDL from nonpaged pool when the caller did not supply
   one; the size covers the header plus the page array for Base/Length. */
416 if (MemoryDescriptorList == NULL)
420 Size = MmSizeOfMdl(Base,Length);
421 MemoryDescriptorList =
422 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
423 if (MemoryDescriptorList == NULL)
/* Fill in the header fields (StartVa, ByteOffset, ByteCount, Size). */
429 MmInitializeMdl(MemoryDescriptorList,Base,Length);
431 return(MemoryDescriptorList);
435 MmMapMemoryDumpMdl (PVOID Unknown0)
437 * FIXME: Has something to do with crash dumps. Do we want to implement