3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
12 /* INCLUDES ****************************************************************/
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
18 #include <ntos/minmax.h>
21 #include <internal/debug.h>
23 /* GLOBALS *******************************************************************/
/* Pool tag ('MMDL') used for MDL allocations made by this module. */
25 #define TAG_MDL TAG('M', 'M', 'D', 'L')
/* Size (256 MB) of the kernel virtual-address window reserved for mapping
 * locked MDL pages; slots inside it are handed out at page granularity
 * through the allocation bitmap below. */
28 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base virtual address of the MDL mapping window (set at init time). */
30 static PVOID MiMdlMappingRegionBase = NULL;
/* One bit per page of the window: set bit = page slot in use. */
31 static RTL_BITMAP MiMdlMappingRegionAllocMap;
/* Bitmap index from which free-slot searches start (lowered on free). */
32 static ULONG MiMdlMappingRegionHint;
/* Protects the bitmap and the hint above. */
33 static KSPIN_LOCK MiMdlMappingRegionLock;
34 #endif /* LIBCAPTIVE */
/*
 * Initializes the MDL mapping machinery: reserves the kernel memory area
 * used as the mapping window, builds the page-allocation bitmap, and
 * initializes the spinlock that guards them.  Called once at MM startup.
 */
41 MmInitializeMdlImplementation(VOID)
47 MiMdlMappingRegionHint = 0;
48 MiMdlMappingRegionBase = NULL;
50 MmLockAddressSpace(MmGetKernelAddressSpace());
/* Reserve MI_MDL_MAPPING_REGION_SIZE bytes of kernel VA for MDL mappings;
 * MiMdlMappingRegionBase receives the chosen base address. */
51 Status = MmCreateMemoryArea(NULL,
52 MmGetKernelAddressSpace(),
53 MEMORY_AREA_MDL_MAPPING,
54 &MiMdlMappingRegionBase,
55 MI_MDL_MAPPING_REGION_SIZE,
59 if (!NT_SUCCESS(Status))
61 MmUnlockAddressSpace(MmGetKernelAddressSpace());
64 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* One bit per page of the region => region_size / (PAGE_SIZE * 8) bytes.
 * FIXME(review): the ExAllocatePool result is not NULL-checked before it is
 * handed to RtlInitializeBitMap — an allocation failure here would crash. */
66 Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
68 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
69 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
71 KeInitializeSpinLock(&MiMdlMappingRegionLock);
/*
 * Returns the physical page value recorded in the MDL's page array for the
 * page containing byte Offset.  The page array is stored immediately after
 * the MDL header, hence (PULONG)(Mdl + 1).  NOTE(review): the value returned
 * is a physical page number/address cast to PVOID, not a usable virtual
 * pointer — callers must treat it accordingly.
 */
75 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
79 MdlPages = (PULONG)(Mdl + 1);
81 return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
85 MmUnlockPages(PMDL Mdl)
87 * FUNCTION: Unlocks the physical pages described by a given MDL
89 * MemoryDescriptorList = MDL describing the buffer to be unlocked
90 * NOTES: The memory described by the specified MDL must have been locked
91 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
99 * FIXME: I don't know whether this right, but it looks sensible
/* Nonpaged-pool and I/O-page-read MDLs were never page-locked through
 * MmProbeAndLockPages, so there is nothing to undo for them. */
101 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
102 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
108 * FIXME: Seems sensible
/* Nothing to do if the pages were never locked in the first place. */
110 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
/* Page array lives immediately after the MDL header.  Unlock and drop the
 * reference taken by MmProbeAndLockPages for every page spanned by the
 * buffer (ByteOffset + ByteCount rounded up to whole pages). */
115 MdlPages = (PULONG)(Mdl + 1);
116 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
118 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
119 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
/* Mark the MDL as no longer holding locked pages. */
121 Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
125 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
127 * FUNCTION: Maps the physical pages described by a given MDL
129 * Mdl = Points to an MDL updated by MmProbeAndLockPages
130 * AccessMode = Specifies the portion of the address space to map the
132 * RETURNS: The base virtual address that maps the locked pages for the
133 * range described by the MDL
141 ULONG StartingOffset;
143 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
/* Nonpaged pool is already permanently mapped in system space — just hand
 * back the existing system VA instead of consuming mapping-region slots. */
145 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
147 return(Mdl->MappedSystemVa);
/* Only kernel-mode mappings are implemented. */
150 if (AccessMode == UserMode)
152 DPRINT1("MDL mapping to user-mode not yet handled.\n")\
;
156 /* Calculate the number of pages required. */
157 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
159 /* Allocate that number of pages from the mdl mapping region. */
160 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
/* Search for a contiguous run of RegionSize free page slots, starting at
 * the hint; on success the run is marked allocated atomically. */
162 StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, RegionSize, MiMdlMappingRegionHint);
/* 0xffffffff = no free run found.
 * NOTE(review): the spinlock release on this failure path is not visible in
 * this excerpt — confirm MiMdlMappingRegionLock is released before bailing
 * out, otherwise this path leaks the lock at raised IRQL. */
164 if (StartingOffset == 0xffffffff)
166 DPRINT1("Out of MDL mapping space\n");
170 Base = MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE;
/* If we consumed the slots the hint pointed at, advance the hint past them
 * so the next search does not rescan this run. */
172 if (MiMdlMappingRegionHint == StartingOffset)
174 MiMdlMappingRegionHint +=RegionSize;
177 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
179 /* Set the virtual mappings for the MDL pages. */
180 MdlPages = (PULONG)(Mdl + 1);
181 for (i = 0; i < RegionSize; i++)
184 Status = MmCreateVirtualMapping(NULL,
185 (PVOID)((ULONG)Base+(i*PAGE_SIZE)),
187 (LARGE_INTEGER)(LONGLONG)MdlPages[i],
189 if (!NT_SUCCESS(Status))
191 DbgPrint("Unable to create virtual mapping\n");
196 /* Mark the MDL has having being mapped. */
197 Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
/* The caller's view of the buffer starts ByteOffset into the first page. */
198 Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
199 return(Base + Mdl->ByteOffset);
203 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
205 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
207 * BaseAddress = Base virtual address to which the pages were mapped
208 * MemoryDescriptorList = MDL describing the mapped pages
216 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);
219 * In this case, the MDL has the same system address as the base address
220 * so there is no need to free it
222 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
227 /* Calculate the number of pages we mapped. */
228 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
/* MmMapLockedPages returned Base + ByteOffset; step back to the true
 * page-aligned base of the mapping before tearing it down. */
229 BaseAddress -= Mdl->ByteOffset;
231 /* Unmap all the pages. */
232 for (i = 0; i < RegionSize; i++)
234 MmDeleteVirtualMapping(NULL,
235 BaseAddress + (i * PAGE_SIZE),
241 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
242 /* Deallocate all the pages used. */
/* Convert the VA back into a bitmap slot index within the mapping region. */
243 Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase) / PAGE_SIZE;
245 RtlClearBits(&MiMdlMappingRegionAllocMap, Base, RegionSize);
/* Pull the search hint back so the freed slots are found again quickly. */
247 MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);
249 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
251 /* Reset the MDL state. */
252 Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
253 Mdl->MappedSystemVa = NULL;
/*
 * Fills the MDL's trailing page array from a caller-supplied array of
 * physical page values and marks the MDL as locked for an I/O page read.
 * The caller must supply at least enough entries to span
 * ByteOffset + ByteCount rounded up to whole pages.
 */
258 MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
263 Mdl->MdlFlags = Mdl->MdlFlags |
264 (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
/* Page array is stored immediately after the MDL header. */
266 MdlPages = (PULONG)(Mdl + 1);
268 for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE);i++)
270 MdlPages[i] = Pages[i];
274 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
275 KPROCESSOR_MODE AccessMode,
276 LOCK_OPERATION Operation)
278 * FUNCTION: Probes the specified pages, makes them resident and locks them
281 * AccessMode = Access at which to probe the buffer
282 * Operation = Operation to probe for
289 KPROCESSOR_MODE Mode;
290 PEPROCESS CurrentProcess;
292 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
295 * FIXME: Check behaviour against NT
/* Already locked — nothing further to do. */
297 if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
302 CurrentProcess = PsGetCurrentProcess();
/* Probing must happen in the context of the process that owns the buffer;
 * attach to it if we are running in a different one. */
304 if (Mdl->Process != CurrentProcess)
306 KeAttachProcess(Mdl->Process);
309 if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
322 MmLockAddressSpace(&Mdl->Process->AddressSpace);
323 MdlPages = (ULONG *)(Mdl + 1);
324 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
325 for (i = 0; i < NrPages; i++)
329 Address = Mdl->StartVa + (i*PAGE_SIZE);
/* Fault the page in if it is not resident. */
331 if (!MmIsPagePresent(NULL, Address))
333 Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
334 if (!NT_SUCCESS(Status))
/* Probe failed: roll back locks/references already taken on the
 * preceding i pages before raising. */
336 for (j = 0; j < i; j++)
338 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
339 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
341 ExRaiseStatus(Status);
346 MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
/* For write/modify operations, also force the page writable (copy-on-write
 * or read-only pages get an access fault resolved here). */
348 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
349 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
351 Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
352 if (!NT_SUCCESS(Status))
/* Same rollback as above on failure. */
354 for (j = 0; j < i; j++)
356 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
/* NOTE(review): the line below is the argument/tail of a MmDereferencePage
 * call whose opening line is not visible in this excerpt. */
358 (LARGE_INTEGER)(LONGLONG)MdlPages[j]);
360 ExRaiseStatus(Status);
/* Record the physical page and take a reference that MmUnlockPages will
 * later release. */
363 MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address).u.LowPart;
364 MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
366 MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
/* Detach again if we attached above. */
367 if (Mdl->Process != CurrentProcess)
371 Mdl->MdlFlags = Mdl->MdlFlags | MDL_PAGES_LOCKED;
374 #endif /* LIBCAPTIVE */
376 ULONG STDCALL MmSizeOfMdl (PVOID Base,
379 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
380 * the given address range
382 * Base = base virtual address
383 * Length = number of bytes to map
/* Header plus one ULONG page-array entry per page spanned by [Base,
 * Base+Length); ADDRESS_AND_SIZE_TO_SPAN_PAGES accounts for the page
 * offset of Base. */
388 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
390 return(sizeof(MDL)+(len*sizeof(ULONG)));
396 MmBuildMdlForNonPagedPool (PMDL Mdl)
398 * FUNCTION: Fills in the corresponding physical page array of a given
399 * MDL for a buffer in nonpaged system space
401 * Mdl = Points to an MDL that supplies a virtual address,
402 * byte offset and length
/* Nonpaged pool is always resident, so the pages count as locked without
 * any probing. */
406 Mdl->MdlFlags = Mdl->MdlFlags |
407 (MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
/* Walk every page-array slot the MDL has room for (derived from Mdl->Size)
 * and record the physical page backing each page of StartVa.  Note: despite
 * its name, 'va' is a page index, not an address. */
408 for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
410 ((PULONG)(Mdl + 1))[va] =
411 (MmGetPhysicalAddress(Mdl->StartVa + (va * PAGE_SIZE))).u.LowPart;
/* Nonpaged pool is identity-usable from system space, so the mapped VA is
 * simply the original buffer address. */
413 Mdl->MappedSystemVa = Mdl->StartVa + Mdl->ByteOffset;
416 #endif /* LIBCAPTIVE */
419 MmCreateMdl (PMDL MemoryDescriptorList,
423 * FUNCTION: Allocates and initializes an MDL
425 * MemoryDescriptorList = Points to MDL to initialize. If this is
426 * NULL then one is allocated
427 * Base = Base virtual address of the buffer
428 * Length = Length in bytes of the buffer
429 * RETURNS: A pointer to initialized MDL
/* Caller did not supply storage: allocate an MDL big enough for the header
 * plus the page array covering [Base, Base+Length). */
432 if (MemoryDescriptorList == NULL)
436 Size = MmSizeOfMdl(Base,Length);
437 MemoryDescriptorList =
438 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
439 if (MemoryDescriptorList == NULL)
/* Only the fixed-size header is zeroed; the trailing page array is left
 * uninitialized until the MDL is populated. */
446 RtlZeroMemory(MemoryDescriptorList,sizeof(*MemoryDescriptorList));
447 #endif /* LIBCAPTIVE */
448 MmInitializeMdl(MemoryDescriptorList,Base,Length);
450 return(MemoryDescriptorList);
/* Unimplemented stub. */
456 MmMapMemoryDumpMdl (PVOID Unknown0)
458 * FIXME: Has something to do with crash dumps. Do we want to implement
465 #endif /* LIBCAPTIVE */