Figure it out yourself:

Code (Text):

VOID
MmProbeAndLockPages (
    IN OUT PMDL MemoryDescriptorList,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory.  The
    memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
        (MDL).  The supplied MDL must supply a virtual address, byte offset
        and length field.  The physical page portion of the MDL is updated
        when the pages are locked in memory.

    AccessMode - Supplies the access mode in which to probe the arguments.
        One of KernelMode or UserMode.

    Operation - Supplies the operation type.  One of IoReadAccess,
        IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode.  APC_LEVEL and below for pagable addresses,
    DISPATCH_LEVEL and below for non-pagable addresses.

--*/

{
    PPFN_NUMBER Page;
    MMPTE PteContents;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PVOID Va;
    PVOID EndVa;
    PVOID AlignedVa;
    PMMPFN Pfn1;
    PFN_NUMBER PageFrameIndex;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PFN_NUMBER NumberOfPagesToLock;
    PFN_NUMBER NumberOfPagesSpanned;
    NTSTATUS status;
    NTSTATUS ProbeStatus;
    PETHREAD Thread;
    ULONG SavedState;
    LOGICAL AddressIsPhysical;
    PLIST_ENTRY NextEntry;
    PMI_PHYSICAL_VIEW PhysicalView;
    PCHAR StartVa;
    PVOID CallingAddress;
    PVOID CallersCaller;

#if !defined (_X86_)
    CallingAddress = (PVOID)_ReturnAddress();
    CallersCaller = (PVOID)0;
#endif

#if DBG
    if (MiPrintLockedPages != 0) {
        MiVerifyLockedPageCharges ();
    }
#endif

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);
    AlignedVa = (PVOID)MemoryDescriptorList->StartVa;

    ASSERT ((MemoryDescriptorList->MdlFlags & (MDL_PAGES_LOCKED |
                MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL |
                MDL_PARTIAL | MDL_IO_SPACE)) == 0);

    Va = (PCHAR)AlignedVa + MemoryDescriptorList->ByteOffset;
    StartVa = Va;
    PointerPte = MiGetPteAddress (Va);

    //
    // EndVa is one byte past the end of the buffer.  If AccessMode is not
    // kernel, make sure EndVa is in user space AND the byte count does not
    // cause it to wrap.
    //

    EndVa = (PVOID)((PCHAR)Va + MemoryDescriptorList->ByteCount);

    if ((AccessMode != KernelMode) &&
        ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) {
        *Page = MM_EMPTY_LIST;
        MI_INSTRUMENT_PROBE_RAISES(0);
        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
        return;
    }

    //
    // There is an optimization which could be performed here.  If the
    // operation is for WriteAccess and the complete page is being modified,
    // we can remove the current page, if it is not resident, and substitute
    // a demand zero page.  Note that after analysis by marking the thread
    // and then noting if a page read was done, this rarely occurs.
    //

    MemoryDescriptorList->Process = (PEPROCESS)NULL;

    Thread = PsGetCurrentThread ();

    if (!MI_IS_PHYSICAL_ADDRESS(Va)) {

        AddressIsPhysical = FALSE;
        ProbeStatus = STATUS_SUCCESS;

        NumberOfPagesToLock = COMPUTE_PAGES_SPANNED (Va,
                                        MemoryDescriptorList->ByteCount);
        ASSERT (NumberOfPagesToLock != 0);

        NumberOfPagesSpanned = NumberOfPagesToLock;

        PointerPpe = MiGetPpeAddress (Va);
        PointerPde = MiGetPdeAddress (Va);

        MmSavePageFaultReadAhead (Thread, &SavedState);
        MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1));

        try {
            do {
                *Page = MM_EMPTY_LIST;

                //
                // Make sure the page is resident.
                //

                *(volatile CHAR *)Va;

                if ((Operation != IoReadAccess) &&
                    (Va <= MM_HIGHEST_USER_ADDRESS)) {

                    //
                    // Probe for write access as well.
                    //

                    ProbeForWriteChar ((PCHAR)Va);
                }

                NumberOfPagesToLock -= 1;

                MmSetPageFaultReadAhead (Thread,
                                         (ULONG)(NumberOfPagesToLock - 1));

                Va = (PVOID)(((ULONG_PTR)(PCHAR)Va + PAGE_SIZE) &
                                                    ~(PAGE_SIZE - 1));
                Page += 1;

            } while (Va < EndVa);

            ASSERT (NumberOfPagesToLock == 0);

        } except (EXCEPTION_EXECUTE_HANDLER) {
            ProbeStatus = GetExceptionCode();
        }

        //
        // We may still fault again below but it's generally rare.
        // Restore this thread's normal fault behavior now.
        //

        MmResetPageFaultReadAhead (Thread, SavedState);

        if (ProbeStatus != STATUS_SUCCESS) {
            MI_INSTRUMENT_PROBE_RAISES(1);
            ExRaiseStatus (ProbeStatus);
            return;
        }
    }
    else {
        AddressIsPhysical = TRUE;
        *Page = MM_EMPTY_LIST;
    }

    Va = AlignedVa;
    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);

    //
    // Indicate whether this is a write operation.
    //

    if (Operation != IoReadAccess) {
        MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else {
        MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Acquire the PFN database lock.
    //

    LOCK_PFN2 (OldIrql);

    if (Va <= MM_HIGHEST_USER_ADDRESS) {

        //
        // These are addresses within user space; check to see if the
        // working set size will allow these pages to be locked.
        //

        ASSERT (NumberOfPagesSpanned != 0);

        CurrentProcess = PsGetCurrentProcess ();

        //
        // Check for a transfer to/from a physical VAD - no reference counts
        // may be modified for these pages.
        //

        NextEntry = CurrentProcess->PhysicalVadList.Flink;
        while (NextEntry != &CurrentProcess->PhysicalVadList) {

            PhysicalView = CONTAINING_RECORD(NextEntry,
                                             MI_PHYSICAL_VIEW,
                                             ListEntry);

            if ((PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 0) &&
                (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 0)) {
                NextEntry = NextEntry->Flink;
                continue;
            }

            if (StartVa < PhysicalView->StartVa) {

                if ((PCHAR)EndVa - 1 >= PhysicalView->StartVa) {

                    //
                    // The range encompasses a physical VAD.  This is not
                    // allowed.
                    //

                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES(2);
                    ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                    return;
                }

                NextEntry = NextEntry->Flink;
                continue;
            }

            if (StartVa <= PhysicalView->EndVa) {

                //
                // Ensure that the entire range lies within the VAD.
                //

                if ((PCHAR)EndVa - 1 > PhysicalView->EndVa) {

                    //
                    // The range goes past the end of the VAD - not allowed.
                    //

                    UNLOCK_PFN2 (OldIrql);
                    MI_INSTRUMENT_PROBE_RAISES(3);
                    ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                    return;
                }

                if (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1) {

                    //
                    // All the PTEs must still be checked and reference
                    // counts bumped on the pages.  Just don't charge
                    // against the working set.
                    //

                    NextEntry = NextEntry->Flink;
                    continue;
                }

                //
                // The range lies within a physical VAD.
                //

                if (Operation != IoReadAccess) {

                    //
                    // Ensure the VAD is writable.  Changing individual PTE
                    // protections in a physical VAD is not allowed.
                    //

                    if ((PhysicalView->Vad->u.VadFlags.Protection &
                            MM_READWRITE) == 0) {
                        UNLOCK_PFN2 (OldIrql);
                        MI_INSTRUMENT_PROBE_RAISES(4);
                        ExRaiseStatus (STATUS_ACCESS_VIOLATION);
                        return;
                    }
                }

                //
                // Don't charge page locking for this transfer as it is all
                // physical; just initialize the MDL.  Note the pages do not
                // have to be physically contiguous, so the frames must be
                // extracted from the PTEs.
                //

                MemoryDescriptorList->MdlFlags |= (MDL_PHYSICAL_VIEW |
                                                   MDL_PAGES_LOCKED);
                MemoryDescriptorList->Process = CurrentProcess;

                do {
                    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
                    *Page = PageFrameIndex;
                    Page += 1;
                    PointerPte += 1;
                    Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
                } while (Va < EndVa);

                UNLOCK_PFN2 (OldIrql);
                return;
            }

            NextEntry = NextEntry->Flink;
        }

        CurrentProcess->NumberOfLockedPages += NumberOfPagesSpanned;
        MemoryDescriptorList->Process = CurrentProcess;
    }

    MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;

    do {

        if (AddressIsPhysical == TRUE) {

            //
            // On certain architectures, virtual addresses may be physical
            // and hence have no corresponding PTE.
            //

            PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va);
        }
        else {

#if defined (_WIN64)
            while ((PointerPpe->u.Hard.Valid == 0) ||
                   (PointerPde->u.Hard.Valid == 0) ||
                   (PointerPte->u.Hard.Valid == 0))
#else
            while ((PointerPde->u.Hard.Valid == 0) ||
                   (PointerPte->u.Hard.Valid == 0))
#endif
            {
                //
                // The PDE is not resident: release the PFN lock, touch the
                // page and make it appear.
                //

                UNLOCK_PFN2 (OldIrql);

                MmSetPageFaultReadAhead (Thread, 0);

                status = MmAccessFault (FALSE, Va, KernelMode, (PVOID)0);

                MmResetPageFaultReadAhead (Thread, SavedState);

                if (!NT_SUCCESS(status)) {

                    //
                    // An exception occurred.  Unlock the pages locked
                    // so far.
                    //

failure:
                    if (MmTrackLockedPages == TRUE) {

                        //
                        // Adjust the MDL length so that MmUnlockPages only
                        // processes the part that was completed.
                        //

                        ULONG PagesLocked;

                        PagesLocked = ADDRESS_AND_SIZE_TO_SPAN_PAGES (
                                          StartVa,
                                          MemoryDescriptorList->ByteCount);

#if defined (_X86_)
                        RtlGetCallersAddress (&CallingAddress,
                                              &CallersCaller);
#endif

                        MiAddMdlTracker (MemoryDescriptorList,
                                         CallingAddress,
                                         CallersCaller,
                                         PagesLocked,
                                         0);
                    }

                    MmUnlockPages (MemoryDescriptorList);

                    //
                    // Raise an access violation exception to the caller.
                    //

                    MI_INSTRUMENT_PROBE_RAISES(7);
                    ExRaiseStatus (status);
                    return;
                }

                LOCK_PFN2 (OldIrql);
            }

            PteContents = *PointerPte;

            ASSERT (PteContents.u.Hard.Valid == 1);

            if (Va <= MM_HIGHEST_USER_ADDRESS) {

                if (Operation != IoReadAccess) {

                    if ((PteContents.u.Long & MM_PTE_WRITE_MASK) == 0) {

                        //
                        // The caller has made the page protection more
                        // restrictive; this should never be done once the
                        // request has been issued!  Rather than wading
                        // through the PFN database entry to see if it could
                        // possibly work out, give the caller an access
                        // violation.
                        //

#if DBG
                        DbgPrint ("MmProbeAndLockPages: PTE %p %p changed\n",
                                  PointerPte,
                                  PteContents.u.Long);
                        ASSERT (FALSE);
#endif

                        UNLOCK_PFN2 (OldIrql);
                        status = STATUS_ACCESS_VIOLATION;
                        goto failure;
                    }
                }
            }

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
        }

        if (PageFrameIndex > MmHighestPhysicalPage) {

            //
            // This is an I/O space address; don't allow operations on
            // addresses not in the PFN database.
            //

            MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
        }
        else {

            ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);

            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

#if PFN_CONSISTENCY
            ASSERT (Pfn1->u3.e1.PageTablePage == 0);
#endif

            //
            // Check to make sure this page is not locked down an unusually
            // high number of times.
            //

            if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) {
                UNLOCK_PFN2 (OldIrql);
                ASSERT (FALSE);
                status = STATUS_WORKING_SET_QUOTA;
                goto failure;
            }

            //
            // Check to make sure the systemwide locked pages count is fluid.
            //

            if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 0) {

                //
                // If this page is for paged pool or privileged code/data,
                // then force it in.
                //

                if ((Va > MM_HIGHEST_USER_ADDRESS) &&
                    (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) {
                    MI_INSTRUMENT_PROBE_RAISES(8);
                    goto ok;
                }

                MI_INSTRUMENT_PROBE_RAISES(5);
                UNLOCK_PFN2 (OldIrql);
                status = STATUS_WORKING_SET_QUOTA;
                goto failure;
            }

            //
            // Check to make sure any administrator-desired limit is obeyed.
            //

            if (MmSystemLockPagesCount + 1 >= MmLockPagesLimit) {

                //
                // If this page is for paged pool or privileged code/data,
                // then force it in.
                //

                if ((Va > MM_HIGHEST_USER_ADDRESS) &&
                    (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) {
                    MI_INSTRUMENT_PROBE_RAISES(9);
                    goto ok;
                }

                MI_INSTRUMENT_PROBE_RAISES(6);
                UNLOCK_PFN2 (OldIrql);
                status = STATUS_WORKING_SET_QUOTA;
                goto failure;
            }

ok:
            MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, 0);

            Pfn1->u3.e2.ReferenceCount += 1;
        }

        *Page = PageFrameIndex;
        Page += 1;
        PointerPte += 1;

        if (MiIsPteOnPdeBoundary(PointerPte)) {
            PointerPde += 1;
            if (MiIsPteOnPpeBoundary(PointerPte)) {
                PointerPpe += 1;
            }
        }

        Va = (PVOID)((PCHAR)Va + PAGE_SIZE);

    } while (Va < EndVa);

    UNLOCK_PFN2 (OldIrql);

    if ((MmTrackLockedPages == TRUE) &&
        (AlignedVa <= MM_HIGHEST_USER_ADDRESS)) {

        ASSERT (NumberOfPagesSpanned != 0);

#if defined (_X86_)
        RtlGetCallersAddress (&CallingAddress, &CallersCaller);
#endif

        MiAddMdlTracker (MemoryDescriptorList,
                         CallingAddress,
                         CallersCaller,
                         NumberOfPagesSpanned,
                         1);
    }

    return;
}

Just look for the conditions under which ExRaiseStatus gets called.
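For context: since the routine reports every failure by raising an exception (all the ExRaiseStatus sites above) rather than returning a status, a caller has to wrap it in SEH. A minimal sketch of that calling pattern; LockUserBuffer and its parameters are placeholder names, not anything from the listing:

Code (Text):

#include <ntddk.h>

// Hypothetical helper: builds an MDL for a user buffer and locks it down.
// MmProbeAndLockPages raises on failure -- STATUS_ACCESS_VIOLATION for a
// bad range, a physical-VAD violation or a PTE made more restrictive, and
// STATUS_WORKING_SET_QUOTA on the quota/limit paths -- so the call sits
// inside __try/__except.
NTSTATUS
LockUserBuffer (
    IN PVOID Buffer,
    IN ULONG Length,
    OUT PMDL *OutMdl
    )
{
    PMDL Mdl;

    Mdl = IoAllocateMdl (Buffer, Length, FALSE, FALSE, NULL);
    if (Mdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    __try {
        MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        IoFreeMdl (Mdl);
        return GetExceptionCode ();
    }

    *OutMdl = Mdl;
    return STATUS_SUCCESS;
}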
Great, you crack me up... even if ExRaiseStatus never gets called, that still doesn't mean an access exception won't occur when the page is actually touched.
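Which is exactly why the dereference itself, and not just the probe, has to sit inside the guard. A minimal sketch; ReadUserUlong is a hypothetical helper:

Code (Text):

#include <ntddk.h>

// Hypothetical helper: safely reads one ULONG from a user-mode address.
// ProbeForRead only validates the range and alignment; the page can still
// be invalid (or go away) at access time, so the access is guarded too.
NTSTATUS
ReadUserUlong (
    IN PVOID UserVa,
    OUT PULONG Value
    )
{
    __try {
        ProbeForRead (UserVa, sizeof (ULONG), sizeof (ULONG));
        *Value = *(volatile ULONG *)UserVa;   // the fault, if any, lands here
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        return GetExceptionCode ();           // e.g. STATUS_ACCESS_VIOLATION
    }
    return STATUS_SUCCESS;
}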
As already said, the only thing that will save you is walking the page directory yourself.. one version of the code for x86, another for x64.. and forget about the Alphas.
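For illustration, a minimal sketch of such a walk for classic x86 non-PAE, assuming the conventional self-map (page directory at 0xC0300000, page tables at 0xC0000000); PAE and x64 lay the tables out differently (and newer x64 kernels randomize the PTE base), so each target really does need its own code. No locking is shown, so a real check would still race with the pager:

Code (Text):

#include <ntddk.h>

// Sketch only: classic x86 non-PAE layout assumed.
#define PDE_BASE 0xC0300000UL   // self-mapped page directory
#define PTE_BASE 0xC0000000UL   // self-mapped page tables

BOOLEAN
MyIsAddressValidX86 (           // hypothetical MmIsAddressValid analogue
    IN PVOID Va
    )
{
    ULONG Pde;
    ULONG Pte;

    // Check the PDE first: reading the PTE through the self-map would
    // itself fault if the page table is not present.
    Pde = *(PULONG)(PDE_BASE + (((ULONG)Va >> 22) * 4));
    if ((Pde & 0x1) == 0) {
        return FALSE;           // page table not present
    }
    if (Pde & 0x80) {
        return TRUE;            // 4MB large page: no PTE level exists
    }

    Pte = *(PULONG)(PTE_BASE + (((ULONG)Va >> 12) * 4));
    return (BOOLEAN)(Pte & 0x1); // present bit of the PTE
}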